instance_id (string, 10-57 chars) | patch (string, 261-37.7k chars) | repo (string, 7-53 chars) | base_commit (string, 40 chars) | hints_text (string, 301 distinct values) | test_patch (string, 212-2.22M chars) | problem_statement (string, 23-37.7k chars) | version (string, 1 distinct value) | environment_setup_commit (string, 40 chars) | FAIL_TO_PASS (list, 1-4.94k items) | PASS_TO_PASS (list, 0-7.82k items) | meta (dict) | created_at (string, 25 chars) | license (string, 8 distinct values) | __index_level_0__ (int64, 0-6.41k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
lebrice__SimpleParsing-51
|
diff --git a/simple_parsing/conflicts.py b/simple_parsing/conflicts.py
index 3d7af91..8efd80c 100644
--- a/simple_parsing/conflicts.py
+++ b/simple_parsing/conflicts.py
@@ -119,6 +119,8 @@ class ConflictResolver:
else:
field_wrappers.extend(w.fields)
+ # TODO: #49: Also consider the conflicts with regular argparse arguments.
+
conflicts: Dict[str, List[FieldWrapper]] = defaultdict(list)
for field_wrapper in field_wrappers:
for option_string in field_wrapper.option_strings:
diff --git a/simple_parsing/docstring.py b/simple_parsing/docstring.py
index 891baae..4506791 100644
--- a/simple_parsing/docstring.py
+++ b/simple_parsing/docstring.py
@@ -83,7 +83,7 @@ def get_attribute_docstring(some_dataclass: Type, field_name: str) -> AttributeD
return AttributeDocString()
def _contains_attribute_definition(line_str: str) -> bool:
- """Returns wether or not a line contains a an class attribute definition.
+ """Returns wether or not a line contains a an dataclass field definition.
Arguments:
line_str {str} -- the line content
@@ -93,8 +93,11 @@ def _contains_attribute_definition(line_str: str) -> bool:
"""
parts = line_str.split("#", maxsplit=1)
before_comment = parts[0].strip()
- parts = before_comment.split(":")
+
+ before_first_equal = before_comment.split("=", maxsplit=1)[0]
+ parts = before_first_equal.split(":")
if len(parts) != 2:
+ # For now, I don't think it's possible to have a type annotation contain :
return False
attr_name = parts[0]
attr_type = parts[1]
diff --git a/simple_parsing/wrappers/dataclass_wrapper.py b/simple_parsing/wrappers/dataclass_wrapper.py
index 5cc9785..62b3826 100644
--- a/simple_parsing/wrappers/dataclass_wrapper.py
+++ b/simple_parsing/wrappers/dataclass_wrapper.py
@@ -24,7 +24,8 @@ class DataclassWrapper(Wrapper[Dataclass]):
# super().__init__(dataclass, name)
self.dataclass = dataclass
self._name = name
- self.default = default
+ self.default = default
+ self.prefix = prefix
self.fields: List[FieldWrapper] = []
self._destinations: List[str] = []
@@ -75,7 +76,7 @@ class DataclassWrapper(Wrapper[Dataclass]):
else:
# a normal attribute
- field_wrapper = FieldWrapper(field, parent=self)
+ field_wrapper = FieldWrapper(field, parent=self, prefix=self.prefix)
logger.debug(f"wrapped field at {field_wrapper.dest} has a default value of {field_wrapper.default}")
self.fields.append(field_wrapper)
|
lebrice/SimpleParsing
|
5aa7bb01e12308ddfa68f306c25fb20dfe7ac972
|
diff --git a/test/conftest.py b/test/conftest.py
index d821c60..3fe8689 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -48,6 +48,15 @@ def simple_attribute(request):
logging.debug(f"Attribute type: {some_type}, passed value: '{passed_value}', expected: '{expected_value}'")
return request.param
[email protected]
+def assert_equals_stdout(capsys):
+ def strip(string): return "".join(string.split())
+
+ def should_equal(expected: str, file_path: str=None):
+ out = capsys.readouterr().out
+ assert strip(out) == strip(expected), file_path
+ return should_equal
+
@pytest.fixture(scope="module")
def parser():
diff --git a/test/test_issue_46.py b/test/test_issue_46.py
new file mode 100644
index 0000000..5538fff
--- /dev/null
+++ b/test/test_issue_46.py
@@ -0,0 +1,99 @@
+from dataclasses import dataclass
+import simple_parsing
+import textwrap
+import pytest
+
+
+@dataclass
+class JBuildRelease:
+ id: int
+ url: str
+ docker_image: str
+
+
+def test_issue_46(assert_equals_stdout):
+ parser = simple_parsing.ArgumentParser()
+ parser.add_argument('--run_id', type=str)
+ parser.add_arguments(JBuildRelease, dest="jbuild", prefix="jbuild")
+
+ parser.print_help()
+
+ assert_equals_stdout(textwrap.dedent("""\
+ usage: pytest [-h] [--run_id str] --jbuildid int --jbuildurl str
+ --jbuilddocker_image str
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --run_id str
+
+ JBuildRelease ['jbuild']:
+ JBuildRelease(id:int, url:str, docker_image:str)
+
+ --jbuildid int
+ --jbuildurl str
+ --jbuilddocker_image str
+ """
+ ))
+ from .testutils import raises_missing_required_arg
+ with raises_missing_required_arg():
+ args = parser.parse_args("--id 123 --jbuild.id 456 --jbuild.url bob --jbuild.docker_image foo".split())
+
+
+def test_issue_46_solution2(assert_equals_stdout):
+ # This (now) works:
+ parser = simple_parsing.ArgumentParser(add_dest_to_option_strings=True)
+ parser.add_argument('--run_id', type=str)
+ parser.add_arguments(JBuildRelease, dest="jbuild", prefix="jbuild.")
+
+ parser.print_help()
+ assert_equals_stdout(textwrap.dedent("""\
+ usage: pytest [-h] [--run_id str] --jbuild.id int --jbuild.url str
+ --jbuild.docker_image str
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --run_id str
+
+ JBuildRelease ['jbuild']:
+ JBuildRelease(id:int, url:str, docker_image:str)
+
+ --jbuild.id int
+ --jbuild.url str
+ --jbuild.docker_image str
+ """
+ ))
+
+
[email protected](reason="TODO: Issue #49")
+def test_conflict_with_regular_argparse_arg():
+ # This _should_ work, but it doesn't, adding a new issue for this:
+ # the problem: SimpleParsing doesn't yet detect
+ # conflicts between arguments added the usual way with `add_argument` and those
+ # added through `add_arguments`.
+ parser = simple_parsing.ArgumentParser()
+ parser.add_argument('--id', type=str)
+ parser.add_arguments(JBuildRelease, dest="jbuild")
+ args = parser.parse_args("--id 123 --jbuild.id 456 --jbuild.url bob --jbuild.docker_image foo".split())
+ assert args.id == 123
+ assert args.jbuild.id == 456
+
+
[email protected](reason="TODO: Issue #49")
+def test_workaround():
+ from simple_parsing import mutable_field, ConflictResolution
+ # This also doesn't work, since the prefix is only added to the 'offending'
+ # argument, rather than to all the args in that group.
+ @dataclass
+ class Main:
+ id: int
+ jbuild: JBuildRelease
+
+ parser = simple_parsing.ArgumentParser()
+ parser.add_arguments(Main, "main")
+ args = parser.parse_args("--id 123 --jbuild.id 456 --jbuild.url bob --jbuild.docker_image foo".split())
+ args = args.main
+ assert args.id == 123
+ assert args.jbuild.id == 456
+
+
+
\ No newline at end of file
diff --git a/test/test_issue_48.py b/test/test_issue_48.py
new file mode 100644
index 0000000..4ddc2d8
--- /dev/null
+++ b/test/test_issue_48.py
@@ -0,0 +1,45 @@
+from simple_parsing import ArgumentParser, field
+from dataclasses import dataclass
+
+@dataclass
+class InputArgs:
+ # Start date from which to collect data about base users. Input in iso format (YYYY-MM-DD).
+ # The date is included in the data
+ start_date: str = field(alias="s", metadata={'a':'b'})
+
+ # End date for collecting base users. Input in iso format (YYYY-MM-DD). The date is included in the data.
+ # Should not be before `start_date`
+ end_date: str = field(alias="e")
+
+
+from io import StringIO
+import textwrap
+
+
+def test_issue_48():
+ parser = ArgumentParser("Prepare input data for training")
+ parser.add_arguments(InputArgs, dest="args")
+ s = StringIO()
+ parser.print_help(file=s)
+ s.seek(0)
+ assert s.read().replace(" ", "") == textwrap.dedent("""\
+ usage: Prepare input data for training [-h] -s str -e str
+
+ optional arguments:
+ -h, --help show this help message and exit
+
+ InputArgs ['args']:
+ InputArgs(start_date:str, end_date:str)
+
+ -s str, --start_date str
+ Start date from which to collect data about base
+ users. Input in iso format (YYYY-MM-DD). The date is
+ included in the data (default: None)
+ -e str, --end_date str
+ End date for collecting base users. Input in iso
+ format (YYYY-MM-DD). The date is included in the data.
+ Should not be before `start_date` (default: None)
+ """).replace(" ", "")
+
+
+ # args = parser.parse_args()
\ No newline at end of file
|
Help does not appear if using metadata in fields
**Describe the bug**
Hi, it seems that when the dataclass has metadata defined through `field` (from `simple_parsing`, of course), the help in the form of comments does not get picked up.
**To Reproduce**
```python
from simple_parsing import ArgumentParser, field
from dataclasses import dataclass


@dataclass
class InputArgs:
    # Start date from which to collect data about base users. Input in iso format (YYYY-MM-DD).
    # The date is included in the data
    start_date: str = field(alias="s", metadata={'a': 'b'})

    # End date for collecting base users. Input in iso format (YYYY-MM-DD). The date is included in the data.
    # Should not be before `start_date`
    end_date: str = field(alias="e")


def parse_args() -> InputArgs:
    parser = ArgumentParser("Prepare input data for training")
    parser.add_arguments(InputArgs, dest="args")
    args = parser.parse_args()
    return args.args


if __name__ == "__main__":
    parsed_args = parse_args()
    print(parsed_args)
```
`python test.py -h`
## Output
```
usage: Prepare input data for training [-h] -s str -e str

optional arguments:
  -h, --help            show this help message and exit

InputArgs ['args']:
  InputArgs(start_date: str, end_date: str)

  -s str, --start_date str
  -e str, --end_date str
                        Start date from which to collect data about base
                        users. Input in iso format (YYYY-MM-DD). The date is
                        included in the data End date for collecting base
                        users. Input in iso format (YYYY-MM-DD). The date is
                        included in the data. Should not be before
                        `start_date` (default: None)
```
## Expected output:
```
usage: Prepare input data for training [-h] -s str -e str

optional arguments:
  -h, --help            show this help message and exit

InputArgs ['args']:
  InputArgs(start_date: str, end_date: str)

  -s str, --start_date str
                        Start date from which to collect data about base
                        users. Input in iso format (YYYY-MM-DD). The date is
                        included in the data (default: None)
  -e str, --end_date str
                        End date for collecting base users. Input in iso
                        format (YYYY-MM-DD). The date is included in the data.
                        Should not be before `start_date` (default: None)
```
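For context, here is a minimal, self-contained sketch of the comment-detection heuristic after the `docstring.py` fix above. It is illustrative only: the function name is hypothetical, and the real `_contains_attribute_definition` does some further validation of the name and type parts.

```python
def contains_field_definition(line_str: str) -> bool:
    # Strip any trailing comment, then drop everything after the first "=",
    # so a default like field(alias="s", metadata={'a': 'b'}) cannot
    # contribute stray ":" characters. What remains must split into exactly
    # one "name: type" pair.
    before_comment = line_str.split("#", maxsplit=1)[0].strip()
    before_first_equal = before_comment.split("=", maxsplit=1)[0]
    parts = before_first_equal.split(":")
    if len(parts) != 2:
        return False
    attr_name, attr_type = parts
    return bool(attr_name.strip()) and bool(attr_type.strip())


# The metadata dict is what broke the old check: without the "=" split, its ":"
# made len(parts) != 2, so the start_date line was not recognized as a field
# definition and its comments got merged into the next field's help text.
assert contains_field_definition("start_date: str = field(alias='s', metadata={'a': 'b'})")
assert not contains_field_definition("# this is just a comment")
```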
|
0.0
|
5aa7bb01e12308ddfa68f306c25fb20dfe7ac972
|
[
"test/test_issue_46.py::test_issue_46",
"test/test_issue_46.py::test_issue_46_solution2",
"test/test_issue_48.py::test_issue_48"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-03 02:12:15+00:00
|
mit
| 3,535 |
|
lebrice__SimpleParsing-99
|
diff --git a/simple_parsing/utils.py b/simple_parsing/utils.py
index 56f41da..88b14ff 100644
--- a/simple_parsing/utils.py
+++ b/simple_parsing/utils.py
@@ -187,8 +187,8 @@ def get_item_type(container_type: Type[Container[T]]) -> T:
def get_argparse_type_for_container(
- container_type: Type,
-) -> Union[Type, Callable[[str], bool]]:
+ container_type: Type[Container[T]],
+) -> Union[Type[T], Callable[[str], T]]:
"""Gets the argparse 'type' option to be used for a given container type.
When an annotation is present, the 'type' option of argparse is set to that type.
if not, then the default value of 'str' is returned.
@@ -208,6 +208,12 @@ def get_argparse_type_for_container(
return str2bool
if T is Any:
return str
+ if is_enum(T):
+ # IDEA: Fix this weirdness by first moving all this weird parsing logic into the
+ # field wrapper class, and then split it up into different subclasses of FieldWrapper,
+ # each for a different type of field.
+ from simple_parsing.wrappers.field_parsing import parse_enum
+ return parse_enum(T)
return T
diff --git a/simple_parsing/wrappers/field_parsing.py b/simple_parsing/wrappers/field_parsing.py
index 05bf312..09baf37 100644
--- a/simple_parsing/wrappers/field_parsing.py
+++ b/simple_parsing/wrappers/field_parsing.py
@@ -3,6 +3,8 @@
Somewhat analogous to the 'parse' function in the
`helpers.serialization.parsing` package.
"""
+import enum
+import functools
from dataclasses import Field
from logging import getLogger
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union
@@ -11,6 +13,7 @@ import typing_inspect as tpi
from ..utils import (
get_type_arguments,
+ is_enum,
is_homogeneous_tuple_type,
is_list,
is_tuple,
@@ -122,7 +125,10 @@ def get_parsing_fn(t: Type[T]) -> Callable[[Any], T]:
logger.debug(f"parsing a Union field: {t}")
args = get_type_arguments(t)
return parse_union(*args)
-
+
+ elif is_enum(t):
+ logger.debug(f"Parsing an Enum field of type {t}")
+ return parse_enum(t)
# import typing_inspect as tpi
# from .serializable import get_dataclass_type_from_forward_ref, Serializable
@@ -237,7 +243,6 @@ def parse_tuple(
return parsed_value
- _parse_tuple.__name__ = "BOB"
return _parse_tuple
@@ -256,3 +261,36 @@ def no_op(v: T) -> T:
[type]: The value unchanged.
"""
return v
+
+
+E = TypeVar("E", bound=enum.Enum)
+
+
+def parse_enum(enum_type: Type[E]) -> Callable[[str], E]:
+ """Returns a function to use to parse an enum of type `enum_type` from a string.
+
+ Parameters
+ ----------
+ - enum_type : Type[enum.Enum]
+
+ The type of enum to create a parsing function for.
+
+ Returns
+ -------
+ Callable[[str], E]
+ A function that parses an enum object of type `enum_type` from a string.
+ """
+ # Save the function, since the same type will always be parsed the same way. Also
+ # makes testing easier.
+ if enum_type in _parsing_fns:
+ return _parsing_fns[enum_type]
+
+ # NOTE: Use `functools.wraps` so that fn name is the enum, so the metavar shows up
+ # just like the enum on the command-line, and not like
+ # "(...).parse_enum.<locals>._parse_enum" or something.
+ @functools.wraps(enum_type)
+ def _parse_enum(v: str) -> E:
+ return enum_type[v]
+ _parsing_fns[enum_type] = _parse_enum
+ return _parse_enum
+
\ No newline at end of file
|
lebrice/SimpleParsing
|
9d4e4c89991a0dc3220723a203a8a8ae3ede9cca
|
diff --git a/test/test_base.py b/test/test_base.py
index fb12434..2e36962 100644
--- a/test/test_base.py
+++ b/test/test_base.py
@@ -244,3 +244,76 @@ def test_using_a_Type_type():
foo = OtherFoo.setup("")
assert foo.a == Extended()
+
+
+def test_issue62():
+ import enum
+ from simple_parsing.helpers import list_field
+ from typing import List
+ parser = ArgumentParser()
+
+ class Color(enum.Enum):
+ RED = "red"
+ ORANGE = "orange"
+ BLUE = "blue"
+
+ class Temperature(enum.Enum):
+ HOT = 1
+ WARM = 0
+ COLD = -1
+ MONTREAL = -35
+
+ @dataclass
+ class MyPreferences(TestSetup):
+ """You can use Enums"""
+
+ color: Color = Color.BLUE # my favorite colour
+ # a list of colors
+ color_list: List[Color] = list_field(Color.ORANGE)
+ # Some floats.
+ floats: List[float] = list_field(1.1, 2.2, 3.3)
+ # pick a temperature
+ temp: Temperature = Temperature.WARM
+ # a list of temperatures
+ temp_list: List[Temperature] = list_field(Temperature.COLD, Temperature.WARM)
+
+ parser.add_arguments(MyPreferences, "my_preferences")
+ assert MyPreferences.setup(
+ "--color ORANGE --color_list RED BLUE --temp MONTREAL"
+ ) == MyPreferences(
+ color=Color.ORANGE,
+ color_list=[Color.RED, Color.BLUE],
+ temp=Temperature.MONTREAL,
+ temp_list=[Temperature.COLD, Temperature.WARM],
+ )
+ assert MyPreferences.setup(
+ "--color ORANGE --color_list RED BLUE --temp MONTREAL --temp_list MONTREAL HOT"
+ ) == MyPreferences(
+ color=Color.ORANGE,
+ color_list=[Color.RED, Color.BLUE],
+ temp=Temperature.MONTREAL,
+ temp_list=[Temperature.MONTREAL, Temperature.HOT],
+ )
+ assert Temperature["MONTREAL"] is Temperature.MONTREAL
+ assert Temperature(-35) is Temperature.MONTREAL
+
+ # NOTE: This kind of test (comparing the help str) is unreliable, changes depending on the
+ # python version.
+ # assert MyPreferences.get_help_text() == textwrap.dedent("""\
+ # usage: pytest [-h] [--color Color] [--color_list Color] [--floats float]
+ # [--temp Temperature] [--temp_list Temperature]
+
+ # optional arguments:
+ # -h, --help show this help message and exit
+
+ # test_issue62.<locals>.MyPreferences ['my_preferences']:
+ # You can use Enums
+
+ # --color Color my favorite colour (default: BLUE)
+ # --color_list Color a list of colors (default: [<Color.ORANGE: 'orange'>])
+ # --floats float Some floats. (default: [1.1, 2.2, 3.3])
+ # --temp Temperature pick a temperature (default: WARM)
+ # --temp_list Temperature
+ # a list of temperatures (default: [<Temperature.COLD:
+ # -1>, <Temperature.WARM: 0>])
+ # """)
diff --git a/test/test_fields.py b/test/test_fields.py
index a026d3c..9056766 100644
--- a/test/test_fields.py
+++ b/test/test_fields.py
@@ -46,14 +46,14 @@ class Color(Enum):
from simple_parsing.wrappers.field_parsing import get_parsing_fn
from simple_parsing.utils import str2bool
-
+from simple_parsing.wrappers.field_parsing import parse_enum
@pytest.mark.parametrize(
"annotation, expected_options",
[
(Tuple[int, int], dict(nargs=2, type=int)),
- (Tuple[Color, Color], dict(nargs=2, type=Color)),
- (Optional[Tuple[Color, Color]], dict(nargs=2, type=Color, required=False)),
+ (Tuple[Color, Color], dict(nargs=2, type=parse_enum(Color))),
+ (Optional[Tuple[Color, Color]], dict(nargs=2, type=parse_enum(Color), required=False)),
(List[str], dict(nargs="*", type=str)),
(Optional[List[str]], dict(nargs="*", type=str, required=False)),
(Optional[str], dict(nargs="?", type=str, required=False)),
|
Lists of enums are parsed by value on the command line
**Describe the bug**
While enums seem to be parsed by member name, a list of them seems to be parsed by member value, but only if the value is a string, not an integer.
**To Reproduce**
```python
import enum
from typing import *
from dataclasses import dataclass

from simple_parsing import ArgumentParser
from simple_parsing.helpers import list_field

parser = ArgumentParser()


class Color(enum.Enum):
    RED = "RED"
    ORANGE = "ORANGE"
    BLUE = "BLUE"


class Temperature(enum.Enum):
    HOT = 1
    WARM = 0
    COLD = -1
    MONTREAL = -35


@dataclass
class MyPreferences:
    """You can use Enums"""

    color: Color = Color.BLUE  # my favorite colour
    # a list of colors
    color_list: List[Color] = list_field(Color.ORANGE)
    # pick a temperature
    temp: Temperature = Temperature.WARM
    # a list of temperatures
    temp_list: List[Temperature] = list_field(Temperature.COLD, Temperature.WARM)


parser.add_arguments(MyPreferences, "my_preferences")
args = parser.parse_args()
prefs: MyPreferences = args.my_preferences
print(prefs)
```
**Expected behavior**
```console
$ python issue.py --color ORANGE --color_list RED BLUE --temp MONTREAL
MyPreferences(color=<Color.ORANGE: 'ORANGE'>, color_list=[<Color.RED: 'RED'>, <Color.BLUE: 'BLUE'>], temp=<Temperature.MONTREAL: -35>, temp_list=[<Temperature.COLD: -1>, <Temperature.WARM: 0>])
$ python issue.py --color ORANGE --color_list RED BLUE --temp MONTREAL --temp_list MONTREAL
MyPreferences(color=<Color.ORANGE: 'ORANGE'>, color_list=[<Color.RED: 'RED'>, <Color.BLUE: 'BLUE'>], temp=<Temperature.MONTREAL: -35>, temp_list=[<Temperature.MONTREAL: -35>])
```
**Actual behavior**
```console
$ python issue.py --color ORANGE --color_list RED BLUE --temp MONTREAL --temp_list MONTREAL
usage: enums.py [-h] [--color Color] [--color_list Color] [--temp Temperature]
                [--temp_list Temperature]
enums.py: error: argument --temp_list: invalid Temperature value: 'MONTREAL'
```
**Desktop (please complete the following information):**
- Version 0.0.15.post1
- Python version: 3.9.0
**Additional context**
If I add the proper encoders and decoders, I can load and save both kinds of enum lists from .json files just fine:
```
@encode.register
def encode_Color(obj: Color) -> str:
    return obj.name

register_decoding_fn(Color, Color)


@encode.register
def encode_Temperature(obj: Temperature) -> str:
    return obj.name

register_decoding_fn(Temperature, lambda temp: Temperature[temp])
```
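For reference, a minimal standalone sketch of the by-name enum parsing that the patch above introduces (mirroring its `parse_enum`, minus the caching of parsing functions):

```python
import enum
import functools
from typing import Callable, Type, TypeVar

E = TypeVar("E", bound=enum.Enum)


def parse_enum(enum_type: Type[E]) -> Callable[[str], E]:
    # Look members up by *name* (Temperature["MONTREAL"]) rather than calling
    # the enum by *value* (Temperature("MONTREAL")), so enums with non-string
    # values also parse from the command line.
    @functools.wraps(enum_type)  # makes argparse's metavar show the enum name
    def _parse_enum(v: str) -> E:
        return enum_type[v]
    return _parse_enum


class Temperature(enum.Enum):
    HOT = 1
    MONTREAL = -35


assert parse_enum(Temperature)("MONTREAL") is Temperature.MONTREAL
```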
|
0.0
|
9d4e4c89991a0dc3220723a203a8a8ae3ede9cca
|
[
"test/test_base.py::test_basic_required_argument[simple_attribute0]",
"test/test_base.py::test_basic_required_argument[simple_attribute1]",
"test/test_base.py::test_basic_required_argument[simple_attribute2]",
"test/test_base.py::test_basic_required_argument[simple_attribute3]",
"test/test_base.py::test_basic_required_argument[simple_attribute4]",
"test/test_base.py::test_basic_required_argument[simple_attribute5]",
"test/test_base.py::test_basic_required_argument[simple_attribute6]",
"test/test_base.py::test_basic_required_argument[simple_attribute7]",
"test/test_base.py::test_basic_required_argument[simple_attribute8]",
"test/test_base.py::test_basic_required_argument[simple_attribute9]",
"test/test_base.py::test_basic_required_argument[simple_attribute10]",
"test/test_base.py::test_basic_required_argument[simple_attribute11]",
"test/test_base.py::test_basic_required_argument[simple_attribute12]",
"test/test_base.py::test_basic_required_argument[simple_attribute13]",
"test/test_base.py::test_basic_required_argument[simple_attribute14]",
"test/test_base.py::test_basic_required_argument[simple_attribute15]",
"test/test_base.py::test_basic_required_argument[simple_attribute16]",
"test/test_base.py::test_basic_required_argument[simple_attribute17]",
"test/test_base.py::test_basic_required_argument[simple_attribute18]",
"test/test_base.py::test_basic_required_argument[simple_attribute19]",
"test/test_base.py::test_basic_required_argument[simple_attribute20]",
"test/test_base.py::test_basic_required_argument[simple_attribute21]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute0]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute1]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute2]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute3]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute4]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute5]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute6]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute7]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute8]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute9]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute10]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute11]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute12]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute13]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute14]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute15]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute16]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute17]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute18]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute19]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute20]",
"test/test_base.py::test_not_passing_required_argument_raises_error[simple_attribute21]",
"test/test_base.py::test_basic_optional_argument[simple_attribute0]",
"test/test_base.py::test_basic_optional_argument[simple_attribute1]",
"test/test_base.py::test_basic_optional_argument[simple_attribute2]",
"test/test_base.py::test_basic_optional_argument[simple_attribute3]",
"test/test_base.py::test_basic_optional_argument[simple_attribute4]",
"test/test_base.py::test_basic_optional_argument[simple_attribute5]",
"test/test_base.py::test_basic_optional_argument[simple_attribute6]",
"test/test_base.py::test_basic_optional_argument[simple_attribute7]",
"test/test_base.py::test_basic_optional_argument[simple_attribute8]",
"test/test_base.py::test_basic_optional_argument[simple_attribute9]",
"test/test_base.py::test_basic_optional_argument[simple_attribute10]",
"test/test_base.py::test_basic_optional_argument[simple_attribute11]",
"test/test_base.py::test_basic_optional_argument[simple_attribute12]",
"test/test_base.py::test_basic_optional_argument[simple_attribute13]",
"test/test_base.py::test_basic_optional_argument[simple_attribute14]",
"test/test_base.py::test_basic_optional_argument[simple_attribute15]",
"test/test_base.py::test_basic_optional_argument[simple_attribute16]",
"test/test_base.py::test_basic_optional_argument[simple_attribute17]",
"test/test_base.py::test_basic_optional_argument[simple_attribute18]",
"test/test_base.py::test_basic_optional_argument[simple_attribute19]",
"test/test_base.py::test_basic_optional_argument[simple_attribute20]",
"test/test_base.py::test_basic_optional_argument[simple_attribute21]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute0]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute1]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute2]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute3]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute4]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute5]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute6]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute7]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute8]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute9]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute10]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute11]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute12]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute13]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute14]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute15]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute16]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute17]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute18]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute19]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute20]",
"test/test_base.py::test_works_fine_with_other_argparse_arguments[simple_attribute21]",
"test/test_base.py::test_arg_value_is_set_when_args_are_provided[int-0-1234]",
"test/test_base.py::test_arg_value_is_set_when_args_are_provided[float-0.0-123.456]",
"test/test_base.py::test_arg_value_is_set_when_args_are_provided[str--bobby_boots]",
"test/test_base.py::test_arg_value_is_set_when_args_are_provided[bool-False-True]",
"test/test_base.py::test_not_providing_required_argument_throws_error[int]",
"test/test_base.py::test_not_providing_required_argument_throws_error[float]",
"test/test_base.py::test_not_providing_required_argument_throws_error[str]",
"test/test_base.py::test_not_providing_required_argument_throws_error[bool]",
"test/test_base.py::test_not_providing_required_argument_name_but_no_value_throws_error[int]",
"test/test_base.py::test_not_providing_required_argument_name_but_no_value_throws_error[float]",
"test/test_base.py::test_not_providing_required_argument_name_but_no_value_throws_error[str]",
"test/test_base.py::test_parse_base_simple_works",
"test/test_base.py::test_parse_multiple_works",
"test/test_base.py::test_parse_multiple_inconsistent_throws_error",
"test/test_base.py::test_help_displays_class_docstring_text",
"test/test_base.py::test_enum_attributes_work",
"test/test_base.py::test_passing_default_value[simple_attribute0]",
"test/test_base.py::test_passing_default_value[simple_attribute1]",
"test/test_base.py::test_passing_default_value[simple_attribute2]",
"test/test_base.py::test_passing_default_value[simple_attribute3]",
"test/test_base.py::test_passing_default_value[simple_attribute4]",
"test/test_base.py::test_passing_default_value[simple_attribute5]",
"test/test_base.py::test_passing_default_value[simple_attribute6]",
"test/test_base.py::test_passing_default_value[simple_attribute7]",
"test/test_base.py::test_passing_default_value[simple_attribute8]",
"test/test_base.py::test_passing_default_value[simple_attribute9]",
"test/test_base.py::test_passing_default_value[simple_attribute10]",
"test/test_base.py::test_passing_default_value[simple_attribute11]",
"test/test_base.py::test_passing_default_value[simple_attribute12]",
"test/test_base.py::test_passing_default_value[simple_attribute13]",
"test/test_base.py::test_passing_default_value[simple_attribute14]",
"test/test_base.py::test_passing_default_value[simple_attribute15]",
"test/test_base.py::test_passing_default_value[simple_attribute16]",
"test/test_base.py::test_passing_default_value[simple_attribute17]",
"test/test_base.py::test_passing_default_value[simple_attribute18]",
"test/test_base.py::test_passing_default_value[simple_attribute19]",
"test/test_base.py::test_passing_default_value[simple_attribute20]",
"test/test_base.py::test_passing_default_value[simple_attribute21]",
"test/test_base.py::test_parsing_twice",
"test/test_base.py::test_passing_instance",
"test/test_base.py::test_using_a_Type_type",
"test/test_base.py::test_issue62",
"test/test_fields.py::test_cmd_false_doesnt_create_conflicts",
"test/test_fields.py::test_generated_options_from_annotation[annotation0-expected_options0]",
"test/test_fields.py::test_generated_options_from_annotation[annotation1-expected_options1]",
"test/test_fields.py::test_generated_options_from_annotation[annotation2-expected_options2]",
"test/test_fields.py::test_generated_options_from_annotation[annotation3-expected_options3]",
"test/test_fields.py::test_generated_options_from_annotation[annotation4-expected_options4]",
"test/test_fields.py::test_generated_options_from_annotation[annotation5-expected_options5]",
"test/test_fields.py::test_generated_options_from_annotation[annotation6-expected_options6]"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-11-19 00:53:43+00:00
|
mit
| 3,536 |
|
lektor__lektor-1048
|
diff --git a/frontend/js/widgets/BooleanInputWidget.tsx b/frontend/js/widgets/BooleanInputWidget.tsx
index be38b38..e27c388 100644
--- a/frontend/js/widgets/BooleanInputWidget.tsx
+++ b/frontend/js/widgets/BooleanInputWidget.tsx
@@ -1,38 +1,53 @@
-import React from "react";
+import React, { KeyboardEvent } from "react";
import { WidgetProps } from "./types";
-import { trans_obj } from "../i18n";
+import { trans, trans_obj } from "../i18n";
-const isTrue = (value?: string) =>
+const isTrue = (value?: string | null) =>
value === "true" || value === "yes" || value === "1";
+const isDeleteOrBackspace = (event: KeyboardEvent<HTMLInputElement>) => {
+ if (event.altKey || event.metaKey || (event.shiftKey && !event.ctrlKey)) {
+ // If modifiers other than <ctrl>, <ctrl>-<shift>, or none are used, ignore
+ return false;
+ }
+ return event.key === "Delete" || event.key === "Backspace";
+};
+
export function BooleanInputWidget({
type,
value,
disabled,
placeholder,
onChange,
-}: WidgetProps): JSX.Element {
+}: WidgetProps<string | null>): JSX.Element {
return (
<div className="form-check">
<label className="form-check-label">
<input
type="checkbox"
- className="form-check-input"
+ className={[
+ "form-check-input",
+ `form-check-input--default-${placeholder ? "true" : "false"}`,
+ ].join(" ")}
disabled={disabled}
ref={(checkbox) => {
if (checkbox) {
- if (!value && placeholder) {
- checkbox.indeterminate = true;
- checkbox.checked = isTrue(placeholder);
- } else {
- checkbox.indeterminate = false;
- }
+ // wierdly, `indeterminate` can not be set via HTML attribute
+ checkbox.indeterminate = !value;
}
}}
- checked={isTrue(value)}
+ checked={isTrue(value || placeholder)}
onChange={(ev) => {
onChange(ev.target.checked ? "yes" : "no");
}}
+ onKeyDown={(ev) => {
+ if (isDeleteOrBackspace(ev)) {
+ ev.preventDefault();
+ ev.stopPropagation();
+ onChange(null); // set value back to unset
+ }
+ }}
+ title={trans("TRISTATE_CHECKBOX_TOOLTIP")}
/>
{type.checkbox_label_i18n ? trans_obj(type.checkbox_label_i18n) : null}
</label>
diff --git a/frontend/scss/forms.scss b/frontend/scss/forms.scss
index 5c8282e..e5de7bf 100644
--- a/frontend/scss/forms.scss
+++ b/frontend/scss/forms.scss
@@ -107,3 +107,28 @@ div.flow-block {
.spacing-widget {
height: 30px;
}
+
+// Visually distinguish between checked and unchecked indeterminate checkbox
+// (Checkboxes, as far as the browser is concerned only have three states.
+// Indeterminate checkboxes are not :checked, but we still set the default value
+// for the field in the checked attribute and would like to be able to tell
+// the difference between those that default to true vs false.)
+.form-check-input[type="checkbox"]:indeterminate {
+ border-color: rgba(#000, 0.25);
+ background-color: scale-color(
+ $form-check-input-checked-bg-color,
+ $alpha: -70%,
+ $saturation: -30%
+ );
+ // Slash, rather than bootstrap's horizontal bar, seems more intuitive indication
+ // of indeterminate state.
+ background-image: escape-svg(url("data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'><path fill='none' stroke='#{$form-check-input-indeterminate-color}' stroke-linecap='round' stroke-width='3' d='M6 6 l8 8'/></svg>"));
+
+ &.form-check-input--default-true {
+ background-color: scale-color(
+ $form-check-input-checked-bg-color,
+ $alpha: -25%
+ );
+ border-color: $form-check-input-checked-border-color;
+ }
+}
diff --git a/lektor/admin/modules/serve.py b/lektor/admin/modules/serve.py
index 2ba9a1b..016862c 100644
--- a/lektor/admin/modules/serve.py
+++ b/lektor/admin/modules/serve.py
@@ -86,6 +86,14 @@ def _checked_send_file(filename: Filename, mimetype: Optional[str] = None) -> Re
return resp
+class HiddenRecordException(NotFound):
+ """Exception thrown when a request is made for a hidden page."""
+
+ def __init__(self, source: SourceObject) -> None:
+ super().__init__(description=f"Record is hidden: {source!r}")
+ self.source = source
+
+
class ArtifactServer:
"""Resolve url_path to a Lektor source object, build it, serve the result.
@@ -104,7 +112,7 @@ class ArtifactServer:
Raise NotFound if resolution fails.
"""
- source = self.lektor_ctx.pad.resolve_url_path(url_path)
+ source = self.lektor_ctx.pad.resolve_url_path(url_path, include_invisible=True)
if source is None:
abort(404)
return source
@@ -180,6 +188,9 @@ class ArtifactServer:
):
return append_slash_redirect(request.environ)
+ if source.is_hidden:
+ raise HiddenRecordException(source)
+
if isinstance(source, Directory):
# Special case for asset directories: resolve to index.html
source = self.resolve_directory_index(source)
@@ -230,6 +241,8 @@ def serve_file(path: str) -> Response:
def serve_artifact_or_file(path: str) -> Response:
try:
return serve_artifact(path)
+ except HiddenRecordException:
+ raise
except NotFound:
return serve_file(path)
@@ -238,5 +251,5 @@ def serve_artifact_or_file(path: str) -> Response:
def serve_error_page(error: NotFound) -> ResponseReturnValue:
try:
return serve_artifact("404.html"), 404
- except NotFound as e:
- return e
+ except NotFound:
+ return error
diff --git a/lektor/build_programs.py b/lektor/build_programs.py
index cfffb74..63a8b01 100644
--- a/lektor/build_programs.py
+++ b/lektor/build_programs.py
@@ -108,7 +108,7 @@ class BuildProgram:
_build(artifact, build_func)
# If we failed anywhere we want to mark *all* artifacts as dirty.
- # This means that if a sub-artifact failes we also rebuild the
+ # This means that if a sub-artifact fails we also rebuild the
# parent next time around.
if failures:
for artifact in self.artifacts:
@@ -217,6 +217,7 @@ class PageBuildProgram(BuildProgram):
# If pagination is disabled, all children and attachments are linked
# to this page.
all_children = self.source.children.include_undiscoverable(True)
+ all_children = all_children.include_hidden(True)
if pagination_enabled:
if self.source.page_num is None:
child_sources.append(self._iter_paginated_children())
diff --git a/lektor/builder.py b/lektor/builder.py
index cf82d34..72aea0c 100644
--- a/lektor/builder.py
+++ b/lektor/builder.py
@@ -387,47 +387,60 @@ class BuildState:
finally:
con.close()
+ def iter_existing_artifacts(self):
+ """Scan output directory for artifacts.
+
+ Returns an iterable of the artifact_names for artifacts found.
+ """
+ is_ignored = self.env.is_ignored_artifact
+
+ def _unignored(filenames):
+ return filter(lambda fn: not is_ignored(fn), filenames)
+
+ dst = self.builder.destination_path
+ for dirpath, dirnames, filenames in os.walk(dst):
+ dirnames[:] = _unignored(dirnames)
+ for filename in _unignored(filenames):
+ full_path = os.path.join(dst, dirpath, filename)
+ yield self.artifact_name_from_destination_filename(full_path)
+
def iter_unreferenced_artifacts(self, all=False):
"""Finds all unreferenced artifacts in the build folder and yields
them.
"""
- dst = os.path.join(self.builder.destination_path)
+ if all:
+ return self.iter_existing_artifacts()
con = self.connect_to_database()
cur = con.cursor()
- try:
- for dirpath, dirnames, filenames in os.walk(dst):
- dirnames[:] = [
- x for x in dirnames if not self.env.is_ignored_artifact(x)
- ]
- for filename in filenames:
- if self.env.is_ignored_artifact(filename):
- continue
- full_path = os.path.join(dst, dirpath, filename)
- artifact_name = self.artifact_name_from_destination_filename(
- full_path
- )
-
- if all:
- yield artifact_name
- continue
+ def _is_unreferenced(artifact_name):
+ # Check whether any of the primary sources for the artifact
+ # exist and — if the source can be resolved to a record —
+ # correspond to non-hidden records.
+ cur.execute(
+ """
+ SELECT DISTINCT source, path, alt
+ FROM artifacts LEFT JOIN source_info USING(source)
+ WHERE artifact = ?
+ AND is_primary_source""",
+ [artifact_name],
+ )
+ for source, path, alt in cur.fetchall():
+ if self.get_file_info(source).exists:
+ if path is None:
+ return False # no record to check
+ record = self.pad.get(path, alt)
+ if record is None:
+ # I'm not sure this should happen, but be safe
+ return False
+ if record.is_visible:
+ return False
+ # no sources exist, or those that do belong to hidden records
+ return True
- cur.execute(
- """
- select source from artifacts
- where artifact = ?
- and is_primary_source""",
- [artifact_name],
- )
- sources = set(x[0] for x in cur.fetchall())
-
- # It's a bad artifact if there are no primary sources
- # or the primary sources do not exist.
- if not sources or not any(
- self.get_file_info(x).exists for x in sources
- ):
- yield artifact_name
+ try:
+ yield from filter(_is_unreferenced, self.iter_existing_artifacts())
finally:
con.close()
diff --git a/lektor/db.py b/lektor/db.py
index 6c55e53..6cfcea9 100644
--- a/lektor/db.py
+++ b/lektor/db.py
@@ -620,7 +620,11 @@ class Page(Record):
# When we resolve URLs we also want to be able to explicitly
# target undiscoverable pages. Those who know the URL are
# rewarded.
- q = self.children.include_undiscoverable(True)
+
+ # We also want to resolve hidden children
+ # here. Pad.resolve_url_path() is where the check for hidden
+ # records is done.
+ q = self.children.include_undiscoverable(True).include_hidden(True)
for idx in range(len(url_path)):
piece = "/".join(url_path[: idx + 1])
diff --git a/lektor/translations/en.json b/lektor/translations/en.json
index c863fcf..9a860cc 100644
--- a/lektor/translations/en.json
+++ b/lektor/translations/en.json
@@ -124,5 +124,6 @@
"OPEN_OTHER_PROJECT": "Open other Project",
"OPEN_OTHER_PROJECT_QUESTION": "Opening this file requires opening another project (%s). The current project will be closed. Do you want to continue?",
"COLLAPSE": "Collapse",
- "EXPAND": "Expand"
+ "EXPAND": "Expand",
+ "TRISTATE_CHECKBOX_TOOLTIP": "Tri-state checkbox: use <del> or <backspace> to revert to unset"
}
|
lektor/lektor
|
7eff923133ced682a5658d18ef579564589b8bfe
|
diff --git a/tests/admin/test_serve.py b/tests/admin/test_serve.py
index b4734d8..c779506 100644
--- a/tests/admin/test_serve.py
+++ b/tests/admin/test_serve.py
@@ -222,6 +222,7 @@ class TestArtifactServer:
("blog", "/blog@1"),
("blog/2015/12/post1/", "/blog/post1"),
("de/blog/", "/blog@1"),
+ ("extra/container", "/extra/container"),
],
)
def test_resolve_url_path(self, a_s, url_path, source_path):
@@ -359,6 +360,10 @@ class TestArtifactServer:
("extra/build-failure/", "text/html", True), # Failing build
# Asset file
("static/demo.css", "text/css", False),
+ # Page with hidden parent
+ ("extra/container/a/", "text/html", True),
+ # Asset file with hidden parent
+ ("extra/container/hello.txt", "text/plain", False),
# Asset directories with index.{htm,html}
("dir_with_index_html/", "text/html", False),
("dir_with_index_htm/", "text/html", False),
@@ -380,6 +385,7 @@ class TestArtifactServer:
"url_path",
[
"[email protected]", # sub-artifact — no resolvable to source object
+ "extra/container/", # hidden page
"static/", # Asset directory without index.html
"dir_with_index_html/index.htm",
"dir_with_index_htm/index.html",
@@ -449,6 +455,7 @@ def test_serve_file_dir_handling(output_path, app, index_html):
"path",
[
"missing",
+ "example/container", # hidden page
"adir/", # no adir/index.{html,htm} exists
"adir/../top.txt", # ".." not allowed in path
"../adir/index.txt", # points outside of output_path
@@ -472,17 +479,19 @@ def test_serve_file_raises_404(output_path, app, path):
@pytest.mark.parametrize(
- "path, status, mimetype",
+ "path, status, mimetype, content",
[
- ("/hello.txt", 200, "text/plain"),
- ("/missing/", 404, "text/html"),
+ ("/hello.txt", 200, "text/plain", "Hello I am an Attachment"),
+ ("/missing/", 404, "text/html", "The requested URL was not found"),
+ ("/extra/container/", 404, "text/html", "Record is hidden"),
],
)
-def test_serve(app, path, status, mimetype):
+def test_serve(app, path, status, mimetype, content):
with app.test_client() as c:
resp = c.get(path)
assert resp.status_code == status
assert resp.mimetype == mimetype
+ assert content in resp.get_data(True)
def test_serve_from_file(app, output_path):
diff --git a/tests/test_builder.py b/tests/test_builder.py
index 87425a1..5310c9d 100644
--- a/tests/test_builder.py
+++ b/tests/test_builder.py
@@ -1,5 +1,6 @@
from pathlib import Path
+import pytest
from markers import imagemagick
@@ -34,6 +35,7 @@ def test_child_sources_basic(pad, builder):
assert [x["_id"] for x in child_sources] == [
"a",
"b",
+ "container", # hidden children should be built, too
"file.ext",
"hello.txt",
"paginated",
@@ -261,6 +263,26 @@ def test_iter_child_attachments(child_sources_test_project_builder):
assert builder.pad.get("attachment.txt") in prog.iter_child_sources()
[email protected](
+ "parent_path, child_name",
+ [
+ ("/extra", "container"), # a hidden page
+ ("/extra/container", "a"), # a page whose parent is hidden
+ ("/extra/container", "hello.txt"), # an attachment whose parent is hidden
+ ],
+)
+def test_iter_children_of_hidden_pages(builder, pad, parent_path, child_name):
+ # Test that child sources are built even if they're parent is hidden
+ parent = pad.get(parent_path)
+ child = pad.get(f"{parent_path}/{child_name}")
+ # sanity checks
+ assert parent is not None and child is not None
+ assert parent.is_hidden or child.is_hidden
+
+ prog, _ = builder.build(parent)
+ assert child in prog.iter_child_sources()
+
+
def test_record_is_file(pad, builder):
record = pad.get("/extra/file.ext")
@@ -300,3 +322,23 @@ def test_asseturl_dependency_tracking_integration(
updated_asset_url = output.read_text(encoding="utf-8").rstrip()
assert updated_asset_url != asset_url
assert len(build_state.updated_artifacts) == 1
+
+
+def test_prune_remove_artifacts_of_hidden_pages(scratch_project_data, scratch_builder):
+ pad = scratch_builder.pad
+ # Build root page
+ prog, _ = scratch_builder.build(pad.root)
+ (artifact,) = prog.artifacts
+ assert Path(artifact.dst_filename).is_file()
+
+ # Do a prune. Output artifact should survive
+ pad.cache.flush()
+ scratch_builder.prune()
+ assert Path(artifact.dst_filename).is_file()
+
+ # Mark page as hidden, prune should then clean the artifact
+ contents_lr = scratch_project_data.joinpath("content/contents.lr")
+ contents_lr.write_text(contents_lr.read_text() + "\n---\n_hidden: yes\n")
+ pad.cache.flush()
+ scratch_builder.prune()
+ assert not Path(artifact.dst_filename).is_file()
diff --git a/tests/test_db.py b/tests/test_db.py
index 500a3c5..b6c434d 100644
--- a/tests/test_db.py
+++ b/tests/test_db.py
@@ -78,6 +78,28 @@ def test_url_matching_with_customized_slug_in_alt(pad):
assert get_alts(en) == ["en", "de"]
[email protected](
+ "path",
+ [
+ "/",
+ "/extra/container/a", # child if hidden page explicit marked as non-hidden
+ "/extra/container/hello.txt", # attachment of hidden page
+ ],
+)
+def test_resolve_url(pad, path):
+ assert pad.resolve_url_path(path) is not None
+
+
+def test_resolve_url_hidden_page(pad):
+ assert pad.resolve_url_path("/extra/container") is None
+ assert pad.resolve_url_path("/extra/container", include_invisible=True) is not None
+
+
+def test_resolve_url_asset(pad):
+ assert pad.resolve_url_path("/static/demo.css") is not None
+ assert pad.resolve_url_path("/static/demo.css", include_assets=False) is None
+
+
def test_basic_alts(pad):
with Context(pad=pad):
assert get_alts() == ["en", "de"]
|
overriding parent _hidden field in content does not have desired effect
Per docs [here](https://www.getlektor.com/docs/api/db/system-fields/hidden/) on the `_hidden` system field:
> This also automatically applies to all children of a page unless they forcefully override this setting.
I'm trying to use this in a website to expose urls like:
/podcasts/pistons/3
while having a 404 for
/podcasts/
Here's a branch illustrating:
https://github.com/krosaen/brosaen-lektor/compare/unhidden-content-children?expand=1
Note: I'm hiding `/podcasts/` and attempting to unhide `/podcasts/pistons/`. Should this be possible? I'm not sure if this is a bug or a feature request / misunderstanding of the docs.
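For reference, the override being attempted (which the fix above makes effective) is just the `_hidden` system field set in the two `contents.lr` files; the paths here assume the content layout from the issue's linked branch. In `content/podcasts/contents.lr`:

```
_hidden: yes
```

and in `content/podcasts/pistons/contents.lr`:

```
_hidden: no
```

With the patch, children explicitly marked `_hidden: no` resolve and build, while `/podcasts/` itself is served as a 404 ("Record is hidden").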
|
0.0
|
7eff923133ced682a5658d18ef579564589b8bfe
|
[
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-extra/container-/extra/container]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-extra/container-/extra/container]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-extra/container/a/-text/html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-extra/container/hello.txt-text/plain-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-extra/container/a/-text/html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-extra/container/hello.txt-text/plain-False]",
"tests/admin/test_serve.py::test_serve[None-/extra/container/-404-text/html-Record",
"tests/admin/test_serve.py::test_serve[flag1-/extra/container/-404-text/html-Record",
"tests/test_builder.py::test_child_sources_basic",
"tests/test_builder.py::test_iter_children_of_hidden_pages[/extra-container]",
"tests/test_builder.py::test_prune_remove_artifacts_of_hidden_pages",
"tests/test_db.py::test_resolve_url[/extra/container/a]",
"tests/test_db.py::test_resolve_url[/extra/container/hello.txt]",
"tests/test_db.py::test_resolve_url_hidden_page"
] |
[
"tests/admin/test_serve.py::test_rewrite_html_for_editing[<html><head></head><body></body></html>-False]",
"tests/admin/test_serve.py::test_rewrite_html_for_editing[<html>\\n<head>\\n",
"tests/admin/test_serve.py::test_rewrite_html_for_editing[<html><head></",
"tests/admin/test_serve.py::test_rewrite_html_for_editing[<html><head></HeAd><body></body></html>-False]",
"tests/admin/test_serve.py::test_rewrite_html_for_editing[<html></html>-True]",
"tests/admin/test_serve.py::test_rewrite_html_for_editing[<html><header></header><body></body></html>-True]",
"tests/admin/test_serve.py::test_send_html_for_editing",
"tests/admin/test_serve.py::test_send_html_for_editing_etag_depends_on_edit_url",
"tests/admin/test_serve.py::test_send_html_for_editing_raises_404",
"tests/admin/test_serve.py::test_deduce_mimetype[junk.html-text/html]",
"tests/admin/test_serve.py::test_deduce_mimetype[test.HTM-text/html]",
"tests/admin/test_serve.py::test_deduce_mimetype[test.txt-text/plain]",
"tests/admin/test_serve.py::test_deduce_mimetype[test.foo-application/octet-stream]",
"tests/admin/test_serve.py::test_checked_send_file",
"tests/admin/test_serve.py::test_checked_send_file_raises_404",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None--/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-index.html-/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-de/-/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-de/index.html-/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-extra/-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-extra/index.html-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-de/extra/-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-de/extra/index.html-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-extra/long/path/-/extra/slash-slug]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-blog-/blog@1]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-blog/2015/12/post1/-/blog/post1]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[None-de/blog/-/blog@1]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1--/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-index.html-/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-de/-/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-de/index.html-/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-extra/-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-extra/index.html-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-de/extra/-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-de/extra/index.html-/extra]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-extra/long/path/-/extra/slash-slug]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-blog-/blog@1]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-blog/2015/12/post1/-/blog/post1]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path[flag1-de/blog/-/blog@1]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[None-static/demo.css]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[None-dir_with_index_html/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[None-dir_with_index_htm/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[None-static/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[flag1-static/demo.css]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[flag1-dir_with_index_html/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[flag1-dir_with_index_htm/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_to_asset[flag1-static/]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[None-missing]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[None-dir_with_index_html/index.htm]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[None-dir_with_index_htm/index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[None-static/index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[flag1-missing]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[flag1-dir_with_index_html/index.htm]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[flag1-dir_with_index_htm/index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_url_path_raises_404[flag1-static/index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_directory_index[None-dir_with_index_html-index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_directory_index[None-dir_with_index_htm-index.htm]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_directory_index[flag1-dir_with_index_html-index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_directory_index[flag1-dir_with_index_htm-index.htm]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_directory_index_raises_404[None]",
"tests/admin/test_serve.py::TestArtifactServer::test_resolve_directory_index_raises_404[flag1]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-de/extra/-de/extra/index.html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-dir_with_index_html/-None-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-dir_with_index_htm/-None-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-dir_with_index_html/index.html-dir_with_index_html/index.html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-dir_with_index_htm/index.htm-dir_with_index_htm/index.htm-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-static/-None-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-/extra/build-failure-extra/build-failure/index.html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[None-/extra/file.ext-extra/file.ext-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-de/extra/-de/extra/index.html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-dir_with_index_html/-None-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-dir_with_index_htm/-None-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-dir_with_index_html/index.html-dir_with_index_html/index.html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-dir_with_index_htm/index.htm-dir_with_index_htm/index.htm-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-static/-None-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-/extra/build-failure-extra/build-failure/index.html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact[flag1-/extra/file.ext-extra/file.ext-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact_raises_404[None]",
"tests/admin/test_serve.py::TestArtifactServer::test_build_primary_artifact_raises_404[flag1]",
"tests/admin/test_serve.py::TestArtifactServer::test_handle_build_failure[None-None]",
"tests/admin/test_serve.py::TestArtifactServer::test_handle_build_failure[None-EDIT_URL]",
"tests/admin/test_serve.py::TestArtifactServer::test_handle_build_failure[flag1-None]",
"tests/admin/test_serve.py::TestArtifactServer::test_handle_build_failure[flag1-EDIT_URL]",
"tests/admin/test_serve.py::TestArtifactServer::test_get_edit_url[None-/blog-kw0-expect0]",
"tests/admin/test_serve.py::TestArtifactServer::test_get_edit_url[None-/extra-kw1-expect1]",
"tests/admin/test_serve.py::TestArtifactServer::test_get_edit_url[None-/extra-kw2-expect2]",
"tests/admin/test_serve.py::TestArtifactServer::test_get_edit_url[flag1-/blog-kw0-expect0]",
"tests/admin/test_serve.py::TestArtifactServer::test_get_edit_url[flag1-/extra-kw1-expect1]",
"tests/admin/test_serve.py::TestArtifactServer::test_get_edit_url[flag1-/extra-kw2-expect2]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_adds_slash[None-extra-extra/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_adds_slash[None-dir_with_index_html-dir_with_index_html/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_adds_slash[flag1-extra-extra/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_adds_slash[flag1-dir_with_index_html-dir_with_index_html/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-projects/coffee/-text/html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-extra/test.txt-text/plain-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-hello.txt-text/plain-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-extra/build-failure/-text/html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-static/demo.css-text/css-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-dir_with_index_html/-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-dir_with_index_htm/-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-dir_with_index_html/index.html-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[None-dir_with_index_htm/index.htm-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-projects/coffee/-text/html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-extra/test.txt-text/plain-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-hello.txt-text/plain-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-extra/build-failure/-text/html-True]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-static/demo.css-text/css-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-dir_with_index_html/-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-dir_with_index_htm/-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-dir_with_index_html/index.html-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_serves_artifact[flag1-dir_with_index_htm/index.htm-text/html-False]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[[email protected]]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[None-extra/container/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[None-static/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[None-dir_with_index_html/index.htm]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[None-dir_with_index_htm/index.html]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[[email protected]]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[flag1-extra/container/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[flag1-static/]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[flag1-dir_with_index_html/index.htm]",
"tests/admin/test_serve.py::TestArtifactServer::test_serve_artifact_raises_404[flag1-dir_with_index_htm/index.html]",
"tests/admin/test_serve.py::test_serve_artifact[None]",
"tests/admin/test_serve.py::test_serve_artifact[flag1]",
"tests/admin/test_serve.py::test_serve_file[None]",
"tests/admin/test_serve.py::test_serve_file[flag1]",
"tests/admin/test_serve.py::test_serve_file_with_relative_output_path[None-output_path0]",
"tests/admin/test_serve.py::test_serve_file_with_relative_output_path[flag1-output_path0]",
"tests/admin/test_serve.py::test_serve_file_dir_handling[None-index.html]",
"tests/admin/test_serve.py::test_serve_file_dir_handling[None-index.htm]",
"tests/admin/test_serve.py::test_serve_file_dir_handling[flag1-index.html]",
"tests/admin/test_serve.py::test_serve_file_dir_handling[flag1-index.htm]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-missing]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-example/container]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-adir/]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-adir/../top.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-../adir/index.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-adir/index.txt/../index.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-adir/index.txt/../../top.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[None-adir/index.txt/../../adir/index.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-missing]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-example/container]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-adir/]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-adir/../top.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-../adir/index.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-adir/index.txt/../index.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-adir/index.txt/../../top.txt]",
"tests/admin/test_serve.py::test_serve_file_raises_404[flag1-adir/index.txt/../../adir/index.txt]",
"tests/admin/test_serve.py::test_serve[None-/hello.txt-200-text/plain-Hello",
"tests/admin/test_serve.py::test_serve[None-/missing/-404-text/html-The",
"tests/admin/test_serve.py::test_serve[flag1-/hello.txt-200-text/plain-Hello",
"tests/admin/test_serve.py::test_serve[flag1-/missing/-404-text/html-The",
"tests/admin/test_serve.py::test_serve_from_file[None]",
"tests/admin/test_serve.py::test_serve_from_file[flag1]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[None-/extra-http://example.org/pfx/-http://example.org/pfx/extra/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[None-/dir_with_index_html?qs-http://localhost/-http://localhost/dir_with_index_html/?qs]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[None-/projects/coffee-http://localhost/pfx/-http://localhost/pfx/projects/coffee/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[None-/adir-http://localhost/-http://localhost/adir/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[None-/adir/bdir-http://localhost/-http://localhost/adir/bdir/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[flag1-/extra-http://example.org/pfx/-http://example.org/pfx/extra/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[flag1-/dir_with_index_html?qs-http://localhost/-http://localhost/dir_with_index_html/?qs]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[flag1-/projects/coffee-http://localhost/pfx/-http://localhost/pfx/projects/coffee/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[flag1-/adir-http://localhost/-http://localhost/adir/]",
"tests/admin/test_serve.py::test_serve_add_slash_redirect_integration[flag1-/adir/bdir-http://localhost/-http://localhost/adir/bdir/]",
"tests/admin/test_serve.py::test_serve_custom_404",
"tests/test_builder.py::test_child_sources_pagination",
"tests/test_builder.py::test_basic_artifact_current_test",
"tests/test_builder.py::test_basic_template_rendering",
"tests/test_builder.py::test_attachment_copying",
"tests/test_builder.py::test_asset_processing",
"tests/test_builder.py::test_included_assets",
"tests/test_builder.py::test_excluded_assets",
"tests/test_builder.py::test_iter_child_pages",
"tests/test_builder.py::test_iter_child_attachments",
"tests/test_builder.py::test_iter_children_of_hidden_pages[/extra/container-a]",
"tests/test_builder.py::test_iter_children_of_hidden_pages[/extra/container-hello.txt]",
"tests/test_builder.py::test_record_is_file",
"tests/test_builder.py::test_slug_contains_slash",
"tests/test_builder.py::test_asseturl_dependency_tracking_integration",
"tests/test_db.py::test_root",
"tests/test_db.py::test_project_implied_model",
"tests/test_db.py::test_child_query_visibility_setting",
"tests/test_db.py::test_alt_fallback",
"tests/test_db.py::test_alt_parent",
"tests/test_db.py::test_url_matching_with_customized_slug_in_alt",
"tests/test_db.py::test_resolve_url[/]",
"tests/test_db.py::test_resolve_url_asset",
"tests/test_db.py::test_basic_alts",
"tests/test_db.py::test_basic_query_syntax",
"tests/test_db.py::test_basic_query_syntax_template",
"tests/test_db.py::test_is_child_of",
"tests/test_db.py::test_undiscoverable_basics",
"tests/test_db.py::test_attachment_api",
"tests/test_db.py::test_attachment_url_path_with_alt[_primary]",
"tests/test_db.py::test_attachment_url_path_with_alt[en]",
"tests/test_db.py::test_attachment_url_path_with_alt[de]",
"tests/test_db.py::test_query_normalization",
"tests/test_db.py::test_distinct",
"tests/test_db.py::test_root_pagination",
"tests/test_db.py::test_undefined_order",
"tests/test_db.py::test_hidden_flag",
"tests/test_db.py::test_default_order_by",
"tests/test_db.py::test_offset_without_limit_query",
"tests/test_db.py::test_Pad_get_invalid_path",
"tests/test_db.py::test_Database_iter_items_invalid_path",
"tests/test_db.py::test_Record_get_clean_url_path[/test_dotted-subdir/test.dotted]",
"tests/test_db.py::test_Record_get_clean_url_path[/test_dotted/child-subdir/_test.dotted/child]",
"tests/test_db.py::test_Record_get_url_path_defaults_to_primary_alt",
"tests/test_db.py::test_Page_url_path[/-en-None-/]",
"tests/test_db.py::test_Page_url_path[/-de-None-/de/]",
"tests/test_db.py::test_Page_url_path[/paginated-en-1-/paginated/]",
"tests/test_db.py::test_Page_url_path[/paginated-de-2-/de/paginated/page/2/]",
"tests/test_db.py::test_Page_url_path[/test_dotted-en-None-/subdir/test.dotted]",
"tests/test_db.py::test_Page_url_path[/test_dotted-de-None-/de/subdir/test.dotted]",
"tests/test_db.py::test_Page_url_path_is_for_primary_alt",
"tests/test_db.py::test_Page_url_path_raise_error_if_paginated_and_dotted",
"tests/test_db.py::test_Attachment_url_path_is_for_primary_alt[en]",
"tests/test_db.py::test_Attachment_url_path_is_for_primary_alt[de]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-22 03:31:27+00:00
|
bsd-3-clause
| 3,537 |
|
lektor__lektor-1086
|
diff --git a/lektor/pluginsystem.py b/lektor/pluginsystem.py
index 4258870..22058ca 100644
--- a/lektor/pluginsystem.py
+++ b/lektor/pluginsystem.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import inspect
import os
import sys
import warnings
@@ -188,24 +189,31 @@ class PluginController:
return self.env.plugins.values()
def emit(self, event, **kwargs):
+ """Invoke event hook for all plugins that support it.
+ Any ``kwargs`` are passed to the hook methods.
+
+ Returns a dict mapping plugin ids to hook method return values.
+ """
rv = {}
- kwargs["extra_flags"] = process_extra_flags(self.extra_flags)
+ extra_flags = process_extra_flags(self.extra_flags)
funcname = "on_" + event.replace("-", "_")
for plugin in self.iter_plugins():
handler = getattr(plugin, funcname, None)
if handler is not None:
+ kw = {**kwargs, "extra_flags": extra_flags}
try:
- rv[plugin.id] = handler(**kwargs)
+ inspect.signature(handler).bind(**kw)
except TypeError:
- old_style_kwargs = kwargs.copy()
- old_style_kwargs.pop("extra_flags")
- rv[plugin.id] = handler(**old_style_kwargs)
+ del kw["extra_flags"]
+ rv[plugin.id] = handler(**kw)
+ if "extra_flags" not in kw:
warnings.warn(
- 'The plugin "{}" function "{}" does not accept extra_flags. '
+ f"The plugin {plugin.id!r} function {funcname!r} does not "
+ "accept extra_flags. "
"It should be updated to accept `**extra` so that it will "
"not break if new parameters are passed to it by newer "
- "versions of Lektor.".format(plugin.id, funcname),
+ "versions of Lektor.",
DeprecationWarning,
)
return rv
|
lektor/lektor
|
259f0265055142451648c28741d7cb307196b2de
|
diff --git a/tests/test_pluginsystem.py b/tests/test_pluginsystem.py
index c0b7f85..680907b 100644
--- a/tests/test_pluginsystem.py
+++ b/tests/test_pluginsystem.py
@@ -48,6 +48,13 @@ class DummyPlugin(Plugin):
self.calls.append({"event": "legacy-event"})
return "legacy-event return value"
+ def on_one_type_error(self, **kwargs):
+ """Raises TypeError only on the first call."""
+ self.calls.append({"event": "one-type-error"})
+ if len(self.calls) == 1:
+ raise TypeError("test")
+ return "one-type-error return value"
+
@pytest.fixture(autouse=True)
def dummy_plugin_calls(monkeypatch):
@@ -331,6 +338,13 @@ class TestPluginController:
rv = plugin_controller.emit("legacy-event")
assert rv == {dummy_plugin.id: "legacy-event return value"}
+ def test_emit_is_not_confused_by_type_error(self, plugin_controller, dummy_plugin):
+ # Excercises https://github.com/lektor/lektor/issues/1085
+ with pytest.raises(TypeError):
+ plugin_controller.emit("one-type-error")
+ rv = plugin_controller.emit("one-type-error")
+ assert rv == {dummy_plugin.id: "one-type-error return value"}
+
@pytest.mark.usefixtures("dummy_plugin_distribution")
def test_cli_integration(project, cli_runner, monkeypatch):
|
TypeErrors raised by plugin hooks are sometimes silently ignored
Hi, it took a while to identify the culprit of this bug, and I honestly cannot imagine what might cause it.
But this simple plugin example shows what is going wrong:
```py
from lektor.pluginsystem import Plugin
class TrashPlugin(Plugin):
def on_before_build(self, builder, source, **extra):
try:
self.custom_attr
except AttributeError:
self.custom_attr = 'a'
raise TypeError('FAIL')
```
Instead of showing the callstack with the `TypeError`, lektor somehow forgets the exception and continues happily.
The only hint that something went wrong is this warning:
> /lektor/pluginsystem.py:174: DeprecationWarning: The plugin "trash" function "on_before_build" does not accept extra_flags. It should be updated to accept `**extra` so that it will not break if new parameters are passed to it by newer versions of Lektor.
warnings.warn(
(which is extra frustrating because ... "c'mon, it's there" 😆 )
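The root cause is visible in the patch above: `emit()` wrapped the handler *call* itself in `try/except TypeError`, so a `TypeError` raised inside the hook body was indistinguishable from a signature mismatch. A minimal sketch of the fix, using a generic `call_hook` helper for illustration (the real code lives in `PluginController.emit`):

```python
import inspect
import warnings

def call_hook(handler, **kw):
    # Bind without calling: a TypeError here can only mean the
    # handler's signature rejects these kwargs, never that the
    # handler body failed (it hasn't run yet).
    try:
        inspect.signature(handler).bind(**kw)
    except TypeError:
        kw.pop("extra_flags", None)  # legacy hooks predate extra_flags
        warnings.warn("hook does not accept extra_flags", DeprecationWarning)
    # From here on, any TypeError comes from inside the hook and
    # propagates instead of being silently swallowed.
    return handler(**kw)
```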
|
0.0
|
259f0265055142451648c28741d7cb307196b2de
|
[
"tests/test_pluginsystem.py::TestPluginController::test_emit_is_not_confused_by_type_error"
] |
[
"tests/test_pluginsystem.py::test_get_plugin",
"tests/test_pluginsystem.py::test_get_plugin_from_context",
"tests/test_pluginsystem.py::test_get_plugin_missing",
"tests/test_pluginsystem.py::test_get_plugin_no_env_or_ctx",
"tests/test_pluginsystem.py::TestPlugin::test_env",
"tests/test_pluginsystem.py::TestPlugin::test_env_went_away",
"tests/test_pluginsystem.py::TestPlugin::test_version",
"tests/test_pluginsystem.py::TestPlugin::test_version_missing",
"tests/test_pluginsystem.py::TestPlugin::test_path",
"tests/test_pluginsystem.py::TestPlugin::test_path_installed_plugin_is_none",
"tests/test_pluginsystem.py::TestPlugin::test_import_name",
"tests/test_pluginsystem.py::TestPlugin::test_get_lektor_config",
"tests/test_pluginsystem.py::TestPlugin::test_get_lektor_config_from_context",
"tests/test_pluginsystem.py::TestPlugin::test_config_filename",
"tests/test_pluginsystem.py::TestPlugin::test_get_config",
"tests/test_pluginsystem.py::TestPlugin::test_get_config_records_dependency",
"tests/test_pluginsystem.py::TestPlugin::test_get_config_returns_cached_value",
"tests/test_pluginsystem.py::TestPlugin::test_get_config_fresh",
"tests/test_pluginsystem.py::TestPlugin::test_emit",
"tests/test_pluginsystem.py::TestPlugin::test_to_json",
"tests/test_pluginsystem.py::test_find_plugins",
"tests/test_pluginsystem.py::test_check_dist_name[Lektor-FOO-Foo]",
"tests/test_pluginsystem.py::test_check_dist_name_raises[NotLektor-FOO-Foo]",
"tests/test_pluginsystem.py::TestPluginController::test_env",
"tests/test_pluginsystem.py::TestPluginController::test_env_went_away",
"tests/test_pluginsystem.py::TestPluginController::test_instantiate_plugin",
"tests/test_pluginsystem.py::TestPluginController::test_instantiate_plugin_error",
"tests/test_pluginsystem.py::TestPluginController::test_iter_plugins",
"tests/test_pluginsystem.py::TestPluginController::test_emit",
"tests/test_pluginsystem.py::TestPluginController::test_emit_with_kwargs",
"tests/test_pluginsystem.py::TestPluginController::test_emit_deprecation_warning"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-11-05 18:56:46+00:00
|
bsd-3-clause
| 3,538 |
|
lektor__lektor-1144
|
diff --git a/lektor/cli.py b/lektor/cli.py
index 23d6573..69ab2df 100644
--- a/lektor/cli.py
+++ b/lektor/cli.py
@@ -41,7 +41,8 @@ def cli(ctx, project=None, language=None):
This command can invoke lektor locally and serve up the website. It's
intended for local development of websites.
"""
- warnings.simplefilter("default")
+ if not sys.warnoptions:
+ warnings.simplefilter("default")
if language is not None:
ctx.ui_lang = language
if project is not None:
diff --git a/lektor/db.py b/lektor/db.py
index 18ad5b8..cf95cbd 100644
--- a/lektor/db.py
+++ b/lektor/db.py
@@ -11,12 +11,12 @@ from collections import OrderedDict
from datetime import timedelta
from itertools import islice
from operator import methodcaller
+from urllib.parse import urljoin
from jinja2 import is_undefined
from jinja2 import Undefined
from jinja2.exceptions import UndefinedError
from jinja2.utils import LRUCache
-from werkzeug.urls import url_join
from werkzeug.utils import cached_property
from lektor import metaformat
@@ -957,8 +957,6 @@ class VideoFrame:
"frames directly, use .thumbnail()."
)
- __unicode__ = __str__
-
@require_ffmpeg
def thumbnail(self, width=None, height=None, mode=None, upscale=None, quality=None):
"""Utility to create thumbnails."""
@@ -1628,7 +1626,7 @@ class Pad:
"To use absolute URLs you need to configure "
"the URL in the project config."
)
- return url_join(base_url.rstrip("/") + "/", url.lstrip("/"))
+ return urljoin(base_url.rstrip("/") + "/", url.lstrip("/"))
def make_url(self, url, base_url=None, absolute=None, external=None):
"""Helper method that creates a finalized URL based on the parameters
@@ -1646,9 +1644,9 @@ class Pad:
"To use absolute URLs you need to "
"configure the URL in the project config."
)
- return url_join(external_base_url, url.lstrip("/"))
+ return urljoin(external_base_url, url.lstrip("/"))
if absolute:
- return url_join(self.db.config.base_path, url.lstrip("/"))
+ return urljoin(self.db.config.base_path, url.lstrip("/"))
if base_url is None:
raise RuntimeError(
"Cannot calculate a relative URL if no base " "URL has been provided."
diff --git a/lektor/environment/config.py b/lektor/environment/config.py
index ff890f4..eafc5e2 100644
--- a/lektor/environment/config.py
+++ b/lektor/environment/config.py
@@ -2,9 +2,9 @@ import copy
import os
import re
from collections import OrderedDict
+from urllib.parse import urlsplit
from inifile import IniFile
-from werkzeug.urls import url_parse
from werkzeug.utils import cached_property
from lektor.constants import PRIMARY_ALT
@@ -273,7 +273,7 @@ class Config:
def base_url(self):
"""The external base URL."""
url = self.values["PROJECT"].get("url")
- if url and url_parse(url).scheme:
+ if url and urlsplit(url).scheme:
return url.rstrip("/") + "/"
return None
@@ -282,7 +282,7 @@ class Config:
"""The base path of the URL."""
url = self.values["PROJECT"].get("url")
if url:
- return url_parse(url).path.rstrip("/") + "/"
+ return urlsplit(url).path.rstrip("/") + "/"
return "/"
@cached_property
diff --git a/lektor/markdown.py b/lektor/markdown.py
index 2244037..ebc321f 100644
--- a/lektor/markdown.py
+++ b/lektor/markdown.py
@@ -1,9 +1,9 @@
import threading
+from urllib.parse import urlsplit
from weakref import ref as weakref
import mistune
from markupsafe import Markup
-from werkzeug.urls import url_parse
from lektor.context import get_ctx
@@ -18,7 +18,7 @@ def escape(text: str) -> str:
class ImprovedRenderer(mistune.Renderer):
def link(self, link, title, text):
if self.record is not None:
- url = url_parse(link)
+ url = urlsplit(link)
if not url.scheme:
link = self.record.url_to("!" + link, base_url=get_ctx().base_url)
link = escape(link)
@@ -29,7 +29,7 @@ class ImprovedRenderer(mistune.Renderer):
def image(self, src, title, text):
if self.record is not None:
- url = url_parse(src)
+ url = urlsplit(src)
if not url.scheme:
src = self.record.url_to("!" + src, base_url=get_ctx().base_url)
src = escape(src)
diff --git a/lektor/utils.py b/lektor/utils.py
index e33268d..e343c1f 100644
--- a/lektor/utils.py
+++ b/lektor/utils.py
@@ -18,14 +18,15 @@ from functools import lru_cache
from pathlib import PurePosixPath
from queue import Queue
from threading import Thread
+from urllib.parse import urlsplit
import click
from jinja2 import is_undefined
from markupsafe import Markup
from slugify import slugify as _slugify
-from werkzeug import urls
from werkzeug.http import http_date
-from werkzeug.urls import url_parse
+from werkzeug.urls import iri_to_uri
+from werkzeug.urls import uri_to_iri
is_windows = os.name == "nt"
@@ -388,24 +389,21 @@ class WorkerPool:
class Url:
- def __init__(self, value):
+ def __init__(self, value: str):
self.url = value
- u = url_parse(value)
- i = u.to_iri_tuple()
- self.ascii_url = str(u)
- self.host = i.host
- self.ascii_host = u.ascii_host
+ u = urlsplit(value)
+ i = urlsplit(uri_to_iri(u.geturl()))
+ self.ascii_url = iri_to_uri(u.geturl())
+ self.host = i.hostname
+ self.ascii_host = urlsplit(self.ascii_url).hostname
self.port = u.port
self.path = i.path
self.query = u.query
self.anchor = i.fragment
self.scheme = u.scheme
- def __unicode__(self):
- return self.url
-
def __str__(self):
- return self.ascii_url
+ return self.url
def is_unsafe_to_delete(path, base):
@@ -511,17 +509,12 @@ def is_valid_id(value):
)
-def secure_url(url):
- url = urls.url_parse(url)
- if url.password is not None:
- url = url.replace(
- netloc="%s@%s"
- % (
- url.username,
- url.netloc.split("@")[-1],
- )
- )
- return url.to_url()
+def secure_url(url: str) -> str:
+ parts = urlsplit(url)
+ if parts.password is not None:
+ _, _, host_port = parts.netloc.rpartition("@")
+ parts = parts._replace(netloc=f"{parts.username}@{host_port}")
+ return parts.geturl()
def bool_from_string(val, default=None):
diff --git a/setup.cfg b/setup.cfg
index f038fbc..922fe7c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -51,7 +51,7 @@ install_requires =
requests
setuptools>=45.2
watchdog
- Werkzeug<3
+ Werkzeug<2.4
[options.extras_require]
ipython =
|
lektor/lektor
|
86070c17d904e8f5695c38912c08624b972b28f3
|
diff --git a/tests/test_config.py b/tests/test_config.py
index 86d24d9..4dc843c 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,3 +1,50 @@
+import inspect
+
+import pytest
+
+from lektor.environment.config import Config
+
+
def test_custom_attachment_types(env):
attachment_types = env.load_config().values["ATTACHMENT_TYPES"]
assert attachment_types[".foo"] == "text"
+
+
[email protected](scope="function")
+def config(tmp_path, project_url):
+ projectfile = tmp_path / "scratch.lektorproject"
+ projectfile.write_text(
+ inspect.cleandoc(
+ f"""
+ [project]
+ url = {project_url}
+ """
+ )
+ )
+ return Config(projectfile)
+
+
[email protected](
+ "project_url, expected",
+ [
+ ("", None),
+ ("/path/", None),
+ ("https://example.org", "https://example.org/"),
+ ],
+)
+def test_base_url(config, expected):
+ assert config.base_url == expected
+
+
[email protected](
+ "project_url, expected",
+ [
+ ("", "/"),
+ ("/path", "/path/"),
+ ("/path/", "/path/"),
+ ("https://example.org", "/"),
+ ("https://example.org/pth", "/pth/"),
+ ],
+)
+def test_base_path(config, expected):
+ assert config.base_path == expected
diff --git a/tests/test_db.py b/tests/test_db.py
index 10d01ec..4a7f8ce 100644
--- a/tests/test_db.py
+++ b/tests/test_db.py
@@ -487,3 +487,30 @@ def test_Page_url_path_raise_error_if_paginated_and_dotted(scratch_pad):
def test_Attachment_url_path_is_for_primary_alt(scratch_pad, alt):
attachment = scratch_pad.get("/test.txt")
assert attachment.url_path == "/en/test.txt"
+
+
[email protected](
+ "url, base_url, absolute, external, project_url, expected",
+ [
+ ("/a/b.html", "/a/", None, None, None, "b.html"),
+ ("/a/b/", "/a/", None, None, None, "b/"),
+ ("/a/b/", "/a", None, None, None, "a/b/"),
+ ("/a/b/", "/a", True, None, None, "/a/b/"),
+ ("/a/b/", "/a", True, None, "https://example.net/pfx/", "/pfx/a/b/"),
+ ("/a/b/", "/a", None, True, "https://example.org", "https://example.org/a/b/"),
+ ],
+)
+def test_Pad_make_url(url, base_url, absolute, external, project_url, expected, pad):
+ if project_url is not None:
+ pad.db.config.values["PROJECT"]["url"] = project_url
+ assert pad.make_url(url, base_url, absolute, external) == expected
+
+
+def test_Pad_make_url_raises_runtime_error_if_no_project_url(pad):
+ with pytest.raises(RuntimeError, match="(?i)configure the url in the project"):
+ pad.make_url("/a/b", external=True)
+
+
+def test_Pad_make_url_raises_runtime_error_if_no_base_url(pad):
+ with pytest.raises(RuntimeError, match="(?i)no base url"):
+ pad.make_url("/a/b")
diff --git a/tests/test_deploy.py b/tests/test_deploy.py
index c73e96f..9654298 100644
--- a/tests/test_deploy.py
+++ b/tests/test_deploy.py
@@ -9,6 +9,9 @@ from lektor.publisher import GithubPagesPublisher
from lektor.publisher import RsyncPublisher
+pytestmark = pytest.mark.filterwarnings(r"ignore:'werkzeug\.urls:DeprecationWarning")
+
+
def test_get_server(env):
server = env.load_config().get_server("production")
assert server.name == "Production"
diff --git a/tests/test_publisher.py b/tests/test_publisher.py
index c4cca1a..d3b4fc1 100644
--- a/tests/test_publisher.py
+++ b/tests/test_publisher.py
@@ -42,6 +42,7 @@ def test_Command_triggers_no_warnings():
which("rsync") is None, reason="rsync is not available on this system"
)
@pytest.mark.parametrize("delete", ["yes", "no"])
[email protected](r"ignore:'werkzeug\.urls:DeprecationWarning")
def test_RsyncPublisher_integration(env, tmp_path, delete):
# Integration test of local rsync deployment
# Ensures that RsyncPublisher can successfully invoke rsync
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 0f62384..bdae109 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,7 @@
# coding: utf-8
+from dataclasses import dataclass
+from urllib.parse import urlsplit
+
import pytest
from lektor.utils import build_url
@@ -7,7 +10,9 @@ from lektor.utils import join_path
from lektor.utils import magic_split_ext
from lektor.utils import make_relative_url
from lektor.utils import parse_path
+from lektor.utils import secure_url
from lektor.utils import slugify
+from lektor.utils import Url
def test_join_path():
@@ -70,6 +75,103 @@ def test_slugify():
assert slugify("slashed/slug") == "slashed/slug"
+@dataclass
+class SampleUrl:
+ uri: str
+ iri: str
+
+ @property
+ def split_uri(self):
+ return urlsplit(self.uri)
+
+ @property
+ def split_iri(self):
+ return urlsplit(self.iri)
+
+
+SAMPLE_URLS = [
+ SampleUrl("https://example.org/foo", "https://example.org/foo"),
+ SampleUrl("https://example.org:8001/f%C3%BC", "https://example.org:8001/fü"),
+ SampleUrl(
+ "https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7",
+ "https://日本語.idn.icann.org/大",
+ ),
+ SampleUrl("/?q=sch%C3%B6n#gru%C3%9F", "/?q=schön#gruß"),
+]
+
+
[email protected](params=SAMPLE_URLS, ids=lambda sample: sample.uri)
+def sample_url(request):
+ sample_url = request.param
+ # sanity checks
+ assert sample_url.split_uri.scheme == sample_url.split_iri.scheme
+ assert sample_url.split_uri.port == sample_url.split_iri.port
+ return sample_url
+
+
+def test_Url_str(sample_url):
+ assert str(Url(sample_url.iri)) == sample_url.iri
+ assert str(Url(sample_url.uri)) == sample_url.uri
+
+
+def test_Url_ascii_url(sample_url):
+ assert Url(sample_url.iri).ascii_url == sample_url.uri
+ assert Url(sample_url.uri).ascii_url == sample_url.uri
+
+
+def test_Url_ascii_host(sample_url):
+ assert Url(sample_url.iri).ascii_host == sample_url.split_uri.hostname
+ assert Url(sample_url.uri).ascii_host == sample_url.split_uri.hostname
+
+
+def test_Url_scheme(sample_url):
+ assert Url(sample_url.iri).scheme == sample_url.split_uri.scheme
+ assert Url(sample_url.uri).scheme == sample_url.split_uri.scheme
+
+
+def test_Url_host(sample_url):
+ assert Url(sample_url.iri).host == sample_url.split_iri.hostname
+ assert Url(sample_url.uri).host == sample_url.split_iri.hostname
+
+
+def test_Url_port(sample_url):
+ assert Url(sample_url.iri).port == sample_url.split_uri.port
+ assert Url(sample_url.uri).port == sample_url.split_uri.port
+
+
+def test_Url_path(sample_url):
+ assert Url(sample_url.iri).path == sample_url.split_iri.path
+ assert Url(sample_url.uri).path == sample_url.split_iri.path
+
+
+def test_Url_query(sample_url):
+ try:
+ assert Url(sample_url.iri).query == sample_url.split_iri.query
+ assert Url(sample_url.uri).query == sample_url.split_iri.query
+ except AssertionError:
+ # This is the behavior prior to Lektor 3.4.x
+ assert Url(sample_url.iri).query == sample_url.split_iri.query
+ assert Url(sample_url.uri).query == sample_url.split_uri.query
+ pytest.xfail("Url.query is weird in Lektor<3.4")
+
+
+def test_Url_anchor(sample_url):
+ assert Url(sample_url.iri).anchor == sample_url.split_iri.fragment
+ assert Url(sample_url.uri).anchor == sample_url.split_iri.fragment
+
+
[email protected](
+ "url, expected",
+ [
+ ("https://user:[email protected]/p", "https://[email protected]/p"),
+ ("https://user:[email protected]:8000", "https://[email protected]:8000"),
+ ("https://[email protected]/b", "https://[email protected]/b"),
+ ],
+)
+def test_secure_url(url, expected):
+ assert secure_url(url) == expected
+
+
def test_url_builder():
assert build_url([]) == "/"
|
Deprecated Werkzeug APIs in use
Werkzeug 2.3 deprecated a number of functions in the `werkzeug.urls` module. Lektor uses some of these functions:
* `werkzeug.urls.url_parse` (in favor of `urllib.parse.urlsplit`)
* `werkzeug.urls.url_join` (in favor of `urllib.parse.urljoin`)
It also removes the `werkzeug.urls.URL` wrapper class.
As a result, running Lektor with Werkzeug 2.3+ causes hundreds of deprecation warnings.
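For reference, the stdlib equivalents used in the patch are near drop-in replacements; a small sketch with illustrative URLs (note that `urlsplit` exposes `hostname` where Werkzeug's result had `host`/`ascii_host`, which is why the patch also pulls in `uri_to_iri`/`iri_to_uri` for the IRI handling in `lektor.utils.Url`):

```python
from urllib.parse import urljoin, urlsplit

# werkzeug.urls.url_parse(url)  ->  urllib.parse.urlsplit(url)
parts = urlsplit("https://example.org:8001/pfx/page?q=1#frag")
assert parts.scheme == "https"
assert parts.hostname == "example.org" and parts.port == 8001
assert parts.path == "/pfx/page"

# werkzeug.urls.url_join(base, url)  ->  urllib.parse.urljoin(base, url)
assert urljoin("https://example.org/pfx/", "a/b/") == "https://example.org/pfx/a/b/"
```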
|
0.0
|
86070c17d904e8f5695c38912c08624b972b28f3
|
[
"tests/test_utils.py::test_Url_ascii_url[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_ascii_url[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_ascii_url[/?q=sch%C3%B6n#gru%C3%9F]"
] |
[
"tests/test_config.py::test_custom_attachment_types",
"tests/test_config.py::test_base_url[-None]",
"tests/test_config.py::test_base_url[/path/-None]",
"tests/test_config.py::test_base_url[https://example.org-https://example.org/]",
"tests/test_config.py::test_base_path[-/]",
"tests/test_config.py::test_base_path[/path-/path/]",
"tests/test_config.py::test_base_path[/path/-/path/]",
"tests/test_config.py::test_base_path[https://example.org-/]",
"tests/test_config.py::test_base_path[https://example.org/pth-/pth/]",
"tests/test_db.py::test_root",
"tests/test_db.py::test_project_implied_model",
"tests/test_db.py::test_child_query_visibility_setting",
"tests/test_db.py::test_alt_fallback",
"tests/test_db.py::test_alt_parent",
"tests/test_db.py::test_url_matching_with_customized_slug_in_alt",
"tests/test_db.py::test_basic_alts",
"tests/test_db.py::test_basic_query_syntax",
"tests/test_db.py::test_basic_query_syntax_template",
"tests/test_db.py::test_is_child_of",
"tests/test_db.py::test_undiscoverable_basics",
"tests/test_db.py::test_attachment_api",
"tests/test_db.py::test_attachment_url_path_with_alt[_primary]",
"tests/test_db.py::test_attachment_url_path_with_alt[en]",
"tests/test_db.py::test_attachment_url_path_with_alt[de]",
"tests/test_db.py::test_query_normalization",
"tests/test_db.py::test_distinct",
"tests/test_db.py::test_root_pagination",
"tests/test_db.py::test_undefined_order",
"tests/test_db.py::test_hidden_flag",
"tests/test_db.py::test_default_order_by",
"tests/test_db.py::test_offset_without_limit_query",
"tests/test_db.py::test_Pad_get_invalid_path",
"tests/test_db.py::test_Database_iter_items_invalid_path",
"tests/test_db.py::test_Record_get_clean_url_path[/test_dotted-subdir/test.dotted]",
"tests/test_db.py::test_Record_get_clean_url_path[/test_dotted/child-subdir/_test.dotted/child]",
"tests/test_db.py::test_Record_get_url_path_defaults_to_primary_alt",
"tests/test_db.py::test_Page_url_path[/-en-None-/]",
"tests/test_db.py::test_Page_url_path[/-de-None-/de/]",
"tests/test_db.py::test_Page_url_path[/paginated-en-1-/paginated/]",
"tests/test_db.py::test_Page_url_path[/paginated-de-2-/de/paginated/page/2/]",
"tests/test_db.py::test_Page_url_path[/test_dotted-en-None-/subdir/test.dotted]",
"tests/test_db.py::test_Page_url_path[/test_dotted-de-None-/de/subdir/test.dotted]",
"tests/test_db.py::test_Page_url_path_is_for_primary_alt",
"tests/test_db.py::test_Page_url_path_raise_error_if_paginated_and_dotted",
"tests/test_db.py::test_Attachment_url_path_is_for_primary_alt[en]",
"tests/test_db.py::test_Attachment_url_path_is_for_primary_alt[de]",
"tests/test_db.py::test_Pad_make_url[/a/b.html-/a/-None-None-None-b.html]",
"tests/test_db.py::test_Pad_make_url[/a/b/-/a/-None-None-None-b/]",
"tests/test_db.py::test_Pad_make_url[/a/b/-/a-None-None-None-a/b/]",
"tests/test_db.py::test_Pad_make_url[/a/b/-/a-True-None-None-/a/b/]",
"tests/test_db.py::test_Pad_make_url[/a/b/-/a-True-None-https://example.net/pfx/-/pfx/a/b/]",
"tests/test_db.py::test_Pad_make_url[/a/b/-/a-None-True-https://example.org-https://example.org/a/b/]",
"tests/test_db.py::test_Pad_make_url_raises_runtime_error_if_no_project_url",
"tests/test_db.py::test_Pad_make_url_raises_runtime_error_if_no_base_url",
"tests/test_deploy.py::test_get_server",
"tests/test_deploy.py::test_ghpages_update_git_config",
"tests/test_deploy.py::test_ghpages_update_git_config_https",
"tests/test_deploy.py::test_ghpages_update_git_config_https_credentials",
"tests/test_deploy.py::test_ghpages_write_cname",
"tests/test_deploy.py::test_ghpages_detect_branch_username",
"tests/test_deploy.py::test_ghpages_detect_branch_username_case_insensitive",
"tests/test_deploy.py::test_ghpages_detect_branch_project",
"tests/test_deploy.py::test_rsync_command_credentials",
"tests/test_deploy.py::test_rsync_publisher[http://example.com-called_command0]",
"tests/test_deploy.py::test_rsync_publisher[http://[email protected]_command1]",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?exclude=file-called_command2]",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?exclude=file_one&exclude=file_two-called_command3]",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?exclude='user's",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?exclude=\"file",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?delete-called_command6]",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?delete=yes-called_command7]",
"tests/test_deploy.py::test_rsync_publisher[http://example.com?delete=no-called_command8]",
"tests/test_deploy.py::test_rsync_publisher[file:///path/to/directory-called_command9]",
"tests/test_publisher.py::test_Command_triggers_no_warnings",
"tests/test_utils.py::test_join_path",
"tests/test_utils.py::test_is_path_child_of",
"tests/test_utils.py::test_magic_split_ext",
"tests/test_utils.py::test_slugify",
"tests/test_utils.py::test_Url_str[https://example.org/foo]",
"tests/test_utils.py::test_Url_str[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_str[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_str[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_Url_ascii_url[https://example.org/foo]",
"tests/test_utils.py::test_Url_ascii_host[https://example.org/foo]",
"tests/test_utils.py::test_Url_ascii_host[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_ascii_host[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_ascii_host[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_Url_scheme[https://example.org/foo]",
"tests/test_utils.py::test_Url_scheme[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_scheme[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_scheme[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_Url_host[https://example.org/foo]",
"tests/test_utils.py::test_Url_host[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_host[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_host[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_Url_port[https://example.org/foo]",
"tests/test_utils.py::test_Url_port[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_port[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_port[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_Url_path[https://example.org/foo]",
"tests/test_utils.py::test_Url_path[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_path[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_path[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_Url_query[https://example.org/foo]",
"tests/test_utils.py::test_Url_query[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_query[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_anchor[https://example.org/foo]",
"tests/test_utils.py::test_Url_anchor[https://example.org:8001/f%C3%BC]",
"tests/test_utils.py::test_Url_anchor[https://xn--wgv71a119e.idn.icann.org/%E5%A4%A7]",
"tests/test_utils.py::test_Url_anchor[/?q=sch%C3%B6n#gru%C3%9F]",
"tests/test_utils.py::test_secure_url[https://user:[email protected]/p-https://[email protected]/p]",
"tests/test_utils.py::test_secure_url[https://user:[email protected]:8000-https://[email protected]:8000]",
"tests/test_utils.py::test_secure_url[https://[email protected]/b-https://[email protected]/b]",
"tests/test_utils.py::test_url_builder",
"tests/test_utils.py::test_parse_path",
"tests/test_utils.py::test_make_relative_url[/-./a/-a/]",
"tests/test_utils.py::test_make_relative_url[/-./a-a]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2015/11/a/-/fr/blog/2015/11/a/a.jpg-a.jpg]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2015/11/a/-/fr/blog/-../../../]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2015/11/a.php-/fr/blog/-../../]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2015/11/a/-/fr/blog/2016/-../../../2016/]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2015/11/a/-/fr/blog/2016/c.jpg-../../../2016/c.jpg]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2016/-/fr/blog/2015/a/-../2015/a/]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2016/-/fr/blog/2015/a/d.jpg-../2015/a/d.jpg]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/2015/11/a/-/images/b.svg-../../../../../images/b.svg]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/-2015/11/-2015/11/]",
"tests/test_utils.py::test_make_relative_url[/fr/blog/x-2015/11/-2015/11/]",
"tests/test_utils.py::test_make_relative_url[-./a/-a/]",
"tests/test_utils.py::test_make_relative_url[-./a-a]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2015/11/a/-fr/blog/2015/11/a/a.jpg-a.jpg]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2015/11/a/-fr/blog/-../../../]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2015/11/a.php-fr/blog/-../../]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2015/11/a/-fr/blog/2016/-../../../2016/]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2015/11/a/-fr/blog/2016/c.jpg-../../../2016/c.jpg]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2016/-fr/blog/2015/a/-../2015/a/]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2016/-fr/blog/2015/a/d.jpg-../2015/a/d.jpg]",
"tests/test_utils.py::test_make_relative_url[fr/blog/2015/11/a/-images/b.svg-../../../../../images/b.svg]",
"tests/test_utils.py::test_make_relative_url[fr/blog/-2015/11/-../../2015/11/]",
"tests/test_utils.py::test_make_relative_url[fr/blog/x-2015/11/-../../2015/11/]",
"tests/test_utils.py::test_make_relative_url_relative_source_absolute_target"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-04 20:00:11+00:00
|
bsd-3-clause
| 3,539 |
|
lektor__lektor-960
|
diff --git a/lektor/db.py b/lektor/db.py
index 6cfcea9..bae4b54 100644
--- a/lektor/db.py
+++ b/lektor/db.py
@@ -6,7 +6,6 @@ import hashlib
import operator
import os
import posixpath
-import warnings
from collections import OrderedDict
from datetime import timedelta
from itertools import islice
@@ -832,21 +831,9 @@ class Image(Attachment):
return rv
return Undefined("The format of the image could not be determined.")
- def thumbnail(
- self, width=None, height=None, crop=None, mode=None, upscale=None, quality=None
- ):
+ def thumbnail(self, width=None, height=None, mode=None, upscale=None, quality=None):
"""Utility to create thumbnails."""
- # `crop` exists to preserve backward-compatibility, and will be removed.
- if crop is not None and mode is not None:
- raise ValueError("Arguments `crop` and `mode` are mutually exclusive.")
-
- if crop is not None:
- warnings.warn(
- 'The `crop` argument is deprecated. Use `mode="crop"` instead.'
- )
- mode = "crop"
-
if mode is None:
mode = ThumbnailMode.DEFAULT
else:
diff --git a/lektor/imagetools.py b/lektor/imagetools.py
index fcfcc1a..0418aa5 100644
--- a/lektor/imagetools.py
+++ b/lektor/imagetools.py
@@ -593,16 +593,6 @@ def make_image_thumbnail(
would_upscale = computed_width > source_width or computed_height > source_height
- # this part needs to be removed once backward-compatibility period passes
- if would_upscale and upscale is None:
- warnings.warn(
- "Your image is being scaled up since the requested thumbnail "
- "size is larger than the source. This default will change "
- "in the future. If you want to preserve the current behaviour, "
- "use `upscale=True`."
- )
- upscale = True
-
if would_upscale and not upscale:
return Thumbnail(source_url_path, source_width, source_height)
|
lektor/lektor
|
c7419cf6f6a986740da680fa158055cc83a5befe
|
diff --git a/tests/test_images.py b/tests/test_images.py
index 8609864..ae13a53 100644
--- a/tests/test_images.py
+++ b/tests/test_images.py
@@ -329,12 +329,11 @@ class Test_make_image_thumbnail:
assert rv.url_path != source_url_path
assert len(ctx.sub_artifacts) == 1
- def test_implicit_upscale(self, ctx, test_jpg, source_url_path):
- with pytest.warns(UserWarning, match=r"image is being scaled up"):
- rv = make_image_thumbnail(ctx, test_jpg, source_url_path, 512)
- assert (rv.width, rv.height) == (512, 683)
- assert rv.url_path == "[email protected]"
- assert len(ctx.sub_artifacts) == 1
+ def test_no_implicit_upscale(self, ctx, test_jpg, source_url_path):
+ rv = make_image_thumbnail(ctx, test_jpg, source_url_path, 512)
+ assert (rv.width, rv.height) == (384, 512)
+ assert rv.url_path == "test.jpg"
+ assert len(ctx.sub_artifacts) == 0
@pytest.mark.parametrize("mode", iter(ThumbnailMode))
def test_upscale_false(self, ctx, test_jpg, source_url_path, mode):
|
Disable deprecated implicit thumbnail upscaling?
Implicit thumbnail upscaling, as well as the `crop` argument to [`Image.thumbnail`](https://github.com/lektor/lektor/blob/f135d73a11ac557bff4f1068958e71b6b526ee63/lektor/db.py#L818), has been [deprecated](https://github.com/lektor/lektor/blob/f135d73a11ac557bff4f1068958e71b6b526ee63/lektor/imagetools.py#L598) since release 3.2.0.
This issue is here to make sure we don't forget to disable those features when the time is right.
(This issue should probably be attached to the [4.0 Milestone](https://github.com/lektor/lektor/milestone/5), but I don't think that I have sufficient privileges to do so.)
Refs: #551, #885
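With the deprecation shim removed, the remaining behaviour is just the guard left in `make_image_thumbnail`: if the computed size would upscale and `upscale` wasn't explicitly requested, the source is served as-is. A rough sketch of that decision, with a simplified signature (the real function also derives the missing dimension from the aspect ratio):

```python
def decide_thumbnail(source_w, source_h, target_w, target_h, upscale=None):
    would_upscale = target_w > source_w or target_h > source_h
    if would_upscale and not upscale:
        # No sub-artifact is created; the source dimensions are kept.
        return (source_w, source_h)
    return (target_w, target_h)

# Matches the updated test: a 384x512 source asked for width 512 stays 384x512.
assert decide_thumbnail(384, 512, 512, 683) == (384, 512)
```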
|
0.0
|
c7419cf6f6a986740da680fa158055cc83a5befe
|
[
"tests/test_images.py::Test_make_image_thumbnail::test_no_implicit_upscale"
] |
[
"tests/test_images.py::test_exif",
"tests/test_images.py::test_image_attributes",
"tests/test_images.py::test_is_rotated",
"tests/test_images.py::test_image_info_svg_declaration",
"tests/test_images.py::test_image_info_svg_length",
"tests/test_images.py::test_thumbnail_dimensions_reported",
"tests/test_images.py::test_dimensions",
"tests/test_images.py::Test_make_image_thumbnail::test_no_width_or_height",
"tests/test_images.py::Test_make_image_thumbnail::test_warn_fallback_to_fit",
"tests/test_images.py::Test_make_image_thumbnail::test_unknown_image_format",
"tests/test_images.py::Test_make_image_thumbnail::test_svg",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[50-None-ThumbnailMode.FIT-None-expect0]",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[None-128-ThumbnailMode.FIT-None-expect1]",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[50-50-ThumbnailMode.CROP-None-expect2]",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[50-50-ThumbnailMode.STRETCH-None-expect3]",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[512-None-ThumbnailMode.FIT-True-expect4]",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[512-512-ThumbnailMode.CROP-None-expect5]",
"tests/test_images.py::Test_make_image_thumbnail::test_scale[512-512-ThumbnailMode.STRETCH-None-expect6]",
"tests/test_images.py::Test_make_image_thumbnail::test_upscale_false[ThumbnailMode.FIT]",
"tests/test_images.py::Test_make_image_thumbnail::test_upscale_false[ThumbnailMode.CROP]",
"tests/test_images.py::Test_make_image_thumbnail::test_upscale_false[ThumbnailMode.STRETCH]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-13 02:02:31+00:00
|
bsd-3-clause
| 3,540 |
|
lemon24__reader-244
|
diff --git a/CHANGES.rst b/CHANGES.rst
index e46cddc..cb3f951 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -11,6 +11,12 @@ Version 1.20
Unreleased
+* Added public property :attr:`~Reader.after_entry_update_hooks` to
+ :class:`Reader`, which contains list of hooks to run for each new/modified
+ entry after :meth:`~Reader.update_feeds()` call. Hooks will receive
+ :class:`Reader` instance, :class:`~types.Entry`-like object instance and
+ :class:`~types.EntryUpdateStatus` value
+
Version 1.19
------------
diff --git a/docs/api.rst b/docs/api.rst
index 8393dcb..7d757f9 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -61,6 +61,9 @@ Data objects
.. autoclass:: UpdatedFeed
:members:
+.. autoclass:: EntryUpdateStatus
+ :members:
+
Exceptions
----------
diff --git a/src/reader/__init__.py b/src/reader/__init__.py
index 3f8fdaf..690acde 100644
--- a/src/reader/__init__.py
+++ b/src/reader/__init__.py
@@ -54,6 +54,7 @@ from .types import (
EntrySearchCounts,
UpdateResult,
UpdatedFeed,
+ EntryUpdateStatus,
)
from .exceptions import (
diff --git a/src/reader/core.py b/src/reader/core.py
index cf4bb34..da0e64d 100644
--- a/src/reader/core.py
+++ b/src/reader/core.py
@@ -52,6 +52,7 @@ from .types import EntryInput
from .types import EntrySearchCounts
from .types import EntrySearchResult
from .types import EntrySortOrder
+from .types import EntryUpdateStatus
from .types import Feed
from .types import FeedCounts
from .types import FeedInput
@@ -73,7 +74,12 @@ _U = TypeVar('_U')
ReaderPluginType = Callable[['Reader'], None]
_PostFeedUpdatePluginType = Callable[['Reader', str], None]
-_PostEntryAddPluginType = Callable[['Reader', EntryData[datetime]], None]
+#: Function that will be called for each new/modified entry
+#:
+#: ..versionadded:: 1.20
+AfterEntryUpdateHook = Callable[
+ ['Reader', EntryData[datetime], EntryUpdateStatus], None
+]
def make_reader(
@@ -265,6 +271,9 @@ class Reader:
.. versionadded:: 1.13
JSON Feed support.
+ .. versionadded:: 1.20
+ after_entry_update_hooks public attribute
+
"""
def __init__(
@@ -286,7 +295,22 @@ class Reader:
self._updater = reader._updater
self._post_feed_update_plugins: Collection[_PostFeedUpdatePluginType] = []
- self._post_entry_add_plugins: Collection[_PostEntryAddPluginType] = []
+
+ #: List of functions that will be called for each new/modified entry after
+ #: feed was updated. Each function should take three arguments and return None.
+ #:
+ #: * ``reader`` - a :class:`Reader` instance
+ #: * ``entry`` - an :class:`~types.Entry`-like object
+ #:
+ #: .. warning::
+ #:
+ #: The only attributes that are guaranteed to be present are ``feed_url``, ``id``
+ #: and ``object_id``; all other attributes may appear or disappear between minor
+ #: versions, or may be None.
+ #: * ``status`` - an :class:`~types.EntryUpdateStatus` value
+ #:
+ #: ..versionadded:: 1.20
+ self.after_entry_update_hooks: Collection[AfterEntryUpdateHook] = []
if _called_directly:
warnings.warn(
@@ -919,12 +943,12 @@ class Reader:
for entry in entries_to_update:
if entry.new:
new_count += 1
+ entry_status = EntryUpdateStatus.NEW
else:
updated_count += 1
- if not entry.new:
- continue
- for entry_plugin in self._post_entry_add_plugins:
- entry_plugin(self, entry.entry)
+ entry_status = EntryUpdateStatus.MODIFIED
+ for entry_hook in self.after_entry_update_hooks:
+ entry_hook(self, entry.entry, entry_status)
return new_count, updated_count
diff --git a/src/reader/plugins/entry_dedupe.py b/src/reader/plugins/entry_dedupe.py
index 972401b..91eddb5 100644
--- a/src/reader/plugins/entry_dedupe.py
+++ b/src/reader/plugins/entry_dedupe.py
@@ -74,6 +74,8 @@ Entry user attributes are set as follows:
import logging
import re
+from reader.types import EntryUpdateStatus
+
log = logging.getLogger('reader._plugins.feed_entry_dedupe')
@@ -111,7 +113,10 @@ def _is_duplicate(one, two):
return same_title and same_text
-def _entry_dedupe_plugin(reader, entry):
+def _entry_dedupe_plugin(reader, entry, status):
+ if status is EntryUpdateStatus.MODIFIED:
+ return
+
duplicates = [
e
for e in reader.get_entries(feed=entry.feed_url)
@@ -150,4 +155,4 @@ def _entry_dedupe_plugin(reader, entry):
def init_reader(reader):
- reader._post_entry_add_plugins.append(_entry_dedupe_plugin)
+ reader.after_entry_update_hooks.append(_entry_dedupe_plugin)
diff --git a/src/reader/plugins/mark_as_read.py b/src/reader/plugins/mark_as_read.py
index 8f696a7..3b30bf7 100644
--- a/src/reader/plugins/mark_as_read.py
+++ b/src/reader/plugins/mark_as_read.py
@@ -29,6 +29,7 @@ import logging
import re
from reader.exceptions import MetadataNotFoundError
+from reader.types import EntryUpdateStatus
# avoid circular imports
@@ -52,7 +53,10 @@ def _get_config(reader, feed_url, metadata_key, patterns_key):
return []
-def _mark_as_read(reader, entry):
+def _mark_as_read(reader, entry, status):
+ if status is EntryUpdateStatus.MODIFIED:
+ return
+
metadata_name = reader.make_reader_reserved_name('mark_as_read')
patterns = _get_config(reader, entry.feed_url, metadata_name, 'title')
@@ -63,4 +67,4 @@ def _mark_as_read(reader, entry):
def init_reader(reader):
- reader._post_entry_add_plugins.append(_mark_as_read)
+ reader.after_entry_update_hooks.append(_mark_as_read)
diff --git a/src/reader/types.py b/src/reader/types.py
index c3a1f5a..d5c90d5 100644
--- a/src/reader/types.py
+++ b/src/reader/types.py
@@ -1,4 +1,5 @@
import dataclasses
+import enum
import re
import traceback
import warnings
@@ -480,6 +481,19 @@ class EntrySearchResult(_namedtuple_compat):
return self.feed_url, self.id
+class EntryUpdateStatus(enum.Enum):
+
+ """Data type representing status of entry.
+ Used by :py:attr:`~Reader.after_entry_update_hooks`.
+
+ .. versionadded:: 1.20
+
+ """
+
+ NEW = 'new'
+ MODIFIED = 'modified'
+
+
# Semi-public API (typing support)
|
lemon24/reader
|
7a004969d0ba39aacf794bbe85c47b3f7198f26a
|
diff --git a/tests/test_plugins_entry_dedupe.py b/tests/test_plugins_entry_dedupe.py
index 11cba18..b11b5b2 100644
--- a/tests/test_plugins_entry_dedupe.py
+++ b/tests/test_plugins_entry_dedupe.py
@@ -92,6 +92,9 @@ def test_plugin(make_reader):
important_one = parser.entry(
1, 5, datetime(2010, 1, 1), title='important', summary='also important'
)
+ modified_one = parser.entry(
+ 1, 6, datetime(2010, 1, 1), title='title', summary='will be modified'
+ )
# TODO just use the feeds/entries as arguments
@@ -110,6 +113,9 @@ def test_plugin(make_reader):
important_two = parser.entry(
1, 15, datetime(2010, 1, 2), title='important', summary='also important'
)
+ modified_two = parser.entry(
+ 1, 6, datetime(2010, 1, 1), title='title', summary='was modified'
+ )
reader.update_feeds()
@@ -128,6 +134,8 @@ def test_plugin(make_reader):
# the old one is marked as read in favor of the new one
(unread_one.id, True),
(unread_two.id, False),
+ # modified entry is ignored by plugin
+ (modified_one.id, False),
}
} | {
# the new one is important because the old one was;
diff --git a/tests/test_plugins_mark_as_read.py b/tests/test_plugins_mark_as_read.py
index f8f55d7..cc68e62 100644
--- a/tests/test_plugins_mark_as_read.py
+++ b/tests/test_plugins_mark_as_read.py
@@ -48,10 +48,14 @@ def test_regex_mark_as_read_bad_metadata(make_reader, value):
one = parser.feed(1, datetime(2010, 1, 1))
parser.entry(1, 1, datetime(2010, 1, 1), title='match')
+ parser.entry(1, 2, datetime(2010, 1, 1), title='will be modified')
reader.add_feed(one)
reader.set_feed_metadata_item(one, '.reader.mark_as_read', value)
reader.update_feeds()
- assert [e.read for e in reader.get_entries()] == [False]
+ parser.entry(1, 2, datetime(2010, 1, 1), title='modified')
+ reader.update_feeds()
+
+ assert [e.read for e in reader.get_entries()] == [False, False]
diff --git a/tests/test_reader_hooks.py b/tests/test_reader_hooks.py
new file mode 100644
index 0000000..645f903
--- /dev/null
+++ b/tests/test_reader_hooks.py
@@ -0,0 +1,45 @@
+from datetime import datetime
+
+from fakeparser import Parser
+
+from reader.types import EntryUpdateStatus
+
+
+def test_post_entry_update_hooks(reader):
+ parser = Parser()
+ reader._parser = parser
+
+ plugin_calls = []
+
+ def first_plugin(r, e, s):
+ assert r is reader
+ plugin_calls.append((first_plugin, e, s))
+
+ def second_plugin(r, e, s):
+ assert r is reader
+ plugin_calls.append((second_plugin, e, s))
+
+ feed = parser.feed(1, datetime(2010, 1, 1))
+ one = parser.entry(1, 1, datetime(2010, 1, 1))
+ reader.add_feed(feed.url)
+ reader.after_entry_update_hooks.append(first_plugin)
+ reader.update_feeds()
+ assert plugin_calls == [(first_plugin, one, EntryUpdateStatus.NEW)]
+ assert set(e.id for e in reader.get_entries()) == {'1, 1'}
+
+ plugin_calls[:] = []
+
+ feed = parser.feed(1, datetime(2010, 1, 2))
+ one = parser.entry(1, 1, datetime(2010, 1, 2))
+ two = parser.entry(1, 2, datetime(2010, 1, 2))
+ reader.after_entry_update_hooks.append(second_plugin)
+ reader.update_feeds()
+ assert plugin_calls == [
+ (first_plugin, two, EntryUpdateStatus.NEW),
+ (second_plugin, two, EntryUpdateStatus.NEW),
+ (first_plugin, one, EntryUpdateStatus.MODIFIED),
+ (second_plugin, one, EntryUpdateStatus.MODIFIED),
+ ]
+ assert set(e.id for e in reader.get_entries()) == {'1, 1', '1, 2'}
+
+ # TODO: What is the expected behavior if a plugin raises an exception?
diff --git a/tests/test_reader_private.py b/tests/test_reader_private.py
index 545eb5e..fc3f6ac 100644
--- a/tests/test_reader_private.py
+++ b/tests/test_reader_private.py
@@ -95,44 +95,6 @@ def test_update_parse(reader, call_update_method):
assert parser.calls == [(feed.url, 'etag', 'last-modified')]
-def test_post_entry_add_plugins(reader):
- parser = Parser()
- reader._parser = parser
-
- plugin_calls = []
-
- def first_plugin(r, e):
- assert r is reader
- plugin_calls.append((first_plugin, e))
-
- def second_plugin(r, e):
- assert r is reader
- plugin_calls.append((second_plugin, e))
-
- feed = parser.feed(1, datetime(2010, 1, 1))
- one = parser.entry(1, 1, datetime(2010, 1, 1))
- reader.add_feed(feed.url)
- reader._post_entry_add_plugins.append(first_plugin)
- reader.update_feeds()
- assert plugin_calls == [(first_plugin, one)]
- assert set(e.id for e in reader.get_entries()) == {'1, 1'}
-
- plugin_calls[:] = []
-
- feed = parser.feed(1, datetime(2010, 1, 2))
- one = parser.entry(1, 1, datetime(2010, 1, 2))
- two = parser.entry(1, 2, datetime(2010, 1, 2))
- reader._post_entry_add_plugins.append(second_plugin)
- reader.update_feeds()
- assert plugin_calls == [
- (first_plugin, two),
- (second_plugin, two),
- ]
- assert set(e.id for e in reader.get_entries()) == {'1, 1', '1, 2'}
-
- # TODO: What is the expected behavior if a plugin raises an exception?
-
-
def test_post_feed_update_plugins(reader):
parser = Parser()
reader._parser = parser
|
[Question] Public API for recently fetched entries, or entries added/modified since date?
For an app I'm working on, I wish to update some feeds and do something with the entries that appeared. But I'm unsure how to implement this last part and get only the new entries.
Workflow I sketched so far is:
1. Disable updates for all feeds in the db (because there might be some I don't wish to update)
2. Start adding feeds to the db
2.1. If a feed already exists, enable it for updates
2.2. If a feed doesn't exist, it will be added and enabled for updates automatically
3. Update feeds through `update_feeds` or `update_feeds_iter`
4. ???
`update_feeds` doesn't return anything. `update_feeds_iter` gives me back a list of `UpdateResult`, where each item will have a url and either counts or an exception.
So, I think I can sum all the counts and ask for that many entries. Something like:
```python
count = sum(
result.value.new + result.value.updated
for result in results
if not isinstance(result.value, ReaderError)
)
new_entries = reader.get_entries(limit=count)
```
But is it guaranteed that `get_entries(sort='recent')` will include a recently updated entry? Even if that entry originally appeared a long time ago? I might be misunderstanding what it means for an entry to be marked as "updated", so any pointer on that would be helpful, too.
Perhaps I could change my workflow a little - first get a monotonic timestamp, then run all the steps, and finally ask for all entries that were added or modified after that timestamp. But it seems that there is no API for searching by date? `search_entries` is designed for full-text search and works only on a few columns.
So, my question is:
1. What is the preferred way of obtaining all entries added in an update call?
Counting and using `get_entries`? Calling `get_entries` for everything and discarding all results that were added / modified before the timestamp? Something else?
2. What does it mean that an entry was "updated"?
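For reference, here is a minimal sketch of a hook-based alternative, assuming the `after_entry_update_hooks` API exercised in the tests above (the database path is illustrative):
```python
from reader import make_reader
from reader.types import EntryUpdateStatus

reader = make_reader("db.sqlite")  # illustrative path

new_entries = []

def collect(r, entry, status):
    # hooks receive (reader, entry, status), per the tests above
    if status is EntryUpdateStatus.NEW:
        new_entries.append(entry)

reader.after_entry_update_hooks.append(collect)
reader.update_feeds()
# new_entries now holds exactly the entries this update call added
```
This would sidestep the counting question entirely: the hook fires once per entry, with a status distinguishing new entries from modified ones.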
|
0.0
|
7a004969d0ba39aacf794bbe85c47b3f7198f26a
|
[
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one7-two7-True]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one9-two9-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one3-two3-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one5-two5-False]",
"tests/test_plugins_entry_dedupe.py::test_normalize",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one10-two10-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one4-two4-True]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one11-two11-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one8-two8-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one2-two2-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one1-two1-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one0-two0-False]",
"tests/test_plugins_entry_dedupe.py::test_is_duplicate[one6-two6-False]"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-19 23:39:13+00:00
|
bsd-3-clause
| 3,541 |
|
lenskit__lkpy-293
|
diff --git a/lenskit/topn.py b/lenskit/topn.py
index 5c0dc13..cccd928 100644
--- a/lenskit/topn.py
+++ b/lenskit/topn.py
@@ -148,9 +148,21 @@ class RecListAnalysis:
ug_cols = [c for c in rec_key if c not in truth_key]
tcount = truth.groupby(truth_key)['item'].count()
tcount.name = 'ntruth'
+ _log.debug('truth data:\n%s', tcount)
if ug_cols:
_log.debug('regrouping by %s to fill', ug_cols)
- res = res.groupby(ug_cols).apply(lambda f: f.join(tcount, how='outer', on=truth_key))
+ _log.debug('pre-group series:\n%s', res)
+
+ rdict = {}
+
+ for key, df in res.groupby(ug_cols):
+ df2 = df.drop(columns=ug_cols).join(tcount, how='outer', on=truth_key)
+ rdict[key] = df2
+
+ res = pd.concat(rdict, names=ug_cols)
+ res = res.reset_index()
+ _log.debug('joined result:\n%s', res)
+
else:
_log.debug('no ungroup cols, directly merging to fill')
res = res.join(tcount, how='outer', on=truth_key)
|
lenskit/lkpy
|
b28a14f98529e1fd9d167ba1267edcedf55007d9
|
diff --git a/tests/test_topn_analysis.py b/tests/test_topn_analysis.py
index 64549ee..cfe2ee7 100644
--- a/tests/test_topn_analysis.py
+++ b/tests/test_topn_analysis.py
@@ -197,6 +197,7 @@ def test_fill_users():
algo.fit(train)
rec_users = test['user'].sample(50).unique()
+ assert len(rec_users) < 50
recs = batch.recommend(algo, rec_users, 25)
scores = rla.compute(recs, test, include_missing=True)
@@ -252,6 +253,10 @@ def test_adv_fill_users():
assert scores['recall'].isna().sum() > 0
_log.info('scores:\n%s', scores)
+ ucounts = scores.reset_index().groupby('algo')['user'].agg(['count', 'nunique'])
+ assert all(ucounts['count'] == 100)
+ assert all(ucounts['nunique'] == 100)
+
mscores = rla.compute(recs, test)
mscores = mscores.reset_index().set_index(inames)
mscores.sort_index(inplace=True)
|
`include_missing` fails in a few cases
I have seen `include_missing` fail for ML20M user-user in the LK demo experiment. Still debugging why.
|
0.0
|
b28a14f98529e1fd9d167ba1267edcedf55007d9
|
[
"tests/test_topn_analysis.py::test_adv_fill_users"
] |
[
"tests/test_topn_analysis.py::test_split_keys",
"tests/test_topn_analysis.py::test_split_keys_gcol",
"tests/test_topn_analysis.py::test_run_one",
"tests/test_topn_analysis.py::test_run_two",
"tests/test_topn_analysis.py::test_inner_format",
"tests/test_topn_analysis.py::test_spec_group_cols",
"tests/test_topn_analysis.py::test_java_equiv",
"tests/test_topn_analysis.py::test_fill_users",
"tests/test_topn_analysis.py::test_pr_bulk_match[False]",
"tests/test_topn_analysis.py::test_pr_bulk_match[True]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-22 03:32:08+00:00
|
mit
| 3,542 |
|
lenskit__lkpy-312
|
diff --git a/lenskit/metrics/topn.py b/lenskit/metrics/topn.py
index ae1b70f..7f616c5 100644
--- a/lenskit/metrics/topn.py
+++ b/lenskit/metrics/topn.py
@@ -300,11 +300,11 @@ def ndcg(recs, truth, discount=np.log2, k=None):
The maximum list length.
"""
- tpos = truth.index.get_indexer(recs['item'])
-
if k is not None:
recs = recs.iloc[:k]
+ tpos = truth.index.get_indexer(recs['item'])
+
if 'rating' in truth.columns:
i_rates = np.sort(truth.rating.values)[::-1]
if k is not None:
|
lenskit/lkpy
|
0716300a91e55b54e3da150c5fa8355af79fa745
|
diff --git a/tests/test_topn_ndcg.py b/tests/test_topn_ndcg.py
index 5d1601d..2fdf9e5 100644
--- a/tests/test_topn_ndcg.py
+++ b/tests/test_topn_ndcg.py
@@ -87,6 +87,14 @@ def test_ndcg_perfect():
assert ndcg(recs, truth) == approx(1.0)
+def test_ndcg_perfect_k_short():
+ recs = pd.DataFrame({'item': [2, 3, 1]})
+ truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
+ truth = truth.set_index('item')
+ assert ndcg(recs, truth, k=2) == approx(1.0)
+ assert ndcg(recs[:2], truth, k=2) == approx(1.0)
+
+
def test_ndcg_wrong():
recs = pd.DataFrame({'item': [1, 2]})
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
|
nDCG metric does not correctly truncate lists
The nDCG metric has two faults whereby it fails to correctly truncate the truth list and recommendation list under certain conditions:
1. If no k is supplied, the full truth list and recommendation list are used to compute the ideal DCG and recommendation DCG, respectively. If the sizes of the input lists do not match, this causes an incorrect result. See the following example, based on "test_ndcg_wrong" in tests/test_topn_ndcg.py:
```python
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
recs = pd.DataFrame({'item': [1, 2]})
print(ndcg(recs, truth)) # incorrectly gives ~0.7344
print(ndcg(recs, truth, k=2)) # correctly gives ~0.8888
```
2. If a k is supplied that is less than the length of the recommendation list, the recommendation DCG is not computed correctly (due to "tpos" being defined before the recommendation list is truncated). See the following example, based on "test_ndcg_perfect" found in tests/test_topn_ndcg.py:
```python
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
recs = pd.DataFrame({'item': [2, 3, 1]})
print(ndcg(recs, truth)) # correctly gives 1.0
print(ndcg(recs, truth, k=2)) # incorrectly gives ~1.21
print(ndcg(recs[:2], truth, k=2)) # correctly gives 1.0
```
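For what it's worth, the ~0.8888 value in case 1 can be reproduced by hand; a minimal sketch, assuming lenskit's log2 discount clamped to a minimum of 1:
```python
import numpy as np

def dcg(rates):
    ranks = np.arange(1, len(rates) + 1)
    disc = np.maximum(np.log2(ranks), 1)  # first two positions undiscounted
    return np.sum(rates / disc)

rec_rates = np.array([3.0, 5.0])    # ratings of the recommended items [1, 2]
ideal_rates = np.array([5.0, 4.0])  # top-2 ratings from the truth list
print(dcg(rec_rates) / dcg(ideal_rates))  # 0.888..., the k=2 value above
```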
|
0.0
|
0716300a91e55b54e3da150c5fa8355af79fa745
|
[
"tests/test_topn_ndcg.py::test_ndcg_perfect_k_short"
] |
[
"tests/test_topn_ndcg.py::test_dcg_empty",
"tests/test_topn_ndcg.py::test_dcg_zeros",
"tests/test_topn_ndcg.py::test_dcg_single",
"tests/test_topn_ndcg.py::test_dcg_mult",
"tests/test_topn_ndcg.py::test_dcg_empty2",
"tests/test_topn_ndcg.py::test_dcg_zeros2",
"tests/test_topn_ndcg.py::test_dcg_single2",
"tests/test_topn_ndcg.py::test_dcg_nan",
"tests/test_topn_ndcg.py::test_dcg_series",
"tests/test_topn_ndcg.py::test_dcg_mult2",
"tests/test_topn_ndcg.py::test_ndcg_empty",
"tests/test_topn_ndcg.py::test_ndcg_no_match",
"tests/test_topn_ndcg.py::test_ndcg_perfect",
"tests/test_topn_ndcg.py::test_ndcg_wrong",
"tests/test_topn_ndcg.py::test_ndcg_perfect_k",
"tests/test_topn_ndcg.py::test_ndcg_bulk_at_top",
"tests/test_topn_ndcg.py::test_ndcg_bulk_not_at_top",
"tests/test_topn_ndcg.py::test_ndcg_bulk_match[False]",
"tests/test_topn_ndcg.py::test_ndcg_bulk_match[True]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-03-11 19:46:49+00:00
|
mit
| 3,543 |
|
leonardbinet__lighttree-12
|
diff --git a/lighttree/tree.py b/lighttree/tree.py
index 7e6bff4..15e0401 100644
--- a/lighttree/tree.py
+++ b/lighttree/tree.py
@@ -360,8 +360,7 @@ class Tree(object):
):
"""Python generator traversing the tree (or a subtree) with optional node filtering.
- Loosely based on an algorithm from 'Essential LISP' by John R. Anderson,
- Albert T. Corbett, and Brian J. Reiser, page 239-241, and inspired from treelib implementation.
+ Inspired by treelib implementation https://github.com/caesar0301/treelib/blob/master/treelib/tree.py#L374
:param nid: Node identifier from which tree traversal will start. If None tree root will be used
:param mode: Traversal mode, may be either "depth" or "width"
@@ -369,6 +368,7 @@ class Tree(object):
:param filter_through: if True, excluded nodes don't exclude their children.
:param reverse: the ``reverse`` param for sorting :class:`Node` objects in the same level
:param key: key used to order nodes of same parent
+ :param id_only: if True node ids will be yielded, else nodes themselves
:return: node ids that satisfy the conditions if ``id_only`` is True, else nodes.
:rtype: generator
"""
@@ -430,10 +430,11 @@ class Tree(object):
"""
output = ""
- for depth, prefix, node in self._line_repr_iter(
- nid, filter_, key, reverse, line_type
+ for is_last_list, node in self._iter_nodes_with_location(
+ nid, filter_, key, reverse
):
- node_repr = node.line_repr(depth=depth, **kwargs)
+ prefix = self._prefix_repr(line_type, is_last_list)
+ node_repr = node.line_repr(depth=len(is_last_list), **kwargs)
output += "%s%s\n" % (prefix, node_repr)
if limit is not None:
limit -= 1
@@ -444,9 +445,15 @@ class Tree(object):
return output
return output
- def _line_repr_iter(
- self, nid, filter_, key, reverse, line_type, depth=0, is_last_list=None
- ):
+ def _iter_nodes_with_location(self, nid, filter_, key, reverse, is_last_list=None):
+ """Yield nodes with information on how they are placed.
+ :param nid: starting node identifier
+ :param filter_: filter function applied on nodes
+ :param key: key used to order nodes of same parent
+ :param reverse: reverse parameter applied at sorting
+ :param is_last_list: list of booleans, each indicating if node is the last yielded one at this depth
+ :return: tuple of booleans, node
+ """
is_last_list = is_last_list or []
key = attrgetter("identifier") if key is None else key
@@ -454,8 +461,7 @@ class Tree(object):
if nid is not None:
node = self.get(nid)
if filter_ is None or filter_(node):
- prefix = self._prefix_repr(line_type, is_last_list)
- yield depth, prefix, node
+ yield tuple(is_last_list), node
children = [
child_node
for child_node in self.children(nid, id_only=False)
@@ -465,14 +471,8 @@ class Tree(object):
children.sort(key=key, reverse=reverse)
for idx, child in enumerate(children):
is_last_list.append(idx == idxlast)
- for item in self._line_repr_iter(
- child.identifier,
- filter_,
- key,
- reverse,
- line_type,
- depth + 1,
- is_last_list,
+ for item in self._iter_nodes_with_location(
+ child.identifier, filter_, key, reverse, is_last_list,
):
yield item
is_last_list.pop()
|
leonardbinet/lighttree
|
ba5d5157441cab95a85dd944839524c27ff235c7
|
diff --git a/tests/test_tree.py b/tests/test_tree.py
index ff2e1aa..e1ec137 100644
--- a/tests/test_tree.py
+++ b/tests/test_tree.py
@@ -455,8 +455,27 @@ class TreeCase(TestCase):
["a12", "a2"],
)
- def test_show(self):
+ def test_iter_nodes_with_location(self):
t = get_sample_tree()
+
+ # full
+ self.assertEqual(
+ list(
+ t._iter_nodes_with_location(
+ nid=None, filter_=None, key=None, reverse=False
+ )
+ ),
+ [
+ ((), t.get("root")),
+ ((False,), t.get("a")),
+ ((False, False), t.get("a1")),
+ ((False, False, False), t.get("a11")),
+ ((False, False, True), t.get("a12")),
+ ((False, True), t.get("a2")),
+ ((True,), t.get("b")),
+ ((True, True), t.get("b1")),
+ ],
+ )
self.assertEqual(
t.show(),
"""root
@@ -470,16 +489,24 @@ class TreeCase(TestCase):
""",
)
+ # subtree
self.assertEqual(
- t.show("a"),
- """a
-├── a1
-│ ├── a11
-│ └── a12
-└── a2
+ list(
+ t._iter_nodes_with_location(
+ nid="a1", filter_=None, key=None, reverse=False
+ )
+ ),
+ [((), t.get("a1")), ((False,), t.get("a11")), ((True,), t.get("a12"))],
+ )
+ self.assertEqual(
+ t.show("a1"),
+ """a1
+├── a11
+└── a12
""",
)
+ def test_show(self):
t = get_sample_custom_tree()
self.assertEqual(
t.show(),
|
In `_line_repr_iter`, distinguish line formatting from tree structure
This will allow more possibilities for displaying the tree.
Notably, it could allow the tree to be rendered as HTML, for nice formatting in Jupyter notebooks.
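For example, a rough sketch (not part of the library) of an HTML renderer built on `_iter_nodes_with_location`, where indentation comes from the yielded depth rather than from line prefixes:
```python
def to_html(tree):
    lines = []
    for is_last_list, node in tree._iter_nodes_with_location(
        nid=None, filter_=None, key=None, reverse=False
    ):
        # depth is simply the length of the is_last_list tuple
        depth = len(is_last_list)
        lines.append(
            '<div style="margin-left: %dem">%s</div>' % (depth, node.identifier)
        )
    return "\n".join(lines)
```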
|
0.0
|
ba5d5157441cab95a85dd944839524c27ff235c7
|
[
"tests/test_tree.py::TreeCase::test_iter_nodes_with_location"
] |
[
"tests/test_tree.py::TreeCase::test_ancestors",
"tests/test_tree.py::TreeCase::test_children",
"tests/test_tree.py::TreeCase::test_clone_with_subtree",
"tests/test_tree.py::TreeCase::test_clone_with_tree",
"tests/test_tree.py::TreeCase::test_contains",
"tests/test_tree.py::TreeCase::test_depth",
"tests/test_tree.py::TreeCase::test_drop_node",
"tests/test_tree.py::TreeCase::test_drop_subtree",
"tests/test_tree.py::TreeCase::test_empty_clone",
"tests/test_tree.py::TreeCase::test_ensure_present",
"tests/test_tree.py::TreeCase::test_expand_tree",
"tests/test_tree.py::TreeCase::test_get",
"tests/test_tree.py::TreeCase::test_insert_node_above",
"tests/test_tree.py::TreeCase::test_insert_node_below",
"tests/test_tree.py::TreeCase::test_insert_root",
"tests/test_tree.py::TreeCase::test_insert_tree_above",
"tests/test_tree.py::TreeCase::test_insert_tree_at_root",
"tests/test_tree.py::TreeCase::test_insert_tree_below",
"tests/test_tree.py::TreeCase::test_is_empty",
"tests/test_tree.py::TreeCase::test_is_leaf",
"tests/test_tree.py::TreeCase::test_leaves",
"tests/test_tree.py::TreeCase::test_list",
"tests/test_tree.py::TreeCase::test_merge",
"tests/test_tree.py::TreeCase::test_parent",
"tests/test_tree.py::TreeCase::test_prefix_repr",
"tests/test_tree.py::TreeCase::test_serialize",
"tests/test_tree.py::TreeCase::test_show",
"tests/test_tree.py::TreeCase::test_siblings",
"tests/test_tree.py::TreeCase::test_validate_node_insertion"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-17 17:08:07+00:00
|
mit
| 3,544 |
|
leonardt__ast_tools-80
|
diff --git a/ast_tools/passes/ssa.py b/ast_tools/passes/ssa.py
index 01d90fa..b37a642 100644
--- a/ast_tools/passes/ssa.py
+++ b/ast_tools/passes/ssa.py
@@ -394,9 +394,7 @@ class SSATransformer(NodeTrackingTransformer):
# Need to visit params to get them to be rebuilt and therfore
# tracked to build the symbol table
- self._skip += 1
update_params = updated_node.params.visit(self)
- self._skip -= 1
assert not self._skip
assert not self._assigned_names, self._assigned_names
new_body = updated_node.body.visit(self)
@@ -531,6 +529,16 @@ class SSATransformer(NodeTrackingTransformer):
def leave_Arg_keyword(self, node: cst.Arg):
self._skip -= 1
+ def visit_Parameters(self, node: cst.Parameters) -> tp.Optional[bool]:
+ self._skip += 1
+ return True
+
+ def leave_Parameters(self,
+ original_node: cst.Parameters,
+ updated_node: cst.Parameters) -> cst.Parameters:
+ self._skip -= 1
+ return updated_node
+
def leave_Name(self,
original_node: cst.Name,
updated_node: cst.Name) -> cst.Name:
|
leonardt/ast_tools
|
cfa081e3f24bf23dbe84cd50ecea8a320027de75
|
diff --git a/tests/test_ssa.py b/tests/test_ssa.py
index 4c35f7e..56786c4 100644
--- a/tests/test_ssa.py
+++ b/tests/test_ssa.py
@@ -36,6 +36,7 @@ def _do_ssa(func, strict, **kwargs):
func = dec(func)
return func
+
@pytest.mark.parametrize('strict', [True, False])
@pytest.mark.parametrize('a', template_options)
@pytest.mark.parametrize('b', template_options)
@@ -457,3 +458,20 @@ def test_call_in_annotations(strict, x, y):
f1 = exec_def_in_file(tree, env)
f2 = apply_passes([ssa(strict)])(f1)
+
[email protected]('strict', [True, False])
+def test_issue_79(strict):
+ class Wrapper:
+ def __init__(self, val):
+ self.val = val
+ def apply(self, f):
+ return f(self.val)
+
+ def f1(x):
+ return x.apply(lambda x: x+1)
+
+ f2 = apply_passes([ssa(strict)])(f1)
+
+ for _ in range(8):
+ x = Wrapper(random.randint(0, 1<<10))
+ assert f1(x) == f2(x)
|
Regression in fault test suite
I'm not sure how important this is, but with the latest release the following example, taken from fault, doesn't work anymore with the SSA pass:
```python
import magma as m
import fault
class MWrapperMeta(m.MagmaProtocolMeta):
def __getitem__(cls, T):
assert cls is MWrapper
return type(cls)(f'MWrapper[{T}]', (cls,), {'_T_': T})
def _to_magma_(cls):
return cls._T_
def _qualify_magma_(cls, d):
return MWrapper[cls._T_.qualify(d)]
def _flip_magma_(cls):
return MWrapper[cls._T_.flip()]
def _from_magma_value_(cls, value):
return cls(value)
class MWrapper(m.MagmaProtocol, metaclass=MWrapperMeta):
def __init__(self, val):
if not isinstance(val, type(self)._T_):
raise TypeError()
self._value_ = val
def _get_magma_value_(self):
return self._value_
def apply(self, f):
return f(self._value_)
WrappedBits8 = MWrapper[m.UInt[8]]
@m.sequential2()
class Foo:
def __call__(self, val: WrappedBits8) -> m.UInt[8]:
return val.apply(lambda x: x + 1)
def test_proto():
tester = fault.Tester(Foo)
tester.circuit.val = 1
tester.eval()
tester.circuit.O.expect(2)
tester.compile_and_run("verilator", flags=['-Wno-unused'])
```
It produces this error:
```python
../ast_tools/ast_tools/passes/ssa.py:514: in leave_Assign
assert not self._assigned_names, (to_module(original_node).code, self._assigned_names)
E AssertionError: ('__0_return_0 = val.apply(lambda x: x + 1)', ['x_0'])
```
Upon investigation, it seems the SSA pass treats the `lambda x: x + 1` expression as containing an assignment (perhaps because the `x` in the lambda's parameter list is treated as a "store"). Adding support for lambdas (not sure whether this worked before or was just never exercised) seems doable: we need to treat the parameter as a store, but remove the assigned names after processing the lambda, so that the new name is used for the parameters while anything coming from the outer scope keeps the correct SSA names. Not sure if this is something we need though, so just documenting it here.
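A minimal sketch of the skip-counter approach the patch above takes, using a toy rename so the effect is visible (the `_0` suffix is illustrative, not the real SSA naming):
```python
import libcst as cst

class SkipParams(cst.CSTTransformer):
    """Toy transformer: rename every Name except those inside Parameters."""

    def __init__(self):
        super().__init__()
        self._skip = 0

    def visit_Parameters(self, node):
        self._skip += 1

    def leave_Parameters(self, original_node, updated_node):
        self._skip -= 1
        return updated_node

    def leave_Name(self, original_node, updated_node):
        if self._skip:  # inside a (lambda) parameter list: leave it alone
            return updated_node
        return updated_node.with_changes(value=updated_node.value + "_0")

# parameter x is untouched; names in the body still get rewritten
print(cst.parse_module("f = lambda x: x + y").visit(SkipParams()).code)
# -> f_0 = lambda x: x_0 + y_0
```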
CC @cdonovick
|
0.0
|
cfa081e3f24bf23dbe84cd50ecea8a320027de75
|
[
"tests/test_ssa.py::test_issue_79[True]",
"tests/test_ssa.py::test_issue_79[False]"
] |
[
"tests/test_ssa.py::test_basic_if[r",
"tests/test_ssa.py::test_basic_if[return-r",
"tests/test_ssa.py::test_basic_if[return-return-True]",
"tests/test_ssa.py::test_basic_if[return-return-False]",
"tests/test_ssa.py::test_nested[r",
"tests/test_ssa.py::test_nested[return-r",
"tests/test_ssa.py::test_nested[return-return-r",
"tests/test_ssa.py::test_nested[return-return-return-r",
"tests/test_ssa.py::test_nested[return-return-return-return-True]",
"tests/test_ssa.py::test_nested[return-return-return-return-False]",
"tests/test_ssa.py::test_imbalanced[r",
"tests/test_ssa.py::test_imbalanced[return-r",
"tests/test_ssa.py::test_imbalanced[return-return-r",
"tests/test_ssa.py::test_imbalanced[return-return-0-r",
"tests/test_ssa.py::test_imbalanced[return-return-0-0-True]",
"tests/test_ssa.py::test_imbalanced[return-return-0-0-False]",
"tests/test_ssa.py::test_reassign_arg",
"tests/test_ssa.py::test_double_nested_function_call",
"tests/test_ssa.py::test_attrs_basic[True]",
"tests/test_ssa.py::test_attrs_basic[False]",
"tests/test_ssa.py::test_attrs_returns[True]",
"tests/test_ssa.py::test_attrs_returns[False]",
"tests/test_ssa.py::test_attrs_class[True]",
"tests/test_ssa.py::test_attrs_class[False]",
"tests/test_ssa.py::test_attrs_class_methods[True]",
"tests/test_ssa.py::test_attrs_class_methods[False]",
"tests/test_ssa.py::test_nstrict",
"tests/test_ssa.py::test_attr",
"tests/test_ssa.py::test_call",
"tests/test_ssa.py::test_call_in_annotations[--True]",
"tests/test_ssa.py::test_call_in_annotations[--False]",
"tests/test_ssa.py::test_call_in_annotations[-int-True]",
"tests/test_ssa.py::test_call_in_annotations[-int-False]",
"tests/test_ssa.py::test_call_in_annotations[-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[-ident(x=int)-False]",
"tests/test_ssa.py::test_call_in_annotations[int--True]",
"tests/test_ssa.py::test_call_in_annotations[int--False]",
"tests/test_ssa.py::test_call_in_annotations[int-int-True]",
"tests/test_ssa.py::test_call_in_annotations[int-int-False]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(x=int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)--True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)--False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-int-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-int-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(x=int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)--True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)--False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-int-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-int-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(x=int)-False]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-06-22 01:07:05+00:00
|
apache-2.0
| 3,545 |
|
leonardt__hwtypes-117
|
diff --git a/hwtypes/adt_meta.py b/hwtypes/adt_meta.py
index cb052e7..b5caf38 100644
--- a/hwtypes/adt_meta.py
+++ b/hwtypes/adt_meta.py
@@ -1,6 +1,6 @@
+from abc import ABCMeta, abstractmethod
import itertools as it
import typing as tp
-from abc import ABCMeta, abstractmethod
import weakref
from types import MappingProxyType
@@ -53,15 +53,27 @@ RESERVED_SUNDERS = frozenset({
'_fields_',
'_field_table_',
'_unbound_base_',
+ '_syntax_',
})
RESERVED_ATTRS = frozenset(RESERVED_NAMES | RESERVED_SUNDERS)
+class Syntax(type):
+ def __subclasscheck__(cls, sub):
+ return super().__subclasscheck__(getattr(sub, '_syntax_', type(None)))
+
+ def __instancecheck__(cls, instance):
+ return cls.__subclasscheck__(type(instance))
+
+
+class AttrSyntax(metaclass=Syntax): pass
+class GetitemSyntax(metaclass=Syntax): pass
# Can't have abstract metaclass https://bugs.python.org/issue36881
-class GetitemSyntax: #(metaclass=ABCMeta):
+class _GetitemSyntax(type): #(metaclass=ABCMeta):
# Should probaly do more than it currently does but it gets the job done
ORDERED = False
+ _syntax_ = GetitemSyntax
def __getitem__(cls, idx):
if not isinstance(idx, tp.Iterable):
@@ -78,10 +90,12 @@ class GetitemSyntax: #(metaclass=ABCMeta):
def _from_idx(cls, idx):
pass
-class AttrSyntax(type): #, metaclass=ABCMeta):
+
+class _AttrSyntax(type): #, metaclass=ABCMeta):
# Tells AttrSyntax which attrs are fields
FIELDS_T = type
ORDERED = False
+ _syntax_ = AttrSyntax
def __new__(mcs, name, bases, namespace, cache=False, **kwargs):
fields = {}
@@ -160,7 +174,8 @@ class AttrSyntax(type): #, metaclass=ABCMeta):
def _from_fields(mcs, fields, name, bases, ns, **kwargs):
pass
-class BoundMeta(GetitemSyntax, type): #, metaclass=ABCMeta):
+
+class BoundMeta(_GetitemSyntax): #, metaclass=ABCMeta):
# for legacy reasons
ORDERED = True
@@ -284,6 +299,7 @@ class BoundMeta(GetitemSyntax, type): #, metaclass=ABCMeta):
def is_cached(cls):
return getattr(cls, '_cached_', False)
+
class TupleMeta(BoundMeta):
ORDERED = True
@@ -309,7 +325,7 @@ class TupleMeta(BoundMeta):
return MappingProxyType({idx : field for idx, field in enumerate(cls.fields)})
-class ProductMeta(AttrSyntax, TupleMeta):
+class ProductMeta(_AttrSyntax, TupleMeta):
FIELDS_T = type
ORDERED = True
@@ -437,7 +453,7 @@ class SumMeta(BoundMeta):
return MappingProxyType({field : field for field in cls.fields})
-class TaggedUnionMeta(AttrSyntax, SumMeta):
+class TaggedUnionMeta(_AttrSyntax, SumMeta):
FIELDS_T = type
ORDERED = False
@@ -495,12 +511,11 @@ class TaggedUnionMeta(AttrSyntax, SumMeta):
yield cls(**{tag: field()})
-class EnumMeta(AttrSyntax, BoundMeta):
+class EnumMeta(_AttrSyntax, BoundMeta):
class Auto:
def __repr__(self):
return 'Auto()'
-
FIELDS_T = int, Auto
ORDERED = False
@@ -514,23 +529,23 @@ class EnumMeta(AttrSyntax, BoundMeta):
else:
raise TypeError('Can only inherit from one enum type')
- t = super().__new__(mcs, name, bases, ns, **kwargs)
+ cls = super().__new__(mcs, name, bases, ns, **kwargs)
name_table = dict()
for name, value in fields.items():
- elem = t.__new__(t)
- t.__init__(elem, value)
+ elem = cls.__new__(cls)
+ cls.__init__(elem, value)
setattr(elem, '_name_', name)
name_table[name] = elem
- setattr(t, name, elem)
+ setattr(cls, name, elem)
- t._fields_ = tuple(name_table.values())
- t._field_table_ = name_table
+ cls._fields_ = tuple(name_table.values())
+ cls._field_table_ = name_table
if enum_base is not None:
- t._unbound_base_ = enum_base
+ cls._unbound_base_ = enum_base
- return t
+ return cls
def __call__(cls, elem):
if not isinstance(elem, cls):
|
leonardt/hwtypes
|
150884892c54013665eb14df706a1bc53e935a4c
|
diff --git a/tests/test_adt.py b/tests/test_adt.py
index 03c0a8b..0472b15 100644
--- a/tests/test_adt.py
+++ b/tests/test_adt.py
@@ -1,8 +1,10 @@
import pytest
from hwtypes.adt import Product, Sum, Enum, Tuple, TaggedUnion
-from hwtypes.adt_meta import RESERVED_ATTRS, ReservedNameError
+from hwtypes.adt_meta import RESERVED_ATTRS, ReservedNameError, AttrSyntax, GetitemSyntax
from hwtypes.modifiers import new
from hwtypes.adt_util import rebind_bitvector
+from hwtypes import BitVector, AbstractBitVector, Bit, AbstractBit
+
class En1(Enum):
a = 0
@@ -386,3 +388,18 @@ def test_unbound_t(t, base):
def test_deprecated(val):
with pytest.warns(DeprecationWarning):
val.value
+
+def test_adt_syntax():
+ # En1, Pr, Su, Tu, Ta
+ for T in (En1, Pr, Ta):
+ assert isinstance(T, AttrSyntax)
+ assert not isinstance(T, GetitemSyntax)
+
+ for T in (Su, Tu):
+ assert not isinstance(T, AttrSyntax)
+ assert isinstance(T, GetitemSyntax)
+
+ for T in (str, Bit, BitVector[4], AbstractBit, AbstractBitVector[4], int):
+ assert not isinstance(T, AttrSyntax)
+ assert not isinstance(T, GetitemSyntax)
+
|
Syntaxes are not working correctly with isinstance()
See the failing test `test_adt_syntax` in tests/test_adt.py on the 'adt_meta' branch.
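A condensed sketch of the underlying problem, assuming the pre-patch metaclass layout from the diff above: because the metaclasses inherit from one another, a Product class registers as an instance of both syntaxes:
```python
class GetitemSyntax(type): pass
class AttrSyntax(type): pass

class BoundMeta(GetitemSyntax): pass
class TupleMeta(BoundMeta): pass
class ProductMeta(AttrSyntax, TupleMeta): pass  # needs TupleMeta's machinery

Pr = ProductMeta("Pr", (), {})
print(isinstance(Pr, AttrSyntax))     # True, as desired
print(isinstance(Pr, GetitemSyntax))  # also True -- the unwanted answer
```
The patch fixes this by making the public syntax classes markers whose metaclass checks a `_syntax_` attribute instead of relying on metaclass inheritance.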
|
0.0
|
150884892c54013665eb14df706a1bc53e935a4c
|
[
"tests/test_adt.py::test_repr[Sum[En1,",
"tests/test_adt.py::test_reserved[_syntax_-T_field0]",
"tests/test_adt.py::test_reserved[_syntax_-T_field1]",
"tests/test_adt.py::test_reserved[_syntax_-T_field2]",
"tests/test_adt.py::test_unbound_t[Sum[En1,",
"tests/test_adt.py::test_adt_syntax"
] |
[
"tests/test_adt.py::test_enum",
"tests/test_adt.py::test_tuple",
"tests/test_adt.py::test_product",
"tests/test_adt.py::test_product_from_fields",
"tests/test_adt.py::test_product_caching",
"tests/test_adt.py::test_sum",
"tests/test_adt.py::test_tagged_union",
"tests/test_adt.py::test_tagged_union_from_fields",
"tests/test_adt.py::test_tagged_union_caching",
"tests/test_adt.py::test_new",
"tests/test_adt.py::test_repr[En1]",
"tests/test_adt.py::test_repr[Tuple[En1,",
"tests/test_adt.py::test_repr[Pr]",
"tests/test_adt.py::test_repr[Ta]",
"tests/test_adt.py::test_reserved[enumerate-T_field0]",
"tests/test_adt.py::test_reserved[enumerate-T_field1]",
"tests/test_adt.py::test_reserved[enumerate-T_field2]",
"tests/test_adt.py::test_reserved[fields-T_field0]",
"tests/test_adt.py::test_reserved[fields-T_field1]",
"tests/test_adt.py::test_reserved[fields-T_field2]",
"tests/test_adt.py::test_reserved[_fields_-T_field0]",
"tests/test_adt.py::test_reserved[_fields_-T_field1]",
"tests/test_adt.py::test_reserved[_fields_-T_field2]",
"tests/test_adt.py::test_reserved[_value_-T_field0]",
"tests/test_adt.py::test_reserved[_value_-T_field1]",
"tests/test_adt.py::test_reserved[_value_-T_field2]",
"tests/test_adt.py::test_reserved[_unbound_base_-T_field0]",
"tests/test_adt.py::test_reserved[_unbound_base_-T_field1]",
"tests/test_adt.py::test_reserved[_unbound_base_-T_field2]",
"tests/test_adt.py::test_reserved[is_bound-T_field0]",
"tests/test_adt.py::test_reserved[is_bound-T_field1]",
"tests/test_adt.py::test_reserved[is_bound-T_field2]",
"tests/test_adt.py::test_reserved[_tag_-T_field0]",
"tests/test_adt.py::test_reserved[_tag_-T_field1]",
"tests/test_adt.py::test_reserved[_tag_-T_field2]",
"tests/test_adt.py::test_reserved[is_cached-T_field0]",
"tests/test_adt.py::test_reserved[is_cached-T_field1]",
"tests/test_adt.py::test_reserved[is_cached-T_field2]",
"tests/test_adt.py::test_reserved[_field_table_-T_field0]",
"tests/test_adt.py::test_reserved[_field_table_-T_field1]",
"tests/test_adt.py::test_reserved[_field_table_-T_field2]",
"tests/test_adt.py::test_reserved[field_dict-T_field0]",
"tests/test_adt.py::test_reserved[field_dict-T_field1]",
"tests/test_adt.py::test_reserved[field_dict-T_field2]",
"tests/test_adt.py::test_reserved[_cached_-T_field0]",
"tests/test_adt.py::test_reserved[_cached_-T_field1]",
"tests/test_adt.py::test_reserved[_cached_-T_field2]",
"tests/test_adt.py::test_reserved[value_dict-T_field0]",
"tests/test_adt.py::test_reserved[value_dict-T_field1]",
"tests/test_adt.py::test_reserved[value_dict-T_field2]",
"tests/test_adt.py::test_unbound_t[En1-Enum]",
"tests/test_adt.py::test_unbound_t[Pr-Product]",
"tests/test_adt.py::test_unbound_t[Tuple[En1,",
"tests/test_adt.py::test_unbound_t[Ta-TaggedUnion]",
"tests/test_adt.py::test_deprecated[val0]",
"tests/test_adt.py::test_deprecated[val1]",
"tests/test_adt.py::test_deprecated[val2]",
"tests/test_adt.py::test_deprecated[val3]",
"tests/test_adt.py::test_deprecated[val4]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-16 22:03:12+00:00
|
bsd-3-clause
| 3,546 |
|
leonardt__hwtypes-148
|
diff --git a/hwtypes/__init__.py b/hwtypes/__init__.py
index 6858443..ea48e32 100644
--- a/hwtypes/__init__.py
+++ b/hwtypes/__init__.py
@@ -7,3 +7,4 @@ from .fp_vector_abc import *
from .fp_vector import *
from .smt_fp_vector import *
from .modifiers import *
+from .smt_int import *
diff --git a/hwtypes/smt_bit_vector.py b/hwtypes/smt_bit_vector.py
index 75d515c..0aa8c4a 100644
--- a/hwtypes/smt_bit_vector.py
+++ b/hwtypes/smt_bit_vector.py
@@ -643,11 +643,15 @@ class SMTBitVector(AbstractBitVector):
# def __int__(self):
# return self.as_uint()
#
-# def as_uint(self):
-# return self._value.bv_unsigned_value()
-#
-# def as_sint(self):
-# return self._value.bv_signed_value()
+ def as_uint(self):
+ #Avoids circular import
+ from . import SMTInt
+ return SMTInt(self)
+
+ def as_sint(self):
+ #Avoids circular import
+ from . import SMTInt
+ return SMTInt(self[:-1]) - SMTInt(self & (1<<(self.size-1)))
#
# @classmethod
# def random(cls, width):
diff --git a/hwtypes/smt_int.py b/hwtypes/smt_int.py
new file mode 100644
index 0000000..e4eeb16
--- /dev/null
+++ b/hwtypes/smt_int.py
@@ -0,0 +1,148 @@
+import itertools as it
+import functools as ft
+from .smt_bit_vector import SMTBit, SMTBitVector
+
+import pysmt
+import pysmt.shortcuts as smt
+from pysmt.typing import INT
+
+from collections import defaultdict
+import re
+import warnings
+import weakref
+
+__ALL__ = ['SMTInt']
+
+_var_counters = defaultdict(it.count)
+_name_table = weakref.WeakValueDictionary()
+
+def _gen_name(prefix='V'):
+ name = f'{prefix}_{next(_var_counters[prefix])}'
+ while name in _name_table:
+ name = f'{prefix}_{next(_var_counters[prefix])}'
+ return name
+
+_name_re = re.compile(r'V_\d+')
+
+class _SMYBOLIC:
+ def __repr__(self):
+ return 'SYMBOLIC'
+
+class _AUTOMATIC:
+ def __repr__(self):
+ return 'AUTOMATIC'
+
+SMYBOLIC = _SMYBOLIC()
+AUTOMATIC = _AUTOMATIC()
+
+def int_cast(fn):
+ @ft.wraps(fn)
+ def wrapped(self, other):
+ if isinstance(other, SMTInt):
+ return fn(self, other)
+ else:
+ try:
+ other = SMTInt(other)
+ except TypeError:
+ return NotImplemented
+ return fn(self, other)
+ return wrapped
+
+class SMTInt:
+ def __init__(self, value=SMYBOLIC, *, name=AUTOMATIC, prefix=AUTOMATIC):
+ if (name is not AUTOMATIC or prefix is not AUTOMATIC) and value is not SMYBOLIC:
+ raise TypeError('Can only name symbolic variables')
+ elif name is not AUTOMATIC and prefix is not AUTOMATIC:
+ raise ValueError('Can only set either name or prefix not both')
+ elif name is not AUTOMATIC:
+ if not isinstance(name, str):
+ raise TypeError('Name must be string')
+ elif name in _name_table:
+ raise ValueError(f'Name {name} already in use')
+ elif _name_re.fullmatch(name):
+ warnings.warn('Name looks like an auto generated name, this might break things')
+ _name_table[name] = self
+ elif prefix is not AUTOMATIC:
+ name = _gen_name(prefix)
+ _name_table[name] = self
+ elif name is AUTOMATIC and value is SMYBOLIC:
+ name = _gen_name()
+ _name_table[name] = self
+
+ if value is SMYBOLIC:
+ self._value = smt.Symbol(name, INT)
+ elif isinstance(value, pysmt.fnode.FNode):
+ if value.get_type().is_int_type():
+ self._value = value
+ elif value.get_type().is_bv_type():
+ self._value = smt.BVToNatural(value)
+ else:
+ raise TypeError(f'Expected int type not {value.get_type()}')
+ elif isinstance(value, SMTInt):
+ self._value = value._value
+ elif isinstance(value, SMTBitVector):
+ self._value = smt.BVToNatural(value.value)
+ elif isinstance(value, bool):
+ self._value = smt.Int(int(value))
+ elif isinstance(value, int):
+ self._value = smt.Int(value)
+ elif hasattr(value, '__int__'):
+ self._value = smt.Int(int(value))
+ else:
+ raise TypeError("Can't coerce {} to Int".format(type(value)))
+
+ self._name = name
+ self._value = smt.simplify(self._value)
+
+ def __repr__(self):
+ if self._name is not AUTOMATIC:
+ return f'{type(self)}({self._name})'
+ else:
+ return f'{type(self)}({self._value})'
+
+ @property
+ def value(self):
+ return self._value
+
+ def __neg__(self):
+ return SMTInt(0) - self
+
+ @int_cast
+ def __sub__(self, other: 'SMTInt') -> 'SMTInt':
+ return SMTInt(self.value - other.value)
+
+ @int_cast
+ def __add__(self, other: 'SMTInt') -> 'SMTInt':
+ return SMTInt(self.value + other.value)
+
+ @int_cast
+ def __mul__(self, other: 'SMTInt') -> 'SMTInt':
+ return SMTInt(self.value * other.value)
+
+ @int_cast
+ def __floordiv__(self, other: 'SMTInt') -> 'SMTInt':
+ return SMTInt(smt.Div(self.value, other.value))
+
+ @int_cast
+ def __ge__(self, other: 'SMTInt') -> SMTBit:
+ return SMTBit(self.value >= other.value)
+
+ @int_cast
+ def __gt__(self, other: 'SMTInt') -> SMTBit:
+ return SMTBit(self.value > other.value)
+
+ @int_cast
+ def __le__(self, other: 'SMTInt') -> SMTBit:
+ return SMTBit(self.value <= other.value)
+
+ @int_cast
+ def __lt__(self, other: 'SMTInt') -> SMTBit:
+ return SMTBit(self.value < other.value)
+
+ @int_cast
+ def __eq__(self, other: 'SMTInt') -> SMTBit:
+ return SMTBit(smt.Equals(self.value, other.value))
+
+ @int_cast
+ def __ne__(self, other: 'SMTInt') -> SMTBit:
+ return SMTBit(smt.NotEquals(self.value, other.value))
|
leonardt/hwtypes
|
f15b8a6e37d0e5c51c94a15464c714c1b1e6b43d
|
diff --git a/tests/test_smt_int.py b/tests/test_smt_int.py
new file mode 100644
index 0000000..554f83d
--- /dev/null
+++ b/tests/test_smt_int.py
@@ -0,0 +1,89 @@
+from hwtypes import SMTInt, SMTBitVector
+import pysmt.shortcuts as smt
+import pytest
+
+def test_sat_unsat():
+ x = SMTInt(prefix='x')
+ f1 = (x > 0) & (x < 0)
+ logic = None
+ #Test unsat
+ with smt.Solver(logic=logic, name='z3') as solver:
+ solver.add_assertion(f1.value)
+ res = solver.solve()
+ assert not res
+
+ f2 = (x >= 0) & (x*x+x == 30)
+ #test sat
+ with smt.Solver(logic=logic, name='z3') as solver:
+ solver.add_assertion(f2.value)
+ res = solver.solve()
+ assert res
+ x_val = solver.get_value(x.value)
+ assert x_val.constant_value() == 5
+
+bin_ops = dict(
+ sub = lambda x,y: x-y,
+ add = lambda x,y: x+y,
+ mul = lambda x,y: x*y,
+ div = lambda x,y: x//y,
+ lte = lambda x,y: x<=y,
+ lt = lambda x,y: x<y,
+ gte = lambda x,y: x>=y,
+ gt = lambda x,y: x>y,
+ eq = lambda x,y: x != y,
+ neq = lambda x,y: x == y,
+)
+
+unary_ops = dict(
+ neg = lambda x: -x,
+)
+
+import random
+
[email protected]("name, fun", bin_ops.items())
+def test_bin_ops(name, fun):
+ x = SMTInt(prefix='x')
+ y = SMTInt(prefix='y')
+ x_val = random.randint(-100, 100)
+ y_val = random.choice(list(set(x for x in range(-100,100))-set((0,))))
+ f_val = fun(x_val,y_val)
+ f = (fun(x, y) == f_val) & (y != 0)
+ with smt.Solver(name='z3') as solver:
+ solver.add_assertion(f.value)
+ res = solver.solve()
+ assert res
+ x_solved = solver.get_value(x.value).constant_value()
+ y_solved = solver.get_value(y.value).constant_value()
+ assert fun(x_solved, y_solved) == f_val
+
+
[email protected]("name, fun", unary_ops.items())
+def test_unary_ops(name, fun):
+ x = SMTInt(prefix='x')
+ x_val = random.randint(-100, 100)
+ f_val = fun(x_val)
+ f = (fun(x) == f_val)
+ with smt.Solver(name='z3') as solver:
+ solver.add_assertion(f.value)
+ res = solver.solve()
+ assert res
+ x_solved = solver.get_value(x.value).constant_value()
+ assert fun(x_solved) == f_val
+
+def test_init_bv():
+ SMTInt(SMTBitVector[5]())
+ SMTInt(SMTBitVector[5](10))
+ SMTInt(SMTBitVector[5](10).value)
+
+
+def test_as_sint():
+ x = SMTBitVector[5](-5)
+ x_int = x.as_sint()
+ assert isinstance(x_int, SMTInt)
+ assert x_int.value.constant_value() == -5
+
+def test_as_uint():
+ x = SMTBitVector[5](5)
+ x_int = x.as_uint()
+ assert isinstance(x_int, SMTInt)
+ assert x.as_uint().value.constant_value() == 5
\ No newline at end of file
|
SMT Int class [Enhancement]
For building up constraints for synthesis problems, it is useful to have access to an Int class that plays nicely with SMTBit.
Current implementation in branch: smt_int
Semantics defined in: https://smtlib.cs.uiowa.edu/theories-Ints.shtml
It would be nice to include methods to translate between SMTInt and SMTBitVector.
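A usage sketch mirroring `test_sat_unsat` and `test_as_sint` from the test patch above:
```python
import pysmt.shortcuts as smt
from hwtypes import SMTInt, SMTBitVector

x = SMTInt(prefix='x')
f = (x >= 0) & (x * x + x == 30)      # comparisons yield SMTBit
with smt.Solver(name='z3') as solver:
    solver.add_assertion(f.value)
    assert solver.solve()
    print(solver.get_value(x.value))  # 5

# BV <-> Int translation, per as_uint/as_sint in the patch:
print(SMTBitVector[5](-5).as_sint().value)  # -5
```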
|
0.0
|
f15b8a6e37d0e5c51c94a15464c714c1b1e6b43d
|
[
"tests/test_smt_int.py::test_sat_unsat",
"tests/test_smt_int.py::test_bin_ops[sub-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[add-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[mul-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[div-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[lte-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[lt-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[gte-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[gt-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[eq-<lambda>]",
"tests/test_smt_int.py::test_bin_ops[neq-<lambda>]",
"tests/test_smt_int.py::test_unary_ops[neg-<lambda>]",
"tests/test_smt_int.py::test_init_bv",
"tests/test_smt_int.py::test_as_sint",
"tests/test_smt_int.py::test_as_uint"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-22 18:47:29+00:00
|
bsd-3-clause
| 3,547 |
|
leonardt__hwtypes-39
|
diff --git a/hwtypes/__init__.py b/hwtypes/__init__.py
index 7e71d46..eca7ea7 100644
--- a/hwtypes/__init__.py
+++ b/hwtypes/__init__.py
@@ -5,3 +5,4 @@ from .smt_bit_vector import *
from .z3_bit_vector import *
from .fp_vector_abc import *
from .fp_vector import *
+from .modifiers import *
diff --git a/hwtypes/adt.py b/hwtypes/adt.py
index c54f98c..c47a51a 100644
--- a/hwtypes/adt.py
+++ b/hwtypes/adt.py
@@ -2,23 +2,11 @@ from .adt_meta import TupleMeta, ProductMeta, SumMeta, EnumMeta, is_adt_type
from types import MappingProxyType
__all__ = ['Tuple', 'Product', 'Sum', 'Enum']
-__all__ += ['new', 'new_instruction', 'is_adt_type']
+__all__ += ['new_instruction', 'is_adt_type']
#special sentinal value
class _MISSING: pass
-def new(klass, bind=_MISSING, *, name=_MISSING, module=_MISSING):
- class T(klass): pass
- if name is not _MISSING:
- T.__name__ = name
- if module is not _MISSING:
- T.__module__ = module
-
- if bind is not _MISSING:
- return T[bind]
- else:
- return T
-
class Tuple(metaclass=TupleMeta):
def __new__(cls, *value):
if cls.is_bound:
diff --git a/hwtypes/fp_vector.py b/hwtypes/fp_vector.py
index 68d833c..9fd7320 100644
--- a/hwtypes/fp_vector.py
+++ b/hwtypes/fp_vector.py
@@ -1,11 +1,11 @@
-import typing as tp
import functools
-import random
import gmpy2
+import random
+import typing as tp
import warnings
-from .fp_vector_abc import AbstractFPVector, RoundingMode
from .bit_vector import Bit, BitVector, SIntVector
+from .fp_vector_abc import AbstractFPVector, RoundingMode
__ALL__ = ['FPVector']
@@ -42,7 +42,36 @@ def set_context(fn: tp.Callable) -> tp.Callable:
class FPVector(AbstractFPVector):
@set_context
def __init__(self, value):
- value = gmpy2.mpfr(value)
+ # Because for some reason gmpy2.mpfr is a function and not a type
+ if isinstance(value, type(gmpy2.mpfr(0))):
+ #need to specify precision because mpfr will use the input
+ #precision not the context precision when constructing from mpfr
+ value = gmpy2.mpfr(value, self._ctx_.precision)
+ elif isinstance(value, FPVector):
+ value = gmpy2.mpfr(value._value, self._ctx_.precision)
+ elif isinstance(value, (int, float, type(gmpy2.mpz(0)), type(gmpy2.mpq(0)))):
+ value = gmpy2.mpfr(value)
+ elif isinstance(value, str):
+ try:
+ #Handles '0.5'
+ value = gmpy2.mpfr(value)
+ except ValueError:
+ try:
+ #Handles '1/2'
+ value = gmpy2.mpfr(gmpy2.mpq(value))
+ except ValueError:
+ raise ValueError('Invalid string')
+ elif hasattr(value, '__float__'):
+ value = gmpy2.mpfr(float(value))
+ elif hasattr(value, '__int__'):
+ value = gmpy2.mpfr(int(value))
+ else:
+ try:
+ #if gmpy2 doesn't complain I wont
+ value = gmpy2.mpfr(value)
+ except TypeError:
+ raise TypeError(f"Can't construct FPVector from {type(value)}")
+
if gmpy2.is_nan(value) and not type(self).ieee_compliance:
if gmpy2.is_signed(value):
self._value = gmpy2.mpfr('-inf')
diff --git a/hwtypes/fp_vector_abc.py b/hwtypes/fp_vector_abc.py
index ed4c1df..1e32487 100644
--- a/hwtypes/fp_vector_abc.py
+++ b/hwtypes/fp_vector_abc.py
@@ -2,16 +2,16 @@ from abc import ABCMeta, abstractmethod
import typing as tp
import weakref
import warnings
-from enum import Enum, auto
+import enum
from . import AbstractBitVectorMeta, AbstractBitVector, AbstractBit
-class RoundingMode(Enum):
- RNE = auto() # roundTiesToEven
- RNA = auto() # roundTiesToAway
- RTP = auto() # roundTowardPositive
- RTN = auto() # roundTowardNegative
- RTZ = auto() # roundTowardZero
+class RoundingMode(enum.Enum):
+ RNE = enum.auto() # roundTiesToEven
+ RNA = enum.auto() # roundTiesToAway
+ RTP = enum.auto() # roundTowardPositive
+ RTN = enum.auto() # roundTowardNegative
+ RTZ = enum.auto() # roundTowardZero
class AbstractFPVectorMeta(ABCMeta):
# FPVectorType, (eb, mb, mode, ieee_compliance) : FPVectorType[eb, mb, mode, ieee_compliance]
diff --git a/hwtypes/modifiers.py b/hwtypes/modifiers.py
new file mode 100644
index 0000000..15df6ba
--- /dev/null
+++ b/hwtypes/modifiers.py
@@ -0,0 +1,53 @@
+import types
+import weakref
+
+#special sentinal value
+class _MISSING: pass
+
+def new(klass, bind=_MISSING, *, name=_MISSING, module=_MISSING):
+ class T(klass): pass
+ if name is not _MISSING:
+ T.__name__ = name
+ if module is not _MISSING:
+ T.__module__ = module
+
+ if bind is not _MISSING:
+ return T[bind]
+ else:
+ return T
+
+class _ModifierMeta(type):
+ def __instancecheck__(cls, obj):
+ return type(obj) in cls._sub_classes.values()
+
+ def __subclasscheck__(cls, typ):
+ return typ in cls._sub_classes.values()
+
+ def __call__(cls, *args):
+ if len(args) != 1:
+ return super().__call__(*args)
+ sub = args[0]
+ try:
+ return cls._sub_classes[sub]
+ except KeyError:
+ pass
+
+ mod_sub_name = cls.__name__ + sub.__name__
+ mod_sub = type(mod_sub_name, (sub,), {})
+ return cls._sub_classes.setdefault(sub, mod_sub)
+
+_mod_cache = weakref.WeakValueDictionary()
+# This is a factory for type modifiers.
+def make_modifier(name, cache=False):
+ if cache:
+ try:
+ return _mod_cache[name]
+ except KeyError:
+ pass
+
+ ModType = _ModifierMeta(name, (), {'_sub_classes' : weakref.WeakValueDictionary()})
+
+ if cache:
+ return _mod_cache.setdefault(name, ModType)
+
+ return ModType
diff --git a/setup.py b/setup.py
index 99e7d82..a2c7bc6 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@ setup(
url='https://github.com/leonardt/hwtypes',
author='Leonard Truong',
author_email='[email protected]',
- version='1.0.10',
+ version='1.0.11',
description='Python implementations of fixed size hardware types (Bit, '
'BitVector, UInt, SInt, ...) based on the SMT-LIB2 semantics',
scripts=[],
|
leonardt/hwtypes
|
cc7e418eb3561970bd6ca87779f858528670b67c
|
diff --git a/tests/test_adt.py b/tests/test_adt.py
index ee58324..a14967a 100644
--- a/tests/test_adt.py
+++ b/tests/test_adt.py
@@ -1,5 +1,6 @@
import pytest
-from hwtypes.adt import Product, Sum, Enum, Tuple, new
+from hwtypes.adt import Product, Sum, Enum, Tuple
+from hwtypes.modifiers import new
class En(Enum):
a = 0
@@ -125,7 +126,7 @@ def test_new():
t = new(Sum, (En, Pr))
assert t is not Su
assert Sum[En, Pr] is Su
- assert t.__module__ == 'hwtypes.adt'
+ assert t.__module__ == 'hwtypes.modifiers'
t = new(Sum, (En, Pr), module=__name__)
assert t.__module__ == __name__
diff --git a/tests/test_fp.py b/tests/test_fp.py
index 7cff76d..20628da 100644
--- a/tests/test_fp.py
+++ b/tests/test_fp.py
@@ -127,6 +127,30 @@ def _c_type_vector(T):
NTESTS = 100
[email protected]("mode", [
+ RoundingMode.RNE,
+ RoundingMode.RNA,
+ RoundingMode.RTP,
+ RoundingMode.RTN,
+ RoundingMode.RTZ,
+ ])
[email protected]("ieee", [False, True])
+def test_init(mode, ieee):
+ BigFloat = FPVector[27,100, mode, ieee]
+ class F:
+ def __float__(self):
+ return 0.5
+
+ class I:
+ def __int__(self):
+ return 1
+
+ assert BigFloat(0.5) == BigFloat(F())
+ assert BigFloat(1) == BigFloat(I())
+ assert BigFloat(0.5) == BigFloat('0.5')
+ assert BigFloat('1/3') == BigFloat(1)/BigFloat(3)
+ assert BigFloat('1/3') != BigFloat(1/3) # as 1/3 is performed in python floats
+
@pytest.mark.parametrize("FT", [
FPVector[8, 7, RoundingMode.RNE, True],
FPVector[8, 7, RoundingMode.RNE, False],
diff --git a/tests/test_modifiers.py b/tests/test_modifiers.py
new file mode 100644
index 0000000..e97896f
--- /dev/null
+++ b/tests/test_modifiers.py
@@ -0,0 +1,27 @@
+from hwtypes.modifiers import make_modifier
+from hwtypes import Bit, AbstractBit
+
+
+def test_basic():
+ Global = make_modifier("Global")
+ GlobalBit = Global(Bit)
+
+ assert issubclass(GlobalBit, Bit)
+ assert issubclass(GlobalBit, AbstractBit)
+ assert issubclass(GlobalBit, Global)
+
+ global_bit = GlobalBit(0)
+
+ assert isinstance(global_bit, GlobalBit)
+ assert isinstance(global_bit, Bit)
+ assert isinstance(global_bit, AbstractBit)
+ assert isinstance(global_bit, Global)
+
+def test_cache():
+ G1 = make_modifier("Global", cache=True)
+ G2 = make_modifier("Global", cache=True)
+ G3 = make_modifier("Global")
+
+ assert G1 is G2
+ assert G1 is not G3
+
|
FPVector is missing a "to_fp" method
This would cast a BitVector to floating point.
```python
BFloat16 = FPVector[8,7,...]
assert BFloat16.to_fp(BitVector[16](10)) == BFloat16(10.0)
```
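A hypothetical sketch of what such a method could do; `to_fp` is written here as a free function since the classmethod does not exist yet, and the unsigned interpretation of the BitVector is an assumption:
```python
from hwtypes import BitVector, FPVector
from hwtypes.fp_vector_abc import RoundingMode

BFloat16 = FPVector[8, 7, RoundingMode.RNE, True]

def to_fp(fp_type, bv):
    # interpret the BitVector as an unsigned integer and convert; whether a
    # signed interpretation is also wanted is an open question
    return fp_type(bv.as_uint())

assert to_fp(BFloat16, BitVector[16](10)) == BFloat16(10.0)
```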
|
0.0
|
cc7e418eb3561970bd6ca87779f858528670b67c
|
[
"tests/test_adt.py::test_enum",
"tests/test_adt.py::test_tuple",
"tests/test_adt.py::test_product",
"tests/test_adt.py::test_sum",
"tests/test_adt.py::test_new",
"tests/test_fp.py::test_init[False-RoundingMode.RNE]",
"tests/test_fp.py::test_init[False-RoundingMode.RNA]",
"tests/test_fp.py::test_init[False-RoundingMode.RTP]",
"tests/test_fp.py::test_init[False-RoundingMode.RTN]",
"tests/test_fp.py::test_init[False-RoundingMode.RTZ]",
"tests/test_fp.py::test_init[True-RoundingMode.RNE]",
"tests/test_fp.py::test_init[True-RoundingMode.RNA]",
"tests/test_fp.py::test_init[True-RoundingMode.RTP]",
"tests/test_fp.py::test_init[True-RoundingMode.RTN]",
"tests/test_fp.py::test_init[True-RoundingMode.RTZ]",
"tests/test_fp.py::test_random[False-FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[False-FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_random[False-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[False-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_random[True-FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[True-FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_random[True-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[True-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret[FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret[FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret[FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret[FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_epsilon[vector-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_epsilon[vector-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_modifiers.py::test_basic",
"tests/test_modifiers.py::test_cache"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-22 18:16:31+00:00
|
bsd-3-clause
| 3,548 |
|
leonardt__hwtypes-50
|
diff --git a/hwtypes/bit_vector.py b/hwtypes/bit_vector.py
index 885b2cb..e7f0010 100644
--- a/hwtypes/bit_vector.py
+++ b/hwtypes/bit_vector.py
@@ -175,6 +175,10 @@ class BitVector(AbstractBitVector):
def __repr__(self):
return "BitVector[{size}]({value})".format(value=self._value, size=self.size)
+ @property
+ def value(self):
+ return self._value
+
def __setitem__(self, index, value):
if isinstance(index, slice):
raise NotImplementedError()
@@ -211,10 +215,11 @@ class BitVector(AbstractBitVector):
def __len__(self):
return self.size
- # Note: In concat(x, y), the MSB of the result is the MSB of x.
- @classmethod
- def concat(cls, x, y):
- return cls.unsized_t[x.size+y.size](y.bits() + x.bits())
+ def concat(self, other):
+ T = type(self).unsized_t
+ if not isinstance(other, T):
+            raise TypeError(f'value must be of type {T}')
+ return T[self.size+other.size](self.value | (other.value << self.size))
def bvnot(self):
return type(self)(~self.as_uint())
@@ -246,12 +251,12 @@ class BitVector(AbstractBitVector):
@bv_cast
def bvrol(self, other):
other = other.as_uint() % len(self)
- return self.concat( self[other:], self[0:other] )
+ return self[:other].concat(self[other:])
@bv_cast
def bvror(self, other):
other = (len(self) - other.as_uint()) % len(self)
- return self.concat( self[other:], self[0:other] )
+ return self[:other].concat(self[other:])
@bv_cast
def bvcomp(self, other):
@@ -400,7 +405,7 @@ class BitVector(AbstractBitVector):
raise ValueError()
T = type(self).unsized_t
- return T.concat(T[1](self[-1]).repeat(ext), self)
+ return self.concat(T[1](self[-1]).repeat(ext))
def ext(self, ext):
return self.zext(other)
@@ -411,7 +416,7 @@ class BitVector(AbstractBitVector):
raise ValueError()
T = type(self).unsized_t
- return T.concat(T[ext](0), self)
+ return self.concat(T[ext](0))
@staticmethod
def random(width):
diff --git a/hwtypes/bit_vector_abc.py b/hwtypes/bit_vector_abc.py
index 616ee76..1521849 100644
--- a/hwtypes/bit_vector_abc.py
+++ b/hwtypes/bit_vector_abc.py
@@ -176,10 +176,8 @@ class AbstractBitVector(metaclass=AbstractBitVectorMeta):
def __len__(self) -> int:
pass
- #could still be staticmethod but I think thats annoying
- @classmethod
@abstractmethod
- def concat(cls, x, y) -> 'AbstractBitVector':
+ def concat(self, other) -> 'AbstractBitVector':
pass
@abstractmethod
diff --git a/hwtypes/fp_vector.py b/hwtypes/fp_vector.py
index 9fd7320..3e1f0f0 100644
--- a/hwtypes/fp_vector.py
+++ b/hwtypes/fp_vector.py
@@ -269,15 +269,15 @@ class FPVector(AbstractFPVector):
sign_bit = BitVector[1](gmpy2.is_signed(self._value))
if self.fp_is_zero():
- return BitVector.concat(sign_bit, BitVector[cls.size-1](0))
+ return BitVector[cls.size-1](0).concat(sign_bit)
elif self.fp_is_infinite():
exp_bits = BitVector[cls.exponent_size](-1)
mantissa_bits = BitVector[cls.mantissa_size](0)
- return BitVector.concat(BitVector.concat(sign_bit, exp_bits), mantissa_bits)
+ return mantissa_bits.concat(exp_bits).concat(sign_bit)
elif self.fp_is_NaN():
exp_bits = BitVector[cls.exponent_size](-1)
mantissa_bits = BitVector[cls.mantissa_size](1)
- return BitVector.concat(BitVector.concat(sign_bit, exp_bits), mantissa_bits)
+ return mantissa_bits.concat(exp_bits).concat(sign_bit)
bias = (1 << (cls.exponent_size - 1)) - 1
@@ -307,7 +307,7 @@ class FPVector(AbstractFPVector):
mantissa = BitVector[cls.mantissa_size+1](mantissa_int)
exp_bits = BitVector[cls.exponent_size](exp)
mantissa_bits = mantissa[:cls.mantissa_size]
- return BitVector.concat(BitVector.concat(sign_bit, exp_bits), mantissa_bits)
+ return mantissa_bits.concat(exp_bits).concat(sign_bit)
@classmethod
@set_context
diff --git a/hwtypes/smt_bit_vector.py b/hwtypes/smt_bit_vector.py
index 892dcdc..0b46323 100644
--- a/hwtypes/smt_bit_vector.py
+++ b/hwtypes/smt_bit_vector.py
@@ -231,7 +231,7 @@ class SMTBitVector(AbstractBitVector):
raise ValueError('Iterable is not the correct size')
cls = type(self)
B1 = cls.unsized_t[1]
- self._value = ft.reduce(cls.concat, map(B1, reversed(value))).value
+ self._value = ft.reduce(lambda acc, elem : acc.concat(elem), map(B1, value)).value
elif isinstance(value, int):
self._value = smt.BV(value % (1 << self.size), self.size)
@@ -320,9 +320,11 @@ class SMTBitVector(AbstractBitVector):
def __len__(self):
return self.size
- @classmethod
- def concat(cls, x, y):
- return cls.unsized_t[x.size + y.size](smt.BVConcat(x.value, y.value))
+ def concat(self, other):
+ T = type(self).unsized_t
+ if not isinstance(other, T):
+            raise TypeError(f'value must be of type {T}')
+ return T[self.size + other.size](smt.BVConcat(other.value, self.value))
def bvnot(self):
return type(self)(smt.BVNot(self.value))
diff --git a/hwtypes/z3_bit_vector.py b/hwtypes/z3_bit_vector.py
index c555656..9947eb1 100644
--- a/hwtypes/z3_bit_vector.py
+++ b/hwtypes/z3_bit_vector.py
@@ -229,7 +229,7 @@ class z3BitVector(AbstractBitVector):
raise ValueError('Iterable is not the correct size')
cls = type(self)
B1 = cls.unsized_t[1]
- self._value = ft.reduce(cls.concat, map(B1, reversed(value))).value
+ self._value = ft.reduce(lambda acc, elem : acc.concat(elem), map(B1, value)).value
elif isinstance(value, int):
self._value = z3.BitVecVal(value, self.size)
@@ -318,9 +318,11 @@ class z3BitVector(AbstractBitVector):
def __len__(self):
return self.size
- @classmethod
- def concat(cls, x, y):
- return cls.unsized_t[x.size + y.size](z3.Concat(x.value, y.value))
+ def concat(self, other):
+ T = type(self).unsized_t
+ if not isinstance(other, T):
+            raise TypeError(f'value must be of type {T}')
+ return T[self.size + other.size](z3.Concat(other.value, self.value))
def bvnot(self):
return type(self)(~self.value)
|
leonardt/hwtypes
|
11bf81a76f31f703248e3d5b84a9c631bd618422
|
diff --git a/tests/test_concat.py b/tests/test_concat.py
index 7f9f6af..1a5dfa6 100644
--- a/tests/test_concat.py
+++ b/tests/test_concat.py
@@ -1,8 +1,25 @@
from hwtypes import BitVector
+import random
-def test_concat():
+NTESTS = 10
+MAX_BITS = 128
+
+def test_concat_const():
a = BitVector[4](4)
b = BitVector[4](1)
- c = BitVector.concat(a, b)
- expected = BitVector[8]([1,0,0,0,0,0,1,0])
+ c = a.concat(b)
+ print(a.binary_string())
+ print(c.binary_string())
+ expected = BitVector[8]([0,0,1,0,1,0,0,0])
assert expected == c
+
+def test_concat_random():
+ for _ in range(NTESTS):
+ n1 = random.randint(1, MAX_BITS)
+ n2 = random.randint(1, MAX_BITS)
+ a = BitVector.random(n1)
+ b = BitVector.random(n2)
+ c = a.concat(b)
+ assert c.size == a.size + b.size
+ assert c == BitVector[n1 + n2](a.bits() + b.bits())
+ assert c.binary_string() == b.binary_string() + a.binary_string()
diff --git a/tests/test_fp.py b/tests/test_fp.py
index 20628da..d44a34d 100644
--- a/tests/test_fp.py
+++ b/tests/test_fp.py
@@ -125,7 +125,7 @@ def _c_type_vector(T):
return vector
-NTESTS = 100
+NTESTS = 128
@pytest.mark.parametrize("mode", [
RoundingMode.RNE,
@@ -247,6 +247,37 @@ def test_reinterpret_bv(FT):
assert bv1[:ms] != 0
assert bv2[:ms] != 0
+def test_reinterpret_bv_corner():
+ for _ in range(NTESTS):
+ FT = FPVector[random.randint(3, 16),
+ random.randint(2, 64),
+ random.choice(list(RoundingMode)),
+ True]
+ bv_pinf = BitVector[FT.mantissa_size](0).concat(BitVector[FT.exponent_size](-1)).concat(BitVector[1](0))
+ bv_ninf = BitVector[FT.mantissa_size](0).concat(BitVector[FT.exponent_size](-1)).concat(BitVector[1](1))
+ pinf = FT.reinterpret_from_bv(bv_pinf)
+ ninf = FT.reinterpret_from_bv(bv_ninf)
+ assert pinf.reinterpret_as_bv() == bv_pinf
+ assert ninf.reinterpret_as_bv() == bv_ninf
+ assert pinf.fp_is_positive()
+ assert pinf.fp_is_infinite()
+ assert ninf.fp_is_negative()
+ assert ninf.fp_is_infinite()
+
+ bv_pz = BitVector[FT.size](0)
+ bv_nz = BitVector[FT.size-1](0).concat(BitVector[1](1))
+ pz = FT.reinterpret_from_bv(bv_pz)
+ nz = FT.reinterpret_from_bv(bv_nz)
+ assert pz.reinterpret_as_bv() == bv_pz
+ assert nz.reinterpret_as_bv() == bv_nz
+ assert pz.fp_is_zero()
+ assert nz.fp_is_zero()
+
+ bv_nan = BitVector[FT.mantissa_size](1).concat(BitVector[FT.exponent_size](-1)).concat(BitVector[1](0))
+ nan = FT.reinterpret_from_bv(bv_nan)
+ assert nan.reinterpret_as_bv() == bv_nan
+ assert nan.fp_is_NaN()
+
@pytest.mark.parametrize("CT, FT", [
(_c_type_vector(ctypes.c_float), FPVector[8, 23, RoundingMode.RNE, True]),
(_c_type_vector(ctypes.c_double), FPVector[11, 52, RoundingMode.RNE, True]),])
|
The semantics of hwtypes' BitVector.concat are the opposite of magma's concat:
magma.concat is concat(lsbs, msbs), while hwtypes.BitVector.concat is concat(msbs, lsbs).
We should unify the semantics. I would prefer magma's version, but at the end of the day it does not matter.
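For illustration, a minimal sketch of the unified (magma-style) ordering adopted by the accompanying patch, where `self` supplies the low bits; the values mirror the updated test_concat_const:

```
from hwtypes import BitVector

a = BitVector[4](4)   # 0b0100
b = BitVector[4](1)   # 0b0001

# Post-change semantics: a.concat(b) places `a` in the low bits and `b`
# in the high bits, i.e. the result equals (b.value << a.size) | a.value.
c = a.concat(b)
assert c == BitVector[8](0b00010100)  # == 20
```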
|
0.0
|
11bf81a76f31f703248e3d5b84a9c631bd618422
|
[
"tests/test_concat.py::test_concat_const",
"tests/test_concat.py::test_concat_random",
"tests/test_fp.py::test_reinterpret_bv_corner"
] |
[
"tests/test_fp.py::test_init[False-RoundingMode.RNE]",
"tests/test_fp.py::test_init[False-RoundingMode.RNA]",
"tests/test_fp.py::test_init[False-RoundingMode.RTP]",
"tests/test_fp.py::test_init[False-RoundingMode.RTN]",
"tests/test_fp.py::test_init[False-RoundingMode.RTZ]",
"tests/test_fp.py::test_init[True-RoundingMode.RNE]",
"tests/test_fp.py::test_init[True-RoundingMode.RNA]",
"tests/test_fp.py::test_init[True-RoundingMode.RTP]",
"tests/test_fp.py::test_init[True-RoundingMode.RTN]",
"tests/test_fp.py::test_init[True-RoundingMode.RTZ]",
"tests/test_fp.py::test_random[False-FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[False-FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_random[False-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[False-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_random[True-FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[True-FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_random[True-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_random[True-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret[FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret[FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret[FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret[FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-5.421010862427522e-20-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1.52587890625e-05-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-0.0625-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-1-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-16-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-65536-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[8,23,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_pyrandom[0-18446744073709551616-FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[8,7,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[8,7,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_reinterpret_bv[FPVector[11,52,RoundingMode.RNE,False]]",
"tests/test_fp.py::test_epsilon[vector-FPVector[8,23,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_epsilon[vector-FPVector[11,52,RoundingMode.RNE,True]]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-neg]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-abs]",
"tests/test_fp.py::test_unary_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-<lambda>]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-add]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-sub]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-mul]",
"tests/test_fp.py::test_bin_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-truediv]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-5.421010862427522e-20-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1.52587890625e-05-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-0.0625-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-1-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-16-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-65536-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[8,23,RoundingMode.RNE,True]-ge]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-eq]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-ne]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-lt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-le]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-gt]",
"tests/test_fp.py::test_bool_op[0-18446744073709551616-vector-FPVector[11,52,RoundingMode.RNE,True]-ge]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-10 23:53:24+00:00
|
bsd-3-clause
| 3,549 |
|
leonardt__hwtypes-61
|
diff --git a/hwtypes/adt.py b/hwtypes/adt.py
index 468e5c6..3117965 100644
--- a/hwtypes/adt.py
+++ b/hwtypes/adt.py
@@ -144,5 +144,16 @@ class Enum(metaclass=EnumMeta):
def __hash__(self):
return hash(self.value)
+ def __getattribute__(self, attr):
+ # prevent:
+ # class E(Enum):
+ # a = 0
+ # b = 1
+ # E.a.b == E.b
+ if attr in type(self).field_dict:
+ raise AttributeError('Cannot access enum members from enum instances')
+ else:
+ return super().__getattribute__(attr)
+
def new_instruction():
return EnumMeta.Auto()
diff --git a/hwtypes/adt_meta.py b/hwtypes/adt_meta.py
index 12dfbee..2039666 100644
--- a/hwtypes/adt_meta.py
+++ b/hwtypes/adt_meta.py
@@ -24,6 +24,12 @@ def _is_dunder(name):
and name[2] != '_' and name[-3] != '_')
+def _is_sunder(name):
+ return (len(name) > 2
+ and name[0] == name[-1] == '_'
+ and name[1] != '_' and name[-2] != '_')
+
+
def _is_descriptor(obj):
return hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj, '__delete__')
@@ -32,6 +38,8 @@ def is_adt_type(t):
return isinstance(t, BoundMeta)
+class ReservedNameError(Exception): pass
+
# Can't have abstract metaclass https://bugs.python.org/issue36881
class BoundMeta(type): #, metaclass=ABCMeta):
# (UnboundType, (types...)) : BoundType
@@ -54,7 +62,7 @@ class BoundMeta(type): #, metaclass=ABCMeta):
def __new__(mcs, name, bases, namespace, fields=None, **kwargs):
if '_fields_' in namespace:
- raise TypeError('class attribute _fields_ is reversed by the type machinery')
+ raise ReservedNameError('class attribute _fields_ is reserved by the type machinery')
bound_types = fields
for base in bases:
@@ -145,7 +153,7 @@ class TupleMeta(BoundMeta):
def field_dict(cls):
return MappingProxyType({idx : field for idx, field in enumerate(cls.fields)})
-
+_RESERVED_NAMES = {'enumerate', 'fields', 'field_dict', 'is_bound', 'value', 'value_dict'}
class ProductMeta(TupleMeta):
def __new__(mcs, name, bases, namespace, **kwargs):
fields = {}
@@ -158,7 +166,11 @@ class ProductMeta(TupleMeta):
else:
fields[k] = v
for k, v in namespace.items():
- if isinstance(v, type):
+ if _is_sunder(k) or _is_dunder(k) or _is_descriptor(v):
+ ns[k] = v
+ elif k in _RESERVED_NAMES:
+ raise ReservedNameError(f'Field name {k} is reserved by the type machinery')
+ elif isinstance(v, type):
if k in fields:
raise TypeError(f'Conflicting definitions of field {k}')
else:
@@ -176,7 +188,7 @@ class ProductMeta(TupleMeta):
# not strictly necessary could iterative over class dict finding
# TypedProperty to reconstruct _field_table_ but that seems bad
if '_field_table_' in ns:
- raise TypeError('class attribute _field_table_ is reversed by the type machinery')
+ raise ReservedNameError('class attribute _field_table_ is reserved by the type machinery')
else:
ns['_field_table_'] = dict()
@@ -286,16 +298,18 @@ class EnumMeta(BoundMeta):
def __new__(mcs, cls_name, bases, namespace, **kwargs):
if '_field_table_' in namespace:
- raise TypeError('class attribute _field_table_ is reversed by the type machinery')
+ raise ReservedNameError('class attribute _field_table_ is reserved by the type machinery')
elems = {}
ns = {}
for k, v in namespace.items():
- if isinstance(v, (int, mcs.Auto)):
- elems[k] = v
- elif _is_dunder(k) or _is_descriptor(v):
+ if _is_dunder(k) or _is_sunder(k) or _is_descriptor(v):
ns[k] = v
+ elif k in _RESERVED_NAMES:
+                raise ReservedNameError(f'Field name {k} is reserved by the type machinery')
+ elif isinstance(v, (int, mcs.Auto)):
+ elems[k] = v
else:
raise TypeError(f'Enum value should be int not {type(v)}')
|
leonardt/hwtypes
|
7ea0aaf57d7a5ee9a009f677bf02cd02fcec5732
|
diff --git a/tests/test_adt.py b/tests/test_adt.py
index ec27659..178dfd0 100644
--- a/tests/test_adt.py
+++ b/tests/test_adt.py
@@ -1,5 +1,6 @@
import pytest
from hwtypes.adt import Product, Sum, Enum, Tuple
+from hwtypes.adt_meta import _RESERVED_NAMES, ReservedNameError
from hwtypes.modifiers import new
class En1(Enum):
@@ -31,6 +32,9 @@ def test_enum():
assert isinstance(En1.a, Enum)
assert isinstance(En1.a, En1)
+ with pytest.raises(AttributeError):
+ En1.a.b
+
def test_tuple():
assert set(Tu.enumerate()) == {
Tu(En1.a, En2.c),
@@ -153,3 +157,17 @@ def test_repr(T):
assert isinstance(s, str)
assert s != ''
+
[email protected]("T_field", [(Enum, '0'), (Product, 'int')])
[email protected]("field_name", list(_RESERVED_NAMES))
+def test_reserved(T_field, field_name):
+ T, field = T_field
+ l_dict = {'T' : T}
+ cls_str = f'''
+class _(T):
+ {field_name} = {field}
+'''
+ with pytest.raises(ReservedNameError):
+ exec(cls_str, l_dict)
+
+
|
If `value` is used as a field in a `Product`, a stack overflow occurs:
```
from hwtypes.adt import Product

class A(Product):
    value = int  # shadows the `value` attribute used by the type machinery

print(A(value=5))  # triggers the stack overflow
```
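A sketch of the post-patch behavior, assuming the ReservedNameError introduced in hwtypes/adt_meta.py: the reserved field name is now rejected when the class is created, rather than overflowing the stack later.

```
import pytest
from hwtypes.adt import Product
from hwtypes.adt_meta import ReservedNameError

# `value` is reserved by the type machinery, so the class body fails fast.
with pytest.raises(ReservedNameError):
    class A(Product):
        value = int
```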
|
0.0
|
7ea0aaf57d7a5ee9a009f677bf02cd02fcec5732
|
[
"tests/test_adt.py::test_enum",
"tests/test_adt.py::test_tuple",
"tests/test_adt.py::test_product",
"tests/test_adt.py::test_sum",
"tests/test_adt.py::test_new",
"tests/test_adt.py::test_repr[En1]",
"tests/test_adt.py::test_repr[Tuple[En1,",
"tests/test_adt.py::test_repr[Sum[Pr,",
"tests/test_adt.py::test_repr[Pr]",
"tests/test_adt.py::test_reserved[field_dict-T_field0]",
"tests/test_adt.py::test_reserved[field_dict-T_field1]",
"tests/test_adt.py::test_reserved[value-T_field0]",
"tests/test_adt.py::test_reserved[value-T_field1]",
"tests/test_adt.py::test_reserved[enumerate-T_field0]",
"tests/test_adt.py::test_reserved[enumerate-T_field1]",
"tests/test_adt.py::test_reserved[is_bound-T_field0]",
"tests/test_adt.py::test_reserved[is_bound-T_field1]",
"tests/test_adt.py::test_reserved[value_dict-T_field0]",
"tests/test_adt.py::test_reserved[value_dict-T_field1]",
"tests/test_adt.py::test_reserved[fields-T_field0]",
"tests/test_adt.py::test_reserved[fields-T_field1]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-09 20:16:51+00:00
|
bsd-3-clause
| 3,550 |
|
leonardt__hwtypes-64
|
diff --git a/hwtypes/adt.py b/hwtypes/adt.py
index 64837d8..1be6a4a 100644
--- a/hwtypes/adt.py
+++ b/hwtypes/adt.py
@@ -1,6 +1,7 @@
from .adt_meta import TupleMeta, ProductMeta, SumMeta, EnumMeta, is_adt_type
from collections import OrderedDict
from types import MappingProxyType
+import typing as tp
__all__ = ['Tuple', 'Product', 'Sum', 'Enum']
__all__ += ['new_instruction', 'is_adt_type']
@@ -65,17 +66,6 @@ class Tuple(metaclass=TupleMeta):
class Product(Tuple, metaclass=ProductMeta):
- def __new__(cls, *args, **kwargs):
- if cls.is_bound:
- return super().__new__(cls, *args, **kwargs)
- elif len(args) == 1:
- #fields, name, bases, namespace
- t = type(cls).from_fields(kwargs, args[0], (cls,), {})
- return t
-
- else:
- raise TypeError('Cannot instance unbound product type')
-
def __repr__(self):
return f'{type(self).__name__}({", ".join(f"{k}={v}" for k,v in self.value_dict.items())})'
@@ -86,6 +76,28 @@ class Product(Tuple, metaclass=ProductMeta):
d[k] = getattr(self, k)
return MappingProxyType(d)
+ @classmethod
+ def from_fields(cls,
+ class_name: str,
+ fields: tp.Mapping[str, type],
+ module: tp.Optional[str] = None,
+ qualname: tp.Optional[str] = None):
+ if cls.is_bound:
+ raise TypeError('Type must not be bound')
+
+ ns = {}
+
+ if module is None:
+ module = cls.__module__
+
+ if qualname is None:
+ qualname = class_name
+
+ ns['__module__'] = module
+ ns['__qualname__'] = qualname
+
+ return cls._from_fields(fields, class_name, (cls,), ns)
+
class Sum(metaclass=SumMeta):
def __init__(self, value):
if not isinstance(value, tuple(type(self).fields)):
diff --git a/hwtypes/adt_meta.py b/hwtypes/adt_meta.py
index d1c81bb..8559537 100644
--- a/hwtypes/adt_meta.py
+++ b/hwtypes/adt_meta.py
@@ -169,8 +169,6 @@ class ProductMeta(TupleMeta):
for k, v in namespace.items():
if _is_sunder(k) or _is_dunder(k) or _is_descriptor(v):
ns[k] = v
- elif k in _RESERVED_NAMES:
- raise ReservedNameError(f'Field name {k} is reserved by the type machinery')
elif isinstance(v, type):
if k in fields:
raise TypeError(f'Conflicting definitions of field {k}')
@@ -180,12 +178,12 @@ class ProductMeta(TupleMeta):
ns[k] = v
if fields:
- return mcs.from_fields(fields, name, bases, ns, **kwargs)
+ return mcs._from_fields(fields, name, bases, ns, **kwargs)
else:
return super().__new__(mcs, name, bases, ns, **kwargs)
@classmethod
- def from_fields(mcs, fields, name, bases, ns, **kwargs):
+ def _from_fields(mcs, fields, name, bases, ns, **kwargs):
# not strictly necessary could iterative over class dict finding
# TypedProperty to reconstruct _field_table_ but that seems bad
if '_field_table_' in ns:
@@ -193,6 +191,10 @@ class ProductMeta(TupleMeta):
else:
ns['_field_table_'] = OrderedDict()
+ for field in fields:
+ if field in _RESERVED_NAMES:
+ raise ReservedNameError(f'Field name {field} is reserved by the type machinery')
+
def _get_tuple_base(bases):
for base in bases:
if not isinstance(base, ProductMeta) and isinstance(base, TupleMeta):
|
leonardt/hwtypes
|
06507759411b492ae7355b572adc0fc2a5bc3688
|
diff --git a/tests/test_adt.py b/tests/test_adt.py
index 2e4798f..6c71ca4 100644
--- a/tests/test_adt.py
+++ b/tests/test_adt.py
@@ -7,26 +7,33 @@ class En1(Enum):
a = 0
b = 1
+
class En2(Enum):
c = 0
d = 1
+
class Pr(Product):
x = En1
y = En2
+
class Pr2(Product):
x = En1
y = En2
+
class Pr3(Product):
y = En2
x = En1
+
Su = Sum[En1, Pr]
+
Tu = Tuple[En1, En2]
+
def test_enum():
assert set(En1.enumerate()) == {
En1.a,
@@ -43,6 +50,7 @@ def test_enum():
with pytest.raises(AttributeError):
En1.a.b
+
def test_tuple():
assert set(Tu.enumerate()) == {
Tu(En1.a, En2.c),
@@ -71,6 +79,7 @@ def test_tuple():
with pytest.raises(TypeError):
t[1] = 1
+
def test_product():
assert set(Pr.enumerate()) == {
Pr(En1.a, En2.c),
@@ -119,6 +128,26 @@ def test_product():
assert Pr.field_dict != Pr3.field_dict
+def test_product_from_fields():
+ P = Product.from_fields('P', {'A' : int, 'B' : str})
+ assert issubclass(P, Product)
+ assert issubclass(P, Tuple[int, str])
+ assert P.A == int
+ assert P.B == str
+ assert P.__name__ == 'P'
+ assert P.__module__ == Product.__module__
+ assert P.__qualname__ == 'P'
+
+ P = Product.from_fields('P', {'A' : int, 'B' : str}, module='foo')
+ assert P.__module__ == 'foo'
+
+ P = Product.from_fields('P', {'A' : int, 'B' : str}, qualname='Foo.P')
+ assert P.__qualname__ == 'Foo.P'
+
+ with pytest.raises(TypeError):
+ Pr.from_fields('P', {'A' : int, 'B' : str})
+
+
def test_sum():
assert set(Su.enumerate()) == {
Su(En1.a),
@@ -159,6 +188,7 @@ def test_new():
t = new(Sum, (En1, Pr), module=__name__)
assert t.__module__ == __name__
+
@pytest.mark.parametrize("T", [En1, Tu, Su, Pr])
def test_repr(T):
s = repr(T)
@@ -181,5 +211,3 @@ class _(T):
'''
with pytest.raises(ReservedNameError):
exec(cls_str, l_dict)
-
-
|
[feature request] dynamic constructor for adt.Product
`new_product(class_name : str, field_dict : tp.Mapping[str,type])`
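
A minimal usage sketch of the requested constructor, assuming the `from_fields` classmethod and signature introduced by the patch above (the assertions mirror `test_product_from_fields` from the test patch; `P`, `A`, and `B` are illustrative names):

```py
from hwtypes.adt import Product, Tuple

# Build a Product type at runtime instead of via a class statement.
P = Product.from_fields('P', {'A': int, 'B': str})

assert issubclass(P, Product)
assert issubclass(P, Tuple[int, str])
assert P.A is int and P.B is str
assert P.__name__ == 'P'
```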
|
0.0
|
06507759411b492ae7355b572adc0fc2a5bc3688
|
[
"tests/test_adt.py::test_product_from_fields"
] |
[
"tests/test_adt.py::test_enum",
"tests/test_adt.py::test_tuple",
"tests/test_adt.py::test_product",
"tests/test_adt.py::test_sum",
"tests/test_adt.py::test_new",
"tests/test_adt.py::test_repr[En1]",
"tests/test_adt.py::test_repr[Tuple[En1,",
"tests/test_adt.py::test_repr[Sum[Pr,",
"tests/test_adt.py::test_repr[Pr]",
"tests/test_adt.py::test_reserved[fields-T_field0]",
"tests/test_adt.py::test_reserved[fields-T_field1]",
"tests/test_adt.py::test_reserved[field_dict-T_field0]",
"tests/test_adt.py::test_reserved[field_dict-T_field1]",
"tests/test_adt.py::test_reserved[is_bound-T_field0]",
"tests/test_adt.py::test_reserved[is_bound-T_field1]",
"tests/test_adt.py::test_reserved[value_dict-T_field0]",
"tests/test_adt.py::test_reserved[value_dict-T_field1]",
"tests/test_adt.py::test_reserved[enumerate-T_field0]",
"tests/test_adt.py::test_reserved[enumerate-T_field1]",
"tests/test_adt.py::test_reserved[value-T_field0]",
"tests/test_adt.py::test_reserved[value-T_field1]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-10 22:46:41+00:00
|
bsd-3-clause
| 3,551 |
|
leonardt__hwtypes-77
|
diff --git a/hwtypes/adt_meta.py b/hwtypes/adt_meta.py
index 144cc72..0a65710 100644
--- a/hwtypes/adt_meta.py
+++ b/hwtypes/adt_meta.py
@@ -1,13 +1,14 @@
import itertools as it
import typing as tp
from abc import ABCMeta, abstractmethod
-from collections import OrderedDict
-
import weakref
from types import MappingProxyType
from collections.abc import Mapping, MutableMapping
+from collections import OrderedDict
from .util import TypedProperty
+from .util import OrderedFrozenDict
+from .util import _issubclass
__all__ = ['BoundMeta', 'TupleMeta', 'ProductMeta', 'SumMeta', 'EnumMeta']
@@ -34,6 +35,25 @@ def is_adt_type(t):
class ReservedNameError(Exception): pass
+RESERVED_NAMES = frozenset({
+ 'enumerate',
+ 'fields',
+ 'field_dict',
+ 'is_bound',
+ 'is_cached',
+ 'value',
+ 'value_dict',
+})
+
+RESERVED_SUNDERS = frozenset({
+ '_cached_',
+ '_fields_',
+ '_field_table_',
+ '_unbound_base_',
+})
+
+RESERVED_ATTRS = frozenset(RESERVED_NAMES | RESERVED_SUNDERS)
+
# Can't have abstract metaclass https://bugs.python.org/issue36881
class BoundMeta(type): #, metaclass=ABCMeta):
# (UnboundType, (types...)) : BoundType
@@ -55,10 +75,9 @@ class BoundMeta(type): #, metaclass=ABCMeta):
return super().__call__(*args, **kwargs)
def __new__(mcs, name, bases, namespace, fields=None, **kwargs):
- if '_fields_' in namespace:
- raise ReservedNameError('class attribute _fields_ is reserved by the type machinery')
- if '_unbound_base_' in namespace:
- raise ReservedNameError('class attribute _unbound_base_ is reserved by the type machinery')
+ for rname in RESERVED_SUNDERS:
+ if rname in namespace:
+ raise ReservedNameError(f'class attribute {rname} is reserved by the type machinery')
bound_types = fields
has_bound_base = False
@@ -82,9 +101,9 @@ class BoundMeta(type): #, metaclass=ABCMeta):
if hasattr(t, '_fields_cb'):
bound_types = t._fields_cb(bound_types)
- namespace['_fields_'] = bound_types
- namespace['_unbound_base_'] = None
t = super().__new__(mcs, name, bases, namespace, **kwargs)
+ t._fields_ = bound_types
+ t._unbound_base_ = None
if bound_types is None:
# t is a unbound type
@@ -106,13 +125,14 @@ class BoundMeta(type): #, metaclass=ABCMeta):
return tuple(idx)
def __getitem__(cls, idx) -> 'BoundMeta':
+ mcs = type(cls)
if not isinstance(idx, tp.Iterable):
idx = idx,
idx = cls._fields_cb(idx)
try:
- return BoundMeta._class_cache[cls, idx]
+ return mcs._class_cache[cls, idx]
except KeyError:
pass
@@ -123,9 +143,10 @@ class BoundMeta(type): #, metaclass=ABCMeta):
bases.extend(b[idx] for b in cls.__bases__ if isinstance(b, BoundMeta))
bases = tuple(bases)
class_name = '{}[{}]'.format(cls.__name__, ', '.join(map(lambda t : t.__name__, idx)))
- t = type(cls)(class_name, bases, {}, fields=idx)
- t.__module__ = cls.__module__
- BoundMeta._class_cache[cls, idx] = t
+
+ t = mcs(class_name, bases, {'__module__' : cls.__module__}, fields=idx)
+ mcs._class_cache[cls, idx] = t
+ t._cached_ = True
return t
@property
@@ -152,17 +173,21 @@ class BoundMeta(type): #, metaclass=ABCMeta):
def __repr__(cls):
return f"{cls.__name__}"
- def rebind(cls, A : type, B : type):
+ def rebind(cls, A: type, B: type, rebind_sub_types: bool = False):
new_fields = []
for T in cls.fields:
- if T == A:
+ if T == A or (rebind_sub_types and _issubclass(T,A)):
new_fields.append(B)
elif isinstance(T, BoundMeta):
- new_fields.append(T.rebind(A, B))
+ new_fields.append(T.rebind(A, B, rebind_sub_types))
else:
new_fields.append(T)
return cls.unbound_t[new_fields]
+ @property
+ def is_cached(cls):
+ return getattr(cls, '_cached_', False)
+
class TupleMeta(BoundMeta):
def __getitem__(cls, idx):
if cls.is_bound:
@@ -185,9 +210,9 @@ class TupleMeta(BoundMeta):
def field_dict(cls):
return MappingProxyType({idx : field for idx, field in enumerate(cls.fields)})
-_RESERVED_NAMES = {'enumerate', 'fields', 'field_dict', 'is_bound', 'value', 'value_dict'}
+
class ProductMeta(TupleMeta):
- def __new__(mcs, name, bases, namespace, **kwargs):
+ def __new__(mcs, name, bases, namespace, cache=False, **kwargs):
fields = {}
ns = {}
for base in bases:
@@ -197,9 +222,14 @@ class ProductMeta(TupleMeta):
raise TypeError(f'Conflicting definitions of field {k}')
else:
fields[k] = v
+
for k, v in namespace.items():
- if _is_sunder(k) or _is_dunder(k) or _is_descriptor(v):
+ if k in RESERVED_SUNDERS:
+ raise ReservedNameError(f'class attribute {k} is reserved by the type machinery')
+ elif _is_dunder(k) or _is_sunder(k) or _is_descriptor(v):
ns[k] = v
+ elif k in RESERVED_NAMES:
+            raise ReservedNameError(f'Field name {k} is reserved by the type machinery')
elif isinstance(v, type):
if k in fields:
raise TypeError(f'Conflicting definitions of field {k}')
@@ -209,22 +239,36 @@ class ProductMeta(TupleMeta):
ns[k] = v
if fields:
- return mcs._from_fields(fields, name, bases, ns, **kwargs)
+ return mcs._from_fields(fields, name, bases, ns, cache, **kwargs)
else:
return super().__new__(mcs, name, bases, ns, **kwargs)
+ def __init__(cls, name, bases, namespace, cache=False, **kwargs):
+ return super().__init__(name, bases, namespace, **kwargs)
+
@classmethod
- def _from_fields(mcs, fields, name, bases, ns, **kwargs):
+ def _from_fields(mcs, fields, name, bases, ns, cache, **kwargs):
+ if cache:
+ ns_idx = set()
+ for k,v in ns.items():
+ if not _is_dunder(k):
+ ns_idx.add((k,v))
+
+ fields_idx = tuple(map(tuple, fields.items()))
+ cache_idx = (fields_idx,
+ bases,
+ name,
+ frozenset(ns_idx),
+ frozenset(kwargs.items()),)
+
+ try:
+ return mcs._class_cache[cache_idx]
+ except KeyError:
+ pass
+
    # not strictly necessary; could iterate over class dict finding
# TypedProperty to reconstruct _field_table_ but that seems bad
- if '_field_table_' in ns:
- raise ReservedNameError('class attribute _field_table_ is reserved by the type machinery')
- else:
- ns['_field_table_'] = OrderedDict()
-
- for field in fields:
- if field in _RESERVED_NAMES:
- raise ReservedNameError(f'Field name {field} is reserved by the type machinery')
+ field_table = dict()
def _get_tuple_base(bases):
for base in bases:
@@ -257,13 +301,16 @@ class ProductMeta(TupleMeta):
for field_name, field_type in fields.items():
assert field_name not in ns
idx = idx_table[field_name]
- ns['_field_table_'][field_name] = field_type
+ field_table[field_name] = field_type
ns[field_name] = _make_prop(field_type, idx)
+
# this is all really gross but I don't know how to do this cleanly
# need to build t so I can call super() in new and init
# need to exec to get proper signatures
t = super().__new__(mcs, name, bases, ns, **kwargs)
+ t._field_table_ = OrderedFrozenDict(field_table)
+    t._cached_ = cache
gs = {name : t, 'ProductMeta' : ProductMeta}
ls = {}
@@ -286,16 +333,8 @@ def __init__(self, {type_sig}):
exec(__init__, gs, ls)
t.__init__ = ls['__init__']
- product_base = None
- for base in bases:
- if isinstance(base, mcs):
- if product_base is None:
- product_base = base
- else:
- raise TypeError('Can only inherit from one product type')
-
- if product_base is not None and not product_base.is_bound:
- t._unbound_base_ = product_base
+ if cache:
+ mcs._class_cache[cache_idx] = t
return t
@@ -306,51 +345,46 @@ def __init__(self, {type_sig}):
else:
raise TypeError("Cannot bind product types with getitem")
- def __repr__(cls):
- if cls.is_bound:
- field_spec = ', '.join(map('{0[0]}={0[1].__name__}'.format, cls.field_dict.items()))
- return f"{cls.__bases__[0].__name__}('{cls.__name__}', {field_spec})"
- else:
- return super().__repr__()
-
@property
def field_dict(cls):
return MappingProxyType(cls._field_table_)
def from_fields(cls,
- class_name: str,
+ name: str,
fields: tp.Mapping[str, type],
- module: tp.Optional[str] = None,
- qualname: tp.Optional[str] = None):
+ cache: tp.Optional[bool] = None):
+
if cls.is_bound:
raise TypeError('Type must not be bound')
+ for field in fields:
+ if field in RESERVED_ATTRS:
+ raise ReservedNameError(f'Field name {field} is reserved by the type machinery')
+
ns = {}
- if module is None:
- module = cls.__module__
+ if cache is None:
+ cache = True
- if qualname is None:
- qualname = class_name
- ns['__module__'] = module
- ns['__qualname__'] = qualname
+ ns['__module__'] = cls.__module__
+ ns['__qualname__'] = name
- return cls._from_fields(fields, class_name, (cls,), ns)
+ return cls._from_fields(fields, name, (cls,), ns, cache)
- def rebind(cls, A : type, B : type):
+ def rebind(cls, A: type, B: type, rebind_sub_types: bool = False):
new_fields = OrderedDict()
for field, T in cls.field_dict.items():
- if T == A:
+ if T == A or (rebind_sub_types and _issubclass(T,A)):
new_fields[field] = B
elif isinstance(T, BoundMeta):
- new_fields[field] = T.rebind(A, B)
+ new_fields[field] = T.rebind(A, B, rebind_sub_types)
else:
new_fields[field] = T
if new_fields != cls.field_dict:
- return cls.unbound_t.from_fields(cls.__name__, new_fields, cls.__module__, cls.__qualname__)
+ return cls.unbound_t.from_fields(cls.__name__, new_fields, cache=cls.is_cached)
else:
return cls
@@ -369,11 +403,12 @@ class SumMeta(BoundMeta):
if fields is not None:
for field in fields:
- if field.__name__ in _RESERVED_NAMES:
- raise ReservedNameError(f'Field name {field.__name__} is reserved by the type machinery')
- elif field.__name__ in namespace:
- raise TypeError(f'Field name {field.__name__} cannot be used as a class attribute')
- namespace[field.__name__] = _make_prop(field)
+ fname = field.__name__
+ if fname in RESERVED_ATTRS:
+ raise ReservedNameError(f'Field name {fname} is reserved by the type machinery')
+ elif fname in namespace:
+ raise TypeError(f'Field name {fname} cannot be used as a class attribute')
+ namespace[fname] = _make_prop(field)
return super().__new__(mcs, name, bases, namespace, fields, **kwargs)
@@ -398,36 +433,36 @@ class EnumMeta(BoundMeta):
return 'Auto()'
def __new__(mcs, cls_name, bases, namespace, **kwargs):
- if '_field_table_' in namespace:
- raise ReservedNameError('class attribute _field_table_ is reserved by the type machinery')
-
elems = {}
ns = {}
for k, v in namespace.items():
- if _is_dunder(k) or _is_sunder(k) or _is_descriptor(v):
+ if k in RESERVED_SUNDERS:
+ raise ReservedNameError(f'class attribute {k} is reserved by the type machinery')
+ elif _is_dunder(k) or _is_sunder(k) or _is_descriptor(v):
ns[k] = v
- elif k in _RESERVED_NAMES:
+ elif k in RESERVED_NAMES:
raise ReservedNameError(f'Field name {k} is reserved by the type machinery')
elif isinstance(v, (int, mcs.Auto)):
elems[k] = v
else:
raise TypeError(f'Enum value should be int not {type(v)}')
- ns['_field_table_'] = name_table = dict()
t = super().__new__(mcs, cls_name, bases, ns, **kwargs)
+ name_table = dict()
if not elems:
return t
for name, value in elems.items():
elem = t.__new__(t)
- elem.__init__(value)
+ t.__init__(elem, value)
setattr(elem, '_name_', name)
name_table[name] = elem
setattr(t, name, elem)
t._fields_ = tuple(name_table.values())
+ t._field_table_ = name_table
enum_base = None
for base in bases:
@@ -454,7 +489,7 @@ class EnumMeta(BoundMeta):
def enumerate(cls):
yield from cls.fields
- def rebind(cls, A : type, B : type):
+ def rebind(cls, A: type, B: type, rebind_sub_types: bool = False):
# Enums aren't bound to types
# could potentialy rebind values but that seems annoying
return cls
diff --git a/hwtypes/util.py b/hwtypes/util.py
index c403ce7..a68bf27 100644
--- a/hwtypes/util.py
+++ b/hwtypes/util.py
@@ -1,5 +1,44 @@
+from collections import OrderedDict
+from collections.abc import Mapping, MutableMapping
import typing as tp
+class FrozenDict(Mapping):
+ __slots__ = '_d', '_hash'
+
+ def __init__(self, *args, **kwargs):
+ self._d = dict(*args, **kwargs)
+ self._hash = hash(frozenset(self.items()))
+
+ def __getitem__(self, key):
+ return self._d.__getitem__(key)
+
+ def __iter__(self):
+ return self._d.__iter__()
+
+ def __len__(self):
+ return self._d.__len__()
+
+ def __eq__(self, other):
+ if isinstance(other, type(self)):
+ return self._d == other._d
+ else:
+ return self._d == other
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __hash__(self):
+ return self._hash
+
+
+class OrderedFrozenDict(FrozenDict):
+ __slots__ = ()
+
+ def __init__(self, *args, **kwargs):
+ self._d = OrderedDict(*args, **kwargs)
+ self._hash = hash(tuple(self.items()))
+
+
class TypedProperty:
'''
Behaves mostly like property except:
|
leonardt/hwtypes
|
85a69e89a8ee2eb6e0a8d650a4aa9e6dacb82ba7
|
diff --git a/tests/test_adt.py b/tests/test_adt.py
index 3b8115c..414cb33 100644
--- a/tests/test_adt.py
+++ b/tests/test_adt.py
@@ -1,9 +1,8 @@
import pytest
from hwtypes.adt import Product, Sum, Enum, Tuple
-from hwtypes.adt_meta import _RESERVED_NAMES, ReservedNameError
+from hwtypes.adt_meta import RESERVED_ATTRS, ReservedNameError
from hwtypes.modifiers import new
from hwtypes.adt_util import rebind_bitvector
-from hwtypes.bit_vector import AbstractBitVector, BitVector
class En1(Enum):
a = 0
@@ -15,12 +14,7 @@ class En2(Enum):
d = 1
-class En3(Enum):
- e = 3
- f = 4
-
-
-class Pr(Product):
+class Pr(Product, cache=True):
x = En1
y = En2
@@ -132,6 +126,7 @@ def test_product():
p[0] = En2.c
assert Pr != Pr2
+ assert Pr is Product.from_fields('Pr', {'x' : En1, 'y' : En2 }, cache=True)
assert Pr.field_dict == Pr2.field_dict
assert Pr.field_dict != Pr3.field_dict
@@ -142,15 +137,16 @@ def test_product_from_fields():
assert issubclass(P, Tuple[int, str])
assert P.A == int
assert P.B == str
- assert P.__name__ == 'P'
assert P.__module__ == Product.__module__
- assert P.__qualname__ == 'P'
- P = Product.from_fields('P', {'A' : int, 'B' : str}, module='foo')
- assert P.__module__ == 'foo'
+ assert P is Product.from_fields('P', {'A' : int, 'B' : str})
+
+ P2 = Product.from_fields('P', {'B' : str, 'A' : int})
+ assert P2 is not P
- P = Product.from_fields('P', {'A' : int, 'B' : str}, qualname='Foo.P')
- assert P.__qualname__ == 'Foo.P'
+ P3 = Product.from_fields('P', {'A' : int, 'B' : str}, cache=False)
+ assert P3 is not P
+ assert P3 is not P2
with pytest.raises(TypeError):
Pr.from_fields('P', {'A' : int, 'B' : str})
@@ -229,7 +225,7 @@ def test_repr(T):
@pytest.mark.parametrize("T_field", [(Enum, '0'), (Product, 'int')])
[email protected]("field_name", list(_RESERVED_NAMES))
[email protected]("field_name", list(RESERVED_ATTRS))
def test_reserved(T_field, field_name):
T, field = T_field
l_dict = {'T' : T}
@@ -252,60 +248,3 @@ def test_unbound_t(t, base):
with pytest.raises(AttributeError):
sub_t.unbound_t
[email protected]("T", [Tu, Su, Pr])
-def test_rebind(T):
- assert En1 in T.fields
- assert En3 not in T.fields
- T2 = T.rebind(En1, En3)
- assert En1 not in T2.fields
- assert En3 in T2.fields
-
-
-class A: pass
-class B: pass
-class C: pass
-class D: pass
-class P1(Product):
- A = A
- B = B
-
-S1 = Sum[C, P1]
-
-class P2(Product):
- S1 = S1
- C = C
-
-def test_rebind_recusrive():
- P3 = P2.rebind(A, D)
- assert P3.S1.field_dict['P1'].A == D
- assert P3.S1.field_dict['P1'].B == B
- assert C in P3.S1.fields
- P4 = P3.rebind(C, D)
- assert P4.C == D
- assert D in P4.S1.fields
- P5 = P2.rebind(P1, A)
- assert P5.S1 == Sum[C, A]
-
-
-class F(Product):
- Y = AbstractBitVector
-
-
-class P(Product):
- X = AbstractBitVector[16]
- S = Sum[AbstractBitVector[4], AbstractBitVector[8]]
- T = Tuple[AbstractBitVector[32]]
- F = F
-
-
-def test_rebind_bv():
- P_bound = rebind_bitvector(P, BitVector)
- assert P_bound.X == BitVector[16]
- assert P_bound.S == Sum[BitVector[4], BitVector[8]]
- assert P_bound.T[0] == BitVector[32]
- assert P_bound.F.Y == BitVector
-
- P_unbound = rebind_bitvector(P_bound, AbstractBitVector)
- assert P_unbound.X == AbstractBitVector[16]
- assert P_unbound.S == Sum[AbstractBitVector[4], AbstractBitVector[8]]
- assert P_unbound.T[0] == AbstractBitVector[32]
diff --git a/tests/test_rebind.py b/tests/test_rebind.py
new file mode 100644
index 0000000..b01726c
--- /dev/null
+++ b/tests/test_rebind.py
@@ -0,0 +1,149 @@
+import pytest
+
+from hwtypes.adt import Product, Sum, Enum, Tuple
+from hwtypes.adt_util import rebind_bitvector
+from hwtypes.bit_vector import AbstractBitVector, BitVector, AbstractBit, Bit
+from hwtypes.smt_bit_vector import SMTBit
+from hwtypes.util import _issubclass
+
+class A: pass
+class B: pass
+class C(A): pass
+class D(B): pass
+
+class E(Enum):
+ A = 0
+ B = 1
+ C = 2
+ E = 3
+
+T0 = Tuple[A, B, C, E]
+
+class P0(Product, cache=True):
+ A = A
+ B = B
+ C = C
+ E = E
+
+S0 = Sum[A, B, C, E]
+
+class P1(Product, cache=True):
+ P0 = P0
+ S0 = S0
+ T0 = T0
+ D = D
+
+S1 = Sum[P0, S0, T0, D]
+
+
+
[email protected]("type_0", [A, B, C, D, E])
[email protected]("type_1", [A, B, C, D, E])
[email protected]("rebind_sub_types", [False, True])
+def test_rebind_enum(type_0, type_1, rebind_sub_types):
+ assert E is E.rebind(type_0, type_1, rebind_sub_types)
+
+
[email protected]("T", [T0, S0])
[email protected]("type_0", [A, B, C, D, E])
[email protected]("type_1", [A, B, C, D, E])
[email protected]("rebind_sub_types", [False, True])
+def test_rebind_sum_tuple(T, type_0, type_1, rebind_sub_types):
+ fields = T.fields
+ T_ = T.rebind(type_0, type_1, rebind_sub_types)
+
+ if rebind_sub_types:
+ map_fn = lambda s : type_1 if _issubclass(s, type_0) else s
+ else:
+ map_fn = lambda s : type_1 if s == type_0 else s
+
+ new_fields = map(map_fn, fields)
+
+ assert T_ is T.unbound_t[new_fields]
+
+
[email protected]("type_0", [A, B, C, D, E])
[email protected]("type_1", [A, B, C, D, E])
[email protected]("rebind_sub_types", [False, True])
+def test_rebind_product(type_0, type_1, rebind_sub_types):
+ field_dict = P0.field_dict
+ P_ = P0.rebind(type_0, type_1, rebind_sub_types)
+
+ if rebind_sub_types:
+ map_fn = lambda s : type_1 if _issubclass(s, type_0) else s
+ else:
+ map_fn = lambda s : type_1 if s == type_0 else s
+
+ new_fields = {}
+ for k,v in field_dict.items():
+ new_fields[k] = map_fn(v)
+
+ assert P_ is Product.from_fields('P0', new_fields)
+
+
[email protected]("rebind_sub_types", [False, True])
+def test_rebind_recursive(rebind_sub_types):
+ S_ = S1.rebind(B, A, rebind_sub_types)
+ if rebind_sub_types:
+ gold = Sum[
+ P0.rebind(B, A, rebind_sub_types),
+ S0.rebind(B, A, rebind_sub_types),
+ T0.rebind(B, A, rebind_sub_types),
+ A
+ ]
+ else:
+ gold = Sum[
+ P0.rebind(B, A, rebind_sub_types),
+ S0.rebind(B, A, rebind_sub_types),
+ T0.rebind(B, A, rebind_sub_types),
+ D
+ ]
+
+ assert S_ is gold
+
+ P_ = P1.rebind(B, A, rebind_sub_types)
+ if rebind_sub_types:
+ gold = Product.from_fields('P1', {
+ 'P0' : P0.rebind(B, A, rebind_sub_types),
+ 'S0' : S0.rebind(B, A, rebind_sub_types),
+ 'T0' : T0.rebind(B, A, rebind_sub_types),
+ 'D' : A
+ })
+ else:
+ gold = Product.from_fields('P1', {
+ 'P0' : P0.rebind(B, A, rebind_sub_types),
+ 'S0' : S0.rebind(B, A, rebind_sub_types),
+ 'T0' : T0.rebind(B, A, rebind_sub_types),
+ 'D' : D
+ })
+
+
+ assert P_ is gold
+
+
+class P(Product):
+ X = AbstractBitVector[16]
+ S = Sum[AbstractBitVector[4], AbstractBitVector[8]]
+ T = Tuple[AbstractBitVector[32]]
+ class F(Product):
+ Y = AbstractBitVector
+
+
+def test_rebind_bv():
+ P_bound = rebind_bitvector(P, BitVector)
+ assert P_bound.X == BitVector[16]
+ assert P_bound.S == Sum[BitVector[4], BitVector[8]]
+ assert P_bound.T[0] == BitVector[32]
+ assert P_bound.F.Y == BitVector
+
+ P_unbound = rebind_bitvector(P_bound, AbstractBitVector)
+ assert P_unbound.X == AbstractBitVector[16]
+ assert P_unbound.S == Sum[AbstractBitVector[4], AbstractBitVector[8]]
+ assert P_unbound.T[0] == AbstractBitVector[32]
+
+def test_issue_74():
+ class A(Product):
+ a = Bit
+
+ A_smt = A.rebind(AbstractBit, SMTBit, True)
+ assert A_smt.a is SMTBit
|
[feature-request] Product types defined the same way should be the same object.
Ideally, I would like the following to work.
```
A0 = Product.from_fields("A",{"a":int})
A1 = Product.from_fields("A",{"a":int})
assert A0 is A1
```
I do not think this would change any existing code, and it would be immensely helpful.
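
A minimal sketch of the memoization this implies, assuming a module-level cache keyed on the class name plus the ordered field items; the names and the bare `type(...)` call are illustrative simplifications of the `_class_cache` machinery in the patch above:

```py
import typing as tp

_cache: tp.Dict[tuple, type] = {}

def from_fields(name: str, fields: tp.Mapping[str, type]) -> type:
    # Key on the name and the ordered field items so two identical
    # definitions resolve to the same class object.
    key = (name, tuple(fields.items()))
    try:
        return _cache[key]
    except KeyError:
        t = type(name, (), dict(fields))
        _cache[key] = t
        return t

A0 = from_fields('A', {'a': int})
A1 = from_fields('A', {'a': int})
assert A0 is A1
```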
|
0.0
|
85a69e89a8ee2eb6e0a8d650a4aa9e6dacb82ba7
|
[
"tests/test_adt.py::test_enum",
"tests/test_adt.py::test_tuple",
"tests/test_adt.py::test_product",
"tests/test_adt.py::test_product_from_fields",
"tests/test_adt.py::test_sum",
"tests/test_adt.py::test_new",
"tests/test_adt.py::test_repr[En1]",
"tests/test_adt.py::test_repr[Tuple[En1,",
"tests/test_adt.py::test_repr[Sum[En1,",
"tests/test_adt.py::test_repr[Pr]",
"tests/test_adt.py::test_reserved[is_bound-T_field0]",
"tests/test_adt.py::test_reserved[is_bound-T_field1]",
"tests/test_adt.py::test_reserved[_field_table_-T_field0]",
"tests/test_adt.py::test_reserved[_field_table_-T_field1]",
"tests/test_adt.py::test_reserved[value_dict-T_field0]",
"tests/test_adt.py::test_reserved[value_dict-T_field1]",
"tests/test_adt.py::test_reserved[fields-T_field0]",
"tests/test_adt.py::test_reserved[fields-T_field1]",
"tests/test_adt.py::test_reserved[_cached_-T_field0]",
"tests/test_adt.py::test_reserved[_cached_-T_field1]",
"tests/test_adt.py::test_reserved[is_cached-T_field0]",
"tests/test_adt.py::test_reserved[is_cached-T_field1]",
"tests/test_adt.py::test_reserved[field_dict-T_field0]",
"tests/test_adt.py::test_reserved[field_dict-T_field1]",
"tests/test_adt.py::test_reserved[_unbound_base_-T_field0]",
"tests/test_adt.py::test_reserved[_unbound_base_-T_field1]",
"tests/test_adt.py::test_reserved[_fields_-T_field0]",
"tests/test_adt.py::test_reserved[_fields_-T_field1]",
"tests/test_adt.py::test_reserved[enumerate-T_field0]",
"tests/test_adt.py::test_reserved[enumerate-T_field1]",
"tests/test_adt.py::test_reserved[value-T_field0]",
"tests/test_adt.py::test_reserved[value-T_field1]",
"tests/test_adt.py::test_unbound_t[En1-Enum]",
"tests/test_adt.py::test_unbound_t[Pr-Product]",
"tests/test_adt.py::test_unbound_t[Sum[En1,",
"tests/test_adt.py::test_unbound_t[Tuple[En1,",
"tests/test_rebind.py::test_rebind_enum[False-A-A]",
"tests/test_rebind.py::test_rebind_enum[False-A-B]",
"tests/test_rebind.py::test_rebind_enum[False-A-C]",
"tests/test_rebind.py::test_rebind_enum[False-A-D]",
"tests/test_rebind.py::test_rebind_enum[False-A-E]",
"tests/test_rebind.py::test_rebind_enum[False-B-A]",
"tests/test_rebind.py::test_rebind_enum[False-B-B]",
"tests/test_rebind.py::test_rebind_enum[False-B-C]",
"tests/test_rebind.py::test_rebind_enum[False-B-D]",
"tests/test_rebind.py::test_rebind_enum[False-B-E]",
"tests/test_rebind.py::test_rebind_enum[False-C-A]",
"tests/test_rebind.py::test_rebind_enum[False-C-B]",
"tests/test_rebind.py::test_rebind_enum[False-C-C]",
"tests/test_rebind.py::test_rebind_enum[False-C-D]",
"tests/test_rebind.py::test_rebind_enum[False-C-E]",
"tests/test_rebind.py::test_rebind_enum[False-D-A]",
"tests/test_rebind.py::test_rebind_enum[False-D-B]",
"tests/test_rebind.py::test_rebind_enum[False-D-C]",
"tests/test_rebind.py::test_rebind_enum[False-D-D]",
"tests/test_rebind.py::test_rebind_enum[False-D-E]",
"tests/test_rebind.py::test_rebind_enum[False-E-A]",
"tests/test_rebind.py::test_rebind_enum[False-E-B]",
"tests/test_rebind.py::test_rebind_enum[False-E-C]",
"tests/test_rebind.py::test_rebind_enum[False-E-D]",
"tests/test_rebind.py::test_rebind_enum[False-E-E]",
"tests/test_rebind.py::test_rebind_enum[True-A-A]",
"tests/test_rebind.py::test_rebind_enum[True-A-B]",
"tests/test_rebind.py::test_rebind_enum[True-A-C]",
"tests/test_rebind.py::test_rebind_enum[True-A-D]",
"tests/test_rebind.py::test_rebind_enum[True-A-E]",
"tests/test_rebind.py::test_rebind_enum[True-B-A]",
"tests/test_rebind.py::test_rebind_enum[True-B-B]",
"tests/test_rebind.py::test_rebind_enum[True-B-C]",
"tests/test_rebind.py::test_rebind_enum[True-B-D]",
"tests/test_rebind.py::test_rebind_enum[True-B-E]",
"tests/test_rebind.py::test_rebind_enum[True-C-A]",
"tests/test_rebind.py::test_rebind_enum[True-C-B]",
"tests/test_rebind.py::test_rebind_enum[True-C-C]",
"tests/test_rebind.py::test_rebind_enum[True-C-D]",
"tests/test_rebind.py::test_rebind_enum[True-C-E]",
"tests/test_rebind.py::test_rebind_enum[True-D-A]",
"tests/test_rebind.py::test_rebind_enum[True-D-B]",
"tests/test_rebind.py::test_rebind_enum[True-D-C]",
"tests/test_rebind.py::test_rebind_enum[True-D-D]",
"tests/test_rebind.py::test_rebind_enum[True-D-E]",
"tests/test_rebind.py::test_rebind_enum[True-E-A]",
"tests/test_rebind.py::test_rebind_enum[True-E-B]",
"tests/test_rebind.py::test_rebind_enum[True-E-C]",
"tests/test_rebind.py::test_rebind_enum[True-E-D]",
"tests/test_rebind.py::test_rebind_enum[True-E-E]",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-A-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-B-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-C-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-D-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[False-E-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-A-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-B-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-C-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-D-E-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-A-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-A-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-B-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-B-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-C-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-C-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-D-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-D-Sum[B,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-E-Tuple[A,",
"tests/test_rebind.py::test_rebind_sum_tuple[True-E-E-Sum[B,",
"tests/test_rebind.py::test_rebind_product[False-A-A]",
"tests/test_rebind.py::test_rebind_product[False-A-B]",
"tests/test_rebind.py::test_rebind_product[False-A-C]",
"tests/test_rebind.py::test_rebind_product[False-A-D]",
"tests/test_rebind.py::test_rebind_product[False-A-E]",
"tests/test_rebind.py::test_rebind_product[False-B-A]",
"tests/test_rebind.py::test_rebind_product[False-B-B]",
"tests/test_rebind.py::test_rebind_product[False-B-C]",
"tests/test_rebind.py::test_rebind_product[False-B-D]",
"tests/test_rebind.py::test_rebind_product[False-B-E]",
"tests/test_rebind.py::test_rebind_product[False-C-A]",
"tests/test_rebind.py::test_rebind_product[False-C-B]",
"tests/test_rebind.py::test_rebind_product[False-C-C]",
"tests/test_rebind.py::test_rebind_product[False-C-D]",
"tests/test_rebind.py::test_rebind_product[False-C-E]",
"tests/test_rebind.py::test_rebind_product[False-D-A]",
"tests/test_rebind.py::test_rebind_product[False-D-B]",
"tests/test_rebind.py::test_rebind_product[False-D-C]",
"tests/test_rebind.py::test_rebind_product[False-D-D]",
"tests/test_rebind.py::test_rebind_product[False-D-E]",
"tests/test_rebind.py::test_rebind_product[False-E-A]",
"tests/test_rebind.py::test_rebind_product[False-E-B]",
"tests/test_rebind.py::test_rebind_product[False-E-C]",
"tests/test_rebind.py::test_rebind_product[False-E-D]",
"tests/test_rebind.py::test_rebind_product[False-E-E]",
"tests/test_rebind.py::test_rebind_product[True-A-A]",
"tests/test_rebind.py::test_rebind_product[True-A-B]",
"tests/test_rebind.py::test_rebind_product[True-A-C]",
"tests/test_rebind.py::test_rebind_product[True-A-D]",
"tests/test_rebind.py::test_rebind_product[True-A-E]",
"tests/test_rebind.py::test_rebind_product[True-B-A]",
"tests/test_rebind.py::test_rebind_product[True-B-B]",
"tests/test_rebind.py::test_rebind_product[True-B-C]",
"tests/test_rebind.py::test_rebind_product[True-B-D]",
"tests/test_rebind.py::test_rebind_product[True-B-E]",
"tests/test_rebind.py::test_rebind_product[True-C-A]",
"tests/test_rebind.py::test_rebind_product[True-C-B]",
"tests/test_rebind.py::test_rebind_product[True-C-C]",
"tests/test_rebind.py::test_rebind_product[True-C-D]",
"tests/test_rebind.py::test_rebind_product[True-C-E]",
"tests/test_rebind.py::test_rebind_product[True-D-A]",
"tests/test_rebind.py::test_rebind_product[True-D-B]",
"tests/test_rebind.py::test_rebind_product[True-D-C]",
"tests/test_rebind.py::test_rebind_product[True-D-D]",
"tests/test_rebind.py::test_rebind_product[True-D-E]",
"tests/test_rebind.py::test_rebind_product[True-E-A]",
"tests/test_rebind.py::test_rebind_product[True-E-B]",
"tests/test_rebind.py::test_rebind_product[True-E-C]",
"tests/test_rebind.py::test_rebind_product[True-E-D]",
"tests/test_rebind.py::test_rebind_product[True-E-E]",
"tests/test_rebind.py::test_rebind_recursive[False]",
"tests/test_rebind.py::test_rebind_recursive[True]",
"tests/test_rebind.py::test_rebind_bv",
"tests/test_rebind.py::test_issue_74"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-24 01:46:54+00:00
|
bsd-3-clause
| 3,552 |
|
leovt__constructible-3
|
diff --git a/constructible.py b/constructible.py
index 31b4188..1269854 100644
--- a/constructible.py
+++ b/constructible.py
@@ -178,6 +178,9 @@ class Constructible(object):
self.a * other.b + self.b * other.a,
self.field)
+ a, b = self.join(other)
+ return a * b
+
def __truediv__(self, other):
if not isinstance(other, Constructible):
if isinstance(other, Rational):
@@ -239,18 +242,26 @@ class Constructible(object):
if isinstance(other, Constructible) or isinstance(other, Rational):
return (self -other)._sign() < 0
+ return NotImplemented
+
def __gt__(self, other):
if isinstance(other, Constructible) or isinstance(other, Rational):
return (self -other)._sign() > 0
+ return NotImplemented
+
def __le__(self, other):
if isinstance(other, Constructible) or isinstance(other, Rational):
return (self -other)._sign() <= 0
+ return NotImplemented
+
def __ge__(self, other):
if isinstance(other, Constructible) or isinstance(other, Rational):
return (self -other)._sign() >= 0
+ return NotImplemented
+
def join(self, other):
'''return a tuple (new_self, new_other) such that
new_self == self, new_other == other, and new_self.field == new_other.field '''
|
leovt/constructible
|
a2e10f2a7ec271b6a3d9c0e9f88af46b5c4db63b
|
diff --git a/test_constructible.py b/test_constructible.py
index 13a43b6..5dadb24 100644
--- a/test_constructible.py
+++ b/test_constructible.py
@@ -239,6 +239,12 @@ class TestSqrt(TestCase):
self.assertTrue(r > 0)
self.assertEqual(r*r*r*r - 10*r*r + 1, 0)
+ def test_sqrt236(self):
+ from constructible import sqrt
+ r = sqrt(2) * sqrt(3)
+ self.assertTrue(r > 0)
+ self.assertEqual(r, sqrt(6))
+
def test_sqrt235(self):
from constructible import sqrt
r = sqrt(2) + sqrt(3) + sqrt(5)
|
sqrt(2) * sqrt(3) returns None
```py
>>> print(sqrt(2) * sqrt(3))
None
>>> print(sqrt(2) / sqrt(3))
None
```
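
The `None` comes from `__mul__` falling off the end when the two operands live in different extension fields: the early-return path only covers matching fields, so Python implicitly returns `None` otherwise. A minimal sketch of the failure mode and the join-then-retry fix from the patch above, using a toy class (the set-union `join` is illustrative, not the real field arithmetic):

```py
class Num:
    def __init__(self, value, field):
        self.value, self.field = value, field

    def join(self, other):
        # Lift both operands into a common (joined) field.
        field = self.field | other.field
        return Num(self.value, field), Num(other.value, field)

    def __mul__(self, other):
        if self.field == other.field:
            return Num(self.value * other.value, self.field)
        # Without the two lines below, execution falls off the end here
        # and the call implicitly returns None -- the bug reported above.
        a, b = self.join(other)
        return a * b

print((Num(2, {2}) * Num(3, {3})).value)  # 6
```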
|
0.0
|
a2e10f2a7ec271b6a3d9c0e9f88af46b5c4db63b
|
[
"test_constructible.py::TestSqrt::test_sqrt236"
] |
[
"test_constructible.py::TestHelperFunctions::test_fsqrt",
"test_constructible.py::TestHelperFunctions::test_isqrt",
"test_constructible.py::TestArithmeticOperators::test_expressions_type",
"test_constructible.py::TestArithmeticOperators::test_mix_rational_binop",
"test_constructible.py::TestArithmeticOperators::test_mix_rational_rbinop",
"test_constructible.py::TestArithmeticOperators::test_rational_binop",
"test_constructible.py::TestArithmeticOperators::test_rational_unop",
"test_constructible.py::TestStrRepr::test_repr",
"test_constructible.py::TestStrRepr::test_str",
"test_constructible.py::TestComparison::test_comparison_Qsqrt2",
"test_constructible.py::TestComparison::test_rational_comparison",
"test_constructible.py::TestSqrt::test_double_sqrt",
"test_constructible.py::TestSqrt::test_sqrt23",
"test_constructible.py::TestSqrt::test_sqrt235",
"test_constructible.py::TestSqrt::test_sqrt_2",
"test_constructible.py::TestSqrt::test_sqrt_square",
"test_constructible.py::TestTrySqrt::test_sqrt2"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-09-30 17:49:25+00:00
|
apache-2.0
| 3,553 |
|
lepture__mistune-277
|
diff --git a/mistune/plugins/task_lists.py b/mistune/plugins/task_lists.py
index 8f7997c..3094ea7 100644
--- a/mistune/plugins/task_lists.py
+++ b/mistune/plugins/task_lists.py
@@ -7,11 +7,7 @@ TASK_LIST_ITEM = re.compile(r'^(\[[ xX]\])\s+')
def task_lists_hook(md, tokens, state):
- for tok in tokens:
- if tok['type'] == 'list':
- for item in tok['children']:
- _rewrite_list_item(item)
- return tokens
+ return _rewrite_all_list_items(tokens)
def render_ast_task_list_item(children, level, checked):
@@ -50,10 +46,16 @@ def plugin_task_lists(md):
md.renderer.register('task_list_item', render_ast_task_list_item)
-def _rewrite_list_item(item):
- if item['type'] != 'list_item':
- return
+def _rewrite_all_list_items(tokens):
+ for tok in tokens:
+ if tok['type'] == 'list_item':
+ _rewrite_list_item(tok)
+ if 'children' in tok.keys():
+ _rewrite_all_list_items(tok['children'])
+ return tokens
+
+def _rewrite_list_item(item):
children = item['children']
if children:
first_child = children[0]
|
lepture/mistune
|
ab0e8b697ada8f68899a569307510133e8f13771
|
diff --git a/tests/fixtures/task_lists.txt b/tests/fixtures/task_lists.txt
index e8c1c7b..c1760ac 100644
--- a/tests/fixtures/task_lists.txt
+++ b/tests/fixtures/task_lists.txt
@@ -62,3 +62,47 @@
over two lines</li>
</ul>
````````````````````````````````
+
+```````````````````````````````` example
+- [ ] foo
+ - [x] bar
+ - [ ] baz
+.
+<ul>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>foo<ul>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled checked/>bar</li>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>baz</li>
+</ul>
+</li>
+</ul>
+````````````````````````````````
+
+```````````````````````````````` example
+1. [ ] foo
+ 1. [ ] bar
+ 2. [ ] baz
+.
+<ol>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>foo<ol>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>bar</li>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>baz</li>
+</ol>
+</li>
+</ol>
+````````````````````````````````
+
+```````````````````````````````` example
+> - [ ] foo
+> - [x] bar
+> - [ ] baz
+.
+<blockquote>
+<ul>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>foo<ul>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled checked/>bar</li>
+<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>baz</li>
+</ul>
+</li>
+</ul>
+</blockquote>
+````````````````````````````````
\ No newline at end of file
|
Task lists cannot be nested
Task lists apparently cannot be nested (tested with master and 2.0.0rc1):
```py
>>> mistune.create_markdown(plugins=['task_lists'])('* [ ] task\n * [ ] subtask')
'<ul>\n<li class="task-list-item"><input class="task-list-item-checkbox" type="checkbox" disabled/>task<ul>\n<li>[ ] subtask</li>\n</ul>\n</li>\n</ul>\n'
```
Note that the checkbox in the sublist isn't recognized. Since GitHub [allows task lists to be nested arbitrarily](https://github.github.com/gfm/#task-list-items-extension-), Mistune should probably also support this.
* [ ] nested task lists in mistune
* [ ] agree that this should be fixed
* [ ] fix it
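
The fix recurses through the whole token tree instead of scanning only top-level lists, so checkboxes inside nested lists (or block quotes) are found. A minimal sketch of that walk, assuming mistune-style tokens as nested dicts with `type` and `children` keys; `_rewrite_list_item` is stubbed out here:

```py
def _rewrite_list_item(item):
    # Stub: the real hook inspects the item's first paragraph for a
    # "[ ]" / "[x]" marker and retags it as a task list item.
    item['type'] = 'task_list_item'

def _rewrite_all_list_items(tokens):
    for tok in tokens:
        if tok['type'] == 'list_item':
            _rewrite_list_item(tok)
        # Recurse so list items nested in other lists are rewritten too.
        if 'children' in tok:
            _rewrite_all_list_items(tok['children'])
    return tokens

tokens = [{'type': 'list', 'children': [
    {'type': 'list_item', 'children': [
        {'type': 'list', 'children': [
            {'type': 'list_item', 'children': []}]}]}]}]
_rewrite_all_list_items(tokens)
assert tokens[0]['children'][0]['children'][0]['children'][0]['type'] == 'task_list_item'
```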
|
0.0
|
ab0e8b697ada8f68899a569307510133e8f13771
|
[
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_006",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_007",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_008"
] |
[
"tests/test_ast.py::TestAstRenderer::test_0",
"tests/test_ast.py::TestAstRenderer::test_1",
"tests/test_ast.py::TestAstRenderer::test_10",
"tests/test_ast.py::TestAstRenderer::test_11",
"tests/test_ast.py::TestAstRenderer::test_12",
"tests/test_ast.py::TestAstRenderer::test_2",
"tests/test_ast.py::TestAstRenderer::test_3",
"tests/test_ast.py::TestAstRenderer::test_4",
"tests/test_ast.py::TestAstRenderer::test_5",
"tests/test_ast.py::TestAstRenderer::test_6",
"tests/test_ast.py::TestAstRenderer::test_7",
"tests/test_ast.py::TestAstRenderer::test_8",
"tests/test_ast.py::TestAstRenderer::test_9",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_001",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_002",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_003",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_004",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_005",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_006",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_007",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_008",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_009",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_010",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_011",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_012",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_013",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_014",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_015",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_016",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_017",
"tests/test_commonmark.py::TestCommonMark::test_atx_headings_018",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_001",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_003",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_004",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_005",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_006",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_007",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_008",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_009",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_010",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_011",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_012",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_013",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_014",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_015",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_016",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_017",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_018",
"tests/test_commonmark.py::TestCommonMark::test_autolinks_019",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_001",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_002",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_003",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_004",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_005",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_006",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_007",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_008",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_009",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_010",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_011",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_012",
"tests/test_commonmark.py::TestCommonMark::test_backslash_escapes_013",
"tests/test_commonmark.py::TestCommonMark::test_blank_lines_001",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_001",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_002",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_003",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_004",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_007",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_009",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_010",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_012",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_013",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_014",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_015",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_016",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_017",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_018",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_019",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_021",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_022",
"tests/test_commonmark.py::TestCommonMark::test_block_quotes_025",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_001",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_002",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_003",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_004",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_005",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_006",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_007",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_008",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_011",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_012",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_013",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_014",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_015",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_016",
"tests/test_commonmark.py::TestCommonMark::test_code_spans_017",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_001",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_002",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_003",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_006",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_007",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_008",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_009",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_010",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_011",
"tests/test_commonmark.py::TestCommonMark::test_entity_and_numeric_character_references_012",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_001",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_002",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_003",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_004",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_005",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_006",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_007",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_008",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_009",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_010",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_011",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_012",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_014",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_016",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_017",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_018",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_019",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_020",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_021",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_022",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_023",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_024",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_025",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_026",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_027",
"tests/test_commonmark.py::TestCommonMark::test_fenced_code_blocks_028",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_001",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_002",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_003",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_004",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_005",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_006",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_007",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_008",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_009",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_010",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_011",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_012",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_013",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_014",
"tests/test_commonmark.py::TestCommonMark::test_hard_line_breaks_015",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_001",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_002",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_003",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_004",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_005",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_006",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_007",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_008",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_009",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_010",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_011",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_012",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_013",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_014",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_015",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_016",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_017",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_018",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_019",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_020",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_021",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_022",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_023",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_024",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_025",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_026",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_027",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_028",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_029",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_030",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_031",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_032",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_033",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_034",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_035",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_036",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_037",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_038",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_040",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_041",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_042",
"tests/test_commonmark.py::TestCommonMark::test_html_blocks_043",
"tests/test_commonmark.py::TestCommonMark::test_images_001",
"tests/test_commonmark.py::TestCommonMark::test_images_007",
"tests/test_commonmark.py::TestCommonMark::test_images_008",
"tests/test_commonmark.py::TestCommonMark::test_images_009",
"tests/test_commonmark.py::TestCommonMark::test_images_010",
"tests/test_commonmark.py::TestCommonMark::test_images_011",
"tests/test_commonmark.py::TestCommonMark::test_images_012",
"tests/test_commonmark.py::TestCommonMark::test_images_013",
"tests/test_commonmark.py::TestCommonMark::test_images_015",
"tests/test_commonmark.py::TestCommonMark::test_images_016",
"tests/test_commonmark.py::TestCommonMark::test_images_017",
"tests/test_commonmark.py::TestCommonMark::test_images_019",
"tests/test_commonmark.py::TestCommonMark::test_images_020",
"tests/test_commonmark.py::TestCommonMark::test_images_021",
"tests/test_commonmark.py::TestCommonMark::test_images_022",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_001",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_002",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_003",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_004",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_005",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_006",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_007",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_008",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_009",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_010",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_011",
"tests/test_commonmark.py::TestCommonMark::test_indented_code_blocks_012",
"tests/test_commonmark.py::TestCommonMark::test_inlines_001",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_001",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_006",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_008",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_009",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_010",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_011",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_012",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_013",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_014",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_015",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_016",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_017",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_018",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_020",
"tests/test_commonmark.py::TestCommonMark::test_link_reference_definitions_022",
"tests/test_commonmark.py::TestCommonMark::test_links_001",
"tests/test_commonmark.py::TestCommonMark::test_links_002",
"tests/test_commonmark.py::TestCommonMark::test_links_003",
"tests/test_commonmark.py::TestCommonMark::test_links_004",
"tests/test_commonmark.py::TestCommonMark::test_links_005",
"tests/test_commonmark.py::TestCommonMark::test_links_006",
"tests/test_commonmark.py::TestCommonMark::test_links_007",
"tests/test_commonmark.py::TestCommonMark::test_links_008",
"tests/test_commonmark.py::TestCommonMark::test_links_009",
"tests/test_commonmark.py::TestCommonMark::test_links_010",
"tests/test_commonmark.py::TestCommonMark::test_links_011",
"tests/test_commonmark.py::TestCommonMark::test_links_012",
"tests/test_commonmark.py::TestCommonMark::test_links_013",
"tests/test_commonmark.py::TestCommonMark::test_links_014",
"tests/test_commonmark.py::TestCommonMark::test_links_015",
"tests/test_commonmark.py::TestCommonMark::test_links_016",
"tests/test_commonmark.py::TestCommonMark::test_links_017",
"tests/test_commonmark.py::TestCommonMark::test_links_018",
"tests/test_commonmark.py::TestCommonMark::test_links_019",
"tests/test_commonmark.py::TestCommonMark::test_links_020",
"tests/test_commonmark.py::TestCommonMark::test_links_021",
"tests/test_commonmark.py::TestCommonMark::test_links_022",
"tests/test_commonmark.py::TestCommonMark::test_links_023",
"tests/test_commonmark.py::TestCommonMark::test_links_024",
"tests/test_commonmark.py::TestCommonMark::test_links_026",
"tests/test_commonmark.py::TestCommonMark::test_links_027",
"tests/test_commonmark.py::TestCommonMark::test_links_028",
"tests/test_commonmark.py::TestCommonMark::test_links_030",
"tests/test_commonmark.py::TestCommonMark::test_links_035",
"tests/test_commonmark.py::TestCommonMark::test_links_036",
"tests/test_commonmark.py::TestCommonMark::test_links_040",
"tests/test_commonmark.py::TestCommonMark::test_links_042",
"tests/test_commonmark.py::TestCommonMark::test_links_044",
"tests/test_commonmark.py::TestCommonMark::test_links_048",
"tests/test_commonmark.py::TestCommonMark::test_links_052",
"tests/test_commonmark.py::TestCommonMark::test_links_053",
"tests/test_commonmark.py::TestCommonMark::test_links_054",
"tests/test_commonmark.py::TestCommonMark::test_links_055",
"tests/test_commonmark.py::TestCommonMark::test_links_056",
"tests/test_commonmark.py::TestCommonMark::test_links_057",
"tests/test_commonmark.py::TestCommonMark::test_links_058",
"tests/test_commonmark.py::TestCommonMark::test_links_059",
"tests/test_commonmark.py::TestCommonMark::test_links_061",
"tests/test_commonmark.py::TestCommonMark::test_links_062",
"tests/test_commonmark.py::TestCommonMark::test_links_063",
"tests/test_commonmark.py::TestCommonMark::test_links_066",
"tests/test_commonmark.py::TestCommonMark::test_links_067",
"tests/test_commonmark.py::TestCommonMark::test_links_068",
"tests/test_commonmark.py::TestCommonMark::test_links_069",
"tests/test_commonmark.py::TestCommonMark::test_links_070",
"tests/test_commonmark.py::TestCommonMark::test_links_071",
"tests/test_commonmark.py::TestCommonMark::test_links_072",
"tests/test_commonmark.py::TestCommonMark::test_links_073",
"tests/test_commonmark.py::TestCommonMark::test_links_074",
"tests/test_commonmark.py::TestCommonMark::test_links_075",
"tests/test_commonmark.py::TestCommonMark::test_links_076",
"tests/test_commonmark.py::TestCommonMark::test_links_078",
"tests/test_commonmark.py::TestCommonMark::test_links_079",
"tests/test_commonmark.py::TestCommonMark::test_links_080",
"tests/test_commonmark.py::TestCommonMark::test_links_081",
"tests/test_commonmark.py::TestCommonMark::test_links_083",
"tests/test_commonmark.py::TestCommonMark::test_list_items_001",
"tests/test_commonmark.py::TestCommonMark::test_list_items_002",
"tests/test_commonmark.py::TestCommonMark::test_list_items_003",
"tests/test_commonmark.py::TestCommonMark::test_list_items_004",
"tests/test_commonmark.py::TestCommonMark::test_list_items_006",
"tests/test_commonmark.py::TestCommonMark::test_list_items_007",
"tests/test_commonmark.py::TestCommonMark::test_list_items_008",
"tests/test_commonmark.py::TestCommonMark::test_list_items_009",
"tests/test_commonmark.py::TestCommonMark::test_list_items_010",
"tests/test_commonmark.py::TestCommonMark::test_list_items_011",
"tests/test_commonmark.py::TestCommonMark::test_list_items_012",
"tests/test_commonmark.py::TestCommonMark::test_list_items_013",
"tests/test_commonmark.py::TestCommonMark::test_list_items_014",
"tests/test_commonmark.py::TestCommonMark::test_list_items_015",
"tests/test_commonmark.py::TestCommonMark::test_list_items_016",
"tests/test_commonmark.py::TestCommonMark::test_list_items_017",
"tests/test_commonmark.py::TestCommonMark::test_list_items_018",
"tests/test_commonmark.py::TestCommonMark::test_list_items_019",
"tests/test_commonmark.py::TestCommonMark::test_list_items_020",
"tests/test_commonmark.py::TestCommonMark::test_list_items_021",
"tests/test_commonmark.py::TestCommonMark::test_list_items_022",
"tests/test_commonmark.py::TestCommonMark::test_list_items_023",
"tests/test_commonmark.py::TestCommonMark::test_list_items_025",
"tests/test_commonmark.py::TestCommonMark::test_list_items_026",
"tests/test_commonmark.py::TestCommonMark::test_list_items_027",
"tests/test_commonmark.py::TestCommonMark::test_list_items_029",
"tests/test_commonmark.py::TestCommonMark::test_list_items_030",
"tests/test_commonmark.py::TestCommonMark::test_list_items_031",
"tests/test_commonmark.py::TestCommonMark::test_list_items_032",
"tests/test_commonmark.py::TestCommonMark::test_list_items_034",
"tests/test_commonmark.py::TestCommonMark::test_list_items_035",
"tests/test_commonmark.py::TestCommonMark::test_list_items_036",
"tests/test_commonmark.py::TestCommonMark::test_list_items_037",
"tests/test_commonmark.py::TestCommonMark::test_list_items_042",
"tests/test_commonmark.py::TestCommonMark::test_list_items_043",
"tests/test_commonmark.py::TestCommonMark::test_list_items_044",
"tests/test_commonmark.py::TestCommonMark::test_list_items_045",
"tests/test_commonmark.py::TestCommonMark::test_list_items_046",
"tests/test_commonmark.py::TestCommonMark::test_list_items_047",
"tests/test_commonmark.py::TestCommonMark::test_list_items_048",
"tests/test_commonmark.py::TestCommonMark::test_lists_001",
"tests/test_commonmark.py::TestCommonMark::test_lists_002",
"tests/test_commonmark.py::TestCommonMark::test_lists_003",
"tests/test_commonmark.py::TestCommonMark::test_lists_004",
"tests/test_commonmark.py::TestCommonMark::test_lists_005",
"tests/test_commonmark.py::TestCommonMark::test_lists_006",
"tests/test_commonmark.py::TestCommonMark::test_lists_008",
"tests/test_commonmark.py::TestCommonMark::test_lists_009",
"tests/test_commonmark.py::TestCommonMark::test_lists_010",
"tests/test_commonmark.py::TestCommonMark::test_lists_011",
"tests/test_commonmark.py::TestCommonMark::test_lists_012",
"tests/test_commonmark.py::TestCommonMark::test_lists_013",
"tests/test_commonmark.py::TestCommonMark::test_lists_014",
"tests/test_commonmark.py::TestCommonMark::test_lists_015",
"tests/test_commonmark.py::TestCommonMark::test_lists_020",
"tests/test_commonmark.py::TestCommonMark::test_lists_021",
"tests/test_commonmark.py::TestCommonMark::test_lists_022",
"tests/test_commonmark.py::TestCommonMark::test_lists_023",
"tests/test_commonmark.py::TestCommonMark::test_lists_024",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_001",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_002",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_003",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_004",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_005",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_006",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_007",
"tests/test_commonmark.py::TestCommonMark::test_paragraphs_008",
"tests/test_commonmark.py::TestCommonMark::test_precedence_001",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_001",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_002",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_003",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_004",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_005",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_006",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_007",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_008",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_009",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_010",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_011",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_012",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_013",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_014",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_015",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_016",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_017",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_018",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_019",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_020",
"tests/test_commonmark.py::TestCommonMark::test_raw_html_021",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_001",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_004",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_005",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_006",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_008",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_009",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_010",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_011",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_012",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_014",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_016",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_017",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_018",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_019",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_020",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_021",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_022",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_023",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_024",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_025",
"tests/test_commonmark.py::TestCommonMark::test_setext_headings_026",
"tests/test_commonmark.py::TestCommonMark::test_soft_line_breaks_001",
"tests/test_commonmark.py::TestCommonMark::test_soft_line_breaks_002",
"tests/test_commonmark.py::TestCommonMark::test_tabs_001",
"tests/test_commonmark.py::TestCommonMark::test_tabs_002",
"tests/test_commonmark.py::TestCommonMark::test_tabs_003",
"tests/test_commonmark.py::TestCommonMark::test_tabs_004",
"tests/test_commonmark.py::TestCommonMark::test_tabs_005",
"tests/test_commonmark.py::TestCommonMark::test_tabs_006",
"tests/test_commonmark.py::TestCommonMark::test_tabs_007",
"tests/test_commonmark.py::TestCommonMark::test_tabs_008",
"tests/test_commonmark.py::TestCommonMark::test_tabs_009",
"tests/test_commonmark.py::TestCommonMark::test_tabs_010",
"tests/test_commonmark.py::TestCommonMark::test_tabs_011",
"tests/test_commonmark.py::TestCommonMark::test_textual_content_001",
"tests/test_commonmark.py::TestCommonMark::test_textual_content_002",
"tests/test_commonmark.py::TestCommonMark::test_textual_content_003",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_001",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_002",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_003",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_004",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_005",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_006",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_007",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_008",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_009",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_010",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_011",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_012",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_013",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_014",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_015",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_016",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_017",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_018",
"tests/test_commonmark.py::TestCommonMark::test_thematic_breaks_019",
"tests/test_directive.py::TestPluginAdmonition::test_admonition_options",
"tests/test_directive.py::TestPluginAdmonition::test_ast_admonition",
"tests/test_directive.py::TestPluginAdmonition::test_code_admonition",
"tests/test_directive.py::TestPluginAdmonition::test_note_admonition",
"tests/test_directive.py::TestPluginAdmonition::test_note_admonition_no_text",
"tests/test_directive.py::TestPluginAdmonition::test_unsupported_directive",
"tests/test_include.py::TestPluginDirective::test_ast_include",
"tests/test_include.py::TestPluginDirective::test_html_include",
"tests/test_include.py::TestPluginDirective::test_include_missing_source",
"tests/test_misc.py::TestMiscCases::test_allow_data_protocols",
"tests/test_misc.py::TestMiscCases::test_allow_harmful_protocols",
"tests/test_misc.py::TestMiscCases::test_before_parse_hooks",
"tests/test_misc.py::TestMiscCases::test_emphasis",
"tests/test_misc.py::TestMiscCases::test_escape_html",
"tests/test_misc.py::TestMiscCases::test_hard_wrap",
"tests/test_misc.py::TestMiscCases::test_harmful_links",
"tests/test_misc.py::TestMiscCases::test_none",
"tests/test_misc.py::TestMiscCases::test_use_plugin",
"tests/test_plugins.py::TestPlugin_url::test_autourl_001",
"tests/test_plugins.py::TestPlugin_strikethrough::test_strikethrough_001",
"tests/test_plugins.py::TestPlugin_strikethrough::test_strikethrough_002",
"tests/test_plugins.py::TestPlugin_strikethrough::test_strikethrough_003",
"tests/test_plugins.py::TestPlugin_strikethrough::test_strikethrough_004",
"tests/test_plugins.py::TestPlugin_strikethrough::test_strikethrough_005",
"tests/test_plugins.py::TestPlugin_strikethrough::test_strikethrough_006",
"tests/test_plugins.py::TestPlugin_footnotes::test_ast_renderer",
"tests/test_plugins.py::TestPlugin_footnotes::test_footnotes_001",
"tests/test_plugins.py::TestPlugin_footnotes::test_footnotes_002",
"tests/test_plugins.py::TestPlugin_footnotes::test_footnotes_003",
"tests/test_plugins.py::TestPlugin_table::test_align_table_001",
"tests/test_plugins.py::TestPlugin_table::test_align_table_002",
"tests/test_plugins.py::TestPlugin_table::test_ast_renderer",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_001",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_002",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_003",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_004",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_005",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_006",
"tests/test_plugins.py::TestPlugin_table::test_misc_table_007",
"tests/test_plugins.py::TestPlugin_table::test_nptable_001",
"tests/test_plugins.py::TestPlugin_table::test_table_001",
"tests/test_plugins.py::TestPlugin_table::test_table_002",
"tests/test_plugins.py::TestPlugin_task_lists::test_ast_renderer",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_001",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_002",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_003",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_004",
"tests/test_plugins.py::TestPlugin_task_lists::test_task_lists_005",
"tests/test_plugins.py::TestPlugin_def_list::test_ast_renderer",
"tests/test_plugins.py::TestPlugin_def_list::test_definition_lists_001",
"tests/test_plugins.py::TestPlugin_def_list::test_definition_lists_002",
"tests/test_plugins.py::TestPlugin_def_list::test_definition_lists_003",
"tests/test_plugins.py::TestPlugin_def_list::test_definition_lists_004",
"tests/test_plugins.py::TestPlugin_def_list::test_definition_lists_005",
"tests/test_plugins.py::TestPlugin_def_list::test_definition_lists_006",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_001",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_002",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_003",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_004",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_005",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_006",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_007",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_008",
"tests/test_plugins.py::TestPlugin_abbr::test_abbreviation_009",
"tests/test_plugins.py::TestPlugin_abbr::test_ast_renderer",
"tests/test_syntax.py::TestSyntax::test_axt_headings_001",
"tests/test_syntax.py::TestSyntax::test_axt_headings_002",
"tests/test_syntax.py::TestSyntax::test_axt_headings_003",
"tests/test_syntax.py::TestSyntax::test_axt_headings_004",
"tests/test_syntax.py::TestSyntax::test_axt_headings_005",
"tests/test_syntax.py::TestSyntax::test_axt_headings_006",
"tests/test_syntax.py::TestSyntax::test_axt_headings_007",
"tests/test_syntax.py::TestSyntax::test_axt_headings_008",
"tests/test_syntax.py::TestSyntax::test_blockquote_001",
"tests/test_syntax.py::TestSyntax::test_links_001",
"tests/test_syntax.py::TestSyntax::test_links_002",
"tests/test_syntax.py::TestSyntax::test_links_003",
"tests/test_syntax.py::TestSyntax::test_links_004",
"tests/test_syntax.py::TestSyntax::test_links_005",
"tests/test_syntax.py::TestSyntax::test_links_006",
"tests/test_syntax.py::TestSyntax::test_links_007",
"tests/test_syntax.py::TestSyntax::test_links_008",
"tests/test_syntax.py::TestSyntax::test_links_009",
"tests/test_syntax.py::TestSyntax::test_links_010",
"tests/test_syntax.py::TestSyntax::test_links_011",
"tests/test_syntax.py::TestSyntax::test_links_012",
"tests/test_syntax.py::TestSyntax::test_links_013",
"tests/test_syntax.py::TestSyntax::test_links_014",
"tests/test_syntax.py::TestSyntax::test_links_015",
"tests/test_syntax.py::TestSyntax::test_links_016",
"tests/test_syntax.py::TestSyntax::test_links_017",
"tests/test_syntax.py::TestSyntax::test_lists_001",
"tests/test_syntax.py::TestSyntax::test_lists_002",
"tests/test_syntax.py::TestSyntax::test_lists_003",
"tests/test_syntax.py::TestSyntax::test_setext_headings_001",
"tests/test_syntax.py::TestSyntax::test_setext_headings_002",
"tests/test_syntax.py::TestSyntax::test_setext_headings_003",
"tests/test_syntax.py::TestSyntax::test_setext_headings_004",
"tests/test_syntax.py::TestSyntax::test_setext_headings_005",
"tests/test_syntax.py::TestSyntax::test_setext_headings_006",
"tests/test_syntax.py::TestSyntax::test_setext_headings_007",
"tests/test_syntax.py::TestSyntax::test_thematic_breaks_001",
"tests/test_syntax.py::TestSyntax::test_thematic_breaks_002",
"tests/test_syntax.py::TestSyntax::test_thematic_breaks_003",
"tests/test_syntax.py::TestSyntax::test_thematic_breaks_004",
"tests/test_syntax.py::TestSyntax::test_thematic_breaks_005",
"tests/test_toc.py::TestPluginToc::test_complex_001",
"tests/test_toc.py::TestPluginToc::test_html_in_heading_001",
"tests/test_toc.py::TestPluginToc::test_insane_001",
"tests/test_toc.py::TestPluginToc::test_insane_002",
"tests/test_toc.py::TestPluginToc::test_invalid_option_001",
"tests/test_toc.py::TestPluginToc::test_link_in_heading_001",
"tests/test_toc.py::TestPluginToc::test_no_toc_001",
"tests/test_toc.py::TestPluginToc::test_no_toc_002",
"tests/test_toc.py::TestPluginToc::test_simple_toc_001",
"tests/test_toc.py::TestPluginTocAst::test_ast_renderer",
"tests/test_toc.py::TestPluginTocAst::test_extract_toc_items",
"tests/test_toc.py::TestPluginTocAst::test_render_toc_ul"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-25 11:22:17+00:00
|
bsd-3-clause
| 3,554 |
|
lepture__python-livereload-198
|
diff --git a/.gitignore b/.gitignore
index 8c31eb5..46b78ee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ pip-log.txt
.coverage
.tox
.env/
+venv/
docs/_build
example/style.css
diff --git a/livereload/watcher.py b/livereload/watcher.py
index b0545e6..8ac39ca 100644
--- a/livereload/watcher.py
+++ b/livereload/watcher.py
@@ -23,10 +23,14 @@ logger = logging.getLogger('livereload')
class Watcher(object):
- """A file watcher registery."""
+ """A file watcher registry."""
def __init__(self):
self._tasks = {}
- self._mtimes = {}
+
+ # modification time of filepaths for each task,
+ # before and after checking for changes
+ self._task_mtimes = {}
+ self._new_mtimes = {}
# setting changes
self._changes = []
@@ -65,6 +69,7 @@ class Watcher(object):
'func': func,
'delay': delay,
'ignore': ignore,
+ 'mtimes': {},
}
def start(self, callback):
@@ -73,7 +78,10 @@ class Watcher(object):
return False
def examine(self):
- """Check if there are changes, if true, run the given task."""
+ """Check if there are changes. If so, run the given task.
+
+ Returns a tuple of modified filepath and reload delay.
+ """
if self._changes:
return self._changes.pop()
@@ -82,6 +90,7 @@ class Watcher(object):
delays = set()
for path in self._tasks:
item = self._tasks[path]
+ self._task_mtimes = item['mtimes']
if self.is_changed(path, item['ignore']):
func = item['func']
delay = item['delay']
@@ -102,13 +111,49 @@ class Watcher(object):
return self.filepath, delay
def is_changed(self, path, ignore=None):
+ """Check if any filepaths have been added, modified, or removed.
+
+ Updates filepath modification times in self._task_mtimes.
+ """
+ self._new_mtimes = {}
+ changed = False
+
if os.path.isfile(path):
- return self.is_file_changed(path, ignore)
+ changed = self.is_file_changed(path, ignore)
elif os.path.isdir(path):
- return self.is_folder_changed(path, ignore)
- return self.is_glob_changed(path, ignore)
+ changed = self.is_folder_changed(path, ignore)
+ else:
+ changed = self.is_glob_changed(path, ignore)
+
+ if not changed:
+ changed = self.is_file_removed()
+
+ self._task_mtimes.update(self._new_mtimes)
+ return changed
+
+ def is_file_removed(self):
+ """Check if any filepaths have been removed since last check.
+
+ Deletes removed paths from self._task_mtimes.
+ Sets self.filepath to one of the removed paths.
+ """
+ removed_paths = set(self._task_mtimes) - set(self._new_mtimes)
+ if not removed_paths:
+ return False
+
+ for path in removed_paths:
+ self._task_mtimes.pop(path)
+ # self.filepath seems purely informational, so setting one
+ # of several removed files seems sufficient
+ self.filepath = path
+ return True
def is_file_changed(self, path, ignore=None):
+ """Check if filepath has been added or modified since last check.
+
+ Updates filepath modification times in self._new_mtimes.
+ Sets self.filepath to changed path.
+ """
if not os.path.isfile(path):
return False
@@ -120,20 +165,21 @@ class Watcher(object):
mtime = os.path.getmtime(path)
- if path not in self._mtimes:
- self._mtimes[path] = mtime
+ if path not in self._task_mtimes:
+ self._new_mtimes[path] = mtime
self.filepath = path
return mtime > self._start
- if self._mtimes[path] != mtime:
- self._mtimes[path] = mtime
+ if self._task_mtimes[path] != mtime:
+ self._new_mtimes[path] = mtime
self.filepath = path
return True
- self._mtimes[path] = mtime
+ self._new_mtimes[path] = mtime
return False
def is_folder_changed(self, path, ignore=None):
+ """Check if directory path has any changed filepaths."""
for root, dirs, files in os.walk(path, followlinks=True):
for d in self.ignored_dirs:
if d in dirs:
@@ -145,6 +191,7 @@ class Watcher(object):
return False
def is_glob_changed(self, path, ignore=None):
+ """Check if glob path has any changed filepaths."""
for f in glob.glob(path):
if self.is_file_changed(f, ignore):
return True
|
lepture/python-livereload
|
f80cb3ae0f8f2cdf38203a712fe25ef7f1899c34
|
diff --git a/tests/test_watcher.py b/tests/test_watcher.py
index 40529cd..07ee21c 100644
--- a/tests/test_watcher.py
+++ b/tests/test_watcher.py
@@ -32,19 +32,27 @@ class TestWatcher(unittest.TestCase):
assert watcher.is_changed(tmpdir) is False
# sleep 1 second so that mtime will be different
+ # TODO: This doesn't seem necessary; test passes without it
time.sleep(1)
- with open(os.path.join(tmpdir, 'foo'), 'w') as f:
+ filepath = os.path.join(tmpdir, 'foo')
+
+ with open(filepath, 'w') as f:
f.write('')
assert watcher.is_changed(tmpdir)
assert watcher.is_changed(tmpdir) is False
+ os.remove(filepath)
+ assert watcher.is_changed(tmpdir)
+ assert watcher.is_changed(tmpdir) is False
+
def test_watch_file(self):
watcher = Watcher()
watcher.count = 0
# sleep 1 second so that mtime will be different
+ # TODO: This doesn't seem necessary; test passes without it
time.sleep(1)
filepath = os.path.join(tmpdir, 'foo')
@@ -56,17 +64,25 @@ class TestWatcher(unittest.TestCase):
watcher.watch(filepath, add_count)
assert watcher.is_changed(filepath)
+ assert watcher.is_changed(filepath) is False
# sleep 1 second so that mtime will be different
+ # TODO: This doesn't seem necessary; test passes without it
time.sleep(1)
with open(filepath, 'w') as f:
f.write('')
- rv = watcher.examine()
- assert rv[0] == os.path.abspath(filepath)
+ abs_filepath = os.path.abspath(filepath)
+ assert watcher.examine() == (abs_filepath, None)
+ assert watcher.examine() == (None, None)
assert watcher.count == 1
+ os.remove(filepath)
+ assert watcher.examine() == (abs_filepath, None)
+ assert watcher.examine() == (None, None)
+ assert watcher.count == 2
+
def test_watch_glob(self):
watcher = Watcher()
watcher.watch(tmpdir + '/*')
@@ -82,8 +98,13 @@ class TestWatcher(unittest.TestCase):
with open(filepath, 'w') as f:
f.write('')
- rv = watcher.examine()
- assert rv[0] == os.path.abspath(filepath)
+ abs_filepath = os.path.abspath(filepath)
+ assert watcher.examine() == (abs_filepath, None)
+ assert watcher.examine() == (None, None)
+
+ os.remove(filepath)
+ assert watcher.examine() == (abs_filepath, None)
+ assert watcher.examine() == (None, None)
def test_watch_ignore(self):
watcher = Watcher()
@@ -94,3 +115,38 @@ class TestWatcher(unittest.TestCase):
f.write('')
assert watcher.examine() == (None, None)
+
+ def test_watch_multiple_dirs(self):
+ first_dir = os.path.join(tmpdir, 'first')
+ second_dir = os.path.join(tmpdir, 'second')
+
+ watcher = Watcher()
+
+ os.mkdir(first_dir)
+ watcher.watch(first_dir)
+ assert watcher.examine() == (None, None)
+
+ first_path = os.path.join(first_dir, 'foo')
+ with open(first_path, 'w') as f:
+ f.write('')
+ assert watcher.examine() == (first_path, None)
+ assert watcher.examine() == (None, None)
+
+ os.mkdir(second_dir)
+ watcher.watch(second_dir)
+ assert watcher.examine() == (None, None)
+
+ second_path = os.path.join(second_dir, 'bar')
+ with open(second_path, 'w') as f:
+ f.write('')
+ assert watcher.examine() == (second_path, None)
+ assert watcher.examine() == (None, None)
+
+ with open(first_path, 'a') as f:
+ f.write('foo')
+ assert watcher.examine() == (first_path, None)
+ assert watcher.examine() == (None, None)
+
+ os.remove(second_path)
+ assert watcher.examine() == (second_path, None)
+ assert watcher.examine() == (None, None)
|
Doesn't detect file deletion on macOS/Windows
I was hoping to use this as a replacement for Pelican's development server, which has an option to automatically regenerate content when a file is created, updated, or deleted, but doesn't refresh the browser. I followed an [existing example](https://merlijn.vandeen.nl/2015/pelican-livereload.html) to get it going, but was surprised that files deleted from a watched directory/glob don't trigger a reload.
Looking through [watcher.py](https://github.com/lepture/python-livereload/blob/master/livereload/watcher.py), it looks like this might work on Linux via `INotifyWatcher` and `pyinotify`. However, it seems that the base `Watcher` class used on macOS/Windows walks the glob/directory tree looking for changes to files based on modification time, with no check for files that no longer exist.
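
For illustration, a minimal sketch of how an mtime-polling watcher can also notice deletions: any path seen on the previous pass but absent from the current one counts as a change. The class and method names here are hypothetical; the real fix is the `Watcher` patch above.

```python
import os

class MtimeWatcher:
    """Hypothetical sketch, not livereload's actual API."""

    def __init__(self):
        self._mtimes = {}  # path -> modification time from the last pass

    def is_changed(self, paths):
        new_mtimes = {}
        changed = False
        for path in paths:
            if not os.path.isfile(path):
                continue  # removed paths simply drop out of new_mtimes
            mtime = os.path.getmtime(path)
            new_mtimes[path] = mtime
            if self._mtimes.get(path) != mtime:
                changed = True  # newly added file, or its mtime moved
        if set(self._mtimes) - set(new_mtimes):
            changed = True  # something tracked last pass is gone now
        self._mtimes = new_mtimes
        return changed
```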
|
0.0
|
f80cb3ae0f8f2cdf38203a712fe25ef7f1899c34
|
[
"tests/test_watcher.py::TestWatcher::test_watch_dir",
"tests/test_watcher.py::TestWatcher::test_watch_file",
"tests/test_watcher.py::TestWatcher::test_watch_glob"
] |
[
"tests/test_watcher.py::TestWatcher::test_watch_ignore"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-14 20:44:39+00:00
|
bsd-3-clause
| 3,555 |
|
level12__keg-191
|
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 4a07fac..7b9caf0 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -25,3 +25,4 @@ Contents
web
readme/features
readme/configuration
+ readme/internationalization
diff --git a/docs/source/readme/installation.rst b/docs/source/readme/installation.rst
index 2c8a4e6..97859b6 100644
--- a/docs/source/readme/installation.rst
+++ b/docs/source/readme/installation.rst
@@ -2,3 +2,25 @@ Installation
============
``pip install keg``
+
+
+Upgrade Notes
+=============
+
+While we attempt to preserve backward compatibility, some Keg versions do introduce
+breaking changes. This list should provide information on needed app changes.
+
+- 0.10.0
+ - ``rule``: default class view route no longer generated when any rules are present
+
+ - Absolute route had been provided automatically from the class name, but in some situations
+ this would not be desired. Views that still need that route can use a couple of solutions:
+
+ - Provide an absolute route rule: ``rule('/my-route')``
+ - Use an empty relative route rule: ``rule()``
+
+ - All of an app's routes may be shown on CLI with the ``<app> develop routes`` command
+
+ - Removed ``keg`` blueprint along with ``ping`` and ``exception-test`` routes
+ - DB manager ``prep_empty`` method no longer called (had been deprecated)
+ - Python 2 support removed
diff --git a/docs/source/readme/internationalization.rst b/docs/source/readme/internationalization.rst
new file mode 100644
index 0000000..7a18fc6
--- /dev/null
+++ b/docs/source/readme/internationalization.rst
@@ -0,0 +1,18 @@
+Internationalization
+====================
+
+Keg can optionally be installed with the ``morphi`` library to use ``babel`` for internationalization::
+
+ pip install keg[i18n]
+
+The ``setup.cfg`` file is configured to handle the standard message extraction commands. For ease of development
+and ensuring that all marked strings have translations, a tox environment is defined for testing i18n. This will
+run commands to update and compile the catalogs, and specify any strings which need to be added.
+
+The desired workflow here is to run tox, update strings in the PO files as necessary, run tox again
+(until it passes), and then commit the changes to the catalog files.
+
+.. code::
+
+ tox -e i18n
+
diff --git a/keg/app.py b/keg/app.py
index abf0b5c..c6519b9 100644
--- a/keg/app.py
+++ b/keg/app.py
@@ -3,7 +3,6 @@ import importlib
import flask
from werkzeug.datastructures import ImmutableDict
-from keg.blueprints import keg as kegbp
import keg.cli
import keg.config
from keg.ctx import KegRequestContext
@@ -137,8 +136,6 @@ class Keg(flask.Flask):
comp_object.init_app(self, parent_path=comp_path)
def init_blueprints(self):
- # TODO: probably want to be selective about adding our blueprint
- self.register_blueprint(kegbp)
for blueprint in self.use_blueprints:
self.register_blueprint(blueprint)
diff --git a/keg/blueprints/__init__.py b/keg/blueprints/__init__.py
deleted file mode 100644
index 70d297c..0000000
--- a/keg/blueprints/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import flask
-
-from keg.extensions import lazy_gettext as _
-
-
-keg = flask.Blueprint('keg', __name__)
-
-
[email protected]('/exception-test')
-def exception_test():
- raise Exception(_('Deliberate exception for testing purposes'))
-
-
[email protected]('/ping')
-def ping():
- return '{} ok'.format(flask.current_app.name)
diff --git a/keg/web.py b/keg/web.py
index a6b09dc..cae3c59 100644
--- a/keg/web.py
+++ b/keg/web.py
@@ -315,16 +315,16 @@ class BaseView(MethodView, metaclass=_ViewMeta):
str_endpoint = str(endpoint)
view_func = cls.as_view(str_endpoint)
- absolute_found = False
for rule, options in rules:
- if rule.startswith('/'):
- absolute_found = True
+ if rule and rule.startswith('/'):
class_url = rule
+ elif not rule:
+ rule = class_url
else:
rule = '{}/{}'.format(class_url, rule)
cls.blueprint.add_url_rule(rule, endpoint=endpoint, view_func=view_func, **options)
- if not absolute_found:
+ if not rules:
cls.blueprint.add_url_rule(class_url, endpoint=endpoint, view_func=view_func)
for rule, options in cls.url_rules:
diff --git a/keg_apps/web/views/routing.py b/keg_apps/web/views/routing.py
index f2dc75c..b625291 100644
--- a/keg_apps/web/views/routing.py
+++ b/keg_apps/web/views/routing.py
@@ -60,10 +60,10 @@ class ExplicitRouteAlt(KegBaseView):
class HelloWorld(BaseView):
"""
- /hello -> 'Hello World'
- /hello/foo -> 'Hello Foo'
+ /hello-world -> 'Hello World'
+ /hello-world/foo -> 'Hello Foo'
"""
- # relative URL indicates this route should be appended to the default rule for the class
+ rule()
rule('<name>')
def get(self, name='World'):
@@ -101,6 +101,18 @@ class HelloReq(BaseView):
return _('Hello {name}', name=name)
+class HelloReq2(BaseView):
+ """
+ /hello-req2 -> 404
+ /hello-req2/foo -> 'Hello Foo'
+ """
+ # no absolute rule, but only one endpoint to use
+ rule('<name>')
+
+ def get(self, name):
+ return _('Hello {name}', name=name)
+
+
class Cars(BaseView):
"""
CRUD for a model/entity
|
level12/keg
|
a528fe2c6381fd4d845060218a134c57d0ad71b9
|
diff --git a/keg/tests/test_view_routing.py b/keg/tests/test_view_routing.py
index 03fb8ed..84f069a 100644
--- a/keg/tests/test_view_routing.py
+++ b/keg/tests/test_view_routing.py
@@ -54,6 +54,7 @@ class TestViewRouting(WebBase):
'routing.cars:list',
'routing.explicit-route',
'routing.hello-req',
+ 'routing.hello-req2',
'routing.hello-world',
'routing.hw-rule-default',
'routing.misc',
@@ -210,6 +211,20 @@ class TestViewRouting(WebBase):
assert rule.methods == {'GET', 'HEAD', 'OPTIONS'}
assert rule.endpoint == 'routing.hello-req'
+ def test_route_no_absolute_single_endpoint(self):
+ self.testapp.get('/hello-req2', status=404)
+
+ resp = self.testapp.get('/hello-req2/foo')
+ assert resp.text == 'Hello foo'
+
+ rules = list(self.app.url_map.iter_rules(endpoint='routing.hello-req2'))
+ assert len(rules) == 1
+ rule = rules.pop()
+
+ assert rule.rule == '/hello-req2/<name>'
+ assert rule.methods == {'GET', 'HEAD', 'OPTIONS'}
+ assert rule.endpoint == 'routing.hello-req2'
+
def test_route_plain(self):
resp = self.testapp.get('/cars/list')
assert resp.text == 'list'
|
rule() when used w/ a relative URL should not result in the default URL also being available
```python
class ApprovalReview(ApprovalBaseView):
rule('<int:week_id>', post=True)
def get(self, week_id):
pass
```
```
$ marshal develop routes | grep review
admin.approval-review GET,OPTIONS,HEAD,POST /admin/approval-review
admin.approval-review GET,OPTIONS,HEAD,POST /admin/approval-review/<int:week_id>
```
I was expecting only the second route in the list above to be generated. I think the first route being present is a bug.
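
The patch above settles this by generating the default class route only when a view declares no rules at all. Condensed into a standalone sketch (the function signature is illustrative, not Keg's actual internals), the registration logic becomes:

```python
def register_rules(blueprint, class_url, rules, endpoint, view_func):
    for rule, options in rules:
        if rule and rule.startswith('/'):
            class_url = rule  # an absolute rule rebases the class URL
        elif not rule:
            rule = class_url  # bare rule() opts back into the default URL
        else:
            rule = '{}/{}'.format(class_url, rule)  # relative rules append
        blueprint.add_url_rule(rule, endpoint=endpoint,
                               view_func=view_func, **options)
    if not rules:
        # only when no rules are declared does the default URL survive
        blueprint.add_url_rule(class_url, endpoint=endpoint, view_func=view_func)
```

With this logic, `ApprovalReview` above registers only `/admin/approval-review/<int:week_id>`.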
|
0.0
|
a528fe2c6381fd4d845060218a134c57d0ad71b9
|
[
"keg/tests/test_view_routing.py::TestViewRouting::test_route_endpoints",
"keg/tests/test_view_routing.py::TestViewRouting::test_route_no_absolute_single_endpoint"
] |
[
"keg/tests/test_view_routing.py::TestViewCalculations::test_view_defaults_no_blueprint",
"keg/tests/test_view_routing.py::TestViewCalculations::test_view_url_no_blueprint",
"keg/tests/test_view_routing.py::TestViewCalculations::test_view_url_with_blueprint_prefix",
"keg/tests/test_view_routing.py::TestViewCalculations::test_view_defaults_with_blueprint",
"keg/tests/test_view_routing.py::TestViewRouting::test_verb_routing",
"keg/tests/test_view_routing.py::TestViewRouting::test_subclassing",
"keg/tests/test_view_routing.py::TestViewRouting::test_explicit_route",
"keg/tests/test_view_routing.py::TestViewRouting::test_explicit_route_assigned_blueprint",
"keg/tests/test_view_routing.py::TestViewRouting::test_blueprint_routes",
"keg/tests/test_view_routing.py::TestViewRouting::test_hello_world",
"keg/tests/test_view_routing.py::TestViewRouting::test_hello_world_defaults",
"keg/tests/test_view_routing.py::TestViewRouting::test_route_with_required_rule",
"keg/tests/test_view_routing.py::TestViewRouting::test_route_plain",
"keg/tests/test_view_routing.py::TestViewRouting::test_route_method_verb_suffix",
"keg/tests/test_view_routing.py::TestViewRouting::test_route_relative",
"keg/tests/test_view_routing.py::TestViewRouting::test_route_on_http_verb_method",
"keg/tests/test_view_routing.py::TestViewRouting::test_rest_example",
"keg/tests/test_view_routing.py::TestViewRouting::test_multiple_class_rules",
"keg/tests/test_view_routing.py::TestViewRouting::test_abs_route",
"keg/tests/test_view_routing.py::TestViewRouting::test_two_routes",
"keg/tests/test_view_routing.py::TestViewRouting::test_abstract_class_usage",
"keg/tests/test_view_routing.py::TestViewRouting::test_routing_decorator_class_context",
"keg/tests/test_view_routing.py::TestViewRouting::test_routing_decorator_instance_context"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-28 17:02:53+00:00
|
bsd-2-clause
| 3,556 |
|
level12__keg-elements-132
|
diff --git a/keg_elements/forms/__init__.py b/keg_elements/forms/__init__.py
index 22baffc..c045e2c 100644
--- a/keg_elements/forms/__init__.py
+++ b/keg_elements/forms/__init__.py
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
import functools
import inspect
import logging
-from collections import namedtuple
from operator import attrgetter
import flask
@@ -12,6 +11,7 @@ from decimal import Decimal
from flask_wtf import FlaskForm as BaseForm
from keg.db import db
import sqlalchemy as sa
+from markupsafe import Markup
from sqlalchemy_utils import ArrowType
import six
import wtforms.fields
@@ -274,11 +274,11 @@ class TypeHintingTextInputB3(_TypeHintingTextInputBase):
"""
def __call__(self, field, **kwargs):
def make_addon(txt):
- return wtforms.widgets.HTMLString(
+ return Markup(
'<span class="input-group-addon">{}</span>'.format(wtforms.widgets.core.escape(txt))
)
- return wtforms.widgets.HTMLString(
+ return Markup(
'<div class="input-group">{pre}{field}{post}</div>'.format(
pre=make_addon(self.prefix) if self.prefix else '',
field=super().__call__(field, **kwargs).__html__(),
@@ -294,13 +294,13 @@ class TypeHintingTextInputB4(_TypeHintingTextInputBase):
"""
def __call__(self, field, **kwargs):
def make_addon(txt, addon_type):
- return wtforms.widgets.HTMLString(
+ return Markup(
'<div class="input-group-{type}">'
' <span class="input-group-text">{txt}</span>'
"</div>".format(type=addon_type, txt=wtforms.widgets.core.escape(txt))
)
- return wtforms.widgets.HTMLString(
+ return Markup(
'<div class="input-group">{pre}{field}{post}</div>'.format(
pre=make_addon(self.prefix, "prepend") if self.prefix else "",
field=super().__call__(field, **kwargs).__html__(),
@@ -404,7 +404,7 @@ def form_validator(func=None, only_when_fields_valid=False):
@functools.wraps(func)
def wrapper(form):
- if not only_when_fields_valid or not form.errors:
+ if not only_when_fields_valid or not form.field_errors:
return func(form)
global ___validator_creation_counter
@@ -499,14 +499,15 @@ class Form(BaseForm):
return self._form_level_errors
@property
- def errors(self):
- if self._errors is None:
- self._errors = {name: f.errors for name, f in six.iteritems(self._fields) if f.errors}
- return self._errors
+ def field_errors(self):
+ return super().errors
@property
- def all_errors(self):
- return namedtuple('Errors', ['field', 'form'])(self.errors, self.form_errors)
+ def errors(self):
+ errors = self.field_errors
+ if self.form_errors:
+ errors['_form'] = self.form_errors
+ return errors
BaseModelForm = model_form_factory(Form, form_generator=FormGenerator)
|
level12/keg-elements
|
0ea69ab464c61c93338196d167c220859130196b
|
diff --git a/keg_elements/tests/test_forms/test_form.py b/keg_elements/tests/test_forms/test_form.py
index d5c31fb..0db8afe 100644
--- a/keg_elements/tests/test_forms/test_form.py
+++ b/keg_elements/tests/test_forms/test_form.py
@@ -26,6 +26,7 @@ class FormBase(object):
class _ComposedForm(ModelForm):
class Meta:
model = self.entity_cls
+ csrf = False
if fields_meta_cls:
FieldsMeta = fields_meta_cls
@@ -64,7 +65,7 @@ class TestSelectField(FormBase):
form_cls = FruitForm
def test_blank_choice_rendering(self):
- form = FruitForm(csrf_enabled=False)
+ form = FruitForm(meta={'csrf': False})
fruit_options = pq(form.fruit())('option')
assert fruit_options.length == 3
assert fruit_options.eq(0).attr('value') == ''
@@ -100,7 +101,7 @@ class TestRequiredBoolRadioField(FormBase):
form_cls = RequiredBoolMockForm
def test_required_bool_radio_rendering(self):
- form = self.RequiredBoolMockForm(csrf_enabled=False)
+ form = self.RequiredBoolMockForm(meta={'csrf': False})
is_competent_labels = pq(form.is_competent())('label')
assert is_competent_labels.length == 2
assert is_competent_labels.eq(0).text() == 'Yes'
@@ -131,7 +132,7 @@ class TestDefaultTypeOfRequiredBooleanField(FormBase):
entity_cls = ents.ThingWithRequiredBoolean
def test_field_types(self):
- form = self.compose_meta(csrf_enabled=False)
+ form = self.compose_meta()
assert type(form.nullable_boolean) == wtf.fields.BooleanField
assert type(form.required_boolean) == ke_forms.RequiredBoolRadioField
assert type(form.required_boolean_with_default) == wtf.fields.BooleanField
@@ -161,7 +162,7 @@ class TestFieldMeta(FormBase):
assert not form.color.flags.required
def test_widget_no_override(self):
- form = self.compose_meta(csrf_enabled=False)
+ form = self.compose_meta()
assert type(form.color.widget) == wtforms_components.widgets.TextInput
def test_widget_override(self):
@@ -169,7 +170,7 @@ class TestFieldMeta(FormBase):
__default__ = FieldMeta
color = FieldMeta(widget=wtf.widgets.TextArea())
- form = self.compose_meta(fields_meta_cls=WidgetOverrideFieldsMeta, csrf_enabled=False)
+ form = self.compose_meta(fields_meta_cls=WidgetOverrideFieldsMeta)
assert type(form.color.widget) == wtf.widgets.TextArea
def test_extra_validators(self):
@@ -181,11 +182,11 @@ class TestFieldMeta(FormBase):
__default__ = FieldMeta
color = FieldMeta(extra_validators=[_is_roy])
- form = self.compose_meta(fields_meta_cls=ExtraValidatorsFieldsMeta, csrf_enabled=False,
+ form = self.compose_meta(fields_meta_cls=ExtraValidatorsFieldsMeta,
name='Test', color='red')
assert form.validate()
- form = self.compose_meta(fields_meta_cls=ExtraValidatorsFieldsMeta, csrf_enabled=False,
+ form = self.compose_meta(fields_meta_cls=ExtraValidatorsFieldsMeta,
name='Test', color='mauve')
assert not form.validate()
assert set(form.color.errors) == {'Not a ROY color'}
@@ -196,8 +197,7 @@ class TestFieldMeta(FormBase):
units = FieldMeta(coerce=lambda x: ents.Units[x.lower()] if x else x,
choices=[(x, x.value) for x in ents.Units])
- form = self.compose_meta(fields_meta_cls=CoerceFieldsMeta, csrf_enabled=False,
- name='Test', units='FEET')
+ form = self.compose_meta(fields_meta_cls=CoerceFieldsMeta, name='Test', units='FEET')
assert isinstance(form.units, wtf.SelectField)
assert form.validate()
assert isinstance(form.units.data, ents.Units)
@@ -216,7 +216,7 @@ class TestFieldMeta(FormBase):
__default__ = FieldMeta
name = FieldMeta(default='foo')
- form = self.compose_meta(fields_meta_cls=DefaultFieldsMeta, csrf_enabled=False)
+ form = self.compose_meta(fields_meta_cls=DefaultFieldsMeta)
assert form.name.default == 'foo'
assert form.color.default is None
@@ -285,18 +285,18 @@ class TestValidators(FormBase):
assert len(form.float_check.validators) == 1
def test_length_validation_not_applied_for_enums(self):
- form = self.compose_meta(csrf_enabled=False)
+ form = self.compose_meta()
for validator in form.units.validators:
assert not isinstance(validator, wtf.validators.Length)
class FeaturesForm(Form):
- name = wtf.StringField(validators=[validators.required()])
+ name = wtf.StringField(validators=[validators.data_required()])
color = wtf.StringField()
class NumbersSubForm(wtf.Form):
- number = wtf.StringField('Number', validators=[validators.required()])
+ number = wtf.StringField('Number', validators=[validators.data_required()])
color = wtf.StringField('Color')
@@ -563,12 +563,14 @@ class TestFormLevelValidation(FormBase):
def test_form_valid(self):
form = self.assert_valid(num1=5, num2=37, num3=100)
assert form.form_errors == []
- assert form.all_errors == ({}, [])
+ assert form.field_errors == {}
+ assert form.errors == {}
def test_form_invalid(self):
form = self.assert_invalid(num1=40, num2=3, num3=50)
assert form.form_errors == ['Does not add up', 'Out of order']
- assert form.all_errors == ({}, ['Does not add up', 'Out of order'])
+ assert form.field_errors == {}
+ assert form.errors == {'_form': ['Does not add up', 'Out of order']}
def test_stop_validation_with_error(self):
class StopValidationForm(Form):
@@ -586,7 +588,8 @@ class TestFormLevelValidation(FormBase):
form = self.assert_invalid(form_cls=StopValidationForm, s1='v1', s2='v2')
assert form.form_errors == ['not equal']
- assert form.all_errors == ({}, ['not equal'])
+ assert form.field_errors == {}
+ assert form.errors == {'_form': ['not equal']}
def test_stop_validation_no_error(self):
class StopValidationForm(Form):
@@ -604,7 +607,8 @@ class TestFormLevelValidation(FormBase):
form = self.assert_valid(form_cls=StopValidationForm, s1='v1', s2='v2')
assert form.form_errors == []
- assert form.all_errors == ({}, [])
+ assert form.field_errors == {}
+ assert form.errors == {}
def test_invalid_with_field_errors(self):
class InvalidFieldsForm(Form):
@@ -618,8 +622,11 @@ class TestFormLevelValidation(FormBase):
form = self.assert_invalid(form_cls=InvalidFieldsForm, s1='1234', s2='4321')
assert form.form_errors == ['not equal']
- assert form.all_errors == (
- {'s1': ['Field cannot be longer than 3 characters.']}, ['not equal'])
+ assert form.field_errors == {'s1': ['Field cannot be longer than 3 characters.']}
+ assert form.errors == {
+ '_form': ['not equal'],
+ 's1': ['Field cannot be longer than 3 characters.']
+ }
def test_do_not_validate_with_field_errors(self):
class InvalidFieldsForm(Form):
@@ -632,8 +639,8 @@ class TestFormLevelValidation(FormBase):
form = self.assert_invalid(form_cls=InvalidFieldsForm, s1='1234', s2='4321')
assert form.form_errors == []
- assert form.all_errors == (
- {'s1': ['Field cannot be longer than 3 characters.']}, [])
+ assert form.field_errors == {'s1': ['Field cannot be longer than 3 characters.']}
+ assert form.errors == {'s1': ['Field cannot be longer than 3 characters.']}
def test_validators_inherited(self):
class SubclassForm(self.MyForm):
@@ -649,11 +656,15 @@ class TestFormLevelValidation(FormBase):
form = self.assert_invalid(num1=7, num2=5, num3=51, form_cls=SubclassForm)
assert form.form_errors == ['Out of order', 'Num3 is odd', 'Does not compute']
- assert form.all_errors == ({}, ['Out of order', 'Num3 is odd', 'Does not compute'])
+ assert form.field_errors == {}
+ assert form.errors == {
+ '_form': ['Out of order', 'Num3 is odd', 'Does not compute']
+ }
form = self.assert_valid(num1=6, num2=7, num3=50, form_cls=SubclassForm)
assert form.form_errors == []
- assert form.all_errors == ({}, [])
+ assert form.field_errors == {}
+ assert form.errors == {}
class TestExcludesDatetimes(FormBase):
|
Form.errors, .all_errors, and .form_errors
Right now, we have:
* `Form.errors`: field errors
* `Form.form_errors`: form level errors
* `Form.all_errors`: form & field level errors
I think it would make more sense to have:
* `Form.field_errors`: field errors
* `Form.form_errors`: form level errors
* `Form.errors`: form & field level errors (see the sketch after this list)
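
A minimal sketch of that layout, mirroring the patch above (the base class and the form-level error storage are simplified placeholders, and a WTForms 2-style `errors` property is assumed):

```python
import wtforms

class Form(wtforms.Form):
    _form_level_errors = None  # filled in by form-level validators

    @property
    def form_errors(self):
        return self._form_level_errors or []

    @property
    def field_errors(self):
        return super().errors  # WTForms' per-field error dict

    @property
    def errors(self):
        errors = dict(self.field_errors)
        if self.form_errors:
            errors['_form'] = self.form_errors  # reserved key for form-level
        return errors
```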
|
0.0
|
0ea69ab464c61c93338196d167c220859130196b
|
[
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_prefix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_suffix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_both",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_text_escaped",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_prefix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_suffix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_both",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_text_escaped"
] |
[
"keg_elements/tests/test_forms/test_form.py::TestFieldsToDict::test_field_to_dict_field"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-19 17:59:00+00:00
|
bsd-3-clause
| 3,557 |
|
level12__keg-elements-134
|
diff --git a/keg_elements/forms/__init__.py b/keg_elements/forms/__init__.py
index c045e2c..4bde279 100644
--- a/keg_elements/forms/__init__.py
+++ b/keg_elements/forms/__init__.py
@@ -365,6 +365,8 @@ class FormGenerator(FormGeneratorBase):
def create_validators(self, prop, column):
validators = super(FormGenerator, self).create_validators(prop, column)
if isinstance(column.type, sa.Numeric) and not isinstance(column.type, sa.Float):
+ if column.type.precision is None or column.type.scale is None:
+ raise ValueError('Numeric fields must specify precision and scale')
max_ = _max_for_numeric(column.type.precision, column.type.scale)
validators.append(NumberRange(min=-max_, max=max_))
validators.append(NumberScale(column.type.scale))
diff --git a/keg_elements/forms/validators.py b/keg_elements/forms/validators.py
index 62be761..56e0dc1 100644
--- a/keg_elements/forms/validators.py
+++ b/keg_elements/forms/validators.py
@@ -44,7 +44,7 @@ def numeric(form, field):
class NumberScale(object):
- def __init__(self, scale=-1, message=None):
+ def __init__(self, scale, message=None):
self.scale = scale
if not message:
message = _(u'Field must have no more than {scale} decimal places.', scale=scale)
@@ -56,7 +56,7 @@ class NumberScale(object):
return
# use decimal's quantization to see if it's equal to field data at the full scale.
# If it isn't, user entered too many decimal places
- if field.data != field.data.quantize(Decimal('0.{}1'.format('0' * (self.scale - 1)))):
+ if field.data != field.data.quantize(Decimal(1) / 10 ** self.scale):
raise ValidationError(self.message)
diff --git a/kegel_app/model/entities.py b/kegel_app/model/entities.py
index 59c5a0e..83a6abe 100644
--- a/kegel_app/model/entities.py
+++ b/kegel_app/model/entities.py
@@ -219,3 +219,9 @@ class SoftDeleteTester(mixins.SoftDeleteMixin, mixins.DefaultMixin, db.Model):
def testing_create(cls, **kwargs):
kwargs['hdp_id'] = kwargs.get('hpd_id') or HardDeleteParent.testing_create().id
return super().testing_create(**kwargs)
+
+
+class DefaultNumeric(mixins.DefaultMixin, db.Model):
+ __tablename__ = 'default_numeric'
+
+ number = sa.Column(sa.Numeric)
|
level12/keg-elements
|
e590f2da7161670ad36b20cde3cb7f01d1e466ff
|
diff --git a/keg_elements/testing.py b/keg_elements/testing.py
index c82551e..963cddf 100644
--- a/keg_elements/testing.py
+++ b/keg_elements/testing.py
@@ -130,6 +130,15 @@ class EntityBase(object):
assert col.onupdate, 'Column "{}" should have onupdate set'.format(col.name)
assert col.server_default, 'Column "{}" should have server_default set'.format(col.name)
+ def test_column_numeric_scale_precision_set(self):
+ for col_check in self.column_check_generator():
+ col = getattr(self.entity_cls, col_check.name)
+ if isinstance(col.type, sa.Numeric) and not isinstance(col.type, sa.Float):
+ assert col.type.precision is not None, \
+ 'Column "{}" does not specify precision'.format(col.name)
+ assert col.type.scale is not None, \
+ 'Column "{}" does not specify scale'.format(col.name)
+
def test_all_columns_are_constraint_tested(self):
"""Checks that all fields declared on entity are in the constraint tests"""
diff --git a/keg_elements/tests/test_forms/test_form.py b/keg_elements/tests/test_forms/test_form.py
index 0db8afe..01b92a6 100644
--- a/keg_elements/tests/test_forms/test_form.py
+++ b/keg_elements/tests/test_forms/test_form.py
@@ -284,6 +284,19 @@ class TestValidators(FormBase):
assert len(form.float_check.validators) == 1
+ def test_numeric_scale_precision_required(self):
+ with pytest.raises(ValueError, match='Numeric fields must specify precision and scale'):
+ class TestForm1(ke_forms.ModelForm):
+ class Meta:
+ model = ents.DefaultNumeric
+
+ class TestForm2(ke_forms.ModelForm):
+ class Meta:
+ model = ents.DefaultNumeric
+ exclude = ('number',)
+
+ number = wtf.DecimalField('Number', validators=[])
+
def test_length_validation_not_applied_for_enums(self):
form = self.compose_meta()
for validator in form.units.validators:
|
ModelForm Requires Precision and Scale to be Set on Numeric Columns
It seems that, in order to create validators for a `ModelForm`, any Numeric columns on the model are assumed to have both precision and scale set (focus on lines 367-368):
https://github.com/level12/keg-elements/blob/0ea69ab464c61c93338196d167c220859130196b/keg_elements/forms/__init__.py#L365-L371
https://github.com/level12/keg-elements/blob/0ea69ab464c61c93338196d167c220859130196b/keg_elements/forms/__init__.py#L312-L313
I was setting up a `ModelForm` for a model with a Numeric column on which precision and scale were _not_ set. It failed like this:
```
keg_elements/forms/__init__.py:315: in _max_for_numeric
return Decimal('{}.{}'.format('9' * (digits - scale), '9' * scale))
E TypeError: unsupported operand type(s) for -: 'NoneType' and 'NoneType'
```
In my mind there are two possible directions to go with a fix for this:
1. We still want to require Numeric columns to have precision and scale set in order for us to generate validators for that form field. In that case, we should probably assert that precision and scale are set and, if they aren't, give a more helpful error message (this option is sketched below).
2. We do not want to require Numeric columns to have precision and scale set. In that case, we should probably not call `_max_for_numeric` if either value is missing.
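
A sketch of option 1, which is the direction the patch above takes (`numeric_range_bounds` is a hypothetical stand-in for the validator-creation hook, while `_max_for_numeric` mirrors the helper quoted in the traceback):

```python
from decimal import Decimal

def _max_for_numeric(digits, scale):
    # e.g. digits=5, scale=2 -> Decimal('999.99')
    return Decimal('{}.{}'.format('9' * (digits - scale), '9' * scale))

def numeric_range_bounds(precision, scale):
    if precision is None or scale is None:
        raise ValueError('Numeric fields must specify precision and scale')
    max_ = _max_for_numeric(precision, scale)
    return -max_, max_  # bounds for a NumberRange-style validator
```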
|
0.0
|
e590f2da7161670ad36b20cde3cb7f01d1e466ff
|
[
"keg_elements/tests/test_forms/test_form.py::TestValidators::test_numeric_scale_precision_required"
] |
[
"keg_elements/tests/test_forms/test_form.py::TestFieldsToDict::test_field_to_dict_field",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_prefix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_suffix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_both",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB4::test_text_escaped",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_prefix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_suffix",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_both",
"keg_elements/tests/test_forms/test_form.py::TestTypeHintingTextInputB3::test_text_escaped"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-27 14:10:30+00:00
|
bsd-3-clause
| 3,558 |
|
level12__morphi-6
|
diff --git a/morphi/messages/manager.py b/morphi/messages/manager.py
index 3137358..71c946f 100644
--- a/morphi/messages/manager.py
+++ b/morphi/messages/manager.py
@@ -256,7 +256,7 @@ def gettext_find(domain, localedir=None, languages=None, all=False, # noqa: C90
else:
return mofile_lp
- except NotImplementedError:
+ except (NotImplementedError, ValueError):
pass
return result
|
level12/morphi
|
291ca955fbe2d2ef56899f84fca5b51193d09640
|
diff --git a/morphi/tests/test_messages.py b/morphi/tests/test_messages.py
index 8130c1f..da2aacd 100644
--- a/morphi/tests/test_messages.py
+++ b/morphi/tests/test_messages.py
@@ -176,12 +176,17 @@ class TestMessagesManagerFindMoFilename(object):
yield gettext_find
@pytest.fixture(scope='function')
- def path_exists(self):
+ def path_exists_enclosure(self):
with mock.patch(
'morphi.libs.packages.enclose_package_path_exists',
- autospec=True, spec_set=True, return_value=lambda path: False
+ autospec=True, spec_set=True,
+ return_value=mock.Mock(side_effect=lambda path: False)
) as enclosure:
- yield enclosure.return_value
+ yield enclosure
+
+ @pytest.fixture(scope='function')
+ def path_exists(self, path_exists_enclosure):
+ yield path_exists_enclosure.return_value
@pytest.fixture(scope='function')
def expected(self, path_exists):
@@ -233,6 +238,12 @@ class TestMessagesManagerFindMoFilename(object):
# attempted_domain, attempted_dirname, languages, all, path_exists=path_exists
assert expected == gettext_find.call_args_list
+ def test_invalid_package_resource_path_throws_no_error(self, path_exists_enclosure):
+ path_exists_enclosure.return_value.side_effect = ValueError
+
+ manager.find_mo_filename(domain='domain', localedir='localedir', languages=['es'],
+ package_name='package_name', extension='ext')
+
def test_file_found(self, path_exists, gettext_find):
def find_package_i18n_es_path_exists(domain, dirname, languages, all,
path_exists, extension):
|
Certain paths in the locale directory can cause ValueErrors
If a user-specified locale directory contains parent references ('..') or absolute paths, the package resource loader can throw a ValueError.
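A minimal sketch of the fix direction (standalone; `path_exists` stands in for the package-resource existence check, which may raise for such paths):
```python
def safe_path_exists(path_exists, path):
    # Paths containing '..' or absolute paths can make the package resource
    # loader raise instead of returning False; treat both cases as "not found".
    try:
        return path_exists(path)
    except (NotImplementedError, ValueError):
        return False
```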
|
0.0
|
291ca955fbe2d2ef56899f84fca5b51193d09640
|
[
"morphi/tests/test_messages.py::TestMessagesManagerFindMoFilename::test_invalid_package_resource_path_throws_no_error"
] |
[
"morphi/tests/test_messages.py::TestMessagesFrontendCompileJson::test_output_dir_set_explicitly",
"morphi/tests/test_messages.py::TestMessagesFrontendCompileJson::test_output_dir_from_directory",
"morphi/tests/test_messages.py::TestMessagesFrontendCompileJsonWriteJson::test_write_fuzzy",
"morphi/tests/test_messages.py::TestMessagesFrontendCompileJsonWriteJson::test_elide_fuzzy",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_gettext",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_lazy_gettext",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_lazy_ngettext[0-0",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_lazy_ngettext[1-1",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_lazy_ngettext[2-2",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_ngettext[0-0",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_ngettext[1-1",
"morphi/tests/test_messages.py::TestMessagesManagerManager::test_ngettext[2-2",
"morphi/tests/test_messages.py::TestMessagesManagerFindMoFilename::test_find_mo_filename",
"morphi/tests/test_messages.py::TestMessagesManagerFindMoFilename::test_single_languages_are_listified",
"morphi/tests/test_messages.py::TestMessagesManagerFindMoFilename::test_file_found",
"morphi/tests/test_messages.py::TestMessagesManagerGetMoData::test_no_file_found",
"morphi/tests/test_messages.py::TestMessagesManagerGetMoData::test_filesystem_opener",
"morphi/tests/test_messages.py::TestMessagesManagerGetMoData::test_package_opener",
"morphi/tests/test_messages.py::TestMessagesManagerGetMoData::test_filesystem_fallback_for_package_opener",
"morphi/tests/test_messages.py::TestMessagesManagerLoadTranslations::test_load_fails",
"morphi/tests/test_messages.py::TestMessagesManagerLoadTranslations::test_load"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-11 15:12:30+00:00
|
bsd-3-clause
| 3,559 |
|
lgpage__pytest-cython-5
|
diff --git a/setup.cfg b/setup.cfg
index a5f7bb1..1b8c2c3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,7 +8,7 @@ release = register clean --all sdist bdist_wheel
max-line-length = 140
exclude = tests/*,*/migrations/*,*/south_migrations/*
-[pytest]
+[tool:pytest]
norecursedirs =
.git
.tox
|
lgpage/pytest-cython
|
7c60e5bf4f8d581a7175d707b9e37070c519a93c
|
diff --git a/src/pytest_cython/plugin.py b/src/pytest_cython/plugin.py
index d4874cd..5c610e4 100644
--- a/src/pytest_cython/plugin.py
+++ b/src/pytest_cython/plugin.py
@@ -67,7 +67,7 @@ def pytest_collect_file(path, parent):
# only run test if matching .so and .pyx files exist
# create addoption for this ??
if pyx_file is not None:
- return DoctestModule(path, parent)
+ return DoctestModule.from_parent(parent, fs_path=path)
# XXX patch pyimport to support PEP 3149
@@ -117,7 +117,7 @@ class DoctestModule(pytest.Module):
checker=checker)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
- yield DoctestItem(test.name, self, runner, test)
+ yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _importtestmodule(self):
# we assume we are only called once per module
diff --git a/tests/example-project/setup.py b/tests/example-project/setup.py
index fca0567..e690383 100644
--- a/tests/example-project/setup.py
+++ b/tests/example-project/setup.py
@@ -55,11 +55,12 @@ if __name__ == "__main__":
exclude_files = ['__init__.py']
include_dirs = [os.path.abspath(os.path.join(root, 'src/clib'))]
for file_ in ext_files:
- if os.path.basename(file_) in exclude_files:
+ basename = os.path.basename(file_)
+ if basename in exclude_files:
continue
- pyx_file, _ = os.path.splitext(file_)
+ pyx_file, _ = os.path.splitext(basename)
extensions.append(Extension(
- pyx_file,
+ 'src.pypackage.' + pyx_file,
[file_],
define_macros=macros,
include_dirs=include_dirs,
diff --git a/tests/example-project/src/__init__.py b/tests/example-project/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_pytest_cython.py b/tests/test_pytest_cython.py
index 55a66bf..e546fe3 100644
--- a/tests/test_pytest_cython.py
+++ b/tests/test_pytest_cython.py
@@ -15,9 +15,9 @@ def test_cython_ext_module(testdir):
assert module.check()
result = testdir.runpytest('-vv', '--doctest-cython', str(module))
result.stdout.fnmatch_lines([
- "*Eggs.__init__ *PASSED",
- "*Eggs.blarg*PASSED",
- "*Eggs.fubar*PASSED",
+ "*Eggs.__init__ *PASSED*",
+ "*Eggs.blarg*PASSED*",
+ "*Eggs.fubar*PASSED*",
])
assert result.ret == 0
@@ -27,7 +27,7 @@ def test_wrap_c_ext_module(testdir):
assert module.check()
result = testdir.runpytest('-vv', '--doctest-cython', str(module))
result.stdout.fnmatch_lines([
- "*sqr*PASSED",
+ "*sqr*PASSED*",
])
assert result.ret == 0
@@ -37,7 +37,7 @@ def test_wrap_cpp_ext_module(testdir):
assert module.check()
result = testdir.runpytest('-vv', '--doctest-cython', str(module))
result.stdout.fnmatch_lines([
- "*sqr*PASSED",
+ "*sqr*PASSED*",
])
assert result.ret == 0
@@ -47,8 +47,8 @@ def test_pure_py_module(testdir):
assert module.check()
result = testdir.runpytest('-vv', '--doctest-cython', str(module))
result.stdout.fnmatch_lines([
- "*Eggs.__init__*PASSED",
- "*Eggs.foo*PASSED",
- "*foo*PASSED",
+ "*Eggs.__init__*PASSED*",
+ "*Eggs.foo*PASSED*",
+ "*foo*PASSED*",
])
assert result.ret == 0
|
Deprecation warnings
Running `pytest --doctest-cython` works, but gives the following warnings:
```
<some_path>/python3.7/site-packages/pytest_cython/plugin.py:70
<some_path>/python3.7/site-packages/pytest_cython/plugin.py:70: PytestDeprecationWarning: direct construction of DoctestModule has been deprecated, please use DoctestModule.from_parent
return DoctestModule(path, parent)
<some_path>/.local/lib/python3.7/site-packages/pytest_cython/plugin.py:120
<some_path>/.local/lib/python3.7/site-packages/pytest_cython/plugin.py:120
<some_path>/.local/lib/python3.7/site-packages/pytest_cython/plugin.py:120: PytestDeprecationWarning: direct construction of DoctestItem has been deprecated, please use DoctestItem.from_parent
yield DoctestItem(test.name, self, runner, test)
-- Docs: https://docs.pytest.org/en/latest/warnings.html
```
## A Quick Workaround
```bash
pytest --doctest-cython --disable-warnings
```
# Info
* OS: Fedora 31 (Thirty One) x86_64
* Sample `fib.pyx`
```python3
#cython: language_level=3
def fib(int n):
'''Return the nth number in the Fibonacci sequence.
>>> fib(0)
0.0
>>> fib(1)
1.0
>>> fib(4)
3.0
>>> fib(7)
13.0
'''
cdef int i
cdef double a=0.0, b=1.0
for i in range(n):
a, b = a + b, a
return a
```
|
0.0
|
7c60e5bf4f8d581a7175d707b9e37070c519a93c
|
[
"tests/test_pytest_cython.py::test_pure_py_module"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-24 13:47:24+00:00
|
mit
| 3,560 |
|
lhotse-speech__lhotse-724
|
diff --git a/lhotse/dataset/sampling/bucketing.py b/lhotse/dataset/sampling/bucketing.py
index 04541b37..a2ad7ba3 100644
--- a/lhotse/dataset/sampling/bucketing.py
+++ b/lhotse/dataset/sampling/bucketing.py
@@ -422,23 +422,55 @@ def _create_buckets_equal_duration_single(
"""
total_duration = np.sum(c.duration for c in cuts)
bucket_duration = total_duration / num_buckets
- iter_cuts = iter(cuts)
- buckets = []
- for bucket_idx in range(num_buckets):
- bucket = []
- current_duration = 0
- try:
- while current_duration < bucket_duration:
- bucket.append(next(iter_cuts))
- current_duration += bucket[-1].duration
- # Every odd bucket, take the cut that exceeded the bucket's duration
- # and put it in the front of the iterable, so that it goes to the
- # next bucket instead. It will ensure that the last bucket is not too
- # thin (otherwise all the previous buckets are a little too large).
- if bucket_idx % 2:
- last_cut = bucket.pop()
- iter_cuts = chain([last_cut], iter_cuts)
- except StopIteration:
- assert bucket_idx == num_buckets - 1
- buckets.append(CutSet.from_cuts(bucket))
+ # Define the order for adding cuts. We start at the beginning, then go to
+ # the end, and work our way to the middle. Once in the middle we distribute
+ # excess cuts among the two buckets close to the median duration. This
+ # handles the problem of where to place cuts that caused previous buckets
+ # to "over-flow" without sticking all of them in the last bucket, which
+ # causes one large bucket at the end and also places many small duration
+ # cuts with longer ones.
+ order = list(range(0, len(cuts), 2)) + list(
+ range(len(cuts) - (1 + len(cuts) % 2), 0, -2)
+ )
+ order2idx = {o_idx: i for i, o_idx in enumerate(order)}
+ durations = [c.duration for c in cuts]
+
+ # We need a list of the cut durations in the same order (0, N-1, 1, N-2, ...)
+ ordered_cut_durations = sorted(zip(order, durations), key=lambda x: x[0])
+ last_order, first_bucket = 0, 0
+ last_bucket = num_buckets - 1
+ buckets_dict = {i: 0 for i in range(num_buckets)}
+ buckets_cut_dict = {i: [] for i in range(num_buckets)}
+ middle_bucket = None
+ idx_to_bucket_id = {}
+ for i, (order_idx, duration) in enumerate(ordered_cut_durations, 1):
+ # Check if we are at the middle bucket. first_bucket is the left bucket
+ # we are processing. last_bucket is the right bucket. When they are the
+ # same we are filling the bucket with cuts near the median duration.
+ if middle_bucket is None and first_bucket == last_bucket:
+ middle_bucket = first_bucket
+
+ # i % 2 = 1 ==> process the left_bucket (first_bucket)
+ if i % 2:
+ if buckets_dict[first_bucket] + duration > bucket_duration:
+ if middle_bucket is not None and first_bucket == middle_bucket:
+ first_bucket = min(middle_bucket - 1, num_buckets - 1)
+ else:
+ first_bucket = min(first_bucket + 1, num_buckets - 1)
+ buckets_dict[first_bucket] += duration
+ idx_to_bucket_id[order2idx[order_idx]] = first_bucket
+ # i % 2 = 0 ==> process the right bucket (last_bucket)
+ else:
+ if buckets_dict[last_bucket] + duration > bucket_duration:
+ if middle_bucket is not None and last_bucket == middle_bucket:
+ last_bucket = max(middle_bucket + 1, 0)
+ else:
+ last_bucket = max(last_bucket - 1, 0)
+ buckets_dict[last_bucket] += duration
+ idx_to_bucket_id[order2idx[order_idx]] = last_bucket
+
+ # Now that buckets have been assigned, create the new cutset.
+ for cut_idx, cut in enumerate(cuts):
+ buckets_cut_dict[idx_to_bucket_id[cut_idx]].append(cut)
+ buckets = [CutSet.from_cuts(buckets_cut_dict[i]) for i in range(num_buckets)]
return buckets
|
lhotse-speech/lhotse
|
1c137cf491f835dca25be92d5bf89272ec37b8fe
|
diff --git a/test/dataset/sampling/test_sampling.py b/test/dataset/sampling/test_sampling.py
index 4c922d40..1ed453ef 100644
--- a/test/dataset/sampling/test_sampling.py
+++ b/test/dataset/sampling/test_sampling.py
@@ -467,19 +467,29 @@ def test_bucketing_sampler_single_cuts_equal_duration():
)
# Ensure that each consecutive bucket has less cuts than the previous one
- prev_len = float("inf")
- bucket_cum_durs = []
+ sampled_cuts, bucket_cum_durs = [], []
+ prev_min, prev_max = 0, 0
+ num_overlapping_bins = 0
for (bucket,) in sampler.buckets:
- bucket_cum_durs.append(sum(c.duration for c in bucket))
- curr_len = len(bucket)
- assert curr_len < prev_len
- prev_len = curr_len
+ bucket_durs = [c.duration for c in bucket]
+ sampled_cuts.extend(c for c in bucket)
+ bucket_cum_durs.append(sum(bucket_durs))
+ bucket_min, bucket_max = min(bucket_durs), max(bucket_durs)
+ # Ensure that bucket lengths do not overlap, except for the middle
+ # 3 buckets maybe
+ if prev_max > bucket_min:
+ num_overlapping_bins += 1
+ assert num_overlapping_bins < 3
+ prev_min = bucket_min
+ prev_max = bucket_max
# Assert that all bucket cumulative durations are within 1/10th of the mean
mean_bucket_dur = mean(bucket_cum_durs) # ~ 1300s
for d in bucket_cum_durs:
assert abs(d - mean_bucket_dur) < 0.1 * mean_bucket_dur
+ assert set(cut_set.ids) == set(c.id for c in sampled_cuts)
+
def test_bucketing_sampler_shuffle():
cut_set = DummyManifest(CutSet, begin_id=0, end_id=10)
|
BucketingSampler with equal_duration drops an arbitrary(?) number of cuts.
If you use the BucketingSampler with `num_buckets` (especially with an even number of buckets, though it can also happen with an odd number), at the very least the last cut is dropped; with the right combination of durations and `num_buckets`, many more can be dropped.
The reason this is not caught in the test is that the line
`assert set(cut_set.ids) == set(c.id for c in sampled_cuts)`
is only executed for the equal_len bucketing method and not for equal_duration.
The problem arises from the fact that the loops in `_create_buckets_equal_duration_single()` are not over the cuts, but over the buckets and the cumulative duration within each bucket. Because the code pops the last cut off each odd bucket and puts it back at the front of the iterator, you can be left with a non-empty iterator once all buckets are filled.
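A standalone toy illustration of the mechanism, mirroring the shape of the original loop (the durations and bucket count here are made up):
```python
from itertools import chain

durations = [1.0] * 11            # 11 one-second "cuts"
num_buckets = 4                   # bucket_duration = 11 / 4 = 2.75
bucket_duration = sum(durations) / num_buckets
it = iter(durations)
buckets = []
for bucket_idx in range(num_buckets):
    bucket, cur = [], 0.0
    try:
        while cur < bucket_duration:
            bucket.append(next(it))
            cur += bucket[-1]
        if bucket_idx % 2:        # odd bucket: push the overflow cut back
            it = chain([bucket.pop()], it)
    except StopIteration:
        pass
    buckets.append(bucket)

print(sum(len(b) for b in buckets))  # 10 -- the 11th cut is silently dropped
```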
At the very least, the BucketingSampler equal_duration feature should be updated to warn when it drops cuts and print out how many were dropped. Many times it will be 0 or 1.
However, if you modify `test_bucketing_sampler_single_cuts()` in test/dataset/sampling/test_sampling.py to look like the following, you will see that in that example it drops 25% of the cuts:
```python
def test_bucketing_sampler_single_cuts():
    cut_set = DummyManifest(CutSet, begin_id=0, end_id=1000)
    sampler = BucketingSampler(cut_set, sampler_type=SimpleCutSampler,
                               bucket_method="equal_duration", num_buckets=500)
    print(f'Num cuts orig: {len(cut_set)}, '
          f'Num cuts in sampler: {sum(len(b[0]) for b in sampler.buckets)}')
    sampled_cuts = []
    for batch in sampler:
        sampled_cuts.extend(batch)
    assert set(cut_set.ids) == set(c.id for c in sampled_cuts)
```
|
0.0
|
1c137cf491f835dca25be92d5bf89272ec37b8fe
|
[
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts_equal_duration"
] |
[
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_shuffling[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_shuffling[SingleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_shuffling[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-None-None-exception_expectation0]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[10.0-None-None-exception_expectation1]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-1000-None-exception_expectation2]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-None-160000-exception_expectation3]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-1000-160000-exception_expectation4]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[5.0-1000-160000-exception_expectation5]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_is_deterministic_given_epoch[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_is_deterministic_given_epoch[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_differs_between_epochs[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_differs_between_epochs[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_low_max_frames[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_low_max_frames[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler",
"test/dataset/sampling/test_sampling.py::test_dynamic_cut_sampler_as_cut_pairs_sampler",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_2",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-None-None-exception_expectation0]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[10.0-None-None-exception_expectation1]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-1000-None-exception_expectation2]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-None-160000-exception_expectation3]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-1000-160000-exception_expectation4]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[5.0-1000-160000-exception_expectation5]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_order_is_deterministic_given_epoch",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_order_differs_between_epochs",
"test/dataset/sampling/test_sampling.py::test_concat_cuts",
"test/dataset/sampling/test_sampling.py::test_concat_cuts_with_duration_factor",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts_no_proportional_sampling",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts_equal_len",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_shuffle",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_len[False]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_len[True]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_duration[False]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_duration[True]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_order_is_deterministic_given_epoch",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_order_differs_between_epochs",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_buckets_have_different_durations",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_chooses_buckets_randomly",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_time_constraints[constraint0]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_time_constraints[constraint1]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_time_constraints[constraint2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-995-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-995-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-995-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-996-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-996-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-996-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-997-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-997-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-997-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-998-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-998-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-998-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-999-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-999-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-999-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1000-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1000-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1000-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1001-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1001-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1001-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1002-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1002-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1002-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1003-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1003-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1003-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-995-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-995-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-995-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-996-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-996-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-996-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-997-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-997-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-997-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-998-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-998-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-998-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-999-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-999-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-999-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1000-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1000-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1000-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1001-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1001-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1001-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1002-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1002-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1002-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1003-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1003-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1003-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-995-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-995-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-995-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-996-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-996-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-996-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-997-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-997-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-997-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-998-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-998-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-998-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-999-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-999-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-999-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1000-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1000-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1000-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1001-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1001-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1001-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1002-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1002-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1002-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1003-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1003-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1003-4]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_raises_value_error_on_lazy_cuts_input",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts_concat[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts_concat[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_filter[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_filter[BucketingSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_filter[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_filter",
"test/dataset/sampling/test_sampling.py::test_zip_sampler_merge_batches_true",
"test/dataset/sampling/test_sampling.py::test_zip_sampler_cut_pairs_merge_batches_true",
"test/dataset/sampling/test_sampling.py::test_zip_sampler_merge_batches_false",
"test/dataset/sampling/test_sampling.py::test_round_robin_sampler",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_drop_last[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_drop_last[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_drop_last[False]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_drop_last[True]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>0]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>1]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>2]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>3]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>4]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>5]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>6]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>0]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>1]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>2]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>3]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>4]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>5]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>6]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_lazy_shuffle[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_lazy_shuffle[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_lazy_shuffle[CutPairsSampler]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[100-10]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[100-1000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[100-20000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[1000-10]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[1000-1000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[1000-20000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[10000-10]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[10000-1000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[10000-20000]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler0]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler1]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler2]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler3]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler4]",
"test/dataset/sampling/test_sampling.py::test_report_padding_ratio_estimate",
"test/dataset/sampling/test_sampling.py::test_time_constraint_strictness",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-sampler_fn3]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-sampler_fn3]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-sampler_fn3]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-sampler_fn3]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-05-27 00:33:01+00:00
|
apache-2.0
| 3,561 |
|
lidatong__dataclasses-json-123
|
diff --git a/dataclasses_json/mm.py b/dataclasses_json/mm.py
index 08a69e9..5df24eb 100644
--- a/dataclasses_json/mm.py
+++ b/dataclasses_json/mm.py
@@ -1,6 +1,7 @@
import typing
import warnings
import sys
+from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
@@ -14,7 +15,7 @@ from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
- _ExtendedEncoder)
+ _ExtendedEncoder, _user_overrides)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin)
@@ -64,22 +65,23 @@ class _UnionField(fields.Field):
return super()._serialize(value, attr, obj, **kwargs)
def _deserialize(self, value, attr, data, **kwargs):
- if isinstance(value, dict) and '__type' in value:
- dc_name = value['__type']
+ tmp_value = deepcopy(value)
+ if isinstance(tmp_value, dict) and '__type' in tmp_value:
+ dc_name = tmp_value['__type']
for type_, schema_ in self.desc.items():
if is_dataclass(type_) and type_.__name__ == dc_name:
- del value['__type']
- return schema_._deserialize(value, attr, data, **kwargs)
+ del tmp_value['__type']
+ return schema_._deserialize(tmp_value, attr, data, **kwargs)
for type_, schema_ in self.desc.items():
- if isinstance(value, _get_type_origin(type_)):
- return schema_._deserialize(value, attr, data, **kwargs)
+ if isinstance(tmp_value, _get_type_origin(type_)):
+ return schema_._deserialize(tmp_value, attr, data, **kwargs)
else:
warnings.warn(
- f'The type "{type(value).__name__}" (value: "{value}") '
+ f'The type "{type(tmp_value).__name__}" (value: "{tmp_value}") '
f'is not in the list of possible types of typing.Union '
f'(dataclass: {self.cls.__name__}, field: {self.field.name}). '
f'Value cannot be deserialized properly.')
- return super()._deserialize(value, attr, data, **kwargs)
+ return super()._deserialize(tmp_value, attr, data, **kwargs)
TYPES = {
@@ -236,10 +238,12 @@ def build_type(type_, options, mixin, field, cls):
def schema(cls, mixin, infer_missing):
schema = {}
+ overrides = _user_overrides(cls)
for field in dc_fields(cls):
metadata = (field.metadata or {}).get('dataclasses_json', {})
- if 'mm_field' in metadata:
- schema[field.name] = metadata['mm_field']
+ metadata = overrides[field.name]
+ if metadata.mm_field is not None:
+ schema[field.name] = metadata.mm_field
else:
type_ = field.type
options = {}
@@ -259,6 +263,9 @@ def schema(cls, mixin, infer_missing):
# Union[str, int, None] is optional too, but it has more than 1 typed field.
type_ = type_.__args__[0]
+ if metadata.letter_case is not None:
+ options['data_key'] = metadata.letter_case(field.name)
+
t = build_type(type_, options, mixin, field, cls)
# if type(t) is not fields.Field: # If we use `isinstance` we would return nothing.
schema[field.name] = t
|
lidatong/dataclasses-json
|
37de0bebb56964cd3207d3a2a1260f5fc6542709
|
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 905e3b1..e565242 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1,4 +1,5 @@
from .entities import DataClassDefaultListStr, DataClassDefaultOptionalList, DataClassList, DataClassOptional
+from .test_letter_case import CamelCasePerson, KebabCasePerson, SnakeCasePerson, FieldNamePerson
test_do_list = """[{}, {"children": [{"name": "a"}, {"name": "b"}]}]"""
@@ -21,3 +22,8 @@ class TestSchema:
def test_optional(self):
DataClassOptional.schema().loads('{"a": 4, "b": null}')
assert True
+
+ def test_letter_case(self):
+ for cls in (CamelCasePerson, KebabCasePerson, SnakeCasePerson, FieldNamePerson):
+ p = cls('Alice')
+ assert p.to_dict() == cls.schema().dump(p)
diff --git a/tests/test_union.py b/tests/test_union.py
index 5f90ff2..c2e8650 100644
--- a/tests/test_union.py
+++ b/tests/test_union.py
@@ -125,6 +125,16 @@ def test_deserialize(expected_obj, data, data_json):
assert s.loads(data_json) == expected_obj
+def test_deserialize_twice():
+ data = {"f1": [{"f1": 12, "__type": "Aux1"}, {"f1": "str3", "__type": "Aux2"}]}
+ expected_obj = C9([Aux1(12), Aux2("str3")])
+
+ s = C9.schema()
+ res1 = s.load(data)
+ res2 = s.load(data)
+ assert res1 == expected_obj and res2 == expected_obj
+
+
@pytest.mark.parametrize('obj', [
(C2(f1={"str1": "str1"})),
(C3(f1=[0.12, 0.13, "str1"])),
|
field_name is not supported with schema
```ipython
In [4]: @dataclasses_json.dataclass_json
...: @dataclasses.dataclass
...: class Person:
...: name: str = dataclasses.field(metadata=dataclasses_json.config(field_name="fullname"))
...: age: int
...:
In [5]: p = Person("A B", 23)
In [6]: p
Out[6]: Person(name='A B', age=23)
In [7]: p.to_dict()
Out[7]: {'fullname': 'A B', 'age': 23}
In [8]: d = p.to_dict()
In [9]: Person.from_dict(d)
Out[9]: Person(name='A B', age=23)
In [10]: Person.schema().load(d)
---------------------------------------------------------------------------
ValidationError Traceback (most recent call last)
<ipython-input-10-cd95d439489f> in <module>()
----> 1 Person.schema().load(d)
/home/venv/lib/python3.7/site-packages/marshmallow/schema.py in load(self, data, many, partial, unknown)
705 return self._do_load(
706 data, many, partial=partial, unknown=unknown,
--> 707 postprocess=True,
708 )
709
/home/venv/lib/python3.7/site-packages/marshmallow/schema.py in _do_load(self, data, many, partial, unknown, postprocess)
865 )
866 self.handle_error(exc, data)
--> 867 raise exc
868
869 return result
ValidationError: {'fullname': ['Unknown field.']}
```
It seems that the schema should use `data_key` from the [marshmallow Field](https://marshmallow.readthedocs.io/en/3.0/api_reference.html#module-marshmallow.fields)
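For reference, this is how `data_key` behaves in marshmallow 3 (standalone example, independent of dataclasses-json):
```python
from marshmallow import Schema, fields

class PersonSchema(Schema):
    # data_key maps the external JSON key onto the internal field name
    name = fields.Str(data_key='fullname')
    age = fields.Int()

PersonSchema().load({'fullname': 'A B', 'age': 23})
# -> {'name': 'A B', 'age': 23}
```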
|
0.0
|
37de0bebb56964cd3207d3a2a1260f5fc6542709
|
[
"tests/test_schema.py::TestSchema::test_letter_case",
"tests/test_union.py::test_deserialize_twice"
] |
[
"tests/test_schema.py::TestSchema::test_default_list_str",
"tests/test_schema.py::TestSchema::test_default_optional_list",
"tests/test_schema.py::TestSchema::test_list",
"tests/test_schema.py::TestSchema::test_optional",
"tests/test_union.py::test_serialize[obj0-expected0-{\"f1\":",
"tests/test_union.py::test_serialize[obj1-expected1-{\"f1\":",
"tests/test_union.py::test_serialize[obj2-expected2-{\"f1\":",
"tests/test_union.py::test_serialize[obj3-expected3-{\"f1\":",
"tests/test_union.py::test_serialize[obj4-expected4-{\"f1\":",
"tests/test_union.py::test_serialize[obj5-expected5-{\"f1\":",
"tests/test_union.py::test_serialize[obj6-expected6-{\"f1\":",
"tests/test_union.py::test_serialize[obj7-expected7-{\"f1\":",
"tests/test_union.py::test_serialize[obj8-expected8-{\"f1\":",
"tests/test_union.py::test_serialize[obj9-expected9-{\"f1\":",
"tests/test_union.py::test_serialize[obj10-expected10-{\"f1\":",
"tests/test_union.py::test_serialize[obj11-expected11-{\"f1\":",
"tests/test_union.py::test_serialize[obj12-expected12-{\"f1\":",
"tests/test_union.py::test_serialize[obj13-expected13-{\"f1\":",
"tests/test_union.py::test_serialize[obj14-expected14-{\"f1\":",
"tests/test_union.py::test_serialize[obj15-expected15-{\"f1\":",
"tests/test_union.py::test_serialize[obj16-expected16-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj0-data0-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj1-data1-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj2-data2-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj3-data3-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj4-data4-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj5-data5-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj6-data6-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj7-data7-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj8-data8-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj9-data9-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj10-data10-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj11-data11-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj12-data12-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj13-data13-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj14-data14-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj15-data15-{\"f1\":",
"tests/test_union.py::test_deserialize[expected_obj16-data16-{\"f1\":",
"tests/test_union.py::test_serialize_with_error[obj0]",
"tests/test_union.py::test_serialize_with_error[obj1]",
"tests/test_union.py::test_deserialize_with_error[C1-data0]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-10 19:33:50+00:00
|
mit
| 3,562 |
|
lidatong__dataclasses-json-221
|
diff --git a/dataclasses_json/__init__.py b/dataclasses_json/__init__.py
index f76ef77..2a001b1 100644
--- a/dataclasses_json/__init__.py
+++ b/dataclasses_json/__init__.py
@@ -2,5 +2,5 @@
from dataclasses_json.api import (DataClassJsonMixin,
LetterCase,
dataclass_json)
-from dataclasses_json.cfg import config, global_config
+from dataclasses_json.cfg import config, global_config, Exclude
from dataclasses_json.undefined import CatchAll, Undefined
diff --git a/dataclasses_json/cfg.py b/dataclasses_json/cfg.py
index 8f1ff8d..7a6413d 100644
--- a/dataclasses_json/cfg.py
+++ b/dataclasses_json/cfg.py
@@ -1,10 +1,24 @@
import functools
-from typing import (Callable, Dict, Optional, Union)
+from typing import Callable, Dict, NamedTuple, Optional, TypeVar, Union
from marshmallow.fields import Field as MarshmallowField
from dataclasses_json.undefined import Undefined, UndefinedParameterError
+T = TypeVar("T")
+
+
+class _Exclude(NamedTuple):
+ """
+ Whether or not the field should be excluded when encoded
+ """
+
+ ALWAYS: Callable[[T], bool] = lambda _: True
+ NEVER: Callable[[T], bool] = lambda _: False
+
+
+Exclude = _Exclude()
+
# TODO: add warnings?
class _GlobalConfig:
@@ -35,7 +49,9 @@ def config(metadata: dict = None, *,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
- field_name: str = None) -> Dict[str, dict]:
+ field_name: str = None,
+ exclude: Optional[Callable[[str, T], bool]] = None,
+ ) -> Dict[str, dict]:
if metadata is None:
metadata = {}
@@ -75,4 +91,7 @@ def config(metadata: dict = None, *,
lib_metadata['undefined'] = undefined
+ if exclude is not None:
+ lib_metadata['exclude'] = exclude
+
return metadata
diff --git a/dataclasses_json/core.py b/dataclasses_json/core.py
index 527287f..ddb3238 100644
--- a/dataclasses_json/core.py
+++ b/dataclasses_json/core.py
@@ -48,7 +48,7 @@ class _ExtendedEncoder(json.JSONEncoder):
def _user_overrides_or_exts(cls):
- confs = ['encoder', 'decoder', 'mm_field', 'letter_case']
+ confs = ['encoder', 'decoder', 'mm_field', 'letter_case', 'exclude']
FieldOverride = namedtuple('FieldOverride', confs)
global_metadata = defaultdict(dict)
@@ -94,6 +94,11 @@ def _encode_overrides(kvs, overrides, encode_json=False):
override_kvs = {}
for k, v in kvs.items():
if k in overrides:
+ exclude = overrides[k].exclude
+ # If the exclude predicate returns true, the key should be
+ # excluded from encoding, so skip the rest of the loop
+ if exclude and exclude(v):
+ continue
letter_case = overrides[k].letter_case
original_key = k
k = letter_case(k) if letter_case is not None else k
|
lidatong/dataclasses-json
|
f3d8860228d92b8af98f58f0b21304185b0e753d
|
diff --git a/tests/test_exclude.py b/tests/test_exclude.py
new file mode 100644
index 0000000..cf5eeb1
--- /dev/null
+++ b/tests/test_exclude.py
@@ -0,0 +1,51 @@
+from dataclasses import dataclass, field
+
+from dataclasses_json.api import DataClassJsonMixin, config
+from dataclasses_json.cfg import Exclude
+
+
+@dataclass
+class EncodeExclude(DataClassJsonMixin):
+ public_field: str
+ private_field: str = field(metadata=config(exclude=Exclude.ALWAYS))
+
+
+@dataclass
+class EncodeInclude(DataClassJsonMixin):
+ public_field: str
+ private_field: str = field(metadata=config(exclude=Exclude.NEVER))
+
+
+@dataclass
+class EncodeCustom(DataClassJsonMixin):
+ public_field: str
+ sensitive_field: str = field(
+ metadata=config(exclude=lambda v: v.startswith("secret"))
+ )
+
+
+def test_exclude():
+ dclass = EncodeExclude(public_field="public", private_field="private")
+ encoded = dclass.to_dict()
+ assert "public_field" in encoded
+ assert "private_field" not in encoded
+
+
+def test_include():
+ dclass = EncodeInclude(public_field="public", private_field="private")
+ encoded = dclass.to_dict()
+ assert "public_field" in encoded
+ assert "private_field" in encoded
+ assert encoded["private_field"] == "private"
+
+
+def test_custom_action_included():
+ dclass = EncodeCustom(public_field="public", sensitive_field="notsecret")
+ encoded = dclass.to_dict()
+ assert "sensitive_field" in encoded
+
+
+def test_custom_action_excluded():
+ dclass = EncodeCustom(public_field="public", sensitive_field="secret")
+ encoded = dclass.to_dict()
+ assert "sensitive_field" not in encoded
|
Omitting fields when serializing
Is it possible to omit fields when `to_dict` or `to_json` is invoked? My use case is that I have fields with sensitive information that I only want available to the internal parts of my application, and I would like to ensure they're never pushed to my end-users.
Right now, I'm accomplishing it by overriding `to_dict` as follows:
```python
@dataclass
class MyObject(DataClassJsonMixin):
"""An object."""
id_: uuid.UUID = field(metadata=config(field_name="id"))
name: str
secret: str
another_secret: str
def to_dict( # type: ignore[override]
self, omit_sensitive=True, **kwargs
) -> Dict[str, "dataclasses_json.core.Json"]:
"""Serialize the dataclass, with an optional ability to omit sensitive/internal fields."""
serialized = super().to_dict(**kwargs)
if omit_sensitive:
for field_name in list(serialized.keys()):
if field_name in ("secret", "another_secret"):
del serialized[field_name]
return serialized
```
Ideally, this would be something I could define as part of the `field()`. Is this currently possible with dataclasses-json, and, if not, would this be something that you would consider for future functionality?
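A sketch of the field-level API this asks for, mirroring the `exclude` predicate added in the accompanying patch (names here are illustrative):
```python
from dataclasses import dataclass, field
from dataclasses_json import DataClassJsonMixin, config

@dataclass
class MyObject(DataClassJsonMixin):
    name: str
    # exclude receives the field's value; returning True skips it on encode
    secret: str = field(metadata=config(exclude=lambda _value: True))

MyObject('a', 'hush').to_dict()  # -> {'name': 'a'}
```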
|
0.0
|
f3d8860228d92b8af98f58f0b21304185b0e753d
|
[
"tests/test_exclude.py::test_exclude",
"tests/test_exclude.py::test_include",
"tests/test_exclude.py::test_custom_action_included",
"tests/test_exclude.py::test_custom_action_excluded"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-23 20:54:32+00:00
|
mit
| 3,563 |
|
lidatong__dataclasses-json-394
|
diff --git a/dataclasses_json/core.py b/dataclasses_json/core.py
index 529e153..ae1e99b 100644
--- a/dataclasses_json/core.py
+++ b/dataclasses_json/core.py
@@ -93,7 +93,12 @@ def _user_overrides_or_exts(cls):
def _encode_json_type(value, default=_ExtendedEncoder().default):
if isinstance(value, Json.__args__): # type: ignore
- return value
+ if isinstance(value, list):
+ return [_encode_json_type(i) for i in value]
+ elif isinstance(value, dict):
+ return {k: _encode_json_type(v) for k, v in value.items()}
+ else:
+ return value
return default(value)
|
lidatong/dataclasses-json
|
39b4c2f4462282576d72a72ade048ef49e2133bb
|
diff --git a/tests/test_enum.py b/tests/test_enum.py
index 6d0ce82..f4848ed 100644
--- a/tests/test_enum.py
+++ b/tests/test_enum.py
@@ -1,9 +1,9 @@
import json
from enum import Enum
-from typing import Dict, List
+from typing import Dict, List, Optional
import pytest
-from dataclasses import dataclass
+from dataclasses import dataclass, field
from dataclasses_json import dataclass_json
@@ -27,19 +27,25 @@ class MyStrEnum(str, Enum):
class DataWithEnum:
name: str
my_enum: MyEnum = MyEnum.STR3
+ enum_list: List[MyEnum] = field(default_factory=list)
+ nested: Dict[str, List[MyEnum]] = field(default_factory=dict)
d1 = DataWithEnum('name1', MyEnum.STR1)
-d1_json = '{"name": "name1", "my_enum": "str1"}'
+d1_json = '{"name": "name1", "my_enum": "str1", "enum_list": [], "nested": {}}'
# Make sure the enum is set to the default value defined by MyEnum
d2_using_default_value = DataWithEnum('name2')
-d2_json = '{"name": "name2", "my_enum": "str3"}'
+d2_json = '{"name": "name2", "my_enum": "str3", "enum_list": [], "nested": {}}'
d3_int = DataWithEnum('name1', MyEnum.INT1)
-d3_int_json = '{"name": "name1", "my_enum": 1}'
+d3_int_json = '{"name": "name1", "my_enum": 1, "enum_list": [], "nested": {}}'
+
d4_float = DataWithEnum('name1', MyEnum.FLOAT1)
-d4_float_json = '{"name": "name1", "my_enum": 1.23}'
+d4_float_json = '{"name": "name1", "my_enum": 1.23, "enum_list": [], "nested": {}}'
+
+d5_list = DataWithEnum('name1', MyEnum.STR1, [MyEnum.STR2, MyEnum.STR3], nested={'enum_val': [MyEnum.STR1]})
+d5_list_json = '{"name": "name1", "my_enum": "str1", "enum_list": ["str2", "str3"], "nested": {"enum_val": ["str1"]}}'
@dataclass_json
@@ -82,6 +88,10 @@ class TestEncoder:
def test_collection_with_enum(self):
assert container.to_json() == container_json
+ def test_enum_with_list(self):
+ assert d5_list.to_json() == d5_list_json, f'Actual: {d5_list.to_json()}, Expected: {d5_list_json}'
+ assert d5_list.to_dict(encode_json=True) == json.loads(d5_list_json), f'Actual: {d5_list.to_dict()}, Expected: {json.loads(d5_list_json)}'
+
class TestDecoder:
def test_data_with_enum(self):
|
`to_dict(encode_json=True)` is not recursive
I expect `to_dict(encode_json=True)` to be recursive; however, the code below evaluates to:
```
{'engine': 'fast', 'wheels': [<Spec.fast: 'fast'>, <Spec.slow: 'slow'>]}
```
The first level of Enums evaluates to strings as expected, but the list is not JSON encoded. I expected it to be:
```
{'engine': 'fast', 'wheels': ['fast', 'slow']}
```
```
from dataclasses import dataclass
from enum import Enum
from typing import List
from dataclasses_json import DataClassJsonMixin
class Spec(Enum):
fast = 'fast'
slow = 'slow'
@dataclass
class Car(DataClassJsonMixin):
engine: Spec
wheels: List[Spec]
car = Car(engine=Spec.fast, wheels=[Spec.fast, Spec.slow])
car.to_dict(encode_json=True)
```
My current work-around is:
```
import json
json.loads(car.to_json())
```
However, this is not as nice.
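A sketch of the recursive behaviour this expects (a simplified standalone version, not the library's actual encoder):
```python
def encode_json_type(value, default):
    # Recurse into containers so nested Enums etc. also hit the default hook
    if isinstance(value, list):
        return [encode_json_type(v, default) for v in value]
    if isinstance(value, dict):
        return {k: encode_json_type(v, default) for k, v in value.items()}
    if isinstance(value, (str, int, float, bool)) or value is None:
        return value
    return default(value)

# e.g. encode_json_type([Spec.fast, Spec.slow], lambda e: e.value)
# -> ['fast', 'slow']
```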
|
0.0
|
39b4c2f4462282576d72a72ade048ef49e2133bb
|
[
"tests/test_enum.py::TestEncoder::test_enum_with_list"
] |
[
"tests/test_enum.py::TestEncoder::test_data_with_enum",
"tests/test_enum.py::TestEncoder::test_data_with_str_enum",
"tests/test_enum.py::TestEncoder::test_data_with_enum_default_value",
"tests/test_enum.py::TestEncoder::test_collection_with_enum",
"tests/test_enum.py::TestDecoder::test_data_with_enum",
"tests/test_enum.py::TestDecoder::test_data_with_str_enum",
"tests/test_enum.py::TestDecoder::test_data_with_enum_default_value",
"tests/test_enum.py::TestDecoder::test_collection_with_enum",
"tests/test_enum.py::TestValidator::test_data_with_enum[str1-True]",
"tests/test_enum.py::TestValidator::test_data_with_enum[str2-True]",
"tests/test_enum.py::TestValidator::test_data_with_enum[str3-True]",
"tests/test_enum.py::TestValidator::test_data_with_enum[1-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[1.23-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[str4-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[2-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[1.24-False]",
"tests/test_enum.py::TestValidator::test_data_with_str_enum[str1-True]",
"tests/test_enum.py::TestValidator::test_data_with_str_enum[str2-False]",
"tests/test_enum.py::TestLoader::test_data_with_enum[{\"name\":",
"tests/test_enum.py::TestLoader::test_data_with_enum_exception",
"tests/test_enum.py::TestLoader::test_data_with_str_enum[{\"my_str_enum\":",
"tests/test_enum.py::TestLoader::test_data_with_str_enum_exception"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-12-05 13:47:35+00:00
|
mit
| 3,564 |
|
lidatong__dataclasses-json-426
|
diff --git a/dataclasses_json/core.py b/dataclasses_json/core.py
index fb7f0e6..d34e51d 100644
--- a/dataclasses_json/core.py
+++ b/dataclasses_json/core.py
@@ -281,10 +281,12 @@ def _decode_generic(type_, value, infer_missing):
# get the constructor if using corresponding generic type in `typing`
# otherwise fallback on constructing using type_ itself
+ materialize_type = type_
try:
- res = _get_type_cons(type_)(xs)
+ materialize_type = _get_type_cons(type_)
except (TypeError, AttributeError):
- res = type_(xs)
+ pass
+ res = materialize_type(xs)
else: # Optional or Union
_args = _get_type_args(type_)
if _args is _NO_ARGS:
|
lidatong/dataclasses-json
|
38d8123f9f990ecd088f5d8f42768685de975644
|
diff --git a/tests/entities.py b/tests/entities.py
index 6c0db90..61b8af2 100644
--- a/tests/entities.py
+++ b/tests/entities.py
@@ -264,6 +264,19 @@ class DataClassWithConfigHelper:
id: float = field(metadata=config(encoder=str))
+@dataclass_json
+@dataclass
+class DataClassWithErroneousDecode:
+ # Accepts no arguments, so passing in a single argument will result in a TypeError.
+ id: float = field(metadata=config(decoder=lambda: None))
+
+
+@dataclass_json
+@dataclass
+class DataClassMappingBadDecode:
+ map: Dict[str, DataClassWithErroneousDecode]
+
+
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DataClassWithConfigDecorator:
diff --git a/tests/test_nested.py b/tests/test_nested.py
index 56dc9ba..69c6456 100644
--- a/tests/test_nested.py
+++ b/tests/test_nested.py
@@ -1,4 +1,6 @@
-from tests.entities import (DataClassWithDataClass,
+import pytest
+from tests.entities import (DataClassMappingBadDecode,
+ DataClassWithDataClass,
DataClassWithList,
DataClassWithNestedDictWithTupleKeys,
DataClassX,
@@ -25,6 +27,10 @@ class TestDecoder:
assert (DataClassXs.from_json('{"xs": [{"x": 0}, {"x": 1}]}') ==
DataClassXs([DataClassX(0), DataClassX(1)]))
+ def test_nested_mapping_of_dataclasses(self):
+ with pytest.raises(TypeError, match="positional arguments"):
+ DataClassMappingBadDecode.from_dict(dict(map=dict(test=dict(id="irrelevant"))))
+
class TestNested:
def test_tuple_dict_key(self):
|
[BUG] common errors from decoder on inner dataclass silently ignored
### Description
If you have a dataclass with a mapping field that includes a dataclass member, and you try to `from_dict` the outer dataclass, some errors from a custom `decoder` implementation will get silently ignored.
### Code snippet that reproduces the issue
```py
from dataclasses import dataclass, field
from dataclasses_json import config, dataclass_json
def implementation_issue(value: str):
raise TypeError("improper implementation")
@dataclass_json
@dataclass
class InnerExample:
example_inner: int = field(metadata=config(decoder=implementation_issue))
@dataclass_json
@dataclass
class Example:
example_field: dict[str, InnerExample]
# This should fail, but it doesn't. The new Example instance's example_field
# value is just empty instead.
Example.from_dict(dict(example_field=dict(test=dict(example_inner="what"))))
```
### Describe the results you expected
```
Traceback (most recent call last):
File "/private/tmp/t.py", line 23, in <module>
Example.from_dict(dict(example_field=dict(test=dict(example_inner="what"))))
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/api.py", line 70, in from_dict
return _decode_dataclass(cls, kvs, infer_missing)
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 210, in _decode_dataclass
init_kwargs[field.name] = _decode_generic(field_type,
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 279, in _decode_generic
res = t(xs)
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 336, in <genexpr>
items = (_decode_dataclass(type_arg, x, infer_missing)
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 196, in _decode_dataclass
init_kwargs[field.name] = overrides[field.name].decoder(
File "/private/tmp/t.py", line 6, in implementation_issue
raise TypeError("improper implementation")
TypeError: improper implementation
```
### Python version you are using
3.9.16
### Environment description
irrelevant, cause is clear
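A standalone sketch of the swallowing mechanism, with illustrative names (not the library's actual code): item decoding is a lazy generator, so the user's `TypeError` is only raised while the container constructor consumes it, inside the very `try`/`except` that was meant to catch constructor-lookup failures — and the fallback then reuses the exhausted generator.
```py
def bad_decoder(value):
    raise TypeError("improper implementation")

# Lazy generator: nothing runs until the container constructor iterates it.
pairs = ((k, bad_decoder(v)) for k, v in {"test": "irrelevant"}.items())

try:
    result = dict(pairs)   # stands in for _get_type_cons(type_)(xs)
except (TypeError, AttributeError):
    result = dict(pairs)   # xs is already exhausted -> silently {}

print(result)  # {}
```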
|
0.0
|
38d8123f9f990ecd088f5d8f42768685de975644
|
[
"tests/test_nested.py::TestDecoder::test_nested_mapping_of_dataclasses"
] |
[
"tests/test_nested.py::TestEncoder::test_nested_dataclass",
"tests/test_nested.py::TestEncoder::test_nested_list_of_dataclasses",
"tests/test_nested.py::TestDecoder::test_nested_dataclass",
"tests/test_nested.py::TestDecoder::test_nested_list_of_dataclasses",
"tests/test_nested.py::TestNested::test_tuple_dict_key"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-16 22:16:07+00:00
|
mit
| 3,565 |
|
lidatong__dataclasses-json-435
|
diff --git a/dataclasses_json/core.py b/dataclasses_json/core.py
index 3118036..acb5a47 100644
--- a/dataclasses_json/core.py
+++ b/dataclasses_json/core.py
@@ -23,7 +23,7 @@ from dataclasses_json.utils import (_get_type_cons, _get_type_origin,
_get_type_arg_param,
_get_type_args,
_NO_ARGS,
- _issubclass_safe)
+ _issubclass_safe, _is_tuple)
Json = Union[dict, list, str, int, float, bool, None]
@@ -278,9 +278,10 @@ def _decode_generic(type_, value, infer_missing):
ks = _decode_dict_keys(k_type, value.keys(), infer_missing)
vs = _decode_items(v_type, value.values(), infer_missing)
xs = zip(ks, vs)
+ elif _is_tuple(type_):
+ xs = _decode_items(_get_type_args(type_) or _NO_ARGS, value, infer_missing)
else:
- xs = _decode_items(_get_type_arg_param(type_, 0),
- value, infer_missing)
+ xs = _decode_items(_get_type_arg_param(type_, 0), value, infer_missing)
# get the constructor if using corresponding generic type in `typing`
# otherwise fallback on constructing using type_ itself
@@ -335,7 +336,7 @@ def _decode_dict_keys(key_type, xs, infer_missing):
return map(decode_function, _decode_items(key_type, xs, infer_missing))
-def _decode_items(type_arg, xs, infer_missing):
+def _decode_items(type_args, xs, infer_missing):
"""
This is a tricky situation where we need to check both the annotated
type info (which is usually a type from `typing`) and check the
@@ -345,14 +346,16 @@ def _decode_items(type_arg, xs, infer_missing):
type_arg is a typevar we need to extract the reified type information
hence the check of `is_dataclass(vs)`
"""
- if is_dataclass(type_arg) or is_dataclass(xs):
- items = (_decode_dataclass(type_arg, x, infer_missing)
- for x in xs)
- elif _is_supported_generic(type_arg):
- items = (_decode_generic(type_arg, x, infer_missing) for x in xs)
- else:
- items = xs
- return items
+ def _decode_item(type_arg, x):
+ if is_dataclass(type_arg) or is_dataclass(xs):
+ return _decode_dataclass(type_arg, x, infer_missing)
+ if _is_supported_generic(type_arg):
+ return _decode_generic(type_arg, x, infer_missing)
+ return x
+
+ if _isinstance_safe(type_args, Collection) and not _issubclass_safe(type_args, Enum):
+ return list(_decode_item(type_arg, x) for type_arg, x in zip(type_args, xs))
+ return list(_decode_item(type_args, x) for x in xs)
def _asdict(obj, encode_json=False):
diff --git a/dataclasses_json/utils.py b/dataclasses_json/utils.py
index 355e690..0927cd0 100644
--- a/dataclasses_json/utils.py
+++ b/dataclasses_json/utils.py
@@ -150,6 +150,10 @@ def _is_collection(type_):
return _issubclass_safe(_get_type_origin(type_), Collection)
+def _is_tuple(type_):
+ return _issubclass_safe(_get_type_origin(type_), Tuple)
+
+
def _is_nonstr_collection(type_):
return (_issubclass_safe(_get_type_origin(type_), Collection)
and not _issubclass_safe(type_, str))
|
lidatong/dataclasses-json
|
bc83017e26493a67bccb46ace6caecb00e9fde99
|
diff --git a/tests/test_enum.py b/tests/test_enum.py
index f4848ed..4313c27 100644
--- a/tests/test_enum.py
+++ b/tests/test_enum.py
@@ -1,6 +1,6 @@
import json
from enum import Enum
-from typing import Dict, List, Optional
+from typing import Dict, List
import pytest
from dataclasses import dataclass, field
diff --git a/tests/test_tuples.py b/tests/test_tuples.py
new file mode 100644
index 0000000..44e3574
--- /dev/null
+++ b/tests/test_tuples.py
@@ -0,0 +1,57 @@
+import json
+from enum import Enum
+from typing import Tuple
+
+from dataclasses import dataclass
+
+from dataclasses_json import dataclass_json
+
+
+class MyEnum(Enum):
+ STR1 = "str1"
+ STR2 = "str2"
+ STR3 = "str3"
+ INT1 = 1
+ FLOAT1 = 1.23
+
+
+@dataclass_json
+@dataclass
+class DataWithHeterogeneousTuple:
+ my_tuple: Tuple[MyEnum, str, float]
+
+
+hetero_tuple_json = '{"my_tuple": ["str1", "str2", 1.23]}'
+hetero_tuple_data = DataWithHeterogeneousTuple(my_tuple=(MyEnum.STR1, "str2", 1.23))
+
+
+@dataclass_json
+@dataclass
+class DataWithNestedTuple:
+ my_tuple: Tuple[Tuple[int, str], Tuple[MyEnum, int], int]
+
+
+nested_tuple_json = '{"my_tuple": [[1, "str1"], ["str2", 1], 1]}'
+nested_tuple_data = DataWithNestedTuple(my_tuple=((1, "str1"), (MyEnum.STR2, 1), 1))
+
+
+class TestEncoder:
+ def test_enum_with_tuple(self):
+ assert hetero_tuple_data.to_json() == hetero_tuple_json, f'Actual: {hetero_tuple_data.to_json()}, Expected: {hetero_tuple_json}'
+ assert hetero_tuple_data.to_dict(encode_json=True) == json.loads(hetero_tuple_json), f'Actual: {hetero_tuple_data.to_dict()}, Expected: {json.loads(hetero_tuple_json)}'
+
+ def test_nested_tuple(self):
+ assert nested_tuple_data.to_json() == nested_tuple_json, f'Actual: {nested_tuple_data.to_json()}, Expected: {nested_tuple_json}'
+ assert nested_tuple_data.to_dict(encode_json=True) == json.loads(nested_tuple_json), f'Actual: {nested_tuple_data.to_dict()}, Expected: {json.loads(nested_tuple_json)}'
+
+
+class TestDecoder:
+ def test_enum_with_tuple(self):
+ tuple_data_from_json = DataWithHeterogeneousTuple.from_json(hetero_tuple_json)
+ assert hetero_tuple_data == tuple_data_from_json
+ assert tuple_data_from_json.to_json() == hetero_tuple_json
+
+ def test_nested_tuple(self):
+ tuple_data_from_json = DataWithNestedTuple.from_json(nested_tuple_json)
+ assert nested_tuple_data == tuple_data_from_json
+ assert tuple_data_from_json.to_json() == nested_tuple_json
|
[BUG] tuple decode assumes homogenous sequence
### Description
If I express a field type as `tuple[int, str]`, I would expect each position of the tuple to be decoded with its own declared type. This matters especially when the first position is an Enum, because assuming the first type applies to all values means the second value is decoded as an Enum even though that is not its declared type.
### Code snippet that reproduces the issue
```py
from dataclasses import dataclass, field
from enum import Enum
from dataclasses_json import config, dataclass_json
class ExampleEnum(Enum):
FIRST = "first"
@dataclass_json
@dataclass
class Example:
example_inner: tuple[ExampleEnum, str]
# This should produce no error. On the contrary, this should be perfectly valid.
Example.from_dict(dict(example_inner=("first", "second")))
```
### Describe the results you expected
No error. Instead, got:
```py
Traceback (most recent call last):
File "/tmp/t.py", line 18, in <module>
Example.from_dict(dict(example_inner=("first", "second")))
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/api.py", line 70, in from_dict
return _decode_dataclass(cls, kvs, infer_missing)
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 210, in _decode_dataclass
init_kwargs[field.name] = _decode_generic(field_type,
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 279, in _decode_generic
res = t(xs)
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 339, in <genexpr>
items = (_decode_generic(type_arg, x, infer_missing) for x in xs)
File "/usr/local/lib/python3.9/site-packages/dataclasses_json/core.py", line 258, in _decode_generic
res = type_(value)
File "/usr/local/Cellar/[email protected]/3.9.16/Frameworks/Python.framework/Versions/3.9/lib/python3.9/enum.py", line 384, in __call__
return cls.__new__(cls, value)
File "/usr/local/Cellar/[email protected]/3.9.16/Frameworks/Python.framework/Versions/3.9/lib/python3.9/enum.py", line 702, in __new__
raise ve_exc
ValueError: 'second' is not a valid ExampleEnum
```
### Python version you are using
3.9.16
### Environment description
irrelevant; the cause is obvious: heterogeneous tuples are unsupported. In that case, though, the error should report that tuples are unsupported, rather than arise from trying to decode the enum.
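A simplified sketch of per-position decoding for heterogeneous tuples, in the spirit of the patch's `zip`-based `_decode_items` (illustrative only, not the library's API):
```py
from enum import Enum

class ExampleEnum(Enum):
    FIRST = "first"

def decode_tuple(type_args, values):
    # Pair each value with its own annotated type instead of applying the
    # first type argument to every element.
    return tuple(
        t(v) if isinstance(t, type) and issubclass(t, Enum) else v
        for t, v in zip(type_args, values)
    )

decode_tuple((ExampleEnum, str), ("first", "second"))
# -> (<ExampleEnum.FIRST: 'first'>, 'second')
```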
|
0.0
|
bc83017e26493a67bccb46ace6caecb00e9fde99
|
[
"tests/test_tuples.py::TestDecoder::test_enum_with_tuple",
"tests/test_tuples.py::TestDecoder::test_nested_tuple"
] |
[
"tests/test_enum.py::TestEncoder::test_data_with_enum",
"tests/test_enum.py::TestEncoder::test_data_with_str_enum",
"tests/test_enum.py::TestEncoder::test_data_with_enum_default_value",
"tests/test_enum.py::TestEncoder::test_collection_with_enum",
"tests/test_enum.py::TestEncoder::test_enum_with_list",
"tests/test_enum.py::TestDecoder::test_data_with_enum",
"tests/test_enum.py::TestDecoder::test_data_with_str_enum",
"tests/test_enum.py::TestDecoder::test_data_with_enum_default_value",
"tests/test_enum.py::TestDecoder::test_collection_with_enum",
"tests/test_enum.py::TestValidator::test_data_with_enum[str1-True]",
"tests/test_enum.py::TestValidator::test_data_with_enum[str2-True]",
"tests/test_enum.py::TestValidator::test_data_with_enum[str3-True]",
"tests/test_enum.py::TestValidator::test_data_with_enum[1-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[1.23-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[str4-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[2-False]",
"tests/test_enum.py::TestValidator::test_data_with_enum[1.24-False]",
"tests/test_enum.py::TestValidator::test_data_with_str_enum[str1-True]",
"tests/test_enum.py::TestValidator::test_data_with_str_enum[str2-False]",
"tests/test_enum.py::TestLoader::test_data_with_enum[{\"name\":",
"tests/test_enum.py::TestLoader::test_data_with_enum_exception",
"tests/test_enum.py::TestLoader::test_data_with_str_enum[{\"my_str_enum\":",
"tests/test_enum.py::TestLoader::test_data_with_str_enum_exception",
"tests/test_tuples.py::TestEncoder::test_enum_with_tuple",
"tests/test_tuples.py::TestEncoder::test_nested_tuple"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-13 09:38:04+00:00
|
mit
| 3,566 |
|
lifeomic__phc-sdk-py-102
|
diff --git a/phc/easy/__init__.py b/phc/easy/__init__.py
index db31100..b50bf0c 100644
--- a/phc/easy/__init__.py
+++ b/phc/easy/__init__.py
@@ -8,8 +8,6 @@ from phc.easy.diagnostic_report import DiagnosticReport
from phc.easy.document_reference import DocumentReference
from phc.easy.encounter import Encounter
from phc.easy.frame import Frame
-from phc.easy.genomic_short_variant import GenomicShortVariant
-from phc.easy.genomic_test import GenomicTest
from phc.easy.goal import Goal
from phc.easy.imaging_study import ImagingStudy
from phc.easy.immunization import Immunization
@@ -20,6 +18,8 @@ from phc.easy.medication_dispense import MedicationDispense
from phc.easy.medication_request import MedicationRequest
from phc.easy.medication_statement import MedicationStatement
from phc.easy.observation import Observation
+from phc.easy.omics.genomic_short_variant import GenomicShortVariant
+from phc.easy.omics.genomic_test import GenomicTest
from phc.easy.organization import Organization
from phc.easy.patient_item import PatientItem
from phc.easy.patients import Patient
@@ -34,7 +34,6 @@ from phc.easy.referral_request import ReferralRequest
from phc.easy.sequence import Sequence
from phc.easy.specimen import Specimen
-
__all__ = [
"AuditEvent",
"Auth",
diff --git a/phc/easy/omics/__init__.py b/phc/easy/omics/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/phc/easy/omics/genomic_short_variant.py b/phc/easy/omics/genomic_short_variant.py
new file mode 100644
index 0000000..c2f69cc
--- /dev/null
+++ b/phc/easy/omics/genomic_short_variant.py
@@ -0,0 +1,125 @@
+import inspect
+from typing import List, Optional
+
+import pandas as pd
+from phc.easy.auth import Auth
+from phc.easy.frame import Frame
+from phc.easy.omics.options.genomic_short_variant import (
+ GenomicShortVariantInclude,
+ GenomicShortVariantOptions,
+)
+from phc.easy.paging_api_item import PagingApiItem
+
+
+class GenomicShortVariant(PagingApiItem):
+ @staticmethod
+ def resource_path():
+ return "genomics/variants"
+
+ @staticmethod
+ def params_class():
+ return GenomicShortVariantOptions
+
+ @staticmethod
+ def transform_results(data_frame: pd.DataFrame, **expand_args):
+ def expand_id(id_column: pd.Series):
+ return pd.concat(
+ [
+ id_column,
+ id_column.str.split(":", expand=True).rename(
+ columns={0: "variant_set_id", 2: "gene"}
+ )[["variant_set_id", "gene"]],
+ ],
+ axis=1,
+ )
+
+ args = {
+ **expand_args,
+ "custom_columns": [
+ *expand_args.get("custom_columns", []),
+ *[
+ Frame.codeable_like_column_expander(k)
+ for k in ["clinvar", "cosmic", "vcf"]
+ ],
+ ("id", expand_id),
+ ],
+ }
+
+ return Frame.expand(data_frame, **args)
+
+ @classmethod
+ def get_data_frame(
+ cls,
+ # Query parameters
+ variant_set_ids: List[str],
+ include: List[GenomicShortVariantInclude] = ["vcf"],
+ gene: List[str] = [],
+ rsid: List[str] = [],
+ chromosome: List[str] = [],
+ clinvar_allele_id: List[str] = [],
+ clinvar_disease: List[str] = [],
+ clinvar_review: List[str] = [],
+ clinvar_significance: List[str] = [],
+ cosmic_id: List[str] = [],
+ cosmic_status: List[str] = [],
+ cosmic_histology: List[str] = [],
+ cosmic_tumor_site: List[str] = [],
+ variant_class: List[str] = [],
+ group: List[str] = [],
+ impact: List[str] = [],
+ transcript_id: List[str] = [],
+ biotype: List[str] = [],
+ amino_acid_change: List[str] = [],
+ sequence_type: List[str] = [],
+ position: List[str] = [],
+ cosmic_sample_count: List[str] = [],
+ min_allele_frequency: List[str] = [],
+ max_allele_frequency: List[str] = [],
+ pop_allele_frequency: List[str] = [],
+ exac_allele_frequency: List[str] = [],
+ exac_homozygous: List[str] = [],
+ dbnsfp_damaging_count: List[str] = [],
+ dbnsfp_damaging_predictor: List[str] = [],
+ dbnsfp_damaging_vote: List[str] = [],
+ dbnsfp_fathmm_rankscore: List[str] = [],
+ dbnsfp_fathmm_pred: List[str] = [],
+ dbnsfp_mean_rankscore: List[str] = [],
+ dbnsfp_mean_rankscore_predictor: List[str] = [],
+ dbnsfp_mutationtaster_rankscore: List[str] = [],
+ dbnsfp_mutationtaster_pred: List[str] = [],
+ dbnsfp_sift_rankscore: List[str] = [],
+ dbnsfp_sift_pred: List[str] = [],
+ zygosity: List[str] = [],
+ genotype: List[str] = [],
+ variant_allele_frequency: List[str] = [],
+ quality: List[str] = [],
+ read_depth: List[str] = [],
+ alt_read_depth: List[str] = [],
+ ref_read_depth: List[str] = [],
+ variant_filter: List[str] = [],
+ drug_associations: Optional[bool] = None,
+ # Execution parameters,
+ all_results: bool = False,
+ auth_args: Auth = Auth.shared(),
+ max_pages: Optional[int] = None,
+ page_size: Optional[int] = None,
+ log: bool = False,
+ **kw_args,
+ ):
+ """Execute a request for genomic short variants
+
+ ## Parameters
+
+ Query: `phc.easy.omics.options.genomic_short_variant.GenomicShortVariantOptions`
+
+ Execution: `phc.easy.query.Query.execute_paging_api`
+
+ Expansion: `phc.easy.frame.Frame.expand`
+
+ NOTE:
+ - `variant_class` is translated to `class` as a parameter
+ - `variant_filter` is translated to `filter` as a parameter
+ """
+ return super().get_data_frame(
+ **kw_args, **cls._get_current_args(inspect.currentframe(), locals())
+ )
diff --git a/phc/easy/genomic_test.py b/phc/easy/omics/genomic_test.py
similarity index 100%
rename from phc/easy/genomic_test.py
rename to phc/easy/omics/genomic_test.py
diff --git a/phc/easy/omics/options/__init__.py b/phc/easy/omics/options/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/phc/easy/genomic_short_variant.py b/phc/easy/omics/options/genomic_short_variant.py
similarity index 52%
rename from phc/easy/genomic_short_variant.py
rename to phc/easy/omics/options/genomic_short_variant.py
index f6aa6db..22ed728 100644
--- a/phc/easy/genomic_short_variant.py
+++ b/phc/easy/omics/options/genomic_short_variant.py
@@ -1,13 +1,8 @@
-import inspect
-import pandas as pd
-from typing import List, Optional
-from phc.easy.frame import Frame
-from pydantic import Field
-from phc.easy.auth import Auth
-from phc.easy.paging_api_item import PagingApiItem, PagingApiOptions
-
from enum import Enum
+from typing import List, Optional
+from phc.easy.paging_api_item import PagingApiOptions
+from pydantic import Field
MAPPINGS = {
"variant_set_ids": "variantSetIds",
@@ -115,105 +110,3 @@ class GenomicShortVariantOptions(PagingApiOptions):
value = ",".join(value)
return (MAPPINGS.get(key, key), value)
-
-
-class GenomicShortVariant(PagingApiItem):
- @staticmethod
- def resource_path():
- return "genomics/variants"
-
- @staticmethod
- def params_class():
- return GenomicShortVariantOptions
-
- @staticmethod
- def transform_results(data_frame: pd.DataFrame, **expand_args):
- args = {
- **expand_args,
- "custom_columns": [
- *expand_args.get("custom_columns", []),
- *[
- Frame.codeable_like_column_expander(k)
- for k in ["clinvar", "cosmic", "vcf"]
- ],
- ],
- }
-
- return Frame.expand(data_frame, **args)
-
- @classmethod
- def get_data_frame(
- cls,
- # Query parameters
- variant_set_ids: List[str],
- include: List[GenomicShortVariantInclude] = ["vcf"],
- gene: List[str] = [],
- rsid: List[str] = [],
- chromosome: List[str] = [],
- clinvar_allele_id: List[str] = [],
- clinvar_disease: List[str] = [],
- clinvar_review: List[str] = [],
- clinvar_significance: List[str] = [],
- cosmic_id: List[str] = [],
- cosmic_status: List[str] = [],
- cosmic_histology: List[str] = [],
- cosmic_tumor_site: List[str] = [],
- variant_class: List[str] = [],
- group: List[str] = [],
- impact: List[str] = [],
- transcript_id: List[str] = [],
- biotype: List[str] = [],
- amino_acid_change: List[str] = [],
- sequence_type: List[str] = [],
- position: List[str] = [],
- cosmic_sample_count: List[str] = [],
- min_allele_frequency: List[str] = [],
- max_allele_frequency: List[str] = [],
- pop_allele_frequency: List[str] = [],
- exac_allele_frequency: List[str] = [],
- exac_homozygous: List[str] = [],
- dbnsfp_damaging_count: List[str] = [],
- dbnsfp_damaging_predictor: List[str] = [],
- dbnsfp_damaging_vote: List[str] = [],
- dbnsfp_fathmm_rankscore: List[str] = [],
- dbnsfp_fathmm_pred: List[str] = [],
- dbnsfp_mean_rankscore: List[str] = [],
- dbnsfp_mean_rankscore_predictor: List[str] = [],
- dbnsfp_mutationtaster_rankscore: List[str] = [],
- dbnsfp_mutationtaster_pred: List[str] = [],
- dbnsfp_sift_rankscore: List[str] = [],
- dbnsfp_sift_pred: List[str] = [],
- zygosity: List[str] = [],
- genotype: List[str] = [],
- variant_allele_frequency: List[str] = [],
- quality: List[str] = [],
- read_depth: List[str] = [],
- alt_read_depth: List[str] = [],
- ref_read_depth: List[str] = [],
- variant_filter: List[str] = [],
- drug_associations: Optional[bool] = None,
- # Execution parameters,
- all_results: bool = False,
- auth_args: Auth = Auth.shared(),
- max_pages: Optional[int] = None,
- page_size: Optional[int] = None,
- log: bool = False,
- **kw_args,
- ):
- """Execute a request for genomic short variants
-
- ## Parameters
-
- Query: `GenomicShortVariantOptions`
-
- Execution: `phc.easy.query.Query.execute_paging_api`
-
- Expansion: `phc.easy.frame.Frame.expand`
-
- NOTE:
- - `variant_class` is translated to `class` as a parameter
- - `variant_filter` is translated to `filter` as a parameter
- """
- return super().get_data_frame(
- **kw_args, **cls._get_current_args(inspect.currentframe(), locals())
- )
|
lifeomic/phc-sdk-py
|
24e973757a0133993609ca76404b9c41d3cb6e16
|
diff --git a/tests/test_genomic_short_variant.py b/tests/test_genomic_short_variant.py
new file mode 100644
index 0000000..4e1c289
--- /dev/null
+++ b/tests/test_genomic_short_variant.py
@@ -0,0 +1,28 @@
+import pandas as pd
+from phc.easy.omics.genomic_short_variant import GenomicShortVariant
+
+
+def test_parse_id():
+ raw_df = pd.DataFrame(
+ # NOTE: Sample is taken and adapted from BRCA data set
+ [
+ {"id": "f0e381b6-a9b3-4411-af56-7f7f5ce3ce6b:XjQLzpOuLm=:GOLGA3"},
+ {"id": "f0e381b6-a9b3-4411-af56-7f7f5ce3ce6b:naTuKl96CL=:ESCO1"},
+ {"id": "6b0591ce-7b3b-4b04-85bc-d17e463ca869:A235y+Jw+v=:MAP3K13"},
+ {"id": "f0e381b6-a9b3-4411-af56-7f7f5ce3ce6b:dOML6l4/uk=:MAP3K7"},
+ {"id": "f0e381b6-a9b3-4411-af56-7f7f5ce3ce6b:tCkWMHDLL7=:CACNA1B"},
+ {"id": "6b0591ce-7b3b-4b04-85bc-d17e463ca869:3szKb4RVAR=:PCDHB7"},
+ {"id": "6b0591ce-7b3b-4b04-85bc-d17e463ca869:BsZ3G0NtUz=:FASN"},
+ ]
+ )
+
+ frame = GenomicShortVariant.transform_results(raw_df)
+
+ assert frame.columns.values.tolist() == ["id", "variant_set_id", "gene"]
+
+ assert frame.variant_set_id.unique().tolist() == [
+ "f0e381b6-a9b3-4411-af56-7f7f5ce3ce6b",
+ "6b0591ce-7b3b-4b04-85bc-d17e463ca869",
+ ]
+
+ assert "FASN" in frame.gene.values.tolist()
|
Add variant_set_id when returning GenomicShortVariant
The id of each variant comes in the following form:
```python
'55e945ec-57d1-4dde-9a59-bcdd6d7271e6:+0LLoDMx2dXBmvef9GcN4Dz+v4EMI87/FXW9X2mG72k=:TFB2M'
```
The first part of this ID is the `variant_set_id`, which can be joined with the output of `GenomicTest` to match a given mutation to a patient. This is a common use case.
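A small pandas sketch of extracting the join key, assuming the three-part `<variant_set_id>:<hash>:<gene>` layout shown above (this mirrors the `str.split` approach in the patch):
```py
import pandas as pd

ids = pd.Series([
    "55e945ec-57d1-4dde-9a59-bcdd6d7271e6:+0LLoDMx2dXBmvef9GcN4Dz+v4EMI87/FXW9X2mG72k=:TFB2M",
])

# Split on ":" and keep the first and last parts as named columns.
parts = ids.str.split(":", expand=True).rename(columns={0: "variant_set_id", 2: "gene"})
parts[["variant_set_id", "gene"]]
```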
|
0.0
|
24e973757a0133993609ca76404b9c41d3cb6e16
|
[
"tests/test_genomic_short_variant.py::test_parse_id"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-10 17:52:52+00:00
|
mit
| 3,567 |
|
lifeomic__phc-sdk-py-69
|
diff --git a/phc/easy/patients/__init__.py b/phc/easy/patients/__init__.py
index c6ecfb0..fe3c26d 100644
--- a/phc/easy/patients/__init__.py
+++ b/phc/easy/patients/__init__.py
@@ -1,24 +1,23 @@
import pandas as pd
-from phc.easy.auth import Auth
from phc.easy.frame import Frame
+from phc.easy.item import Item
from phc.easy.patients.address import expand_address_column
from phc.easy.patients.name import expand_name_column
-from phc.easy.query import Query
-class Patient:
+class Patient(Item):
@staticmethod
- def get_count(query_overrides: dict = {}, auth_args=Auth.shared()):
- return Query.find_count_of_dsl_query(
- {
- "type": "select",
- "columns": "*",
- "from": [{"table": "patient"}],
- **query_overrides,
- },
- auth_args=auth_args,
- )
+ def table_name():
+ return "patient"
+
+ @staticmethod
+ def code_keys():
+ return [
+ "extension.valueCodeableConcept.coding",
+ "identifier.type.coding",
+ "meta.tag",
+ ]
@staticmethod
def transform_results(data_frame: pd.DataFrame, **expand_args):
@@ -32,65 +31,3 @@ class Patient:
}
return Frame.expand(data_frame, **args)
-
- @staticmethod
- def get_data_frame(
- all_results: bool = False,
- raw: bool = False,
- query_overrides: dict = {},
- auth_args: Auth = Auth.shared(),
- ignore_cache: bool = False,
- expand_args: dict = {},
- ):
- """Retrieve patients as a data frame with unwrapped FHIR columns
-
- Attributes
- ----------
- all_results : bool = False
- Override limit to retrieve all patients
-
- raw : bool = False
- If raw, then values will not be expanded (useful for manual
- inspection if something goes wrong). Note that this option will
- override all_results if True.
-
- query_overrides : dict = {}
- Override any part of the elasticsearch FHIR query
-
- auth_args : Any
- The authenication to use for the account and project (defaults to shared)
-
- ignore_cache : bool = False
- Bypass the caching system that auto-saves results to a CSV file.
- Caching only occurs when all results are being retrieved.
-
- expand_args : Any
- Additional arguments passed to phc.Frame.expand
-
- Examples
- --------
- >>> import phc.easy as phc
- >>> phc.Auth.set({'account': '<your-account-name>'})
- >>> phc.Project.set_current('My Project Name')
- >>> phc.Patient.get_data_frame()
-
- """
- query = {
- "type": "select",
- "columns": "*",
- "from": [{"table": "patient"}],
- **query_overrides,
- }
-
- def transform(df: pd.DataFrame):
- return Patient.transform_results(df, **expand_args)
-
- return Query.execute_fhir_dsl_with_options(
- query,
- transform,
- all_results,
- raw,
- query_overrides,
- auth_args,
- ignore_cache,
- )
diff --git a/phc/easy/patients/address.py b/phc/easy/patients/address.py
index edbd1c8..c7af9a3 100644
--- a/phc/easy/patients/address.py
+++ b/phc/easy/patients/address.py
@@ -1,4 +1,6 @@
import pandas as pd
+
+from funcy import first
from phc.easy.codeable import generic_codeable_to_dict
from phc.easy.util import concat_dicts
@@ -22,18 +24,29 @@ def expand_address_value(value):
if type(value) is not list:
return {}
- # Value is always list of one item
- assert len(value) == 1
- value = value[0]
+ primary_address = first(
+ filter(lambda v: v.get("use") != "old", value)
+ ) or first(value)
+
+ other_addresses = list(
+ filter(lambda address: address != primary_address, value)
+ )
- return concat_dicts(
- [
- expand_address_attr(f"address_{key}", item_value)
- for key, item_value in value.items()
- if key != "text"
- ]
+ other_attrs = (
+ {"other_addresses": other_addresses} if len(other_addresses) > 0 else {}
)
+ return {
+ **concat_dicts(
+ [
+ expand_address_attr(f"address_{key}", item_value)
+ for key, item_value in primary_address.items()
+ if key != "text"
+ ]
+ ),
+ **other_attrs,
+ }
+
def expand_address_column(address_col):
return pd.DataFrame(map(expand_address_value, address_col.values))
|
lifeomic/phc-sdk-py
|
31adf1f7f451013216071c2d8737e3512e5b56e8
|
diff --git a/tests/test_easy_patient_address.py b/tests/test_easy_patient_address.py
index d51ff86..42bb1cd 100644
--- a/tests/test_easy_patient_address.py
+++ b/tests/test_easy_patient_address.py
@@ -1,18 +1,64 @@
import pandas as pd
+import math
from phc.easy.patients.address import expand_address_column
+
+def non_na_dict(dictionary: dict):
+ return {
+ k: v
+ for k, v in dictionary.items()
+ if not isinstance(v, float) or not math.isnan(v)
+ }
+
+
def test_expand_address_column():
- sample = pd.DataFrame([{
- 'address': [
- {'line': ['123 ABC Court'], 'city': 'Zionsville', 'state': 'IN', 'use': 'home'}
+ sample = pd.DataFrame(
+ [
+ {
+ "address": [
+ {
+ "line": ["123 ABC Court"],
+ "city": "Zionsville",
+ "state": "IN",
+ "use": "home",
+ }
+ ]
+ },
+ {
+ "address": [
+ {
+ "use": "old",
+ "state": "SC",
+ "period": {"start": "1999", "end": "2001"},
+ },
+ {
+ "state": "NC",
+ "city": "Raleigh",
+ "period": {"start": "2001"},
+ },
+ ]
+ },
]
- }])
+ )
df = expand_address_column(sample.address)
- assert df.iloc[0].to_dict() == {
- 'address_line_0': '123 ABC Court',
- 'address_city': 'Zionsville',
- 'address_state': 'IN',
- 'address_use': 'home'
+ assert non_na_dict(df.iloc[0].to_dict()) == {
+ "address_line_0": "123 ABC Court",
+ "address_city": "Zionsville",
+ "address_state": "IN",
+ "address_use": "home",
+ }
+
+ assert non_na_dict(df.iloc[1].to_dict()) == {
+ "address_city": "Raleigh",
+ "address_state": "NC",
+ "address_period_start": "2001",
+ "other_addresses": [
+ {
+ "use": "old",
+ "state": "SC",
+ "period": {"start": "1999", "end": "2001"},
+ }
+ ],
}
|
Multiple addresses in a column break frame expansion
Pulling out the address breaks when multiple values are present. For example:
```
[{'state': 'NC', 'postalCode': '27540', 'period': {'start': '2001'}}, {'use': 'old', 'state': 'SC', 'period': {'start': '1999', 'end': '2001'}}]
```
Error:
```
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-6-0355439bcf07> in <module>
----> 1 phc.Patient.get_data_frame()
/opt/conda/lib/python3.7/site-packages/phc/easy/patients/__init__.py in get_data_frame(limit, all_results, raw, query_overrides, auth_args, ignore_cache, expand_args)
101 query_overrides,
102 auth_args,
--> 103 ignore_cache,
104 )
/opt/conda/lib/python3.7/site-packages/phc/easy/query/__init__.py in execute_fhir_dsl_with_options(query, transform, all_results, raw, query_overrides, auth_args, ignore_cache)
168 return df
169
--> 170 return transform(df)
/opt/conda/lib/python3.7/site-packages/phc/easy/patients/__init__.py in transform(df)
92
93 def transform(df: pd.DataFrame):
---> 94 return Patient.transform_results(df, **expand_args)
95
96 return Query.execute_fhir_dsl_with_options(
/opt/conda/lib/python3.7/site-packages/phc/easy/patients/__init__.py in transform_results(data_frame, **expand_args)
32 }
33
---> 34 return Frame.expand(data_frame, **args)
35
36 @staticmethod
/opt/conda/lib/python3.7/site-packages/phc/easy/frame.py in expand(frame, code_columns, date_columns, custom_columns)
94 *[
95 column_to_frame(frame, key, func)
---> 96 for key, func in custom_columns
97 ],
98 frame.drop([*codeable_col_names, *custom_names], axis=1),
/opt/conda/lib/python3.7/site-packages/phc/easy/frame.py in <listcomp>(.0)
94 *[
95 column_to_frame(frame, key, func)
---> 96 for key, func in custom_columns
97 ],
98 frame.drop([*codeable_col_names, *custom_names], axis=1),
/opt/conda/lib/python3.7/site-packages/phc/easy/frame.py in column_to_frame(frame, column_name, expand_func)
29 "Converts a column (if exists) to a data frame with multiple columns"
30 if column_name in frame.columns:
---> 31 return expand_func(frame[column_name])
32
33 return pd.DataFrame([])
/opt/conda/lib/python3.7/site-packages/phc/easy/patients/address.py in expand_address_column(address_col)
32
33 def expand_address_column(address_col):
---> 34 return pd.DataFrame(map(expand_address_value, address_col.values))
/opt/conda/lib/python3.7/site-packages/pandas/core/frame.py in __init__(self, data, index, columns, dtype, copy)
467 elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
468 if not isinstance(data, (abc.Sequence, ExtensionArray)):
--> 469 data = list(data)
470 if len(data) > 0:
471 if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
/opt/conda/lib/python3.7/site-packages/phc/easy/patients/address.py in expand_address_value(value)
22
23 # Value is always list of one item
---> 24 assert len(value) == 1
25 value = value[0]
26
AssertionError:
```
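The eventual fix replaces the single-item assertion with a primary-address selection; a plain-Python sketch of that selection rule (the real patch uses `funcy.first`):
```py
def pick_primary(addresses):
    # Prefer the first address not marked "old"; fall back to the first entry.
    current = [a for a in addresses if a.get("use") != "old"]
    return (current or addresses)[0]

addresses = [
    {"state": "NC", "postalCode": "27540", "period": {"start": "2001"}},
    {"use": "old", "state": "SC", "period": {"start": "1999", "end": "2001"}},
]
pick_primary(addresses)  # -> the NC address
```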
|
0.0
|
31adf1f7f451013216071c2d8737e3512e5b56e8
|
[
"tests/test_easy_patient_address.py::test_expand_address_column"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-08 20:08:48+00:00
|
mit
| 3,568 |
|
lifeomic__phc-sdk-py-70
|
diff --git a/phc/easy/patients/__init__.py b/phc/easy/patients/__init__.py
index c6ecfb0..fe3c26d 100644
--- a/phc/easy/patients/__init__.py
+++ b/phc/easy/patients/__init__.py
@@ -1,24 +1,23 @@
import pandas as pd
-from phc.easy.auth import Auth
from phc.easy.frame import Frame
+from phc.easy.item import Item
from phc.easy.patients.address import expand_address_column
from phc.easy.patients.name import expand_name_column
-from phc.easy.query import Query
-class Patient:
+class Patient(Item):
@staticmethod
- def get_count(query_overrides: dict = {}, auth_args=Auth.shared()):
- return Query.find_count_of_dsl_query(
- {
- "type": "select",
- "columns": "*",
- "from": [{"table": "patient"}],
- **query_overrides,
- },
- auth_args=auth_args,
- )
+ def table_name():
+ return "patient"
+
+ @staticmethod
+ def code_keys():
+ return [
+ "extension.valueCodeableConcept.coding",
+ "identifier.type.coding",
+ "meta.tag",
+ ]
@staticmethod
def transform_results(data_frame: pd.DataFrame, **expand_args):
@@ -32,65 +31,3 @@ class Patient:
}
return Frame.expand(data_frame, **args)
-
- @staticmethod
- def get_data_frame(
- all_results: bool = False,
- raw: bool = False,
- query_overrides: dict = {},
- auth_args: Auth = Auth.shared(),
- ignore_cache: bool = False,
- expand_args: dict = {},
- ):
- """Retrieve patients as a data frame with unwrapped FHIR columns
-
- Attributes
- ----------
- all_results : bool = False
- Override limit to retrieve all patients
-
- raw : bool = False
- If raw, then values will not be expanded (useful for manual
- inspection if something goes wrong). Note that this option will
- override all_results if True.
-
- query_overrides : dict = {}
- Override any part of the elasticsearch FHIR query
-
- auth_args : Any
- The authenication to use for the account and project (defaults to shared)
-
- ignore_cache : bool = False
- Bypass the caching system that auto-saves results to a CSV file.
- Caching only occurs when all results are being retrieved.
-
- expand_args : Any
- Additional arguments passed to phc.Frame.expand
-
- Examples
- --------
- >>> import phc.easy as phc
- >>> phc.Auth.set({'account': '<your-account-name>'})
- >>> phc.Project.set_current('My Project Name')
- >>> phc.Patient.get_data_frame()
-
- """
- query = {
- "type": "select",
- "columns": "*",
- "from": [{"table": "patient"}],
- **query_overrides,
- }
-
- def transform(df: pd.DataFrame):
- return Patient.transform_results(df, **expand_args)
-
- return Query.execute_fhir_dsl_with_options(
- query,
- transform,
- all_results,
- raw,
- query_overrides,
- auth_args,
- ignore_cache,
- )
diff --git a/phc/easy/patients/address.py b/phc/easy/patients/address.py
index edbd1c8..c7af9a3 100644
--- a/phc/easy/patients/address.py
+++ b/phc/easy/patients/address.py
@@ -1,4 +1,6 @@
import pandas as pd
+
+from funcy import first
from phc.easy.codeable import generic_codeable_to_dict
from phc.easy.util import concat_dicts
@@ -22,18 +24,29 @@ def expand_address_value(value):
if type(value) is not list:
return {}
- # Value is always list of one item
- assert len(value) == 1
- value = value[0]
+ primary_address = first(
+ filter(lambda v: v.get("use") != "old", value)
+ ) or first(value)
+
+ other_addresses = list(
+ filter(lambda address: address != primary_address, value)
+ )
- return concat_dicts(
- [
- expand_address_attr(f"address_{key}", item_value)
- for key, item_value in value.items()
- if key != "text"
- ]
+ other_attrs = (
+ {"other_addresses": other_addresses} if len(other_addresses) > 0 else {}
)
+ return {
+ **concat_dicts(
+ [
+ expand_address_attr(f"address_{key}", item_value)
+ for key, item_value in primary_address.items()
+ if key != "text"
+ ]
+ ),
+ **other_attrs,
+ }
+
def expand_address_column(address_col):
return pd.DataFrame(map(expand_address_value, address_col.values))
diff --git a/phc/util/api_cache.py b/phc/util/api_cache.py
index 0e7f58d..475d88b 100644
--- a/phc/util/api_cache.py
+++ b/phc/util/api_cache.py
@@ -1,5 +1,6 @@
import hashlib
import json
+import os
from pathlib import Path
from typing import Callable
@@ -86,6 +87,9 @@ class APICache:
writer = CSVWriter(filename)
def handle_batch(batch, is_finished):
+ if is_finished and not os.path.exists(filename):
+ return pd.DataFrame()
+
if is_finished:
print(f'Loading data frame from "{filename}"')
return APICache.read_csv(filename)
|
lifeomic/phc-sdk-py
|
31adf1f7f451013216071c2d8737e3512e5b56e8
|
diff --git a/tests/test_easy_patient_address.py b/tests/test_easy_patient_address.py
index d51ff86..42bb1cd 100644
--- a/tests/test_easy_patient_address.py
+++ b/tests/test_easy_patient_address.py
@@ -1,18 +1,64 @@
import pandas as pd
+import math
from phc.easy.patients.address import expand_address_column
+
+def non_na_dict(dictionary: dict):
+ return {
+ k: v
+ for k, v in dictionary.items()
+ if not isinstance(v, float) or not math.isnan(v)
+ }
+
+
def test_expand_address_column():
- sample = pd.DataFrame([{
- 'address': [
- {'line': ['123 ABC Court'], 'city': 'Zionsville', 'state': 'IN', 'use': 'home'}
+ sample = pd.DataFrame(
+ [
+ {
+ "address": [
+ {
+ "line": ["123 ABC Court"],
+ "city": "Zionsville",
+ "state": "IN",
+ "use": "home",
+ }
+ ]
+ },
+ {
+ "address": [
+ {
+ "use": "old",
+ "state": "SC",
+ "period": {"start": "1999", "end": "2001"},
+ },
+ {
+ "state": "NC",
+ "city": "Raleigh",
+ "period": {"start": "2001"},
+ },
+ ]
+ },
]
- }])
+ )
df = expand_address_column(sample.address)
- assert df.iloc[0].to_dict() == {
- 'address_line_0': '123 ABC Court',
- 'address_city': 'Zionsville',
- 'address_state': 'IN',
- 'address_use': 'home'
+ assert non_na_dict(df.iloc[0].to_dict()) == {
+ "address_line_0": "123 ABC Court",
+ "address_city": "Zionsville",
+ "address_state": "IN",
+ "address_use": "home",
+ }
+
+ assert non_na_dict(df.iloc[1].to_dict()) == {
+ "address_city": "Raleigh",
+ "address_state": "NC",
+ "address_period_start": "2001",
+ "other_addresses": [
+ {
+ "use": "old",
+ "state": "SC",
+ "period": {"start": "1999", "end": "2001"},
+ }
+ ],
}
|
Auto-caching breaks when there are no results
It appears this happens because, on the "last batch", the APICache callback expects that the cache file already exists at that point.
Stacktrace:
```python
/opt/conda/lib/python3.7/site-packages/phc/easy/procedure.py in get_data_frame(all_results, raw, patient_id, query_overrides, auth_args, ignore_cache, expand_args)
96 query_overrides,
97 auth_args,
---> 98 ignore_cache,
99 )
/opt/conda/lib/python3.7/site-packages/phc/easy/query/__init__.py in execute_fhir_dsl_with_options(query, transform, all_results, raw, query_overrides, auth_args, ignore_cache)
157 auth_args,
158 callback=APICache.build_cache_fhir_dsl_callback(
--> 159 query, transform
160 ),
161 )
/opt/conda/lib/python3.7/site-packages/phc/easy/query/__init__.py in execute_fhir_dsl(query, all_results, auth_args, callback)
110 return with_progress(
111 lambda: tqdm(total=MAX_RESULT_SIZE),
--> 112 lambda progress: recursive_execute_fhir_dsl(
113 {
114 "limit": [
/opt/conda/lib/python3.7/site-packages/phc/easy/query/fhir_dsl.py in with_progress(init_progress, func)
20 if _has_tqdm:
21 progress = init_progress()
---> 22 result = func(progress)
23 progress.close()
24 return result
/opt/conda/lib/python3.7/site-packages/phc/easy/query/__init__.py in <lambda>(progress)
126 progress=progress,
127 callback=callback,
--> 128 auth_args=auth_args,
129 ),
130 )
/opt/conda/lib/python3.7/site-packages/phc/easy/query/fhir_dsl.py in recursive_execute_fhir_dsl(query, scroll, progress, auth_args, callback, _scroll_id, _prev_hits)
72 callback(current_results, False)
73 elif callback and is_last_batch:
---> 74 return callback(current_results, True)
75 elif is_last_batch:
76 suffix = "+" if actual_count == MAX_RESULT_SIZE else ""
/opt/conda/lib/python3.7/site-packages/phc/util/api_cache.py in handle_batch(batch, is_finished)
76 if is_finished:
77 print(f'Loading data frame from "{filename}"')
---> 78 return APICache.read_csv(filename)
79
80 df = pd.DataFrame(map(lambda r: r["_source"], batch))
/opt/conda/lib/python3.7/site-packages/phc/util/api_cache.py in read_csv(filename)
85 @staticmethod
86 def read_csv(filename: str) -> pd.DataFrame:
---> 87 df = pd.read_csv(filename)
88 min_count = max(min(int(len(df) / 3), 5), 1)
89
/opt/conda/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision)
674 )
675
--> 676 return _read(filepath_or_buffer, kwds)
677
678 parser_f.__name__ = name
/opt/conda/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds)
446
447 # Create the parser.
--> 448 parser = TextFileReader(fp_or_buf, **kwds)
449
450 if chunksize or iterator:
/opt/conda/lib/python3.7/site-packages/pandas/io/parsers.py in __init__(self, f, engine, **kwds)
878 self.options["has_index_names"] = kwds["has_index_names"]
879
--> 880 self._make_engine(self.engine)
881
882 def close(self):
/opt/conda/lib/python3.7/site-packages/pandas/io/parsers.py in _make_engine(self, engine)
1112 def _make_engine(self, engine="c"):
1113 if engine == "c":
-> 1114 self._engine = CParserWrapper(self.f, **self.options)
1115 else:
1116 if engine == "python":
/opt/conda/lib/python3.7/site-packages/pandas/io/parsers.py in __init__(self, src, **kwds)
1889 kwds["usecols"] = self.usecols
1890
-> 1891 self._reader = parsers.TextReader(src, **kwds)
1892 self.unnamed_cols = self._reader.unnamed_cols
1893
pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.__cinit__()
pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._setup_parser_source()
FileNotFoundError: [Errno 2] File ~/Downloads/phc/api-cache/fhir_dsl_procedure_where_********.csv does not exist: '~/Downloads/phc/api-cache/fhir_dsl_procedure_where_********.csv'
```
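A sketch of the guard that fixes this, with simplified names mirroring the patch: when the final batch arrives and no cache file was ever written (zero results), return an empty frame instead of reading a nonexistent CSV.
```py
import os
import pandas as pd

def handle_batch(batch, is_finished, filename, read_csv):
    if is_finished and not os.path.exists(filename):
        return pd.DataFrame()  # nothing was cached; avoid FileNotFoundError
    if is_finished:
        return read_csv(filename)
    # ...otherwise append the batch rows to the CSV writer...
```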
|
0.0
|
31adf1f7f451013216071c2d8737e3512e5b56e8
|
[
"tests/test_easy_patient_address.py::test_expand_address_column"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-08 20:33:36+00:00
|
mit
| 3,569 |
|
lifeomic__phc-sdk-py-71
|
diff --git a/phc/easy/item.py b/phc/easy/item.py
index b764b79..77c4b90 100644
--- a/phc/easy/item.py
+++ b/phc/easy/item.py
@@ -45,6 +45,7 @@ class Item:
cls,
all_results: bool = False,
raw: bool = False,
+ max_pages: Union[int, None] = None,
query_overrides: dict = {},
auth_args=Auth.shared(),
ignore_cache: bool = False,
@@ -62,6 +63,9 @@ class Item:
If raw, then values will not be expanded (useful for manual
inspection if something goes wrong)
+ max_pages : int
+ The number of pages to retrieve (useful if working with tons of records)
+
query_overrides : dict = {}
Override any part of the elasticsearch FHIR query
@@ -105,6 +109,7 @@ class Item:
query_overrides,
auth_args,
ignore_cache,
+ max_pages=max_pages,
log=log,
)
diff --git a/phc/easy/patient_item.py b/phc/easy/patient_item.py
index ccf1f46..96e6a15 100644
--- a/phc/easy/patient_item.py
+++ b/phc/easy/patient_item.py
@@ -28,6 +28,7 @@ class PatientItem(Item):
raw: bool = False,
patient_id: Union[None, str] = None,
patient_ids: List[str] = [],
+ max_pages: Union[int, None] = None,
query_overrides: dict = {},
auth_args=Auth.shared(),
ignore_cache: bool = False,
@@ -51,6 +52,9 @@ class PatientItem(Item):
patient_ids : List[str]
Find records for given patient_ids
+ max_pages : int
+ The number of pages to retrieve (useful if working with tons of records)
+
query_overrides : dict = {}
Override any part of the elasticsearch FHIR query
@@ -96,6 +100,7 @@ class PatientItem(Item):
ignore_cache,
patient_id=patient_id,
patient_ids=patient_ids,
+ max_pages=max_pages,
patient_key=cls.patient_key(),
log=log,
patient_id_prefixes=cls.patient_id_prefixes(),
diff --git a/phc/easy/patients/__init__.py b/phc/easy/patients/__init__.py
index c6ecfb0..fe3c26d 100644
--- a/phc/easy/patients/__init__.py
+++ b/phc/easy/patients/__init__.py
@@ -1,24 +1,23 @@
import pandas as pd
-from phc.easy.auth import Auth
from phc.easy.frame import Frame
+from phc.easy.item import Item
from phc.easy.patients.address import expand_address_column
from phc.easy.patients.name import expand_name_column
-from phc.easy.query import Query
-class Patient:
+class Patient(Item):
@staticmethod
- def get_count(query_overrides: dict = {}, auth_args=Auth.shared()):
- return Query.find_count_of_dsl_query(
- {
- "type": "select",
- "columns": "*",
- "from": [{"table": "patient"}],
- **query_overrides,
- },
- auth_args=auth_args,
- )
+ def table_name():
+ return "patient"
+
+ @staticmethod
+ def code_keys():
+ return [
+ "extension.valueCodeableConcept.coding",
+ "identifier.type.coding",
+ "meta.tag",
+ ]
@staticmethod
def transform_results(data_frame: pd.DataFrame, **expand_args):
@@ -32,65 +31,3 @@ class Patient:
}
return Frame.expand(data_frame, **args)
-
- @staticmethod
- def get_data_frame(
- all_results: bool = False,
- raw: bool = False,
- query_overrides: dict = {},
- auth_args: Auth = Auth.shared(),
- ignore_cache: bool = False,
- expand_args: dict = {},
- ):
- """Retrieve patients as a data frame with unwrapped FHIR columns
-
- Attributes
- ----------
- all_results : bool = False
- Override limit to retrieve all patients
-
- raw : bool = False
- If raw, then values will not be expanded (useful for manual
- inspection if something goes wrong). Note that this option will
- override all_results if True.
-
- query_overrides : dict = {}
- Override any part of the elasticsearch FHIR query
-
- auth_args : Any
- The authenication to use for the account and project (defaults to shared)
-
- ignore_cache : bool = False
- Bypass the caching system that auto-saves results to a CSV file.
- Caching only occurs when all results are being retrieved.
-
- expand_args : Any
- Additional arguments passed to phc.Frame.expand
-
- Examples
- --------
- >>> import phc.easy as phc
- >>> phc.Auth.set({'account': '<your-account-name>'})
- >>> phc.Project.set_current('My Project Name')
- >>> phc.Patient.get_data_frame()
-
- """
- query = {
- "type": "select",
- "columns": "*",
- "from": [{"table": "patient"}],
- **query_overrides,
- }
-
- def transform(df: pd.DataFrame):
- return Patient.transform_results(df, **expand_args)
-
- return Query.execute_fhir_dsl_with_options(
- query,
- transform,
- all_results,
- raw,
- query_overrides,
- auth_args,
- ignore_cache,
- )
diff --git a/phc/easy/patients/address.py b/phc/easy/patients/address.py
index edbd1c8..c7af9a3 100644
--- a/phc/easy/patients/address.py
+++ b/phc/easy/patients/address.py
@@ -1,4 +1,6 @@
import pandas as pd
+
+from funcy import first
from phc.easy.codeable import generic_codeable_to_dict
from phc.easy.util import concat_dicts
@@ -22,18 +24,29 @@ def expand_address_value(value):
if type(value) is not list:
return {}
- # Value is always list of one item
- assert len(value) == 1
- value = value[0]
+ primary_address = first(
+ filter(lambda v: v.get("use") != "old", value)
+ ) or first(value)
+
+ other_addresses = list(
+ filter(lambda address: address != primary_address, value)
+ )
- return concat_dicts(
- [
- expand_address_attr(f"address_{key}", item_value)
- for key, item_value in value.items()
- if key != "text"
- ]
+ other_attrs = (
+ {"other_addresses": other_addresses} if len(other_addresses) > 0 else {}
)
+ return {
+ **concat_dicts(
+ [
+ expand_address_attr(f"address_{key}", item_value)
+ for key, item_value in primary_address.items()
+ if key != "text"
+ ]
+ ),
+ **other_attrs,
+ }
+
def expand_address_column(address_col):
return pd.DataFrame(map(expand_address_value, address_col.values))
diff --git a/phc/easy/query/__init__.py b/phc/easy/query/__init__.py
index 45eccdf..b2bd787 100644
--- a/phc/easy/query/__init__.py
+++ b/phc/easy/query/__init__.py
@@ -69,6 +69,7 @@ class Query:
all_results: bool = False,
auth_args: Auth = Auth.shared(),
callback: Union[Callable[[Any, bool], None], None] = None,
+ max_pages: Union[int, None] = None,
log: bool = False,
**query_kwargs,
):
@@ -101,6 +102,9 @@ class Query:
if is_finished:
return "batch finished
+ max_pages : int
+ The number of pages to retrieve (useful if working with tons of records)
+
log : bool = False
Whether to log the elasticsearch query sent to the server
@@ -151,11 +155,16 @@ class Query:
progress=progress,
callback=callback,
auth_args=auth_args,
+ max_pages=max_pages,
),
)
return recursive_execute_fhir_dsl(
- query, scroll=all_results, callback=callback, auth_args=auth_args,
+ query,
+ scroll=all_results,
+ callback=callback,
+ auth_args=auth_args,
+ max_pages=max_pages,
)
@staticmethod
@@ -167,6 +176,7 @@ class Query:
query_overrides: dict,
auth_args: Auth,
ignore_cache: bool,
+ max_pages: Union[int, None],
log: bool = False,
**query_kwargs,
):
@@ -179,6 +189,7 @@ class Query:
(not ignore_cache)
and (not raw)
and (all_results or FhirAggregation.is_aggregation_query(query))
+ and (max_pages is None)
)
if use_cache and APICache.does_cache_for_fhir_dsl_exist(query):
@@ -191,7 +202,11 @@ class Query:
)
results = Query.execute_fhir_dsl(
- query, all_results, auth_args, callback=callback
+ query,
+ all_results,
+ auth_args,
+ callback=callback,
+ max_pages=max_pages,
)
if isinstance(results, FhirAggregation):
diff --git a/phc/easy/query/fhir_dsl.py b/phc/easy/query/fhir_dsl.py
index 3fb03f6..b8db491 100644
--- a/phc/easy/query/fhir_dsl.py
+++ b/phc/easy/query/fhir_dsl.py
@@ -33,6 +33,8 @@ def recursive_execute_fhir_dsl(
progress: Union[None, tqdm] = None,
auth_args: Auth = Auth.shared(),
callback: Union[Callable[[Any, bool], None], None] = None,
+ max_pages: Union[int, None] = None,
+ _current_page: int = 1,
_scroll_id: str = "true",
_prev_hits: List = [],
):
@@ -54,7 +56,11 @@ def recursive_execute_fhir_dsl(
if progress:
progress.update(current_result_count)
- is_last_batch = current_result_count == 0 or scroll is False
+ is_last_batch = (
+ (current_result_count == 0)
+ or (scroll is False)
+ or ((max_pages is not None) and (_current_page >= max_pages))
+ )
results = [] if callback else [*_prev_hits, *current_results]
if callback and not is_last_batch:
@@ -73,6 +79,8 @@ def recursive_execute_fhir_dsl(
progress=progress,
auth_args=auth_args,
callback=callback,
+ max_pages=max_pages,
+ _current_page=_current_page + 1,
_scroll_id=_scroll_id,
_prev_hits=results,
)
diff --git a/phc/util/api_cache.py b/phc/util/api_cache.py
index 0e7f58d..475d88b 100644
--- a/phc/util/api_cache.py
+++ b/phc/util/api_cache.py
@@ -1,5 +1,6 @@
import hashlib
import json
+import os
from pathlib import Path
from typing import Callable
@@ -86,6 +87,9 @@ class APICache:
writer = CSVWriter(filename)
def handle_batch(batch, is_finished):
+ if is_finished and not os.path.exists(filename):
+ return pd.DataFrame()
+
if is_finished:
print(f'Loading data frame from "{filename}"')
return APICache.read_csv(filename)
|
lifeomic/phc-sdk-py
|
31adf1f7f451013216071c2d8737e3512e5b56e8
|
diff --git a/tests/test_easy_patient_address.py b/tests/test_easy_patient_address.py
index d51ff86..42bb1cd 100644
--- a/tests/test_easy_patient_address.py
+++ b/tests/test_easy_patient_address.py
@@ -1,18 +1,64 @@
import pandas as pd
+import math
from phc.easy.patients.address import expand_address_column
+
+def non_na_dict(dictionary: dict):
+ return {
+ k: v
+ for k, v in dictionary.items()
+ if not isinstance(v, float) or not math.isnan(v)
+ }
+
+
def test_expand_address_column():
- sample = pd.DataFrame([{
- 'address': [
- {'line': ['123 ABC Court'], 'city': 'Zionsville', 'state': 'IN', 'use': 'home'}
+ sample = pd.DataFrame(
+ [
+ {
+ "address": [
+ {
+ "line": ["123 ABC Court"],
+ "city": "Zionsville",
+ "state": "IN",
+ "use": "home",
+ }
+ ]
+ },
+ {
+ "address": [
+ {
+ "use": "old",
+ "state": "SC",
+ "period": {"start": "1999", "end": "2001"},
+ },
+ {
+ "state": "NC",
+ "city": "Raleigh",
+ "period": {"start": "2001"},
+ },
+ ]
+ },
]
- }])
+ )
df = expand_address_column(sample.address)
- assert df.iloc[0].to_dict() == {
- 'address_line_0': '123 ABC Court',
- 'address_city': 'Zionsville',
- 'address_state': 'IN',
- 'address_use': 'home'
+ assert non_na_dict(df.iloc[0].to_dict()) == {
+ "address_line_0": "123 ABC Court",
+ "address_city": "Zionsville",
+ "address_state": "IN",
+ "address_use": "home",
+ }
+
+ assert non_na_dict(df.iloc[1].to_dict()) == {
+ "address_city": "Raleigh",
+ "address_state": "NC",
+ "address_period_start": "2001",
+ "other_addresses": [
+ {
+ "use": "old",
+ "state": "SC",
+ "period": {"start": "1999", "end": "2001"},
+ }
+ ],
}
|
Add max_pages for PatientItem
Especially when dealing with a tremendous amount of data, we'd like to page until we reach a certain count.
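An illustrative, iterative sketch of page-limited fetching (the actual patch threads `max_pages` through the recursive `recursive_execute_fhir_dsl`); `fetch_page` is a hypothetical stand-in for one scroll request:
```py
def fetch_pages(fetch_page, max_pages=None):
    results, page = [], 1
    while True:
        batch = fetch_page(page)
        results.extend(batch)
        # Stop on an empty batch or once the page budget is spent.
        is_last = (not batch) or (max_pages is not None and page >= max_pages)
        if is_last:
            return results
        page += 1

fetch_pages(lambda p: [p] if p <= 5 else [], max_pages=3)  # -> [1, 2, 3]
```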
|
0.0
|
31adf1f7f451013216071c2d8737e3512e5b56e8
|
[
"tests/test_easy_patient_address.py::test_expand_address_column"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-09 13:05:12+00:00
|
mit
| 3,570 |
|
lig__pyventory-10
|
diff --git a/.gitignore b/.gitignore
index 80f81e0..04ae275 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,8 @@
!.gitignore
*~
*.pyc
-*.egg-info/
+
+/venv
+/build
+/dist
+/*.egg-info
diff --git a/pyventory/asset.py b/pyventory/asset.py
index d244bb8..03c31ec 100644
--- a/pyventory/asset.py
+++ b/pyventory/asset.py
@@ -2,7 +2,7 @@ from collections import OrderedDict, Mapping, Sequence
import six
-from pyventory.errors import ValueSubstitutionError
+from pyventory import errors
__all__ = ['Asset']
@@ -33,11 +33,21 @@ class Asset(object):
if not attr_name.startswith('_'))
for name, value in _vars.copy().items():
+
+ if value is NotImplemented:
+ if strict_format:
+ raise errors.PropertyIsNotImplementedError(
+ 'Var "{}" is not implemented in "{}" asset instance',
+ name, obj._name())
+ else:
+ del _vars[name]
+ continue
+
try:
_vars[name] = cls.__format_value(value, _vars)
except KeyError as e:
if strict_format:
- raise ValueSubstitutionError(
+ raise errors.ValueSubstitutionError(
'Var "{}" must be available for "{}" asset instance',
e.args[0], obj._name())
else:
diff --git a/pyventory/errors.py b/pyventory/errors.py
index 3616bd4..c83d2cf 100644
--- a/pyventory/errors.py
+++ b/pyventory/errors.py
@@ -12,3 +12,7 @@ class PyventoryError(Exception):
class ValueSubstitutionError(PyventoryError):
pass
+
+
+class PropertyIsNotImplementedError(PyventoryError):
+ pass
|
lig/pyventory
|
1630f08e58c95e8a50c0256d13034022e4a75067
|
diff --git a/tests/unit/test_inventory.py b/tests/unit/test_inventory.py
index 7bf24ef..356ddbe 100644
--- a/tests/unit/test_inventory.py
+++ b/tests/unit/test_inventory.py
@@ -1,8 +1,7 @@
import pytest
import six
-from pyventory import Asset, ansible_inventory
-from pyventory.errors import ValueSubstitutionError
+from pyventory import Asset, ansible_inventory, errors
def test_allow_mixins_for_inventory_items():
@@ -209,7 +208,7 @@ def test_require_arguments_for_format_strings():
test_asset = TestAsset()
- with pytest.raises(ValueSubstitutionError):
+ with pytest.raises(errors.ValueSubstitutionError):
ansible_inventory(locals())
@@ -398,3 +397,57 @@ def test_multiple_children():
}
}
}'''
+
+
+def test_allow_notimplemented_value():
+
+ class BaseTestAsset(Asset):
+ foo = NotImplemented
+
+ class TestAsset(BaseTestAsset):
+ foo = 'bar'
+
+ test_asset = TestAsset()
+
+ result = six.StringIO()
+ ansible_inventory(locals(), out=result, indent=4)
+
+ # hack for py27 `json.dump()` behavior
+ result = '\n'.join([x.rstrip() for x in result.getvalue().split('\n')])
+
+ assert result == '''{
+ "BaseTestAsset": {
+ "children": [
+ "TestAsset"
+ ]
+ },
+ "TestAsset": {
+ "vars": {
+ "foo": "bar"
+ },
+ "hosts": [
+ "test_asset"
+ ]
+ },
+ "_meta": {
+ "hostvars": {
+ "test_asset": {
+ "foo": "bar"
+ }
+ }
+ }
+}'''
+
+
+def test_raise_notimplemented_value_in_host():
+
+ class BaseTestAsset(Asset):
+ foo = NotImplemented
+
+ class TestAsset(BaseTestAsset):
+ pass
+
+ test_asset = TestAsset()
+
+ with pytest.raises(errors.PropertyIsNotImplementedError):
+ ansible_inventory(locals())
|
NotImplemented as property value
Allow defining `NotImplemented` as Asset property value.
Do not include such a property in group vars if not overridden.
Raise an exception if the value wasn't overridden in the host vars.
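
The tests added with this change pin the behavior down; a condensed sketch:

```python
from pyventory import Asset, ansible_inventory

class BaseAsset(Asset):
    foo = NotImplemented   # placeholder: concrete assets must override

class GoodAsset(BaseAsset):
    foo = 'bar'            # overridden -> exported normally

class BadAsset(BaseAsset):
    pass                   # not overridden -> error at export time

good = GoodAsset()
ansible_inventory({'good': good})  # fine: 'foo' appears in host vars,
                                   # and is skipped in BaseAsset group vars

bad = BadAsset()
ansible_inventory({'bad': bad})    # raises PropertyIsNotImplementedError
```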
|
0.0
|
1630f08e58c95e8a50c0256d13034022e4a75067
|
[
"tests/unit/test_inventory.py::test_allow_notimplemented_value",
"tests/unit/test_inventory.py::test_raise_notimplemented_value_in_host"
] |
[
"tests/unit/test_inventory.py::test_allow_mixins_for_inventory_items",
"tests/unit/test_inventory.py::test_allow_host_specific_vars",
"tests/unit/test_inventory.py::test_allow_format_strings_as_values",
"tests/unit/test_inventory.py::test_allow_mapping_of_format_strings_as_values",
"tests/unit/test_inventory.py::test_allow_sequence_of_format_strings_as_values",
"tests/unit/test_inventory.py::test_strings_formatting_do_not_conflict_with_numbers",
"tests/unit/test_inventory.py::test_require_arguments_for_format_strings",
"tests/unit/test_inventory.py::test_inheritance_with_format",
"tests/unit/test_inventory.py::test_deep_multiple_inheritance_propagation",
"tests/unit/test_inventory.py::test_skip_non_asset_locals",
"tests/unit/test_inventory.py::test_multiple_children"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-10 15:39:16+00:00
|
mit
| 3,571 |
|
lig__pyventory-18
|
diff --git a/pyventory/asset.py b/pyventory/asset.py
index 1d58aa6..2b9a962 100644
--- a/pyventory/asset.py
+++ b/pyventory/asset.py
@@ -7,71 +7,119 @@ from pyventory import errors
__all__ = ['Asset']
-class Asset:
+class SKIP_ATTR:
+ pass
+
+
+class AssetAttr:
+ _value = None
+ _name = None
+
+ def __init__(self, value):
+ self._value = value
+
+ def __get__(self, instance, owner):
+ if instance:
+ return self._value
+
+ if not isinstance(self._value, (str, Mapping, Sequence)):
+ return self._value
+
+ def get_attr(value):
+ return owner._get_attr(owner, self._name, strict=True)
+
+ value_type = type(self._value)
+ return type(
+ value_type.__name__,
+ (value_type,),
+ {'__call__': get_attr}
+ )(self._value)
+
+ def __set_name__(self, owner, name):
+ self._name = name
+
+
+class AssetMeta(type):
+
+ def __new__(cls, name, bases, namespace, **kwds):
+ new_namespace = {
+ '_name': f'{namespace["__module__"]}.{name}',
+ }
+
+ for key, value in namespace.items():
+ if not key.startswith('_'):
+ value = AssetAttr(value)
+ new_namespace[key] = value
+
+ return super().__new__(cls, name, bases, new_namespace, **kwds)
+
+
+class Asset(metaclass=AssetMeta):
+
_string_format_regex = re.compile(r'{([\w_]+)}')
- def __init__(self, **kwargs):
- for name, value in kwargs.items():
- setattr(self, name, value)
-
- def _vars(self):
- return self.__build_vars(self, strict_format=True)
-
- @classmethod
- def _cls_vars(cls):
- return cls.__build_vars(cls)
-
- @classmethod
- def _name(cls):
- return '{module}.{name}'.format(
- module=cls.__module__, name=cls.__name__)
-
- @classmethod
- def __build_vars(cls, obj, strict_format=False):
- _vars = {
- attr_name: getattr(obj, attr_name)
- for attr_name in dir(obj)
- if not attr_name.startswith('_')}
-
- for name, value in _vars.copy().items():
- try:
- _vars[name] = cls.__format_value(value, _vars, name)
- except NotImplementedError:
- if strict_format:
- raise errors.PropertyIsNotImplementedError(
- f'Var "{name}" is not implemented in "{obj._name()}"'
- ' asset instance')
- else:
- del _vars[name]
- except KeyError as e:
- if strict_format:
- raise errors.ValueSubstitutionError(
- f'Attribute "{e.args[0]}" must be available for'
- ' "{obj._name()}" asset instance')
- else:
- del _vars[name]
- except errors.ValueSubstitutionInfiniteLoopError:
- raise errors.ValueSubstitutionInfiniteLoopError(
- f'Attribute "{name}" has an infinite string substitution'
- f' loop for "{obj._name()}" asset instance')
-
- return _vars
-
- @classmethod
- def __format_value(cls, value, context, start_key):
+ def __new__(cls, **kwargs):
+ self = super().__new__(cls)
+ self.__dict__.update(kwargs)
+ self.__dict__.update(self._vars(self, strict=True))
+ return self
+
+ @staticmethod
+ def _attrs(obj):
+ return [name for name in dir(obj) if not name.startswith('_')]
+
+ @staticmethod
+ def _context(obj):
+ return {name: getattr(obj, name) for name in obj._attrs(obj)}
+
+ @staticmethod
+ def _vars(obj, strict=False):
+ return {
+ name: value
+ for name, value in (
+ (name, obj._get_attr(obj, name, strict=strict))
+ for name in obj._attrs(obj))
+ if value is not SKIP_ATTR}
+
+ @staticmethod
+ def _get_attr(obj, name, strict=False):
+ try:
+ context = obj._context(obj).copy()
+ return obj._format_value(obj, context, context[name], name)
+ except NotImplementedError:
+ if strict:
+ raise errors.PropertyIsNotImplementedError(
+ f'Var "{name}" is not implemented in "{obj._name}" asset')
+ else:
+ return SKIP_ATTR
+ except KeyError as e:
+ if strict:
+ raise errors.ValueSubstitutionError(
+ f'Attribute "{e.args[0]}" must be available for'
+ f' "{obj._name}" asset instance')
+ else:
+ return SKIP_ATTR
+ except errors.ValueSubstitutionInfiniteLoopError:
+ raise errors.ValueSubstitutionInfiniteLoopError(
+ f'Attribute "{name}" has an infinite string substitution'
+ f' loop in "{obj._name}" asset instance')
+
+ @staticmethod
+ def _format_value(obj, context, value, start_key):
if value is NotImplemented:
raise NotImplementedError
if isinstance(value, str):
- for key in cls._string_format_regex.findall(value):
+ for key in obj._string_format_regex.findall(value):
if key == start_key:
raise errors.ValueSubstitutionInfiniteLoopError
- context[key] = cls.__format_value(
- context[key], context, start_key)
+ context[key] = obj._format_value(
+ obj, context, context[key], start_key)
return value.format(**context)
if isinstance(value, Mapping):
return {
- k: cls.__format_value(v, context, start_key)
+ k: obj._format_value(obj, context, v, start_key)
for k, v in value.items()}
if isinstance(value, Sequence):
- return [cls.__format_value(v, context, start_key) for v in value]
+ return [
+ obj._format_value(obj, context, v, start_key) for v in value]
return value
diff --git a/pyventory/export.py b/pyventory/export.py
index 1ea2c70..dddf750 100644
--- a/pyventory/export.py
+++ b/pyventory/export.py
@@ -11,48 +11,54 @@ from pyventory.inventory import Inventory
__all__ = ['pyventory_data', 'ansible_inventory', 'terraform_vars']
-def pyventory_data(hosts):
+def pyventory_data(instances):
"""Provides raw inventory data as Python `dict` containing Asset data in
- `assets` key and hosts data in `hosts` key.
+ `assets` key and instances data in `instances` key.
"""
- inventory = Inventory(hosts)
+ inventory = Inventory(instances)
assets = {
name: attr.asdict(asset)
for name, asset in inventory.assets.items()}
for asset in assets.values():
- for attr_name in ('hosts', 'vars', 'children',):
+ for attr_name in ('instances', 'vars', 'children',):
if not asset[attr_name]:
del asset[attr_name]
- hosts = inventory.hosts.copy()
+ instances = inventory.instances.copy()
- return {'assets': assets, 'hosts': hosts}
+ return {'assets': assets, 'instances': instances}
-def ansible_inventory(hosts, out=sys.stdout, indent=None):
+def ansible_inventory(instances, out=sys.stdout, indent=None):
"""Dumps inventory in the Ansible's Dynamic Inventory JSON format to `out`.
"""
- raw_data = pyventory_data(hosts)
+ raw_data = pyventory_data(instances)
- data = raw_data['assets']
- data['_meta'] = {'hostvars': raw_data['hosts']}
+ data = {}
+
+ for key, value in raw_data['assets'].items():
+ if 'instances' in value:
+ value['hosts'] = value.pop('instances')
+ data[key] = value
+
+ data['_meta'] = {'hostvars': raw_data['instances']}
json.dump(data, out, indent=indent, default=list)
-def terraform_vars(hosts, filename_base='pyventory', indent=None):
+def terraform_vars(instances, filename_base='pyventory', indent=None):
"""Dumps inventory in the Terraform's JSON format to `<filename_base>.tf`
setting their values as defaults.
"""
tf_config_path = pathlib.Path(filename_base).with_suffix('.tf')
- raw_data = pyventory_data(hosts)
+ raw_data = pyventory_data(instances)
tf_config = {}
- for asset_name, asset_data in raw_data['hosts'].items():
+ for asset_name, asset_data in raw_data['instances'].items():
for name, value in asset_data.items():
diff --git a/pyventory/inventory.py b/pyventory/inventory.py
index b789166..d8a43c5 100644
--- a/pyventory/inventory.py
+++ b/pyventory/inventory.py
@@ -1,5 +1,5 @@
-from ordered_set import OrderedSet
import attr
+from ordered_set import OrderedSet
from pyventory.asset import Asset
@@ -11,28 +11,28 @@ __all__ = []
class AssetData:
vars = attr.ib(default=attr.Factory(dict))
children = attr.ib(default=attr.Factory(OrderedSet))
- hosts = attr.ib(default=attr.Factory(OrderedSet))
+ instances = attr.ib(default=attr.Factory(OrderedSet))
class Inventory:
- def __init__(self, hosts):
+ def __init__(self, instances):
self.assets = {}
- self.hosts = {}
+ self.instances = {}
- for name, host in sorted(hosts.items()):
- self.add_host(name, host)
+ for name, instance in sorted(instances.items()):
+ self.add_instance(name, instance)
- def add_host(self, name, host):
- if not isinstance(host, Asset):
+ def add_instance(self, name, instance):
+ if not isinstance(instance, Asset):
return
- self.hosts[name] = host._vars()
- self.add_asset(host.__class__)
- self.assets[host._name()].hosts.add(name)
+ self.instances[name] = instance._vars(instance, strict=True)
+ self.add_asset(instance.__class__)
+ self.assets[instance._name].instances.add(name)
def add_asset(self, asset):
- if asset._name() in self.assets:
+ if asset._name in self.assets:
return
for parent_asset in asset.__bases__:
@@ -44,6 +44,6 @@ class Inventory:
continue
self.add_asset(parent_asset)
- self.assets[parent_asset._name()].children.add(asset._name())
+ self.assets[parent_asset._name].children.add(asset._name)
- self.assets[asset._name()] = AssetData(vars=asset._cls_vars())
+ self.assets[asset._name] = AssetData(vars=asset._vars(asset))
|
lig/pyventory
|
67d6a2a607b6731ecc83ee65ed90f78dbc2c73aa
|
diff --git a/tests/test_asset.py b/tests/test_asset.py
new file mode 100644
index 0000000..2489c87
--- /dev/null
+++ b/tests/test_asset.py
@@ -0,0 +1,31 @@
+import pytest
+
+from pyventory import Asset, errors
+
+
+def test_calculate_asset_class_atribute_value_on_call():
+
+ class TestAsset(Asset):
+ foo = '{bar}'
+ bar = 'bar'
+
+ assert TestAsset.foo() == 'bar'
+
+
+def test_use_raw_asset_class_atribute_value():
+
+ class TestAsset(Asset):
+ foo = '{bar}-{baz}'
+ bar = 'bar'
+
+ assert TestAsset.foo == '{bar}-{baz}'
+
+
+def test_asset_class_atribute_value_calculation_is_strict():
+
+ class TestAsset(Asset):
+ foo = '{bar}-{baz}'
+ bar = 'bar'
+
+ with pytest.raises(errors.ValueSubstitutionError):
+ TestAsset.foo()
diff --git a/tests/test_inventory.py b/tests/test_inventory.py
index ac7ebf4..541189f 100644
--- a/tests/test_inventory.py
+++ b/tests/test_inventory.py
@@ -26,12 +26,12 @@ def test_allow_mixins_for_inventory_items():
],
},
"test_inventory.TestAsset": {
- "hosts": [
+ "instances": [
"test_asset",
],
},
},
- "hosts": {
+ "instances": {
"test_asset": {}
},
}
@@ -49,12 +49,12 @@ def test_allow_host_specific_vars():
assert result == {
'assets': {
"test_inventory.TestAsset": {
- "hosts": [
+ "instances": [
"test_asset",
],
},
},
- "hosts": {
+ "instances": {
"test_asset": {
"foo": "bar"
},
@@ -74,12 +74,12 @@ def test_allow_format_strings_as_values():
assert result == {
'assets': {
"test_inventory.TestAsset": {
- "hosts": [
+ "instances": [
"test_asset"
]
},
},
- "hosts": {
+ "instances": {
"test_asset": {
"bar": "ham",
"foo": "test_ham"
@@ -102,12 +102,12 @@ def test_allow_mapping_of_format_strings_as_values():
assert result == {
'assets': {
"test_inventory.TestAsset": {
- "hosts": [
+ "instances": [
"test_asset"
]
},
},
- "hosts": {
+ "instances": {
"test_asset": {
"bar": "ham",
"foo": {
@@ -130,12 +130,12 @@ def test_allow_sequence_of_format_strings_as_values():
assert result == {
'assets': {
"test_inventory.TestAsset": {
- "hosts": [
+ "instances": [
"test_asset"
]
},
},
- "hosts": {
+ "instances": {
"test_asset": {
"bar": "ham",
"foo": [
@@ -162,12 +162,12 @@ def test_strings_formatting_do_not_conflict_with_numbers():
"vars": {
"foo": 42
},
- "hosts": [
+ "instances": [
"test_asset"
]
},
},
- "hosts": {
+ "instances": {
"test_asset": {
"bar": "ham",
"foo": 42
@@ -181,10 +181,8 @@ def test_require_arguments_for_format_strings():
class TestAsset(Asset):
foo = '{bar}'
- test_asset = TestAsset()
-
with pytest.raises(errors.ValueSubstitutionError):
- pyventory_data(locals())
+ test_asset = TestAsset()
def test_inheritance_with_format():
@@ -207,12 +205,12 @@ def test_inheritance_with_format():
]
},
"test_inventory.ChildAsset": {
- "hosts": [
+ "instances": [
"child_asset"
]
},
},
- "hosts": {
+ "instances": {
"child_asset": {
"bar": "ham",
"foo": "ham"
@@ -274,12 +272,12 @@ def test_deep_multiple_inheritance_propagation():
"baz": "Level3Asset4 baz value",
"foo": "Level1Asset1 foo value"
},
- "hosts": [
+ "instances": [
"level3_asset4"
]
},
},
- "hosts": {
+ "instances": {
"level3_asset4": {
"bar": "Level1Asset2 bar value",
"baz": "Level3Asset4 baz value",
@@ -305,12 +303,12 @@ def test_skip_non_asset_locals():
assert result == {
'assets': {
"test_inventory.TestAsset": {
- "hosts": [
+ "instances": [
"test_asset"
]
},
},
- "hosts": {
+ "instances": {
"test_asset": {}
}
}
@@ -341,17 +339,17 @@ def test_multiple_children():
]
},
"test_inventory.TestAsset1": {
- "hosts": [
+ "instances": [
"test_asset1"
]
},
"test_inventory.TestAsset2": {
- "hosts": [
+ "instances": [
"test_asset2"
]
},
},
- "hosts": {
+ "instances": {
"test_asset1": {},
"test_asset2": {}
}
@@ -381,12 +379,12 @@ def test_allow_notimplemented_value():
"vars": {
"foo": "bar"
},
- "hosts": [
+ "instances": [
"test_asset"
]
},
},
- "hosts": {
+ "instances": {
"test_asset": {
"foo": "bar"
}
@@ -402,10 +400,8 @@ def test_raise_notimplemented_value_in_final_asset():
class TestAsset(BaseTestAsset):
pass
- test_asset = TestAsset()
-
with pytest.raises(errors.PropertyIsNotImplementedError):
- pyventory_data(locals())
+ test_asset = TestAsset()
def test_string_format_does_not_miss_values():
@@ -443,7 +439,7 @@ def test_string_format_does_not_miss_values():
"baz": "baz-value",
"foo": "baz-value"
},
- "hosts": [
+ "instances": [
"test_asset_1"
]
},
@@ -453,12 +449,12 @@ def test_string_format_does_not_miss_values():
"baz": "baz-value",
"foo": "baz-value"
},
- "hosts": [
+ "instances": [
"test_asset_2"
]
},
},
- "hosts": {
+ "instances": {
"test_asset_1": {
"bar": "baz-value",
"baz": "baz-value",
@@ -479,7 +475,5 @@ def test_string_format_detects_infinite_loop():
bar = '{foo}'
foo = '{bar}'
- test_asset = TestAsset()
-
with pytest.raises(errors.ValueSubstitutionInfiniteLoopError):
- pyventory_data(locals())
+ test_asset = TestAsset()
|
Allow using the calculated value of an asset class attribute
Given the following code
```python
class MyAsset(Asset):
foo = 'value is {bar}'
bar = 'my value'
```
Accessing `MyAsset.foo` will return `'value is {bar}'`.
It would be useful to be able to get `'value is my value'` instead.
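
The test patch in this PR pins down the resulting API: plain attribute access keeps returning the raw template, while *calling* the attribute computes the substituted value (and raises if a referenced attribute is missing):

```python
class MyAsset(Asset):
    foo = 'value is {bar}'
    bar = 'my value'

MyAsset.foo    # -> 'value is {bar}'      (raw template, unchanged)
MyAsset.foo()  # -> 'value is my value'   (calculated on demand)
```

The patch achieves this with the `AssetAttr` descriptor: on class access it wraps the raw value in a dynamically created subclass of the value's own type (`str`, `Mapping`, or `Sequence`) whose `__call__` performs the strict substitution.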
|
0.0
|
67d6a2a607b6731ecc83ee65ed90f78dbc2c73aa
|
[
"tests/test_asset.py::test_calculate_asset_class_atribute_value_on_call",
"tests/test_asset.py::test_asset_class_atribute_value_calculation_is_strict",
"tests/test_inventory.py::test_allow_mixins_for_inventory_items",
"tests/test_inventory.py::test_allow_host_specific_vars",
"tests/test_inventory.py::test_allow_format_strings_as_values",
"tests/test_inventory.py::test_allow_mapping_of_format_strings_as_values",
"tests/test_inventory.py::test_allow_sequence_of_format_strings_as_values",
"tests/test_inventory.py::test_strings_formatting_do_not_conflict_with_numbers",
"tests/test_inventory.py::test_require_arguments_for_format_strings",
"tests/test_inventory.py::test_inheritance_with_format",
"tests/test_inventory.py::test_deep_multiple_inheritance_propagation",
"tests/test_inventory.py::test_skip_non_asset_locals",
"tests/test_inventory.py::test_multiple_children",
"tests/test_inventory.py::test_allow_notimplemented_value",
"tests/test_inventory.py::test_raise_notimplemented_value_in_final_asset",
"tests/test_inventory.py::test_string_format_does_not_miss_values",
"tests/test_inventory.py::test_string_format_detects_infinite_loop"
] |
[
"tests/test_asset.py::test_use_raw_asset_class_atribute_value"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-11 23:28:00+00:00
|
mit
| 3,572 |
|
lightkurve__lightkurve-1016
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 6bd7de79..27614f56 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,10 @@
+2.0.9 (unreleased)
+==================
+
+- Fixed a bug in ``LightCurve.append()`` which caused the method to crash
+ if the light curves contained incompatible column types. [#1015]
+
+
2.0.8 (2021-03-30)
==================
diff --git a/pyproject.toml b/pyproject.toml
index 59134843..726b0172 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "lightkurve"
-version = "2.0.8"
+version = "2.0.9dev"
description = "A friendly package for Kepler & TESS time series analysis in Python."
license = "MIT"
authors = ["Geert Barentsen <[email protected]>"]
diff --git a/setup.py b/setup.py
index 733c4823..d38f864f 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@ if os.path.exists(readme_path):
setup(
long_description=readme,
name='lightkurve',
- version='2.0.8',
+ version='2.0.9dev',
description='A friendly package for Kepler & TESS time series analysis in Python.',
python_requires='>=3.6.1',
project_urls={"homepage": "https://docs.lightkurve.org", "repository": "https://github.com/lightkurve/lightkurve"},
diff --git a/src/lightkurve/lightcurve.py b/src/lightkurve/lightcurve.py
index c0118ac3..99fd3794 100644
--- a/src/lightkurve/lightcurve.py
+++ b/src/lightkurve/lightcurve.py
@@ -667,8 +667,10 @@ class LightCurve(QTimeSeries):
)
if not hasattr(others, "__iter__"):
others = (others,)
- # Need `join_type='inner'` until AstroPy supports masked Quantities
- return vstack((self, *others), join_type="inner", metadata_conflicts="silent")
+
+ # Re-use LightCurveCollection.stitch() to avoid code duplication
+ from .collections import LightCurveCollection # avoid circular import
+ return LightCurveCollection((self, *others)).stitch(corrector_func=None)
def flatten(
self,
diff --git a/src/lightkurve/version.py b/src/lightkurve/version.py
index e9964441..73fa8432 100644
--- a/src/lightkurve/version.py
+++ b/src/lightkurve/version.py
@@ -1,3 +1,3 @@
# It is important to store the version number in a separate file
# so that we can read it from setup.py without importing the package
-__version__ = "2.0.8"
+__version__ = "2.0.9dev"
|
lightkurve/lightkurve
|
ddefb61404904c92d88461fc03f9d84f7f3cae3e
|
diff --git a/tests/test_collections.py b/tests/test_collections.py
index 3654745d..997edaf3 100644
--- a/tests/test_collections.py
+++ b/tests/test_collections.py
@@ -357,8 +357,10 @@ def test_accessor_k2_campaign():
def test_unmergeable_columns():
- """Regression test for #954."""
+ """Regression test for #954 and #1015."""
lc1 = LightCurve(data={'time': [1,2,3], 'x': [1,2,3]})
lc2 = LightCurve(data={'time': [1,2,3], 'x': [1,2,3]*u.electron/u.second})
with pytest.warns(LightkurveWarning, match="column types are incompatible"):
LightCurveCollection([lc1, lc2]).stitch()
+ with pytest.warns(LightkurveWarning, match="column types are incompatible"):
+ lc1.append(lc2)
|
QLP problems
<!-- Fill in the information below before opening an issue. -->
#### Problem description
A `ValueError` is raised when a QLP light curve is appended to a Kepler light curve:
#### Example
<!-- Provide a link or minimal code snippet that demonstrates the issue. -->
```python
import lightkurve as lk
klcc = lk.search_lightcurve('Kepler-18', cadence='long')[:18].download_all()
klc = klcc.stitch()
qlcc = lk.search_lightcurve('Kepler-18', author='QLP').download_all()
qlc = qlcc[0].append(qlcc[1])
qlc.time.format = 'bkjd'
lc1 = klc.append(qlc)
lc1.scatter()
```
The first append works as intended, but the second append raises the `ValueError`.
#### Expected behavior
<!-- Describe the behavior you expected and how it differs from the behavior observed in the example. -->
I expected QLP to work the same as TESS-SPOC, which does work:
```python
import lightkurve as lk
klcc = lk.search_lightcurve('Kepler-18', cadence='long')[:18].download_all()
klc = klcc.stitch()
tlcc = lk.search_lightcurve('Kepler-18', author='TESS-SPOC').download_all()
tlc = tlcc.stitch()
tlc.time.format = 'bkjd'
lc2 = klc.append(tlc)
lc2.scatter()
```
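
The fix routes `LightCurve.append()` through `LightCurveCollection.stitch()`, which drops columns with incompatible types and warns instead of raising. The regression test reduces the failure to a minimal case:

```python
import astropy.units as u
from lightkurve import LightCurve

lc1 = LightCurve(data={'time': [1, 2, 3], 'x': [1, 2, 3]})
lc2 = LightCurve(data={'time': [1, 2, 3],
                       'x': [1, 2, 3] * u.electron / u.second})
lc1.append(lc2)  # now warns "column types are incompatible" and drops 'x'
```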
#### Environment
- platform (e.g. Linux, OSX, Windows): Jupyter Notebook 6.1.4 in Firefox 87.0 in Linux (Pop!_OS 20.04 Ubuntu based)
- lightkurve version (e.g. 1.0b6): 2.0.7
- installation method (e.g. pip, conda, source): copy & pasted in notebook cell: ! python -m pip install lightkurve --upgrade
|
0.0
|
ddefb61404904c92d88461fc03f9d84f7f3cae3e
|
[
"tests/test_collections.py::test_unmergeable_columns"
] |
[
"tests/test_collections.py::test_collection_init",
"tests/test_collections.py::test_collection_append",
"tests/test_collections.py::test_collection_stitch",
"tests/test_collections.py::test_collection_getitem",
"tests/test_collections.py::test_collection_getitem_by_boolean_array",
"tests/test_collections.py::test_collection_getitem_by_other_array",
"tests/test_collections.py::test_collection_setitem",
"tests/test_collections.py::test_tpfcollection",
"tests/test_collections.py::test_tpfcollection_plot",
"tests/test_collections.py::test_stitch_repr",
"tests/test_collections.py::test_accessor_tess_sector",
"tests/test_collections.py::test_accessor_kepler_quarter",
"tests/test_collections.py::test_accessor_k2_campaign"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-31 18:28:23+00:00
|
mit
| 3,573 |
|
lightkurve__lightkurve-1084
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 878fac64..0f889711 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -28,6 +28,9 @@
- Added the ``LightCurve.select_flux()`` method to make it easier to use a different
column to populate the ``flux`` and ``flux_err`` columns. [#1076]
+- Modified the MIT Quicklook Pipeline (QLP) light curve file reader to use the "SAP_FLUX"
+ column as the default flux column. [#1083]
+
2.0.9 (2021-03-31)
==================
diff --git a/src/lightkurve/io/qlp.py b/src/lightkurve/io/qlp.py
index bca45640..eb752052 100644
--- a/src/lightkurve/io/qlp.py
+++ b/src/lightkurve/io/qlp.py
@@ -9,15 +9,22 @@ from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
-def read_qlp_lightcurve(filename, flux_column="kspsap_flux", quality_bitmask="default"):
- """Returns a `TessLightCurve`.
+def read_qlp_lightcurve(filename, flux_column="sap_flux", flux_err_column="kspsap_flux_err", quality_bitmask="default"):
+ """Returns a `TessLightCurve` object given a light curve file from the MIT Quicklook Pipeline (QLP).
+
+ By default, QLP's `sap_flux` column is used to populate the `flux` values,
+ and 'kspsap_flux_err' is used to populate `flux_err`. For a discussion
+ related to this choice, see https://github.com/lightkurve/lightkurve/issues/1083
Parameters
----------
filename : str
Local path or remote url of a QLP light curve FITS file.
- flux_column : 'pdcsap_flux' or 'sap_flux'
+ flux_column : 'sap_flux', 'kspsap_flux', 'kspsap_flux_sml', 'kspsap_flux_lag', or 'sap_bkg'
Which column in the FITS file contains the preferred flux data?
+ By default the "Simple Aperture Photometry" flux (sap_flux) is used.
+ flux_err_column: 'kspsap_flux_err', or 'sap_bkg_err'
+ Which column in the FITS file contains the preferred flux_err data?
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
@@ -33,7 +40,7 @@ def read_qlp_lightcurve(filename, flux_column="kspsap_flux", quality_bitmask="de
See the :class:`TessQualityFlags` class for details on the bitmasks.
"""
- lc = read_generic_lightcurve(filename, flux_column=flux_column, time_format="btjd")
+ lc = read_generic_lightcurve(filename, flux_column=flux_column, flux_err_column=flux_err_column, time_format="btjd")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
|
lightkurve/lightkurve
|
e0061f5c03a0bd764c9515db46b63c5093aa3ad3
|
diff --git a/tests/io/test_qlp.py b/tests/io/test_qlp.py
new file mode 100644
index 00000000..99420432
--- /dev/null
+++ b/tests/io/test_qlp.py
@@ -0,0 +1,34 @@
+import pytest
+
+from astropy.io import fits
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from lightkurve import search_lightcurve
+from lightkurve.io.qlp import read_qlp_lightcurve
+from lightkurve.io.detect import detect_filetype
+
+
[email protected]_data
+def test_qlp():
+ """Can we read in QLP light curves?"""
+ url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits"
+ with fits.open(url, mode="readonly") as hdulist:
+ # Can we auto-detect a QLP file?
+ assert detect_filetype(hdulist) == "QLP"
+ # Are the correct fluxes read in?
+ lc = read_qlp_lightcurve(url, quality_bitmask=0)
+ assert lc.meta["FLUX_ORIGIN"] == "sap_flux"
+ assert_array_equal(lc.flux.value, hdulist[1].data["SAP_FLUX"])
+
+
[email protected]_data
+def test_search_qlp():
+ """Can we search and download QLP light curves from MAST?"""
+ search = search_lightcurve("TIC 277554109", author="QLP", sector=11)
+ assert len(search) == 1
+ assert search.table["author"][0] == "QLP"
+ lc = search.download()
+ assert type(lc).__name__ == "TessLightCurve"
+ assert lc.sector == 11
+ assert lc.author == "QLP"
|
Change default column for QLP light curves
#### Problem description
When LightKurve reads light curves from the QLP HLSP, it pulls from a post-processed column (called KSPSAP_FLUX) that is already high-pass-filtered. This makes it impossible to recover the original light curve and see features like stellar variability, etc. Here's a comparison of the same star from three different pipelines:

You can see that the large modulation present in the other two light curves has been removed from the QLP one.
As far as I can tell, there is no way to change this behavior when calling search_lightcurve.
#### Example
```python
search_result = lk.search_lightcurve('TIC 277554109', author='qlp')
lc = search_result.download()
lc.plot()

search_result2 = lk.search_lightcurve('TIC 277554109', author='pathos')
lc2 = search_result2.download()
lc2.plot()

search_result3 = lk.search_lightcurve('TIC 277554109', author='cdips')
lc3 = search_result3.download()
lc3.plot()
```
I think this can be fixed by swapping out "kspsap_flux" for "sap_flux" in this line:
https://github.com/lightkurve/lightkurve/blob/f452a4e4752d9b277025a2ec711ec06b88230d97/src/lightkurve/io/qlp.py#L12
Alternatively, it might be useful to allow the user to specify the flux column when downloading light curves in this way. But I would argue that by default, the QLP light curves should return the sap_flux column to match the behavior of other HLSPs.
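
The patch above does both: `sap_flux` becomes the default, and the detrended flux stays one keyword away (the filename below is a placeholder):

```python
from lightkurve.io.qlp import read_qlp_lightcurve

lc_sap = read_qlp_lightcurve("qlp_llc.fits")                             # SAP flux (new default)
lc_det = read_qlp_lightcurve("qlp_llc.fits", flux_column="kspsap_flux")  # detrended flux
```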
#### Expected behavior
I would expect QLP to return the SAP_FLUX column rather than the KSPSAP_FLUX column
#### Environment
- platform: OSX
- lightkurve version: 2.0.9
- installation method: pip
|
0.0
|
e0061f5c03a0bd764c9515db46b63c5093aa3ad3
|
[
"tests/io/test_qlp.py::test_qlp"
] |
[
"tests/io/test_qlp.py::test_search_qlp"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-04 20:56:10+00:00
|
mit
| 3,574 |
|
lightkurve__lightkurve-1331
|
diff --git a/src/lightkurve/correctors/sffcorrector.py b/src/lightkurve/correctors/sffcorrector.py
index b92be92f..89c6b538 100644
--- a/src/lightkurve/correctors/sffcorrector.py
+++ b/src/lightkurve/correctors/sffcorrector.py
@@ -502,7 +502,12 @@ def _estimate_arclength(centroid_col, centroid_row):
"""
col = centroid_col - np.nanmin(centroid_col)
row = centroid_row - np.nanmin(centroid_row)
+ if np.all((col == 0) & (row == 0)):
+ raise RuntimeError("Arclength cannot be computed because there is no "
+ "centroid motion. Make sure that the aperture of "
+                           "the TPF contains at least two pixels.")
# Force c to be correlated not anticorrelated
if np.polyfit(col.data, row.data, 1)[0] < 0:
col = np.nanmax(col) - col
- return (col ** 2 + row ** 2) ** 0.5
+ arclength = (col ** 2 + row ** 2) ** 0.5
+ return arclength
diff --git a/src/lightkurve/io/generic.py b/src/lightkurve/io/generic.py
index f571d0e2..d051df68 100644
--- a/src/lightkurve/io/generic.py
+++ b/src/lightkurve/io/generic.py
@@ -67,7 +67,13 @@ def read_generic_lightcurve(
elif unitstr == "ppm" and repr(tab[colname].unit).startswith("Unrecognized"):
# Workaround for issue #956
tab[colname].unit = ppm
-
+ elif unitstr == "ADU":
+ tab[colname].unit = "adu"
+ elif unitstr.lower() == "unitless":
+ tab[colname].unit = ""
+ elif unitstr.lower() == "degcelcius":
+ # CDIPS has non-astropy units
+ tab[colname].unit = "deg_C"
# Rename columns to lowercase
tab.rename_column(colname, colname.lower())
|
lightkurve/lightkurve
|
394246f61afddc21d584a8984e0f71c9a1673aa0
|
diff --git a/tests/io/test_cdips.py b/tests/io/test_cdips.py
index c67bff6d..b9f0ea7e 100644
--- a/tests/io/test_cdips.py
+++ b/tests/io/test_cdips.py
@@ -4,7 +4,7 @@ from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
-from lightkurve import search_lightcurve
+from lightkurve import search_lightcurve, LightCurveCollection
from lightkurve.io.cdips import read_cdips_lightcurve
from lightkurve.io.detect import detect_filetype
@@ -59,3 +59,7 @@ def test_search_cdips():
lc = search.download()
assert type(lc).__name__ == "TessLightCurve"
assert hasattr(lc, "sector")
+ assert str(lc['bge'].unit) == 'adu'
+ slc = LightCurveCollection([lc, lc]).stitch()
+ assert len(slc) == 2*len(lc)
+
|
`SFFCorrector` appears to crash when the aperture mask contains only one pixel
Example:
```python
In [1]: import lightkurve as lk
In [2]: tpf = lk.search_targetpixelfile("EPIC 211771334", campaign=18).download()
In [3]: tpf.to_lightcurve(aperture_mask="threshold").to_corrector("sff").correct()
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:610: RuntimeWarning: divide by zero encountered in true_divide
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
/home/gb/bin/anaconda/lib/python3.7/site-packages/scipy/interpolate/interpolate.py:613: RuntimeWarning: invalid value encountered in multiply
y_new = slope*(x_new - x_lo)[:, None] + y_lo
WARNING: Input data contains invalid values (NaNs or infs), which were automatically clipped. [astropy.stats.sigma_clipping]
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-30cf4b808ec9> in <module>
----> 1 tpf.to_lightcurve(aperture_mask="threshold").to_corrector("sff").correct()
~/dev/lightkurve/lightkurve/correctors/sffcorrector.py in correct(self, centroid_col, centroid_row, polyorder, niters, bins, windows, sigma_1, sigma_2, restore_trend)
117 tempflux = np.asarray([item for sublist in flux for item in sublist])
118 flux_outliers = sigma_clip(data=tempflux, sigma=sigma_1).mask
--> 119 self.bspline = self.fit_bspline(new_lc.time[~flux_outliers], tempflux[~flux_outliers])
120
121 # The SFF algorithm is going to be run on each window independently
~/dev/lightkurve/lightkurve/correctors/sffcorrector.py in fit_bspline(self, time, flux, knotspacing)
230 """Returns a `scipy.interpolate.BSpline` object to interpolate flux as a function of time."""
231 # By default, bspline knots are placed 1.5 days apart
--> 232 knots = np.arange(time[0], time[-1], knotspacing)
233
234 # If the light curve has breaks larger than the spacing between knots,
IndexError: index 0 is out of bounds for axis 0 with size 0
```
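
A single-pixel aperture yields a motionless centroid, which is exactly the degenerate case the patch above now rejects with a clear `RuntimeError`. A self-contained sketch of the guarded arclength computation:

```python
import numpy as np

# A one-pixel aperture produces constant centroids -> no motion to fit.
centroid_col = np.array([5.0, 5.0, 5.0])
centroid_row = np.array([7.0, 7.0, 7.0])

col = centroid_col - np.nanmin(centroid_col)
row = centroid_row - np.nanmin(centroid_row)
if np.all((col == 0) & (row == 0)):
    raise RuntimeError("Arclength cannot be computed because there is no "
                       "centroid motion.")
arclength = (col ** 2 + row ** 2) ** 0.5
```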
|
0.0
|
394246f61afddc21d584a8984e0f71c9a1673aa0
|
[
"tests/io/test_cdips.py::test_search_cdips"
] |
[
"tests/io/test_cdips.py::test_detect_cdips",
"tests/io/test_cdips.py::test_read_cdips"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-02 15:13:13+00:00
|
mit
| 3,575 |
|
lightkurve__lightkurve-1392
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 3ac43947..a85ed588 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -2,6 +2,8 @@
=====================
- Fixed memory leak in reading Lightcurve / TargetPixel FITS files in v2.4.2 [#1390]
+- Added support for changes in QLP High Level Science Product
+ in TESS sectors 56 and later. [#1392]
2.4.2 (2023-11-03)
diff --git a/src/lightkurve/io/qlp.py b/src/lightkurve/io/qlp.py
index 8524879a..9115c90e 100644
--- a/src/lightkurve/io/qlp.py
+++ b/src/lightkurve/io/qlp.py
@@ -9,23 +9,26 @@ from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
-def read_qlp_lightcurve(filename, flux_column="sap_flux", flux_err_column="kspsap_flux_err", quality_bitmask="default"):
+def read_qlp_lightcurve(filename, flux_column="sap_flux", flux_err_column=None, quality_bitmask="default"):
"""Returns a `~lightkurve.lightcurve.LightCurve` object given a light curve file from the MIT Quicklook Pipeline (QLP).
By default, QLP's `sap_flux` column is used to populate the `flux` values,
- and 'kspsap_flux_err' is used to populate `flux_err`. For a discussion
+ and `kspsap_flux_err` / `det_flux_err` is used to populate `flux_err`. For a discussion
related to this choice, see https://github.com/lightkurve/lightkurve/issues/1083
+ For detrended flux, the columns are named with `kspsap_` prefix in sectors 1-55,
+ and `det_` prefix in sectors 56+. Column `sys_rm_flux` is available in sectors 56+.
+
More information: https://archive.stsci.edu/hlsp/qlp
Parameters
----------
filename : str
Local path or remote url of a QLP light curve FITS file.
- flux_column : 'sap_flux', 'kspsap_flux', 'kspsap_flux_sml', 'kspsap_flux_lag', or 'sap_bkg'
+ flux_column : 'sap_flux', 'kspsap_flux', 'kspsap_flux_sml', 'kspsap_flux_lag', 'det_flux', 'det_flux_sml', 'det_flux_lag', 'sys_rm_flux', or 'sap_bkg'
Which column in the FITS file contains the preferred flux data?
By default the "Simple Aperture Photometry" flux (sap_flux) is used.
- flux_err_column: 'kspsap_flux_err', or 'sap_bkg_err'
+ flux_err_column: 'kspsap_flux_err','det_flux_err', or 'sap_bkg_err'
Which column in the FITS file contains the preferred flux_err data?
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
@@ -39,9 +42,17 @@ def read_qlp_lightcurve(filename, flux_column="sap_flux", flux_err_column="kspsa
* "hardest": removes all data that has been flagged.
This mask is not recommended.
+ QLP-specific "Low precision points" (bit 13 in sectors 1-55, bit 31 in sectors 56+)
+ is included in "hard" and "hardest" bitmasks.
+
See the `~lightkurve.utils.TessQualityFlags` class for details on the bitmasks.
"""
lc = read_generic_lightcurve(filename, flux_column=flux_column, flux_err_column=flux_err_column, time_format="btjd")
+ if flux_err_column is None:
+ if lc.meta.get("SECTOR", 0) >= 56:
+ lc["flux_err"] = lc["det_flux_err"]
+ else:
+ lc["flux_err"] = lc["kspsap_flux_err"]
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
@@ -50,6 +61,16 @@ def read_qlp_lightcurve(filename, flux_column="sap_flux", flux_err_column="kspsa
quality_mask = TessQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
+ # QLP-specific quality_bitmask handling
+ if quality_bitmask in ["hardest", "hard"]:
+ if lc.meta.get("SECTOR", 0) >= 56:
+ qlp_low_precision_bitmask = 2 ** 30
+ else:
+ # https://archive.stsci.edu/hlsps/qlp/hlsp_qlp_tess_ffi_all_tess_v1_data-prod-desc.pdf
+ qlp_low_precision_bitmask = 2 ** 12
+ q_mask2 = TessQualityFlags.create_quality_mask(
+ quality_array=lc["quality"], bitmask=qlp_low_precision_bitmask)
+ quality_mask = quality_mask & q_mask2
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "QLP"
|
lightkurve/lightkurve
|
68fdf039371ca5cc669d9013f3a32e30c829d1a2
|
diff --git a/tests/io/test_qlp.py b/tests/io/test_qlp.py
index 99420432..eaba9e2e 100644
--- a/tests/io/test_qlp.py
+++ b/tests/io/test_qlp.py
@@ -10,9 +10,19 @@ from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
-def test_qlp():
[email protected](
+ "url, flux_err_colname_expected, qlp_low_precision_bitmask", [
+ ("https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits",
+ "KSPSAP_FLUX_ERR", # for sectors 1 - 55
+ 2**12, # bit 13 for sectors 1 -55
+ ),
+ ("https://mast.stsci.edu/api/v0.1/Download/file/?uri=mast:HLSP/qlp/s0056/0000/0000/1054/9159/hlsp_qlp_tess_ffi_s0056-0000000010549159_tess_v01_llc.fits",
+ "DET_FLUX_ERR", # for sectors 56+
+ 2**30, # bit 31 for sectors 56+
+ ),
+ ])
+def test_qlp(url, flux_err_colname_expected, qlp_low_precision_bitmask):
"""Can we read in QLP light curves?"""
- url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits"
with fits.open(url, mode="readonly") as hdulist:
# Can we auto-detect a QLP file?
assert detect_filetype(hdulist) == "QLP"
@@ -20,6 +30,23 @@ def test_qlp():
lc = read_qlp_lightcurve(url, quality_bitmask=0)
assert lc.meta["FLUX_ORIGIN"] == "sap_flux"
assert_array_equal(lc.flux.value, hdulist[1].data["SAP_FLUX"])
+ assert_array_equal(lc.flux_err.value, hdulist[1].data[flux_err_colname_expected])
+
+ # Test handling of QLP-specific low-precision bitmask
+ # - the cadences marked as such will be masked out by "hard" / "hardest"
+
+ # first assure the test FITS file has cadence marked by QLP bit only
+ # to easily isolate the effects of the quality_bitmask
+ assert (lc["quality"] == qlp_low_precision_bitmask).any()
+
+ lc = read_qlp_lightcurve(url, quality_bitmask="default")
+ assert (lc["quality"] & (qlp_low_precision_bitmask)).any()
+
+ lc = read_qlp_lightcurve(url, quality_bitmask="hard")
+ assert not (lc["quality"] & (qlp_low_precision_bitmask)).any()
+
+ lc = read_qlp_lightcurve(url, quality_bitmask="hardest")
+ assert not (lc["quality"] & (qlp_low_precision_bitmask)).any()
@pytest.mark.remote_data
|
Detrended flux columns in QLP lightcurves appear to have been renamed in recent sectors (56 and later)
<!-- Fill in the information below before opening an issue. -->
#### Problem description
<!-- Provide a clear and concise description of the issue. -->
The detrended columns in [QLP](https://archive.stsci.edu/hlsp/qlp) light curves used to be `'kspsap_flux', 'kspsap_flux_err', 'kspsap_flux_sml', 'kspsap_flux_lag'`.
Starting from sector 56 (year5), they appear to be renamed to `'det_flux', 'det_flux_err', 'det_flux_sml', 'det_flux_lag'`.
The change causes some headaches when accessing detrended lightcurves from a mix of sectors.
It is also unclear whether the change is a matter of renaming columns or whether it actually signifies something more significant.
At this point, it is unclear if there is anything `lightkurve` should do.
#### Example
<!-- Provide a link or minimal code snippet that demonstrates the issue. -->
One can find the list of columns from the following samples
```python
import lightkurve as lk
_sr = lk.search_lightcurve("TIC10549159", author="QLP", sector=56) # new names
_lcs56 = _sr.download()
print('Sec 56', _lcs56.colnames)
_sr = lk.search_lightcurve("TIC855867", author="QLP", sector=55) # old names
_lcs55 = _sr.download()
print('Sec 55', _lcs55.colnames)
```
Output:
```python
Sec 56 ['time', 'flux', 'flux_err', 'cadenceno', 'sap_flux', 'det_flux', 'det_flux_err', 'quality', 'orbitid', 'sap_x', 'sap_y', 'sap_bkg', 'sap_bkg_err', 'det_flux_sml', 'det_flux_lag', 'sys_rm_flux']
Sec 55 ['time', 'flux', 'flux_err', 'cadenceno', 'sap_flux', 'kspsap_flux', 'kspsap_flux_err', 'quality', 'orbitid', 'sap_x', 'sap_y', 'sap_bkg', 'sap_bkg_err', 'kspsap_flux_sml', 'kspsap_flux_lag']
```
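
Until the naming is handled upstream, a sector-aware helper (a sketch mirroring the `SECTOR >= 56` check used in the patch above) can smooth over the rename:

```python
def detrended_flux_colname(lc):
    # QLP renamed the kspsap_* columns to det_* starting with sector 56.
    return "det_flux" if lc.meta.get("SECTOR", 0) >= 56 else "kspsap_flux"

flux_new = _lcs56[detrended_flux_colname(_lcs56)]  # -> det_flux
flux_old = _lcs55[detrended_flux_colname(_lcs55)]  # -> kspsap_flux
```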
#### Expected behavior
<!-- Describe the behavior you expected and how it differs from the behavior observed in the example. -->
#### Environment
- lightkurve version (e.g. 1.0b6): v2.4.2
|
0.0
|
68fdf039371ca5cc669d9013f3a32e30c829d1a2
|
[
"tests/io/test_qlp.py::test_qlp[https://mast.stsci.edu/api/v0.1/Download/file/?uri=mast:HLSP/qlp/s0056/0000/0000/1054/9159/hlsp_qlp_tess_ffi_s0056-0000000010549159_tess_v01_llc.fits-DET_FLUX_ERR-1073741824]"
] |
[
"tests/io/test_qlp.py::test_qlp[https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits-KSPSAP_FLUX_ERR-4096]",
"tests/io/test_qlp.py::test_search_qlp"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-29 01:55:27+00:00
|
mit
| 3,576 |
|
lightkurve__lightkurve-996
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 06979104..a6d14ec7 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,6 +4,10 @@
- Modified ``LightCurve.bin()`` to partially restore the ``bins`` parameter which
was available in Lightkurve v1.x, to improve backwards compatibility. [#995]
+- Modified ``LightCurveCollection.stitch()`` to ignore incompatible columns
+ instead of having them raise a ``ValueError``. [#996]
+
+
2.0.4 (2021-03-11)
==================
diff --git a/src/lightkurve/collections.py b/src/lightkurve/collections.py
index f132e96d..b3949de7 100644
--- a/src/lightkurve/collections.py
+++ b/src/lightkurve/collections.py
@@ -11,7 +11,7 @@ from astropy.utils.decorators import deprecated
from . import MPLSTYLE
from .targetpixelfile import TargetPixelFile
-from .utils import LightkurveDeprecationWarning
+from .utils import LightkurveWarning, LightkurveDeprecationWarning
log = logging.getLogger(__name__)
@@ -227,6 +227,25 @@ class LightCurveCollection(Collection):
with warnings.catch_warnings(): # ignore "already normalized" message
warnings.filterwarnings("ignore", message=".*already.*")
lcs = [corrector_func(lc) for lc in self]
+
+ # Address issue #954: ignore incompatible columns with the same name
+ columns_to_remove = set()
+ for col in lcs[0].columns:
+ for lc in lcs[1:]:
+ if col in lc.columns:
+ if not (issubclass(lcs[0][col].__class__, lc[col].__class__) \
+ or lcs[0][col].__class__.info is lc[col].__class__.info):
+ columns_to_remove.add(col)
+ continue
+
+ if len(columns_to_remove) > 0:
+ warnings.warn(
+ f"The following columns will be excluded from stitching because the column types are incompatible: {columns_to_remove}",
+ LightkurveWarning,
+ )
+ lcs = [lc.copy() for lc in lcs]
+ [lc.remove_columns(columns_to_remove.intersection(lc.columns)) for lc in lcs]
+
# Need `join_type='inner'` until AstroPy supports masked Quantities
return vstack(lcs, join_type="inner", metadata_conflicts="silent")
|
lightkurve/lightkurve
|
08c3b1951e1ba52bcb15a7d4291ec469be9f16c8
|
diff --git a/tests/test_collections.py b/tests/test_collections.py
index 3b998958..3654745d 100644
--- a/tests/test_collections.py
+++ b/tests/test_collections.py
@@ -1,6 +1,7 @@
import warnings
import pytest
+from astropy import units as u
from astropy.utils.data import get_pkg_data_filename
import matplotlib.pyplot as plt
import numpy as np
@@ -107,7 +108,7 @@ def test_collection_getitem_by_boolean_array():
lcc_f = lcc[[True, False, True]]
assert lcc_f.data == [lc0, lc2]
- assert (type(lcc_f), LightCurveCollection)
+ assert type(lcc_f) is LightCurveCollection
# boundary case: 1 element
lcc_f = lcc[[False, True, False]]
@@ -215,7 +216,7 @@ def test_tpfcollection():
# ensure index by boolean array also works for TPFs
tpfc_f = tpfc[[False, True, True]]
assert tpfc_f.data == [tpf2, tpf2]
- assert (type(tpfc_f), TargetPixelFileCollection)
+ assert type(tpfc_f) is TargetPixelFileCollection
# Test __setitem__
tpf3 = KeplerTargetPixelFile(filename_tpf_one_center, targetid=55)
tpfc[1] = tpf3
@@ -353,3 +354,11 @@ def test_accessor_k2_campaign():
tpf1.hdu[0].header["CAMPAIGN"] = 1
tpfc = TargetPixelFileCollection([tpf0, tpf1])
assert (tpfc.campaign == [2, 1]).all()
+
+
+def test_unmergeable_columns():
+ """Regression test for #954."""
+ lc1 = LightCurve(data={'time': [1,2,3], 'x': [1,2,3]})
+ lc2 = LightCurve(data={'time': [1,2,3], 'x': [1,2,3]*u.electron/u.second})
+ with pytest.warns(LightkurveWarning, match="column types are incompatible"):
+ LightCurveCollection([lc1, lc2]).stitch()
|
ValueError: unmergeable object classes
<!-- Fill in the information below before opening an issue. -->
#### Problem description
Hi!
I'm new to lightkurve. I was running this piece of code (below) and I haven't been able to work out the ValueError. I can't find similar errors within the documentation either. Any suggestions? Thank you in advance.
#### Example
<!-- Provide a link or minimal code snippet that demonstrates the issue. -->
```python
import lightkurve as lk
from lightkurve import search_lightcurve
lc = lk.search_lightcurve("HD 93083", mission='TESS').download_all().stitch().remove_nans()
# ValueError: unmergeable object classes ['Quantity', 'QColumn']
```
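
Under the hood, `stitch()` uses `astropy.table.vstack()`, which cannot merge a plain column with a `Quantity` column of the same name. The regression test added with the fix distills the failure to two tiny light curves:

```python
import astropy.units as u
from lightkurve import LightCurve, LightCurveCollection

lc1 = LightCurve(data={'time': [1, 2, 3], 'x': [1, 2, 3]})       # unitless column
lc2 = LightCurve(data={'time': [1, 2, 3],
                       'x': [1, 2, 3] * u.electron / u.second})  # Quantity column
LightCurveCollection([lc1, lc2]).stitch()  # fixed: warns and drops 'x'
```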
#### Environment
- platform (e.g. Linux, OSX, Windows): Mac OSX
- lightkurve version (e.g. 1.0b6): 2.0.1
- installation method (e.g. pip, conda, source): pip
|
0.0
|
08c3b1951e1ba52bcb15a7d4291ec469be9f16c8
|
[
"tests/test_collections.py::test_unmergeable_columns"
] |
[
"tests/test_collections.py::test_collection_init",
"tests/test_collections.py::test_collection_append",
"tests/test_collections.py::test_collection_stitch",
"tests/test_collections.py::test_collection_getitem",
"tests/test_collections.py::test_collection_getitem_by_boolean_array",
"tests/test_collections.py::test_collection_getitem_by_other_array",
"tests/test_collections.py::test_collection_setitem",
"tests/test_collections.py::test_tpfcollection",
"tests/test_collections.py::test_tpfcollection_plot",
"tests/test_collections.py::test_stitch_repr",
"tests/test_collections.py::test_accessor_tess_sector",
"tests/test_collections.py::test_accessor_kepler_quarter",
"tests/test_collections.py::test_accessor_k2_campaign"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-12 20:37:26+00:00
|
mit
| 3,577 |
|
lightstep__lightstep-tracer-python-106
|
diff --git a/lightstep/b3_propagator.py b/lightstep/b3_propagator.py
index 940a86b..1d4568c 100644
--- a/lightstep/b3_propagator.py
+++ b/lightstep/b3_propagator.py
@@ -36,12 +36,12 @@ class B3Propagator(Propagator):
flags = baggage.pop(_FLAGS, None)
if flags is not None:
- carrier[_FLAGS] = flags
+ carrier[_FLAGS] = str(flags)
sampled = baggage.pop(_SAMPLED, None)
if sampled is None:
- carrier[_SAMPLED] = 1
+ carrier[_SAMPLED] = "1"
else:
if flags == 1:
_LOG.warning(
@@ -56,16 +56,17 @@ class B3Propagator(Propagator):
int(sampled), sampled
)
)
- carrier[_SAMPLED] = sampled
+ carrier[_SAMPLED] = str(sampled)
if sampled is flags is (traceid and spanid) is None:
warn(
"If not propagating only the sampling state, traceid and "
"spanid must be defined, setting sampling state to 1."
)
- carrier[_SAMPLED] = 1
+ carrier[_SAMPLED] = "1"
- carrier.update(baggage)
+ for key, value in baggage.items():
+ carrier[key] = str(value)
if traceid is not None:
carrier[_TRACEID] = format(traceid, "x")
|
lightstep/lightstep-tracer-python
|
d11a9435d0ff53aa05a706731a9f7522f66b3690
|
diff --git a/tests/b3_propagator_test.py b/tests/b3_propagator_test.py
index 4728711..17ca55e 100644
--- a/tests/b3_propagator_test.py
+++ b/tests/b3_propagator_test.py
@@ -32,7 +32,7 @@ class B3PropagatorTest(TestCase):
{
"x-b3-traceid": format(span.context.trace_id, "x"),
"x-b3-spanid": format(span.context.span_id, "x"),
- "x-b3-sampled": 1,
+ "x-b3-sampled": "1",
"checked": "baggage"
}
)
@@ -47,7 +47,7 @@ class B3PropagatorTest(TestCase):
{
"x-b3-traceid": format(span.context.trace_id, "x"),
"x-b3-spanid": format(span.context.span_id, "x"),
- "x-b3-flags": 1,
+ "x-b3-flags": "1",
}
)
@@ -174,9 +174,7 @@ class B3PropagatorTest(TestCase):
carrier = {}
tracer.inject(inject_span.context, Format.HTTP_HEADERS, carrier)
- self.assertTrue(
- isinstance(carrier["x-b3-sampled"], type(sampled_value))
- )
+ self.assertTrue(isinstance(carrier["x-b3-sampled"], str))
extract_span_context = tracer.extract(Format.HTTP_HEADERS, carrier)
@@ -200,7 +198,7 @@ class B3PropagatorTest(TestCase):
tracer.inject(inject_span.context, Format.HTTP_HEADERS, carrier)
- self.assertEqual(carrier["x-b3-sampled"], 1)
+ self.assertEqual(carrier["x-b3-sampled"], "1")
extract_span_context = tracer.extract(Format.HTTP_HEADERS, carrier)
|
B3 header values not passed as str
We've instrumented our service with LightStep and OpenTracing but have run into a snag when using B3 propagation.
```python
opentracing.tracer = lightstep.Tracer(
component_name=c.SERVICE_NAME,
access_token=OPEN_TRACE_ACCESS_TOKEN,
tags=tags
)
opentracing.tracer.register_propagator(Format.TEXT_MAP, B3Propagator())
opentracing.tracer.register_propagator(
Format.HTTP_HEADERS, B3Propagator()
)
```
this is the error I'm seeing in developer satellite:
```
error
message:
"Value for header {x-b3-sampled: 1} must be of type str or bytes, not <class 'int'>"
error.object:
"Value for header {x-b3-sampled: 1} must be of type str or bytes, not <class 'int'>"
error.kind:
"InvalidHeader"
```
stack trace:
```
"line 51, in get response = requests.get( File "/usr/local/lib/python3.8/site-packages/requests/api.py", line 75, in get return request('get', url, params=params, **kwargs) File "/usr/local/lib/python3.8/site-packages/requests/api.py", line 60, in request return session.request(method=method, url=url, **kwargs) File "/usr/local/lib/python3.8/site-packages/requests/sessions.py", line 519, in request prep = self.prepare_request(req) File "/usr/local/lib/python3.8/site-packages/requests/sessions.py", line 452, in prepare_request p.prepare( File "/usr/local/lib/python3.8/site-packages/requests/models.py", line 314, in prepare self.prepare_headers(headers) File "/usr/local/lib/python3.8/site-packages/requests/models.py", line 448, in prepare_headers check_header_validity(header) File "/usr/local/lib/python3.8/site-packages/requests/utils.py", line 944, in check_header_validity raise InvalidHeader("Value for header {%s: %s} must be of type str or " "
```
This appears to fail the Python requests library's [header validation](https://github.com/psf/requests/issues/3477).
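
The fix casts every carrier value to `str` during inject. Continuing the snippet above (with `span` an active span), the updated tests assert, for example:

```python
carrier = {}
opentracing.tracer.inject(span.context, Format.HTTP_HEADERS, carrier)
assert carrier["x-b3-sampled"] == "1"            # a str now, no longer the int 1
assert isinstance(carrier["x-b3-traceid"], str)  # hex-formatted string
```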
|
0.0
|
d11a9435d0ff53aa05a706731a9f7522f66b3690
|
[
"tests/b3_propagator_test.py::B3PropagatorTest::test_inject",
"tests/b3_propagator_test.py::B3PropagatorTest::test_propagation"
] |
[
"tests/b3_propagator_test.py::B3PropagatorTest::test_extract_multiple_headers",
"tests/b3_propagator_test.py::B3PropagatorTest::test_extract_single_header",
"tests/b3_propagator_test.py::B3PropagatorTest::test_invalid_traceid_spanid"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-02 20:40:10+00:00
|
mit
| 3,578 |
|
lightstep__lightstep-tracer-python-109
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 96b11a1..4b8a2f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
CHANGELOG.md
+<a name="4.4.8"></a>
+## [4.4.8](https://github.com/lightstep/lightstep-tracer-python/compare/4.4.7...4.4.8)
+* Do not record non-sampled spans (#108)
+
<a name="4.4.7"></a>
## [4.4.7](https://github.com/lightstep/lightstep-tracer-python/compare/4.4.6...4.4.7)
* Cast all carrier values to string (#106)
diff --git a/lightstep/http_connection.py b/lightstep/http_connection.py
index 18268c7..4027a37 100644
--- a/lightstep/http_connection.py
+++ b/lightstep/http_connection.py
@@ -30,9 +30,11 @@ class _HTTPConnection(object):
with self._lock:
try:
report.auth.access_token = auth.access_token
- headers = {"Content-Type": "application/octet-stream",
- "Accept": "application/octet-stream",
- "Lightstep-Access-Token": auth.access_token}
+ headers = {
+ "Content-Type": "application/octet-stream",
+ "Accept": "application/octet-stream",
+ "Lightstep-Access-Token": auth.access_token
+ }
r = requests.post(
self._collector_url,
diff --git a/lightstep/recorder.py b/lightstep/recorder.py
index fb41b39..3b50038 100644
--- a/lightstep/recorder.py
+++ b/lightstep/recorder.py
@@ -71,10 +71,11 @@ class Recorder(SpanRecorder):
self._finest("Initialized with Tracer runtime: {0}", (self._runtime,))
secure = collector_encryption != 'none' # the default is 'tls'
self._collector_url = util._collector_url_from_hostport(
- secure,
- collector_host,
- collector_port,
- self.use_thrift)
+ secure,
+ collector_host,
+ collector_port,
+ self.use_thrift
+ )
self._timeout_seconds = timeout_seconds
self._auth = self.converter.create_auth(access_token)
self._mutex = threading.Lock()
@@ -82,7 +83,7 @@ class Recorder(SpanRecorder):
self._max_span_records = max_span_records
self._disabled_runtime = False
-
+
atexit.register(self.shutdown)
self._periodic_flush_seconds = periodic_flush_seconds
@@ -99,7 +100,7 @@ class Recorder(SpanRecorder):
def _maybe_init_flush_thread(self):
"""Start a periodic flush mechanism for this recorder if:
- 1. periodic_flush_seconds > 0, and
+ 1. periodic_flush_seconds > 0, and
2. self._flush_thread is None, indicating that we have not yet
initialized the background flush thread.
@@ -132,7 +133,7 @@ class Recorder(SpanRecorder):
Will drop a previously-added span if the limit has been reached.
"""
- if self._disabled_runtime:
+ if self._disabled_runtime or not span.context.sampled:
return
# Lazy-init the flush loop (if need be).
@@ -241,7 +242,7 @@ class Recorder(SpanRecorder):
def _flush_worker(self, connection):
"""Use the given connection to transmit the current logs and spans as a
report request."""
- if connection == None:
+ if connection is None:
return False
# If the connection is not ready, try reestablishing it. If that
@@ -268,8 +269,9 @@ class Recorder(SpanRecorder):
except Exception as e:
self._fine(
- "Caught exception during report: {0}, stack trace: {1}",
- (e, traceback.format_exc()))
+ "Caught exception during report: {0}, stack trace: {1}",
+ (e, traceback.format_exc())
+ )
self._restore_spans(report_request)
return False
|
lightstep/lightstep-tracer-python
|
1765cbd1266ef37c97bad19cd79941cb549cae14
|
diff --git a/tests/recorder_test.py b/tests/recorder_test.py
index 2daadec..3671313 100644
--- a/tests/recorder_test.py
+++ b/tests/recorder_test.py
@@ -55,6 +55,39 @@ def recorder(request):
yield lightstep.recorder.Recorder(**runtime_args)
+def test_non_sampled_span_thrift(recorder):
+
+ mock_connection = MockConnection()
+ mock_connection.open()
+
+ non_sampled_span = BasicSpan(
+ lightstep.tracer._LightstepTracer(False, recorder, None),
+ operation_name="non_sampled",
+ context=SpanContext(trace_id=1, span_id=1, sampled=False),
+ start_time=time.time(),
+ )
+ non_sampled_span.finish()
+
+ sampled_span = BasicSpan(
+ lightstep.tracer._LightstepTracer(False, recorder, None),
+ operation_name="sampled",
+ context=SpanContext(trace_id=1, span_id=2, sampled=True),
+ start_time=time.time(),
+ )
+ sampled_span.finish()
+ recorder.record_span(non_sampled_span)
+ recorder.record_span(sampled_span)
+
+ recorder.flush(mock_connection)
+
+ if recorder.use_thrift:
+ for span_record in mock_connection.reports[0].span_records:
+ assert span_record.span_name == "sampled"
+ else:
+ for span in mock_connection.reports[0].spans:
+ assert span.operation_name == "sampled"
+
+
def test_default_tags_set_correctly(recorder):
mock_connection = MockConnection()
mock_connection.open()
@@ -80,7 +113,7 @@ def test_default_tags_set_correctly(recorder):
"access_token": "{your_access_token}",
"component_name": "python/runtime_test",
"periodic_flush_seconds": 0,
- "tags": {"lightstep.hostname": "hostname",},
+ "tags": {"lightstep.hostname": "hostname"},
}
new_recorder = lightstep.recorder.Recorder(**runtime_args)
for tag in new_recorder._runtime.tags:
@@ -119,7 +152,7 @@ def test_shutdown_twice(recorder):
recorder.shutdown()
recorder.shutdown()
except Exception as error:
- self.fail("Unexpected exception raised: {}".format(error))
+ pytest.fail("Unexpected exception raised: {}".format(error))
# ------------
@@ -225,7 +258,7 @@ def test_exception_formatting(recorder):
assert len(recorder._span_records) == 1
assert recorder.flush(mock_connection)
spans = recorder.converter.get_span_records(mock_connection.reports[1])
-
+
if hasattr(spans[0], "log_records"):
assert len(spans[0].log_records) == 1
assert len(spans[0].log_records[0].fields) == 3
@@ -251,4 +284,3 @@ def test_exception_formatting(recorder):
assert field.string_value == ""
else:
raise AttributeError("unexpected field: %s".format(field.key))
-
|
Respecting sampling in recorders
What is the intended functionality around the `sampled` span context? I've noticed that it's propagated from parent spans and serialized into carriers, but I couldn't find any code that reads it when recording spans or sending them to the collector.
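For reference, the guard this patch adds to `Recorder.record_span` can be sketched in isolation (a simplified model, not the actual recorder class):

```python
class Recorder:
    """Sketch of the sampling guard introduced in the patch above."""

    def __init__(self):
        self._disabled_runtime = False
        self._span_records = []

    def record_span(self, span):
        # Non-sampled spans are now dropped up front instead of being
        # buffered and later reported to the collector.
        if self._disabled_runtime or not span.context.sampled:
            return
        self._span_records.append(span)
```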
|
0.0
|
1765cbd1266ef37c97bad19cd79941cb549cae14
|
[
"tests/recorder_test.py::test_non_sampled_span_thrift[True]",
"tests/recorder_test.py::test_non_sampled_span_thrift[False]"
] |
[
"tests/recorder_test.py::test_default_tags_set_correctly[True]",
"tests/recorder_test.py::test_default_tags_set_correctly[False]",
"tests/recorder_test.py::test_send_spans_after_shutdown[True]",
"tests/recorder_test.py::test_send_spans_after_shutdown[False]",
"tests/recorder_test.py::test_shutdown_twice[True]",
"tests/recorder_test.py::test_shutdown_twice[False]",
"tests/recorder_test.py::test_stress_logs[True]",
"tests/recorder_test.py::test_stress_logs[False]",
"tests/recorder_test.py::test_stress_spans[True]",
"tests/recorder_test.py::test_stress_spans[False]",
"tests/recorder_test.py::test_buffer_limits[True]",
"tests/recorder_test.py::test_buffer_limits[False]",
"tests/recorder_test.py::test_exception_formatting[True]",
"tests/recorder_test.py::test_exception_formatting[False]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-17 16:25:22+00:00
|
mit
| 3,579 |
|
lightstep__lightstep-tracer-python-98
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..104ee6e
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,51 @@
+name: release
+
+on:
+ push:
+ tags:
+ - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
+
+jobs:
+ build:
+ name: Create Release
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@master
+ - name: Create Release
+ id: create_release
+ uses: actions/create-release@latest
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
+ with:
+ tag_name: ${{ github.ref }}
+ release_name: Release ${{ github.ref }}
+ body: |
+ Changes in this Release
+ - ${{ github.ref }}
+ draft: false
+ prerelease: false
+
+ deploy:
+ name: Publish package
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: '3.x'
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install setuptools wheel twine cython
+ - name: Build and publish
+ env:
+ TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
+ TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
+ run: |
+ mkdir -p ./dist
+ python setup.py sdist # source distribution
+ python setup.py bdist_wheel
+ twine upload dist/*
diff --git a/RELEASING.md b/RELEASING.md
new file mode 100644
index 0000000..494d269
--- /dev/null
+++ b/RELEASING.md
@@ -0,0 +1,23 @@
+# Releasing
+
+Once all the changes for a release have been merged to master, ensure the following:
+
+- [ ] version has been updated in `VERSION`, `lightstep/version.py` and `setup.cfg`
+- [ ] tests are passing
+- [ ] user facing documentation has been updated
+
+# Publishing
+
+Publishing to [pypi](https://pypi.org/project/lightstep/) is automated via GitHub actions. Once a tag is pushed to the repo, a new GitHub Release is created and package is published via the actions defined here: https://github.com/lightstep/lightstep-tracer-python/blob/master/.github/workflows/release.yml
+
+```
+$ git clone [email protected]:lightstep/lightstep-tracer-python && cd lightstep-tracer-python
+# ensure the version matches the version being released
+$ cat VERSION
+4.4.3
+$ cat lightstep/version.py
+LIGHTSTEP_PYTHON_TRACER_VERSION="4.4.3"
+$ cat setup.py | grep version
+ version='4.4.3',
+$ git tag v4.4.3 && git push origin v4.4.3
+```
diff --git a/lightstep/http_converter.py b/lightstep/http_converter.py
index c17573d..a05c582 100644
--- a/lightstep/http_converter.py
+++ b/lightstep/http_converter.py
@@ -1,8 +1,10 @@
+import socket
+import sys
+
from lightstep.collector_pb2 import Auth, ReportRequest, Span, Reporter, KeyValue, Reference, SpanContext
from lightstep.converter import Converter
from . import util
from . import version as tracer_version
-import sys
from google.protobuf.timestamp_pb2 import Timestamp
@@ -22,6 +24,7 @@ class HttpConverter(Converter):
if tags is None:
tags = {}
tracer_tags = tags.copy()
+ tracer_tags['lightstep.hostname'] = tracer_tags.get('lightstep.hostname', socket.gethostname())
tracer_tags.update({
'lightstep.tracer_platform': 'python',
diff --git a/lightstep/recorder.py b/lightstep/recorder.py
index 1e06282..9bedbee 100644
--- a/lightstep/recorder.py
+++ b/lightstep/recorder.py
@@ -14,7 +14,7 @@ import traceback
import warnings
from basictracer.recorder import SpanRecorder
-from opentracing.logs import ERROR_KIND, STACK
+from opentracing.logs import ERROR_KIND, STACK, ERROR_OBJECT
from lightstep.http_converter import HttpConverter
from lightstep.thrift_converter import ThriftConverter
@@ -171,7 +171,11 @@ class Recorder(SpanRecorder):
log.key_values[ERROR_KIND] = util._format_exc_type(log.key_values[ERROR_KIND])
if STACK in log.key_values:
- log.key_values[STACK] = util._format_exc_tb(log.key_values[STACK])
+ log.key_values[STACK] = util._format_exc_tb(
+ log.key_values.get(ERROR_OBJECT),
+ log.key_values.get(ERROR_KIND),
+ log.key_values[STACK]
+ )
return log
diff --git a/lightstep/thrift_converter.py b/lightstep/thrift_converter.py
index ffcf314..72debc5 100644
--- a/lightstep/thrift_converter.py
+++ b/lightstep/thrift_converter.py
@@ -1,7 +1,9 @@
+import socket
+import sys
+
from lightstep import constants
from lightstep.converter import Converter
from .crouton import ttypes
-import sys
from . import util
from . import version as tracer_version
import jsonpickle
@@ -22,6 +24,7 @@ class ThriftConverter(Converter):
if tags is None:
tags = {}
tracer_tags = tags.copy()
+ tracer_tags['lightstep.hostname'] = tracer_tags.get('lightstep.hostname', socket.gethostname())
tracer_tags.update({
'lightstep.tracer_platform': 'python',
diff --git a/lightstep/util.py b/lightstep/util.py
index 50b61d3..33c403e 100755
--- a/lightstep/util.py
+++ b/lightstep/util.py
@@ -105,9 +105,9 @@ def _coerce_to_unicode(val):
return '(encoding error)'
-def _format_exc_tb(exc_tb):
+def _format_exc_tb(exc_value, exc_type, exc_tb):
if type(exc_tb) is types.TracebackType:
- return ''.join(traceback.format_tb(exc_tb))
+ return ''.join(traceback.format_exception(exc_value, exc_type, exc_tb))
return exc_tb
|
lightstep/lightstep-tracer-python
|
540768ef727de0c0f00f14325a3b6cbf98add589
|
diff --git a/tests/recorder_test.py b/tests/recorder_test.py
index c514aba..b8d4163 100644
--- a/tests/recorder_test.py
+++ b/tests/recorder_test.py
@@ -1,3 +1,4 @@
+import socket
import time
import unittest
@@ -15,6 +16,7 @@ from lightstep.crouton import ttypes
class MockConnection(object):
"""MockConnection is used to debug and test Runtime.
"""
+
def __init__(self):
self.reports = []
self.ready = False
@@ -40,121 +42,140 @@ class MockConnection(object):
@pytest.fixture(params=[True, False])
def recorder(request):
- runtime_args = {'collector_encryption': 'none',
- 'collector_host': 'localhost',
- 'collector_port': 9998,
- 'access_token': '{your_access_token}',
- 'component_name': 'python/runtime_test',
- 'periodic_flush_seconds': 0,
- 'use_thrift': request.param,
- 'use_http': not request.param}
- recorder = lightstep.recorder.Recorder(runtime_args)
- yield recorder
-
-
-# ------_
-# HELPERS
-# ------_
-def check_spans(converter, report):
- """Checks spans' name.
- """
- def setUp(self):
- self.mock_connection = MockConnection()
- self.mock_connection.open()
- self.runtime_args = {'collector_encryption': 'none',
- 'collector_host': 'localhost',
- 'collector_port': 9998,
- 'access_token': '{your_access_token}',
- 'component_name': 'python/runtime_test',
- 'periodic_flush_seconds': 0}
-
- def create_test_recorder(self):
- """Returns a LightStep Recorder based on self.runtime_args.
- """
- return lightstep.recorder.Recorder(**self.runtime_args)
-
- # -------------
- # SHUTDOWN TESTS
- # -------------
- def test_send_spans_after_shutdown(self):
- recorder = self.create_test_recorder()
-
- # Send 10 spans
- for i in range(10):
- recorder.record_span(self.dummy_basic_span(recorder, i))
- self.assertTrue(recorder.flush(self.mock_connection))
-
- # Check 10 spans
- self.check_spans(self.mock_connection.reports[0].span_records)
-
- # Delete current logs and shutdown runtime
- self.mock_connection.clear()
- recorder.shutdown()
-
- # Send 10 spans, though none should get through
- for i in range(10):
- recorder.record_span(self.dummy_basic_span(recorder, i))
- self.assertFalse(recorder.flush(self.mock_connection))
- self.assertEqual(len(self.mock_connection.reports), 0)
-
- def test_shutdown_twice(self):
- recorder = self.create_test_recorder()
+ runtime_args = {
+ "collector_encryption": "none",
+ "collector_host": "localhost",
+ "collector_port": 9998,
+ "access_token": "{your_access_token}",
+ "component_name": "python/runtime_test",
+ "periodic_flush_seconds": 0,
+ "use_thrift": request.param,
+ "use_http": not request.param,
+ }
+ yield lightstep.recorder.Recorder(**runtime_args)
+
+
+def test_default_tags_set_correctly(recorder):
+ mock_connection = MockConnection()
+ mock_connection.open()
+ tags = getattr(recorder._runtime, "tags", None)
+ if tags is None:
+ tags = getattr(recorder._runtime, "attrs")
+ for tag in tags:
+ if hasattr(tag, "key"):
+ if tag.key == "lightstep.hostname":
+ assert tag.string_value == socket.gethostname()
+ elif tag.key == "lightstep.tracer_platform":
+ assert tag.string_value == "python"
+ else:
+ if tag.Key == "lightstep.hostname":
+ assert tag.Value == socket.gethostname()
+ elif tag.Key == "lightstep.tracer_platform":
+ assert tag.Value == "python"
+ assert len(tags) == 6
+ runtime_args = {
+ "collector_encryption": "none",
+ "collector_host": "localhost",
+ "collector_port": 9998,
+ "access_token": "{your_access_token}",
+ "component_name": "python/runtime_test",
+ "periodic_flush_seconds": 0,
+ "tags": {
+ "lightstep.hostname": "hostname",
+ },
+ }
+ new_recorder = lightstep.recorder.Recorder(**runtime_args)
+ for tag in new_recorder._runtime.tags:
+ if tag.key == "lightstep.hostname":
+ assert tag.string_value == "hostname"
+ assert len(new_recorder._runtime.tags) == 6
+
+
+# --------------
+# SHUTDOWN TESTS
+# --------------
+def test_send_spans_after_shutdown(recorder):
+ mock_connection = MockConnection()
+ mock_connection.open()
+ # Send 10 spans
+ for i in range(10):
+ dummy_basic_span(recorder, i)
+ assert recorder.flush(mock_connection)
+
+ # Check 10 spans
+ check_spans(recorder.converter, mock_connection.reports[0])
+
+ # Delete current logs and shutdown runtime
+ mock_connection.clear()
+ recorder.shutdown()
+
+ # Send 10 spans, though none should get through
+ for i in range(10):
+ recorder.record_span(dummy_basic_span(recorder, i))
+ assert not recorder.flush(mock_connection)
+ assert len(mock_connection.reports) == 0
+
+
+def test_shutdown_twice(recorder):
+ try:
recorder.shutdown()
recorder.shutdown()
+ except Exception as error:
+ self.fail("Unexpected exception raised: {}".format(error))
+
+
+# ------------
+# STRESS TESTS
+# ------------
+def test_stress_logs(recorder):
+ mock_connection = MockConnection()
+ mock_connection.open()
+ for i in range(1000):
+ dummy_basic_span(recorder, i)
+ assert recorder.flush(mock_connection)
+ assert recorder.converter.num_span_records(mock_connection.reports[0]) == 1000
+ check_spans(recorder.converter, mock_connection.reports[0])
+
+
+def test_stress_spans(recorder):
+ mock_connection = MockConnection()
+ mock_connection.open()
+ for i in range(1000):
+ dummy_basic_span(recorder, i)
+ assert recorder.flush(mock_connection)
+ assert recorder.converter.num_span_records(mock_connection.reports[0]) == 1000
+ check_spans(recorder.converter, mock_connection.reports[0])
+
+
+# -------------
+# RUNTIME TESTS
+# -------------
+def test_buffer_limits(recorder):
+ mock_connection = MockConnection()
+ mock_connection.open()
+ recorder._max_span_records = 88
+
+ assert len(recorder._span_records) == 0
+ for i in range(0, 100):
+ dummy_basic_span(recorder, i)
+ assert len(recorder._span_records) == 88
+ assert recorder.flush(mock_connection)
- # ------------
- # STRESS TESTS
- # ------------
- def test_stress_logs(self):
- recorder = self.create_test_recorder()
- for i in range(1000):
- recorder.record_span(self.dummy_basic_span(recorder, i))
- self.assertTrue(recorder.flush(self.mock_connection))
- self.assertEqual(len(self.mock_connection.reports[0].span_records), 1000)
- self.check_spans(self.mock_connection.reports[0].span_records)
-
- def test_stress_spans(self):
- recorder = self.create_test_recorder()
- for i in range(1000):
- recorder.record_span(self.dummy_basic_span(recorder, i))
- self.assertTrue(recorder.flush(self.mock_connection))
- self.assertEqual(len(self.mock_connection.reports[0].span_records), 1000)
- self.check_spans(self.mock_connection.reports[0].span_records)
-
- # -------------
- # RUNTIME TESTS
- # -------------
-
- def test_buffer_limits(self):
- self.runtime_args.update({
- 'max_span_records': 88,
- })
- recorder = self.create_test_recorder()
-
- self.assertEqual(len(recorder._span_records), 0)
- for i in range(0, 10000):
- recorder.record_span(self.dummy_basic_span(recorder, i))
- self.assertEqual(len(recorder._span_records), 88)
- self.assertTrue(recorder.flush(self.mock_connection))
-
- # ------
- # HELPER
- # ------
- def check_spans(self, spans):
- """Checks spans' name.
- """
- for i, span in enumerate(spans):
- self.assertEqual(span.span_name, str(i))
-
- def dummy_basic_span(self, recorder, i):
- return BasicSpan(
- lightstep.tracer._LightstepTracer(False, recorder, None),
- operation_name=str(i),
- context=SpanContext(
- trace_id=1000+i,
- span_id=2000+i),
- start_time=time.time())
-
-if __name__ == '__main__':
- unittest.main()
+def check_spans(converter, report):
+ """Checks spans' name.
+ """
+ spans = converter.get_span_records(report)
+ for i, span in enumerate(spans):
+ assert converter.get_span_name(span) == str(i)
+
+
+def dummy_basic_span(recorder, i):
+ span = BasicSpan(
+ lightstep.tracer._LightstepTracer(False, recorder, None),
+ operation_name=str(i),
+ context=SpanContext(trace_id=1000 + i, span_id=2000 + i),
+ start_time=time.time() - 100,
+ )
+ span.finish()
+ return span
|
Add the tag `lightstep.hostname`
Hola crew!
I would like to propose that the tag 'lightstep.hostname' be added by this library. It is present in the Go and JavaScript libraries, and would be useful so that we do not have to pass that tag to the tracer constructor manually.
See:
- https://github.com/lightstep/lightstep-tracer-go/blob/eb40dd366f0f992fd08eb681d18c3898fcd68a64/options.go#L41
- https://github.com/lightstep/lightstep-tracer-javascript/blob/e9db0768cf7f3e063b738da479d734844092311e/src/imp/runtime_imp.js#L57
The practical value is to allow us to identify sick nodes for inspection. Whether it is the nodes themselves that are sick, or some condition about those nodes that is otherwise invisible to the trace (e.g. AZ or connection state), it is still useful to have a node identity from which to start the inspection.
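The patch above implements this by defaulting the tag in both converters while leaving caller-supplied values untouched; the core of it is roughly (a sketch with a hypothetical helper name):

```python
import socket

def with_default_hostname(tags=None):
    """Sketch of the converter logic: default 'lightstep.hostname'
    to the local hostname unless the caller already set it."""
    tracer_tags = dict(tags or {})
    tracer_tags.setdefault("lightstep.hostname", socket.gethostname())
    return tracer_tags

print(with_default_hostname())                                # local hostname
print(with_default_hostname({"lightstep.hostname": "web1"}))  # caller's value wins
```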
|
0.0
|
540768ef727de0c0f00f14325a3b6cbf98add589
|
[
"tests/recorder_test.py::test_default_tags_set_correctly[True]",
"tests/recorder_test.py::test_default_tags_set_correctly[False]"
] |
[
"tests/recorder_test.py::test_send_spans_after_shutdown[True]",
"tests/recorder_test.py::test_send_spans_after_shutdown[False]",
"tests/recorder_test.py::test_shutdown_twice[True]",
"tests/recorder_test.py::test_shutdown_twice[False]",
"tests/recorder_test.py::test_stress_logs[True]",
"tests/recorder_test.py::test_stress_logs[False]",
"tests/recorder_test.py::test_stress_spans[True]",
"tests/recorder_test.py::test_stress_spans[False]",
"tests/recorder_test.py::test_buffer_limits[True]",
"tests/recorder_test.py::test_buffer_limits[False]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-21 21:03:11+00:00
|
mit
| 3,580 |
|
line__line-bot-sdk-python-148
|
diff --git a/examples/flask-kitchensink/app.py b/examples/flask-kitchensink/app.py
index 08a41a8..63dc012 100644
--- a/examples/flask-kitchensink/app.py
+++ b/examples/flask-kitchensink/app.py
@@ -40,6 +40,7 @@ from linebot.models import (
StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,
ImageMessage, VideoMessage, AudioMessage, FileMessage,
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent,
+ MemberJoinedEvent, MemberLeftEvent,
FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent,
TextComponent, SpacerComponent, IconComponent, ButtonComponent,
SeparatorComponent, QuickReply, QuickReplyButton,
@@ -436,6 +437,20 @@ def handle_beacon(event):
event.beacon.hwid, event.beacon.dm)))
[email protected](MemberJoinedEvent)
+def handle_member_joined(event):
+ line_bot_api.reply_message(
+ event.reply_token,
+ TextSendMessage(
+ text='Got memberJoined event. event={}'.format(
+ event)))
+
+
[email protected](MemberLeftEvent)
+def handle_member_left(event):
+ app.logger.info("Got memberLeft event")
+
+
@app.route('/static/<path:path>')
def send_static_content(path):
return send_from_directory('static', path)
diff --git a/linebot/models/__init__.py b/linebot/models/__init__.py
index 819a993..f8894ea 100644
--- a/linebot/models/__init__.py
+++ b/linebot/models/__init__.py
@@ -45,6 +45,8 @@ from .events import ( # noqa
LeaveEvent,
PostbackEvent,
AccountLinkEvent,
+ MemberJoinedEvent,
+ MemberLeftEvent,
BeaconEvent,
Postback,
Beacon,
diff --git a/linebot/models/events.py b/linebot/models/events.py
index bc7255d..2f570b9 100644
--- a/linebot/models/events.py
+++ b/linebot/models/events.py
@@ -261,6 +261,68 @@ class BeaconEvent(Event):
)
+class MemberJoinedEvent(Event):
+ """Webhook MemberJoinedEvent.
+
+ https://developers.line.biz/en/reference/messaging-api/#member-joined-event
+
+ Event object for when a user joins a group or room that the bot is in.
+
+ """
+
+ def __init__(self, timestamp=None, source=None, reply_token=None,
+ joined=None, **kwargs):
+ """__init__ method.
+
+ :param long timestamp: Time of the event in milliseconds
+ :param source: Source object
+ :type source: T <= :py:class:`linebot.models.sources.Source`
+ :param str reply_token: Reply token
+ :param joined: Joined object
+ :type joined: :py:class:`linebot.models.events.Joined`
+ :param kwargs:
+ """
+ super(MemberJoinedEvent, self).__init__(
+ timestamp=timestamp, source=source, **kwargs
+ )
+
+ self.type = 'memberJoined'
+ self.reply_token = reply_token
+ self.joined = self.get_or_new_from_json_dict(
+ joined, Joined
+ )
+
+
+class MemberLeftEvent(Event):
+ """Webhook MemberLeftEvent.
+
+ https://developers.line.biz/en/reference/messaging-api/#member-left-event
+
+ Event object for when a user leaves a group or room that the bot is in.
+
+ """
+
+ def __init__(self, timestamp=None, source=None,
+ left=None, **kwargs):
+ """__init__ method.
+
+ :param long timestamp: Time of the event in milliseconds
+ :param source: Source object
+ :type source: T <= :py:class:`linebot.models.sources.Source`
+ :param left: Left object
+ :type left: :py:class:`linebot.models.events.Left`
+ :param kwargs:
+ """
+ super(MemberLeftEvent, self).__init__(
+ timestamp=timestamp, source=source, **kwargs
+ )
+
+ self.type = 'memberLeft'
+ self.left = self.get_or_new_from_json_dict(
+ left, Left
+ )
+
+
class AccountLinkEvent(Event):
"""Webhook AccountLinkEvent.
@@ -345,6 +407,50 @@ class Beacon(Base):
return bytearray.fromhex(self.dm) if self.dm is not None else None
+class Joined(Base):
+ """Joined.
+
+ https://developers.line.biz/en/reference/messaging-api/#member-joined-event
+ """
+
+ def __init__(self, members=None, **kwargs):
+ """__init__ method.
+
+    :param dict members: Members who joined
+ :param kwargs:
+ """
+ super(Joined, self).__init__(**kwargs)
+
+ self._members = members
+
+ @property
+ def members(self):
+ """Get members as list of SourceUser."""
+ return [SourceUser(user_id=x['userId']) for x in self._members]
+
+
+class Left(Base):
+ """Left.
+
+ https://developers.line.biz/en/reference/messaging-api/#member-left-event
+ """
+
+ def __init__(self, members=None, **kwargs):
+ """__init__ method.
+
+    :param dict members: Members who left
+ :param kwargs:
+ """
+ super(Left, self).__init__(**kwargs)
+
+ self._members = members
+
+ @property
+ def members(self):
+ """Get members as list of SourceUser."""
+ return [SourceUser(user_id=x['userId']) for x in self._members]
+
+
class Link(Base):
"""Link.
diff --git a/linebot/webhook.py b/linebot/webhook.py
index 6253dcb..415ceb1 100644
--- a/linebot/webhook.py
+++ b/linebot/webhook.py
@@ -32,6 +32,8 @@ from .models.events import (
PostbackEvent,
BeaconEvent,
AccountLinkEvent,
+ MemberJoinedEvent,
+ MemberLeftEvent,
)
from .utils import LOGGER, PY3, safe_compare_digest
@@ -144,6 +146,10 @@ class WebhookParser(object):
events.append(BeaconEvent.new_from_json_dict(event))
elif event_type == 'accountLink':
events.append(AccountLinkEvent.new_from_json_dict(event))
+ elif event_type == 'memberJoined':
+ events.append(MemberJoinedEvent.new_from_json_dict(event))
+ elif event_type == 'memberLeft':
+ events.append(MemberLeftEvent.new_from_json_dict(event))
else:
LOGGER.warn('Unknown event type. type=' + event_type)
|
line/line-bot-sdk-python
|
1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0
|
diff --git a/tests/test_webhook.py b/tests/test_webhook.py
index f26f1c5..980b5a5 100644
--- a/tests/test_webhook.py
+++ b/tests/test_webhook.py
@@ -24,6 +24,7 @@ from linebot import (
from linebot.models import (
MessageEvent, FollowEvent, UnfollowEvent, JoinEvent,
LeaveEvent, PostbackEvent, BeaconEvent, AccountLinkEvent,
+ MemberJoinedEvent, MemberLeftEvent,
TextMessage, ImageMessage, VideoMessage, AudioMessage,
LocationMessage, StickerMessage, FileMessage,
SourceUser, SourceRoom, SourceGroup
@@ -318,6 +319,31 @@ class TestWebhookParser(unittest.TestCase):
self.assertEqual(events[19].message.file_name, "file.txt")
self.assertEqual(events[19].message.file_size, 2138)
+ # MemberJoinedEvent
+ self.assertIsInstance(events[20], MemberJoinedEvent)
+ self.assertEqual(events[20].reply_token, '0f3779fba3b349968c5d07db31eabf65')
+ self.assertEqual(events[20].type, 'memberJoined')
+ self.assertEqual(events[20].timestamp, 1462629479859)
+ self.assertIsInstance(events[20].source, SourceGroup)
+ self.assertEqual(events[20].source.type, 'group')
+ self.assertEqual(events[20].source.group_id, 'C4af4980629...')
+ self.assertEqual(len(events[20].joined.members), 2)
+ self.assertIsInstance(events[20].joined.members[0], SourceUser)
+ self.assertEqual(events[20].joined.members[0].user_id, 'U4af4980629...')
+ self.assertEqual(events[20].joined.members[1].user_id, 'U91eeaf62d9...')
+
+ # MemberLeftEvent
+ self.assertIsInstance(events[21], MemberLeftEvent)
+ self.assertEqual(events[21].type, 'memberLeft')
+ self.assertEqual(events[21].timestamp, 1462629479960)
+ self.assertIsInstance(events[21].source, SourceGroup)
+ self.assertEqual(events[21].source.type, 'group')
+ self.assertEqual(events[21].source.group_id, 'C4af4980629...')
+ self.assertEqual(len(events[21].left.members), 2)
+ self.assertIsInstance(events[21].left.members[0], SourceUser)
+ self.assertEqual(events[21].left.members[0].user_id, 'U4af4980629...')
+ self.assertEqual(events[21].left.members[1].user_id, 'U91eeaf62d9...')
+
class TestWebhookHandler(unittest.TestCase):
def setUp(self):
diff --git a/tests/text/webhook.json b/tests/text/webhook.json
index 01d8bad..09ab5a9 100644
--- a/tests/text/webhook.json
+++ b/tests/text/webhook.json
@@ -260,6 +260,47 @@
"fileName": "file.txt",
"fileSize": 2138
}
+ },
+ {
+ "replyToken": "0f3779fba3b349968c5d07db31eabf65",
+ "type": "memberJoined",
+ "timestamp": 1462629479859,
+ "source": {
+ "type": "group",
+ "groupId": "C4af4980629..."
+ },
+ "joined": {
+ "members": [
+ {
+ "type": "user",
+ "userId": "U4af4980629..."
+ },
+ {
+ "type": "user",
+ "userId": "U91eeaf62d9..."
+ }
+ ]
+ }
+ },
+ {
+ "type": "memberLeft",
+ "timestamp": 1462629479960,
+ "source": {
+ "type": "group",
+ "groupId": "C4af4980629..."
+ },
+ "left": {
+ "members": [
+ {
+ "type": "user",
+ "userId": "U4af4980629..."
+ },
+ {
+ "type": "user",
+ "userId": "U91eeaf62d9..."
+ }
+ ]
+ }
}
]
}
|
Member Join/Leave Events
https://developers.line-beta.biz/en/reference/messaging-api/#member-joined-event
https://developers.line-beta.biz/en/reference/messaging-api/#member-left-event
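With the events added in this patch, handlers follow the same pattern as the other webhook handlers; a sketch assuming a configured `handler` and `line_bot_api` (as in the flask-kitchensink example above):

```python
from linebot.models import MemberJoinedEvent, MemberLeftEvent, TextSendMessage

@handler.add(MemberJoinedEvent)
def handle_member_joined(event):
    # event.joined.members is a list of SourceUser objects
    user_ids = [m.user_id for m in event.joined.members]
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text='Welcome! joined={}'.format(user_ids)))

@handler.add(MemberLeftEvent)
def handle_member_left(event):
    # memberLeft events carry no reply token, so just log them
    print('left={}'.format([m.user_id for m in event.left.members]))
```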
|
0.0
|
1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0
|
[
"tests/test_webhook.py::TestSignatureValidator::test_validate",
"tests/test_webhook.py::TestWebhookParser::test_parse",
"tests/test_webhook.py::TestWebhookHandler::test_handler"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-26 04:57:24+00:00
|
apache-2.0
| 3,581 |
|
line__line-bot-sdk-python-274
|
diff --git a/linebot/models/flex_message.py b/linebot/models/flex_message.py
index afccc3e..bd95169 100644
--- a/linebot/models/flex_message.py
+++ b/linebot/models/flex_message.py
@@ -109,7 +109,12 @@ class BubbleContainer(FlexContainer):
self.size = size
self.direction = direction
self.header = self.get_or_new_from_json_dict(header, BoxComponent)
- self.hero = self.get_or_new_from_json_dict(hero, ImageComponent)
+ self.hero = self.get_or_new_from_json_dict_with_types(
+ hero, {
+ 'image': ImageComponent,
+ 'box': BoxComponent
+ }
+ )
self.body = self.get_or_new_from_json_dict(body, BoxComponent)
self.footer = self.get_or_new_from_json_dict(footer, BoxComponent)
self.styles = self.get_or_new_from_json_dict(styles, BubbleStyle)
|
line/line-bot-sdk-python
|
1753128db9654bf95fad8d13e06135a42fe40abf
|
diff --git a/tests/models/test_flex_message.py b/tests/models/test_flex_message.py
index ad752fc..9fe7c29 100644
--- a/tests/models/test_flex_message.py
+++ b/tests/models/test_flex_message.py
@@ -61,8 +61,6 @@ class TestFlexMessage(SerializeTestCase):
'header':
BoxComponent(layout='vertical',
contents=[TextComponent(text='Header text')]),
- 'hero':
- ImageComponent(uri='https://example.com/flex/images/image.jpg'),
'body':
BoxComponent(layout='vertical',
contents=[TextComponent(text='Body text')]),
@@ -79,10 +77,17 @@ class TestFlexMessage(SerializeTestCase):
separator_color='#00ffff')
)
}
- self.assertEqual(
- self.serialize_as_dict(arg, type=self.BUBBLE),
- BubbleContainer(**arg).as_json_dict()
- )
+ heros = [
+ ImageComponent(uri='https://example.com/flex/images/image.jpg'),
+ BoxComponent(layout='vertical',
+ contents=[TextComponent(text='Body text')]),
+ ]
+ for hero in heros:
+ arg['hero'] = hero
+ self.assertEqual(
+ self.serialize_as_dict(arg, type=self.BUBBLE),
+ BubbleContainer(**arg).as_json_dict()
+ )
def test_bubble_style(self):
arg = {
|
Can't send FlexMessage with Hero block
## System Information
* Python version: 3.7.7
* SDK version: 1.16.0
* OS: macOS Mojave 10.14.3
## Expected Behavior
The docs say that a box or an image can be used for the hero block.
https://developers.line.biz/en/reference/messaging-api/#bubble
> Hero block. Specify a box or an image.
## Current Behavior
<!-- Tell us what happens instead of the expected behavior -->
I tried to send a FlexMessage with a box as the hero block, but got an error.
```
e.status_code: 400
e.error.message: A message (messages[0]) in the request body is invalid
e.error.details: [{"message": "must be specified", "property": "/hero/url"}]
```
The hero block seems to always be initialized as an ImageComponent if I pass in JSON.
https://github.com/line/line-bot-sdk-python/blob/9e9b1f9fd0dc577fe79dd288d9234407f0f25470/linebot/models/flex_message.py#L112
This question probably hits the same error:
https://www.line-community.me/question/5e7b039a851f7402cd963e3c
## Steps to Reproduce
<!-- Provide a link to a live example, or an unambiguous set of steps to -->
1. Change the parameters and execute the following script.
```python
from linebot import LineBotApi
from linebot.models import FlexSendMessage
from linebot.exceptions import LineBotApiError
# fail and output following
#
# e.status_code: 400
# e.error.message: A message (messages[0]) in the request body is invalid
# e.error.details: [{"message": "must be specified", "property": "/hero/url"}]
messages = [{
'type': 'flex',
'altText': 'altText',
'contents':
{
'type': 'bubble',
'hero': {
'type': 'box',
'layout': 'vertical',
'contents': [
{
'type': 'text',
'text': 'Brown Cafe'
}
]
}
}
}]
# success if change "hero" to "body"
#
# messages = [{
# 'type': 'flex',
# 'altText': 'altText',
# 'contents':
# {
# 'type': 'bubble',
# 'body': {
# 'type': 'box',
# 'layout': 'vertical',
# 'contents': [
# {
# 'type': 'text',
# 'text': 'Brown Cafe'
# }
# ]
# }
# }
# }]
# success if change hero's child element from "box" to "image"
#
# messages = [{
# 'type': 'flex',
# 'altText': 'altText',
# 'contents':
# {
# 'type': 'bubble',
# 'hero': {
# 'type': 'image',
# 'url': 'VALID URL',
# }
# }
# }]
user_id = ''
channel_access_token = ''
line_bot_api = LineBotApi(channel_access_token)
flex_messages = [FlexSendMessage(alt_text=message['altText'], contents=message['contents']) for message in messages]
try:
line_bot_api.push_message(user_id, flex_messages)
except LineBotApiError as e:
print('e.status_code:', e.status_code)
print('e.error.message:',e.error.message)
print('e.error.details:',e.error.details)
```
## Logs
<!-- Provide logs if possible -->
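With the `BubbleContainer` fix above, the failing payload can also be expressed directly with the model classes; a sketch (the push call is commented out since it needs a real token and user id):

```python
from linebot.models import (
    FlexSendMessage, BubbleContainer, BoxComponent, TextComponent,
)

bubble = BubbleContainer(
    hero=BoxComponent(
        layout='vertical',
        contents=[TextComponent(text='Brown Cafe')],
    ),
)
message = FlexSendMessage(alt_text='altText', contents=bubble)
print(message.as_json_dict())
# line_bot_api.push_message(user_id, message)
```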
|
0.0
|
1753128db9654bf95fad8d13e06135a42fe40abf
|
[
"tests/models/test_flex_message.py::TestFlexMessage::test_bubble_container"
] |
[
"tests/models/test_flex_message.py::TestFlexMessage::test_block_style",
"tests/models/test_flex_message.py::TestFlexMessage::test_box_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_bubble_style",
"tests/models/test_flex_message.py::TestFlexMessage::test_button_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_carousel_container",
"tests/models/test_flex_message.py::TestFlexMessage::test_filler_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_flex_message",
"tests/models/test_flex_message.py::TestFlexMessage::test_icon_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_image_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_separator_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_spacer_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_span_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_text_component"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-18 17:15:38+00:00
|
apache-2.0
| 3,582 |
|
line__line-bot-sdk-python-303
|
diff --git a/examples/flask-kitchensink/app.py b/examples/flask-kitchensink/app.py
index 2316df2..ba1e864 100644
--- a/examples/flask-kitchensink/app.py
+++ b/examples/flask-kitchensink/app.py
@@ -44,7 +44,7 @@ from linebot.models import (
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent,
MemberJoinedEvent, MemberLeftEvent,
FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent,
- TextComponent, SpacerComponent, IconComponent, ButtonComponent,
+ TextComponent, IconComponent, ButtonComponent,
SeparatorComponent, QuickReply, QuickReplyButton,
ImageSendMessage)
@@ -309,8 +309,6 @@ def handle_text_message(event):
layout='vertical',
spacing='sm',
contents=[
- # callAction, separator, websiteAction
- SpacerComponent(size='sm'),
# callAction
ButtonComponent(
style='link',
diff --git a/linebot/models/__init__.py b/linebot/models/__init__.py
index cbaf9da..be0140e 100644
--- a/linebot/models/__init__.py
+++ b/linebot/models/__init__.py
@@ -78,7 +78,6 @@ from .flex_message import ( # noqa
IconComponent,
ImageComponent,
SeparatorComponent,
- SpacerComponent,
TextComponent,
SpanComponent
)
@@ -196,3 +195,8 @@ from .things import ( # noqa
ActionResult,
Things,
)
+
+from .background import ( # noqa
+ Background,
+ LinearGradientBackground,
+)
diff --git a/linebot/models/background.py b/linebot/models/background.py
new file mode 100644
index 0000000..a758a73
--- /dev/null
+++ b/linebot/models/background.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""linebot.models.emojis module."""
+
+from __future__ import unicode_literals
+
+from abc import ABCMeta
+
+from future.utils import with_metaclass
+
+from .base import Base
+
+
+class Background(with_metaclass(ABCMeta, Base)):
+ """Background."""
+
+ def __init__(self, **kwargs):
+ """__init__ method.
+
+ :param kwargs:
+ """
+ super(Background, self).__init__(**kwargs)
+
+ self.type = None
+
+
+class LinearGradientBackground(Background):
+ """LinearGradientBackground."""
+
+ def __init__(self, angle, start_color, end_color,
+ center_color=None, center_position=None, **kwargs):
+ """__init__ method.
+
+ :param str type: The type of background used
+ :param str angle: The angle at which a linear gradient moves
+ :param str start_color: The color at the gradient's starting point
+ :param str end_color: The color at the gradient's ending point
+ :param str center_color: The color in the middle of the gradient
+ :param str center_position: The position of the intermediate color stop
+ :param kwargs:
+ """
+ super(LinearGradientBackground, self).__init__(**kwargs)
+ self.type = 'linearGradient'
+ self.angle = angle
+ self.start_color = start_color
+ self.end_color = end_color
+ self.center_color = center_color
+ self.center_position = center_position
diff --git a/linebot/models/flex_message.py b/linebot/models/flex_message.py
index bd95169..4a7cdce 100644
--- a/linebot/models/flex_message.py
+++ b/linebot/models/flex_message.py
@@ -20,6 +20,7 @@ from abc import ABCMeta
from future.utils import with_metaclass
+from .background import Background, LinearGradientBackground
from .actions import get_action
from .base import Base
from .send_messages import SendMessage
@@ -234,6 +235,9 @@ class BoxComponent(FlexComponent):
border_color=None,
border_width=None,
corner_radius=None,
+ justify_content=None,
+ align_items=None,
+ background=None,
width=None,
height=None,
flex=None,
@@ -260,6 +264,12 @@ class BoxComponent(FlexComponent):
:param str border_color: Color of box border
:param str border_width: Width of box border
:param str corner_radius: Radius at the time of rounding the corners of the border
+ :param str justify_content: How child elements are aligned along the main axis of
+ the parent element
+ :param str align_items: How child elements are aligned along the cross axis of
+ the parent element
+ :param background: Background object
+ :type background: T <= :py:class:`linebot.models.background.Background`
:param str width: Width of the box
:param str height: Height of the box
:param float flex: The ratio of the width or height of this box within the parent box
@@ -291,6 +301,8 @@ class BoxComponent(FlexComponent):
self.border_color = border_color
self.border_width = border_width
self.corner_radius = corner_radius
+ self.justify_content = justify_content
+ self.align_items = align_items
self.width = width
self.height = height
self.flex = flex
@@ -307,6 +319,9 @@ class BoxComponent(FlexComponent):
self.offset_start = offset_start
self.offset_end = offset_end
self.action = get_action(action)
+ self.background = Background.get_or_new_from_json_dict_with_types(
+ background, {'linearGradient': LinearGradientBackground}
+ )
new_contents = []
if contents:
@@ -320,7 +335,6 @@ class BoxComponent(FlexComponent):
'image': ImageComponent,
'span': SpanComponent,
'separator': SeparatorComponent,
- 'spacer': SpacerComponent,
'text': TextComponent
}
))
@@ -349,6 +363,7 @@ class ButtonComponent(FlexComponent):
style=None,
color=None,
gravity=None,
+ adjust_mode=None,
**kwargs):
"""__init__ method.
@@ -368,6 +383,7 @@ class ButtonComponent(FlexComponent):
Background color when the style property is primary or secondary.
Use a hexadecimal color code
:param str gravity: Vertical alignment style
+ :param str adjust_mode: The method by which to adjust the text font size
:param kwargs:
"""
super(ButtonComponent, self).__init__(**kwargs)
@@ -384,6 +400,7 @@ class ButtonComponent(FlexComponent):
self.style = style
self.color = color
self.gravity = gravity
+ self.adjust_mode = adjust_mode
class FillerComponent(FlexComponent):
@@ -541,26 +558,6 @@ class SeparatorComponent(FlexComponent):
self.color = color
-class SpacerComponent(FlexComponent):
- """SpacerComponent.
-
- https://developers.line.biz/en/reference/messaging-api/#spacer
-
- This is an invisible component that places a fixed-size space
- at the beginning or end of the box
- """
-
- def __init__(self, size=None, **kwargs):
- """__init__ method.
-
- :param str size: Size of the space
- :param kwargs:
- """
- super(SpacerComponent, self).__init__(**kwargs)
- self.type = 'spacer'
- self.size = size
-
-
class SpanComponent(FlexComponent):
"""SpanComponent.
|
line/line-bot-sdk-python
|
5ab6ba225495248c52eb3aa728da427bedffb37d
|
diff --git a/tests/models/serialize_test_case.py b/tests/models/serialize_test_case.py
index 7784dcd..13de0bc 100644
--- a/tests/models/serialize_test_case.py
+++ b/tests/models/serialize_test_case.py
@@ -41,7 +41,6 @@ class SerializeTestCase(unittest.TestCase):
AGE = "age"
AREA = "area"
SUBSCRIPTION_PERIOD = "subscriptionPeriod"
- SPACER = 'spacer'
SPAN = 'span'
BUBBLE = 'bubble'
CAROUSEL = 'carousel'
@@ -60,6 +59,7 @@ class SerializeTestCase(unittest.TestCase):
BUTTONS = 'buttons'
CONFIRM = 'confirm'
IMAGE_CAROUSEL = 'image_carousel'
+ LINEAR_GRADIENT = 'linearGradient'
def serialize_as_dict(self, obj, type=None):
if isinstance(obj, Base):
diff --git a/tests/models/test_background.py b/tests/models/test_background.py
new file mode 100644
index 0000000..7cc68e7
--- /dev/null
+++ b/tests/models/test_background.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the 'License'); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import unicode_literals, absolute_import
+
+import unittest
+
+from linebot.models import LinearGradientBackground
+from tests.models.serialize_test_case import SerializeTestCase
+
+
+class TestBackground(SerializeTestCase):
+ def test_background(self):
+ arg = {
+ "type": "linearGradient",
+ "angle": "0deg",
+ "start_color": "#ff0000",
+ "end_color": "#0000ff"
+ }
+ self.assertEqual(
+ self.serialize_as_dict(arg, type=self.LINEAR_GRADIENT),
+ LinearGradientBackground(**arg).as_json_dict()
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/models/test_flex_message.py b/tests/models/test_flex_message.py
index 9fe7c29..4618002 100644
--- a/tests/models/test_flex_message.py
+++ b/tests/models/test_flex_message.py
@@ -29,9 +29,9 @@ from linebot.models import (
ButtonComponent,
FillerComponent,
IconComponent,
- SpacerComponent,
SpanComponent,
URIAction,
+ LinearGradientBackground,
)
from tests.models.serialize_test_case import SerializeTestCase
@@ -158,6 +158,28 @@ class TestFlexMessage(SerializeTestCase):
BoxComponent(**arg).as_json_dict()
)
+ def test_box_component_with_linear_gradient(self):
+ arg = {
+ 'layout': 'vertical',
+ 'contents': [],
+ 'background_color': '#00000000',
+ 'border_width': 'light',
+ 'corner_radius': 'xs',
+ 'flex': 2,
+ 'background': LinearGradientBackground(
+ angle='0deg',
+ start_color='#ff0000',
+ center_color='#0000ff',
+ end_color='#00ff00',
+ center_position='10%'
+ )
+ }
+
+ self.assertEqual(
+ self.serialize_as_dict(arg, type=self.BOX),
+ BoxComponent(**arg).as_json_dict()
+ )
+
def test_button_component(self):
arg = {
'action':
@@ -212,15 +234,6 @@ class TestFlexMessage(SerializeTestCase):
SeparatorComponent(**arg).as_json_dict()
)
- def test_spacer_component(self):
- arg = {
- 'size': 'md'
- }
- self.assertEqual(
- self.serialize_as_dict(arg, type=self.SPACER),
- SpacerComponent(**arg).as_json_dict()
- )
-
def test_span_component(self):
arg = {
'type': 'span',
|
Support Flex Message Update 2
News: https://developers.line.biz/en/news/2020/10/08/flex-message-update-2-released/
1. - [ ] [Changed the maximum number of bubbles that can be included in a carousel](https://developers.line.biz/en/reference/messaging-api/#f-carousel)
2. - [ ] [Box component has justifyContent and alignItems property](https://developers.line.biz/en/reference/messaging-api/#box)
3. - [ ] [Use linear gradient as the background image for a box component by setting its background.type property equal to linearGradient.](https://developers.line.biz/en/reference/messaging-api/#box) (see the sketch after this list)
4. - [ ] [Allowed an empty array to be specified as the value of a box's contents property](https://developers.line.biz/en/reference/messaging-api/#box)
5. - [ ] [Added support for animated images](https://developers.line.biz/en/reference/messaging-api/#f-image)
6. - [ ] [Changed how sizes are specified](https://developers.line.biz/en/news/2020/10/08/flex-message-update-2-released/#update-size)
1. - [ ] [Image component](https://developers.line.biz/en/docs/messaging-api/flex-message-layout/#image-size)
2. - [ ] [Icon, text, and span components](https://developers.line.biz/en/docs/messaging-api/flex-message-layout/#other-component-size)
7. - [ ] [Allowed text to automatically shrink to fit a component's width](https://developers.line.biz/en/news/2020/10/08/flex-message-update-2-released/#update-adjust-mode) - [Text](https://developers.line.biz/en/reference/messaging-api/#f-text) and [Button](https://developers.line.biz/en/reference/messaging-api/#button)
8. - [ ] [Spacer has been discontinued](https://developers.line.biz/en/news/2020/10/08/flex-message-update-2-released/#update-spacer)
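A sketch of items 2 and 3 using the classes this patch adds (property values taken from the tests above):

```python
from linebot.models import BoxComponent, TextComponent, LinearGradientBackground

box = BoxComponent(
    layout='vertical',
    justify_content='center',  # new main-axis alignment property
    align_items='center',      # new cross-axis alignment property
    contents=[TextComponent(text='Gradient box')],
    background=LinearGradientBackground(
        angle='0deg',
        start_color='#ff0000',
        end_color='#0000ff',
    ),
)
print(box.as_json_dict())
```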
|
0.0
|
5ab6ba225495248c52eb3aa728da427bedffb37d
|
[
"tests/models/test_background.py::TestBackground::test_background",
"tests/models/test_flex_message.py::TestFlexMessage::test_block_style",
"tests/models/test_flex_message.py::TestFlexMessage::test_box_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_box_component_with_linear_gradient",
"tests/models/test_flex_message.py::TestFlexMessage::test_bubble_container",
"tests/models/test_flex_message.py::TestFlexMessage::test_bubble_style",
"tests/models/test_flex_message.py::TestFlexMessage::test_button_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_carousel_container",
"tests/models/test_flex_message.py::TestFlexMessage::test_filler_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_flex_message",
"tests/models/test_flex_message.py::TestFlexMessage::test_icon_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_image_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_separator_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_span_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_text_component"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-04 05:10:54+00:00
|
apache-2.0
| 3,583 |
|
line__line-bot-sdk-python-375
|
diff --git a/linebot/models/__init__.py b/linebot/models/__init__.py
index e687da5..8e34b6b 100644
--- a/linebot/models/__init__.py
+++ b/linebot/models/__init__.py
@@ -80,7 +80,8 @@ from .flex_message import ( # noqa
ImageComponent,
SeparatorComponent,
TextComponent,
- SpanComponent
+ SpanComponent,
+ VideoComponent
)
from .imagemap import ( # noqa
ImagemapSendMessage,
diff --git a/linebot/models/flex_message.py b/linebot/models/flex_message.py
index 2c8bed3..66213c2 100644
--- a/linebot/models/flex_message.py
+++ b/linebot/models/flex_message.py
@@ -93,6 +93,7 @@ class BubbleContainer(FlexContainer):
:param hero: Hero block
:type hero: :py:class:`linebot.models.flex_message.ImageComponent`
| :py:class:`linebot.models.flex_message.BoxComponent`
+ | :py:class:`linebot.models.flex_message.VideoComponent`
:param body: Body block
:type body: :py:class:`linebot.models.flex_message.BoxComponent`
:param footer: Footer block
@@ -112,7 +113,8 @@ class BubbleContainer(FlexContainer):
self.hero = self.get_or_new_from_json_dict_with_types(
hero, {
'image': ImageComponent,
- 'box': BoxComponent
+ 'box': BoxComponent,
+ 'video': VideoComponent
}
)
self.body = self.get_or_new_from_json_dict(body, BoxComponent)
@@ -238,7 +240,9 @@ class BoxComponent(FlexComponent):
align_items=None,
background=None,
width=None,
+ max_width=None,
height=None,
+ max_height=None,
flex=None,
spacing=None,
margin=None,
@@ -270,7 +274,9 @@ class BoxComponent(FlexComponent):
:param background: Background object
:type background: T <= :py:class:`linebot.models.background.Background`
:param str width: Width of the box
+ :param str max_width: Maximum width of the box
:param str height: Height of the box
+ :param str max_height: Maximum height of the box
:param float flex: The ratio of the width or height of this box within the parent box
and the previous component in the parent box
:param str spacing: Minimum space between components in this box
@@ -303,7 +309,9 @@ class BoxComponent(FlexComponent):
self.justify_content = justify_content
self.align_items = align_items
self.width = width
+ self.max_width = max_width
self.height = height
+ self.max_height = max_height
self.flex = flex
self.spacing = spacing
self.margin = margin
@@ -334,7 +342,8 @@ class BoxComponent(FlexComponent):
'image': ImageComponent,
'span': SpanComponent,
'separator': SeparatorComponent,
- 'text': TextComponent
+ 'text': TextComponent,
+ 'video': VideoComponent
}
))
self.contents = new_contents
@@ -615,6 +624,7 @@ class TextComponent(FlexComponent):
align=None,
gravity=None,
wrap=None,
+ line_spacing=None,
max_lines=None,
weight=None,
color=None,
@@ -640,6 +650,7 @@ class TextComponent(FlexComponent):
:param str gravity: Vertical alignment style
:param bool wrap: True to wrap text. The default value is False.
If set to True, you can use a new line character (\n) to begin on a new line.
+ :param str line_spacing: Line spacing in a wrapping text
:param int max_lines: Max number of lines
:param str weight: Font weight
:param str color: Font color
@@ -663,6 +674,7 @@ class TextComponent(FlexComponent):
self.align = align
self.gravity = gravity
self.wrap = wrap
+ self.line_spacing = line_spacing
self.max_lines = max_lines
self.weight = weight
self.color = color
@@ -674,3 +686,45 @@ class TextComponent(FlexComponent):
self.contents = [self.get_or_new_from_json_dict(it, SpanComponent) for it in contents]
else:
self.contents = None
+
+
+class VideoComponent(FlexComponent):
+ """VideoComponent.
+
+ https://developers.line.biz/en/reference/messaging-api/#f-video
+
+ This component renders a video.
+ """
+
+ def __init__(self,
+ url=None,
+ preview_url=None,
+ alt_content=None,
+ aspect_ratio=None,
+ action=None,
+ **kwargs):
+ r"""__init__ method.
+
+ :param str url: URL of video file
+ :param str preview_url: URL of preview image
+ :param alt_content: Alternative content
+ :type alt_content: :py:class:`linebot.models.flex_message.ImageComponent`
+ | :py:class:`linebot.models.flex_message.BoxComponent`
+ :param float aspect_ratio: Aspect ratio of the video
+ :param action: Action performed when this video is tapped
+ :type action: list[T <= :py:class:`linebot.models.actions.Action`]
+ :param kwargs:
+ """
+ super(VideoComponent, self).__init__(**kwargs)
+
+ self.type = 'video'
+ self.url = url
+ self.preview_url = preview_url
+ self.alt_content = self.get_or_new_from_json_dict_with_types(
+ alt_content, {
+ 'image': ImageComponent,
+ 'box': BoxComponent
+ }
+ )
+ self.aspect_ratio = aspect_ratio
+ self.action = get_action(action)
|
line/line-bot-sdk-python
|
c9876c587c3a819c28ab412f7c971ca40f5a8895
|
diff --git a/tests/models/test_flex_message.py b/tests/models/test_flex_message.py
index 4618002..d25f842 100644
--- a/tests/models/test_flex_message.py
+++ b/tests/models/test_flex_message.py
@@ -30,6 +30,7 @@ from linebot.models import (
FillerComponent,
IconComponent,
SpanComponent,
+ VideoComponent,
URIAction,
LinearGradientBackground,
)
@@ -249,6 +250,25 @@ class TestFlexMessage(SerializeTestCase):
SpanComponent(**arg).as_json_dict()
)
+ def test_video_component(self):
+ arg = {
+ 'type': 'video',
+ 'url': 'https://example.com/video.mp4',
+ "preview_url": "https://example.com/video_preview.jpg",
+ "alt_content": {
+ "type": "image",
+ "size": "full",
+ "aspect_ratio": "20:13",
+ "aspect_mode": "cover",
+ "url": "https://example.com/image.jpg"
+ },
+ "aspect_ratio": "20:13"
+ }
+ self.assertEqual(
+ self.serialize_as_dict(arg, type=self.VIDEO),
+ VideoComponent(**arg).as_json_dict()
+ )
+
def test_text_component(self):
arg = {
'text': 'Hello, World!',
|
Flex Message Update 3 released
News:
https://developers.line.biz/ja/news/2022/03/11/flex-message-update-3-released/
- [ ] [A video can now be displayed in the hero block](https://developers.line.biz/en/news/2022/03/11/flex-message-update-3-released/#video-component-2022-03-11) (see the sketch after this list)
- [ ] [The max width and height of a box component can now be specified](https://developers.line.biz/en/news/2022/03/11/flex-message-update-3-released/#box-component-2022-03-11)
- [ ] [The line spacing in a text component can now be increased](https://developers.line.biz/en/news/2022/03/11/flex-message-update-3-released/#text-component-2022-03-11)
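A sketch of the new video hero using the classes this patch adds (all URLs are placeholders, mirroring the test JSON above):

```python
from linebot.models import (
    BubbleContainer, VideoComponent, ImageComponent, URIAction,
)

bubble = BubbleContainer(
    hero=VideoComponent(
        url='https://example.com/video.mp4',
        preview_url='https://example.com/video_preview.jpg',
        aspect_ratio='20:13',
        # Fallback shown on clients that cannot play the video:
        alt_content=ImageComponent(
            url='https://example.com/image.jpg',
            size='full',
            aspect_ratio='20:13',
            aspect_mode='cover',
        ),
        action=URIAction(uri='https://example.com'),
    ),
)
print(bubble.as_json_dict())
```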
|
0.0
|
c9876c587c3a819c28ab412f7c971ca40f5a8895
|
[
"tests/models/test_flex_message.py::TestFlexMessage::test_block_style",
"tests/models/test_flex_message.py::TestFlexMessage::test_box_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_box_component_with_linear_gradient",
"tests/models/test_flex_message.py::TestFlexMessage::test_bubble_container",
"tests/models/test_flex_message.py::TestFlexMessage::test_bubble_style",
"tests/models/test_flex_message.py::TestFlexMessage::test_button_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_carousel_container",
"tests/models/test_flex_message.py::TestFlexMessage::test_filler_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_flex_message",
"tests/models/test_flex_message.py::TestFlexMessage::test_icon_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_image_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_separator_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_span_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_text_component",
"tests/models/test_flex_message.py::TestFlexMessage::test_video_component"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-14 13:12:59+00:00
|
apache-2.0
| 3,584 |
|
linkedin__shiv-167
|
diff --git a/docs/index.rst b/docs/index.rst
index bb58db3..dca92e0 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -24,7 +24,7 @@ unpack those dependencies to a known location, add them to your interpreter's se
.. note::
"Conventional" zipapps don't include any dependencies, which is what sets shiv apart from the stdlib zipapp module.
-shiv accepts only a few command line parameters of its own, `described here <cli-reference>`_, and any unprocessed parameters are
+shiv accepts only a few command line parameters of its own, `described here <cli-reference.html>`_, and any unprocessed parameters are
delegated to ``pip install``. This allows users to fully leverage all the functionality that pip provides.
For example, if you wanted to create an executable for ``flake8``, you'd specify the required
@@ -94,7 +94,7 @@ Influencing Runtime
-------------------
Whenever you are creating a zipapp with ``shiv``, you can specify a few flags that influence the runtime.
-For example, the `-c/--console-script` and `-e/--entry-point` options already mentioned in this doc.
+For example, the ``-c/--console-script`` and ``-e/--entry-point`` options already mentioned in this doc.
To see the full list of command line options, see this page.
In addition to options that are settable during zipapp creation, there are a number of environment variables
@@ -117,7 +117,7 @@ dropping into an interactive session in the environment of a built cli utility.
SHIV_ENTRY_POINT
^^^^^^^^^^^^^^^^
-.. note:: Same functionality as "-e/--entry-point" at build time
+.. note:: Same functionality as ``-e/--entry-point`` at build time
This should be populated with a setuptools-style callable, e.g. "module.main:main". This will
execute the pyz with whatever callable entry point you supply. Useful for sharing a single pyz
@@ -132,7 +132,7 @@ hotfixes/modifications to the 'cached' dependencies, this will overwrite them.
SHIV_EXTEND_PYTHONPATH
^^^^^^^^^^^^^^^^^^^^^^
-.. note:: Same functionality as "-E/--extend-pythonpath" at build time.
+.. note:: Same functionality as ``-E/--extend-pythonpath`` at build time.
This is a boolean that adds the modules bundled into the zipapp into the ``PYTHONPATH`` environment
variable. It is not needed for most applications, but if an application calls Python as a
diff --git a/src/shiv/bootstrap/__init__.py b/src/shiv/bootstrap/__init__.py
index 46a0454..708ab05 100644
--- a/src/shiv/bootstrap/__init__.py
+++ b/src/shiv/bootstrap/__init__.py
@@ -89,9 +89,17 @@ def cache_path(archive, root_dir, build_id):
"""Returns a ~/.shiv cache directory for unzipping site-packages during bootstrap.
:param ZipFile archive: The zipfile object we are bootstrapping from.
- :param Path root_dir: Optional, the path to a SHIV_ROOT.
+ :param str root_dir: Optional, either a path or environment variable pointing to a SHIV_ROOT.
:param str build_id: The build id generated at zip creation.
"""
+
+ if root_dir:
+
+ if root_dir.startswith("$"):
+ root_dir = os.environ.get(root_dir[1:], root_dir[1:])
+
+ root_dir = Path(root_dir)
+
root = root_dir or Path("~/.shiv").expanduser()
name = Path(archive.filename).resolve().stem
return root / f"{name}_{build_id}"
diff --git a/src/shiv/bootstrap/environment.py b/src/shiv/bootstrap/environment.py
index 1f7c6f0..9cce42e 100644
--- a/src/shiv/bootstrap/environment.py
+++ b/src/shiv/bootstrap/environment.py
@@ -5,8 +5,6 @@ overrides defined at runtime (via environment variables).
import json
import os
-from pathlib import Path
-
def str_bool(v):
if not isinstance(v, bool):
@@ -38,6 +36,7 @@ class Environment:
reproducible=False,
script=None,
preamble=None,
+ root=None,
):
self.always_write_cache = always_write_cache
self.build_id = build_id
@@ -53,6 +52,7 @@ class Environment:
self._entry_point = entry_point
self._compile_pyc = compile_pyc
self._extend_pythonpath = extend_pythonpath
+ self._root = root
@classmethod
def from_json(cls, json_data):
@@ -74,8 +74,8 @@ class Environment:
@property
def root(self):
- root = os.environ.get(self.ROOT)
- return Path(root) if root is not None else None
+ root = os.environ.get(self.ROOT, self._root)
+ return root
@property
def force_extract(self):
diff --git a/src/shiv/cli.py b/src/shiv/cli.py
index 8273395..f92f0ef 100644
--- a/src/shiv/cli.py
+++ b/src/shiv/cli.py
@@ -168,6 +168,7 @@ def copytree(src: Path, dst: Path) -> None:
"but before invoking your entry point."
),
)
[email protected]("--root", type=click.Path(), help="Override the 'root' path (default is ~/.shiv).")
@click.argument("pip_args", nargs=-1, type=click.UNPROCESSED)
def main(
output_file: str,
@@ -181,6 +182,7 @@ def main(
reproducible: bool,
no_modify: bool,
preamble: Optional[str],
+ root: Optional[str],
pip_args: List[str],
) -> None:
"""
@@ -258,6 +260,7 @@ def main(
no_modify=no_modify,
reproducible=reproducible,
preamble=Path(preamble).name if preamble else None,
+ root=root,
)
if no_modify:
diff --git a/src/shiv/constants.py b/src/shiv/constants.py
index f126101..36fee4f 100644
--- a/src/shiv/constants.py
+++ b/src/shiv/constants.py
@@ -17,7 +17,7 @@ DISALLOWED_ARGS: Dict[Tuple[str, ...], str] = {
"--editable",
): "Editable installs don't actually install via pip (they are just linked), so they are not allowed.",
("-d", "--download"): "Shiv needs to actually perform an install, not merely a download.",
- ("--user", "--root", "--prefix"): "Which conflicts with Shiv's internal use of '--target'.",
+ ("--user", "--prefix"): "Which conflicts with Shiv's internal use of '--target'.",
}
SOURCE_DATE_EPOCH_ENV = "SOURCE_DATE_EPOCH"
|
linkedin/shiv
|
502731dcaae4cc3305848c2ac05f931bee544318
|
diff --git a/test/conftest.py b/test/conftest.py
index c79dd0f..7a68895 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,3 +1,6 @@
+import os
+
+from contextlib import contextmanager
from pathlib import Path
import pytest
@@ -31,3 +34,15 @@ def env():
extend_pythonpath=False,
shiv_version="0.0.1",
)
+
+
[email protected]
+def env_var():
+
+ @contextmanager
+ def _env_var(key, value):
+ os.environ[key] = value
+ yield
+ del os.environ[key]
+
+ return _env_var
diff --git a/test/test_bootstrap.py b/test/test_bootstrap.py
index 6fb5111..54df208 100644
--- a/test/test_bootstrap.py
+++ b/test/test_bootstrap.py
@@ -2,7 +2,6 @@ import os
import sys
from code import interact
-from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from site import addsitedir
@@ -26,13 +25,6 @@ from shiv.bootstrap.filelock import FileLock
from shiv.pip import install
-@contextmanager
-def env_var(key, value):
- os.environ[key] = value
- yield
- del os.environ[key]
-
-
class TestBootstrap:
def test_import_string(self):
assert import_string("site.addsitedir") == addsitedir
@@ -68,12 +60,15 @@ class TestBootstrap:
with current_zipfile() as zipfile:
assert not zipfile
- def test_cache_path(self):
+ def test_cache_path(self, env_var):
mock_zip = mock.MagicMock(spec=ZipFile)
mock_zip.filename = "test"
uuid = str(uuid4())
- assert cache_path(mock_zip, Path.cwd(), uuid) == Path.cwd() / f"test_{uuid}"
+ assert cache_path(mock_zip, 'foo', uuid) == Path("foo", f"test_{uuid}")
+
+ with env_var("FOO", "foo"):
+ assert cache_path(mock_zip, '$FOO', uuid) == Path("foo", f"test_{uuid}")
def test_first_sitedir_index(self):
with mock.patch.object(sys, "path", ["site-packages", "dir", "dir", "dir"]):
@@ -124,7 +119,7 @@ class TestBootstrap:
class TestEnvironment:
- def test_overrides(self):
+ def test_overrides(self, env_var):
now = str(datetime.now())
version = "0.0.1"
env = Environment(now, version)
@@ -142,7 +137,7 @@ class TestEnvironment:
assert env.root is None
with env_var("SHIV_ROOT", "tmp"):
- assert env.root == Path("tmp")
+ assert env.root == "tmp"
assert env.force_extract is False
with env_var("SHIV_FORCE_EXTRACT", "1"):
diff --git a/test/test_cli.py b/test/test_cli.py
index 662a1ce..b5acf08 100644
--- a/test/test_cli.py
+++ b/test/test_cli.py
@@ -346,3 +346,54 @@ class TestCLI:
assert proc.returncode == 0
assert proc.stdout.decode().splitlines() == ["hello from preamble", "hello world"]
+
+ def test_alternate_root(self, runner, package_location, tmp_path):
+ """Test that the --root argument properly sets the extraction root."""
+
+ output_file = tmp_path / "test.pyz"
+ shiv_root = tmp_path / "root"
+ result = runner(
+ ["-e", "hello:main", "--root", str(shiv_root), "-o", str(output_file), str(package_location)]
+ )
+
+ # check that the command successfully completed
+ assert result.exit_code == 0
+
+ # ensure the created file actually exists
+ assert output_file.exists()
+
+ # now run the produced zipapp
+ proc = subprocess.run(
+ [str(output_file)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ,
+ )
+
+ assert proc.returncode == 0
+ assert "hello" in proc.stdout.decode()
+ assert shiv_root.exists()
+
+ def test_alternate_root_environment_variable(self, runner, package_location, tmp_path, env_var):
+ """Test that the --root argument works with environment variables."""
+
+ output_file = tmp_path / "test.pyz"
+ shiv_root_var = "NEW_ROOT"
+ shiv_root_path = tmp_path / 'new_root'
+ result = runner(
+ ["-e", "hello:main", "--root", "$" + shiv_root_var, "-o", str(output_file), str(package_location)]
+ )
+
+ with env_var(shiv_root_var, str(shiv_root_path)):
+
+ # check that the command successfully completed
+ assert result.exit_code == 0
+
+ # ensure the created file actually exists
+ assert output_file.exists()
+
+ # now run the produced zipapp
+ proc = subprocess.run(
+ [str(output_file)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ,
+ )
+
+ assert proc.returncode == 0
+ assert "hello" in proc.stdout.decode()
+ assert shiv_root_path.exists()
|
Set default $SHIV_HOME as CWD while packaging?
Is there a way to set the default SHIV_HOME path to the current working directory?
Recently I ran an app.pyz packaged with shiv, and it raised an error because the HOME path is missing on Hadoop data nodes.
So could I set the default shiv cache folder path to ./_shiv_cache instead of ~/.shiv, without any environment variable?
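For reference, the `--root` handling added in the patch above reduces to the following sketch, so both `--root ./_shiv_cache` and `--root '$SOME_VAR'` would work (`resolve_root` is a hypothetical name used for illustration; the real logic lives in `cache_path`):
```python
import os
from pathlib import Path

def resolve_root(root_dir):
    """Resolve a --root value that may be a literal path or "$ENV_VAR"."""
    if root_dir:
        if root_dir.startswith("$"):
            # "$SOME_VAR" -> the value of SOME_VAR, falling back to "SOME_VAR"
            root_dir = os.environ.get(root_dir[1:], root_dir[1:])
        root_dir = Path(root_dir)
    # no override given: fall back to the historical default
    return root_dir or Path("~/.shiv").expanduser()
```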
|
0.0
|
502731dcaae4cc3305848c2ac05f931bee544318
|
[
"test/test_bootstrap.py::TestBootstrap::test_cache_path",
"test/test_bootstrap.py::TestEnvironment::test_overrides",
"test/test_cli.py::TestCLI::test_alternate_root",
"test/test_cli.py::TestCLI::test_alternate_root_environment_variable"
] |
[
"test/test_bootstrap.py::TestBootstrap::test_import_string",
"test/test_bootstrap.py::TestBootstrap::test_is_zipfile",
"test/test_bootstrap.py::TestBootstrap::test_argv0_is_not_zipfile",
"test/test_bootstrap.py::TestBootstrap::test_first_sitedir_index",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-False-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-False-True]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-True-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-True-True]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-False-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-False-True]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-True-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-True-True]",
"test/test_bootstrap.py::TestBootstrap::test_extend_path[additional_paths0]",
"test/test_bootstrap.py::TestBootstrap::test_extend_path[additional_paths1]",
"test/test_bootstrap.py::TestBootstrap::test_extend_path_existing_pythonpath",
"test/test_bootstrap.py::TestEnvironment::test_roundtrip",
"test/test_bootstrap.py::TestEnvironment::test_lock",
"test/test_bootstrap.py::TestEnvironment::test_ensure_no_modify",
"test/test_cli.py::TestCLI::test_find_entry_point",
"test/test_cli.py::TestCLI::test_find_entry_point_two_points",
"test/test_cli.py::TestCLI::test_console_script_exists",
"test/test_cli.py::TestCLI::test_no_args",
"test/test_cli.py::TestCLI::test_no_outfile",
"test/test_cli.py::TestCLI::test_find_interpreter",
"test/test_cli.py::TestCLI::test_find_interpreter_false",
"test/test_cli.py::TestCLI::test_disallowed_args[-t]",
"test/test_cli.py::TestCLI::test_disallowed_args[--target]",
"test/test_cli.py::TestCLI::test_disallowed_args[--editable]",
"test/test_cli.py::TestCLI::test_disallowed_args[-d]",
"test/test_cli.py::TestCLI::test_disallowed_args[--download]",
"test/test_cli.py::TestCLI::test_disallowed_args[--user]",
"test/test_cli.py::TestCLI::test_disallowed_args[--prefix]",
"test/test_cli.py::TestCLI::test_hello_world[yes-compile_option0]",
"test/test_cli.py::TestCLI::test_hello_world[yes-compile_option1]",
"test/test_cli.py::TestCLI::test_hello_world[no-compile_option0]",
"test/test_cli.py::TestCLI::test_hello_world[no-compile_option1]",
"test/test_cli.py::TestCLI::test_extend_pythonpath[extend_path0]",
"test/test_cli.py::TestCLI::test_extend_pythonpath[extend_path1]",
"test/test_cli.py::TestCLI::test_extend_pythonpath[extend_path2]",
"test/test_cli.py::TestCLI::test_multiple_site_packages",
"test/test_cli.py::TestCLI::test_no_entrypoint",
"test/test_cli.py::TestCLI::test_results_are_binary_identical_with_env_and_build_id",
"test/test_cli.py::TestCLI::test_preamble[preamble.py-#!/usr/bin/env",
"test/test_cli.py::TestCLI::test_preamble[preamble.sh-#!/bin/sh\\necho",
"test/test_cli.py::TestCLI::test_preamble_no_pip"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-16 17:08:53+00:00
|
bsd-2-clause
| 3,585 |
|
linkedin__shiv-237
|
diff --git a/setup.cfg b/setup.cfg
index 36d394a..f0e3dca 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -24,7 +24,7 @@ sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
[metadata]
name = shiv
-version = 1.0.3
+version = 1.0.4
description = A command line utility for building fully self contained Python zipapps.
long_description = file: README.md
long_description_content_type = text/markdown
diff --git a/src/shiv/builder.py b/src/shiv/builder.py
index e4c14aa..67a7753 100644
--- a/src/shiv/builder.py
+++ b/src/shiv/builder.py
@@ -12,9 +12,10 @@ import zipapp
import zipfile
from datetime import datetime, timezone
+from itertools import chain
from pathlib import Path
from stat import S_IFMT, S_IMODE, S_IXGRP, S_IXOTH, S_IXUSR
-from typing import IO, Any, List, Optional, Tuple
+from typing import Generator, IO, Any, List, Optional, Tuple
from . import bootstrap
from .bootstrap.environment import Environment
@@ -69,6 +70,15 @@ def write_to_zipapp(
archive.writestr(zinfo, data)
+def rglob_follow_symlinks(path: Path, glob: str) -> Generator[Path, None, None]:
+ """Path.rglob extended to follow symlinks, while we wait for Python 3.13."""
+ for p in path.rglob('*'):
+ if p.is_symlink() and p.is_dir():
+ yield from chain([p], rglob_follow_symlinks(p, glob))
+ else:
+ yield p
+
+
def create_archive(
sources: List[Path], target: Path, interpreter: str, main: str, env: Environment, compressed: bool = True
) -> None:
@@ -110,7 +120,11 @@ def create_archive(
# Glob is known to return results in non-deterministic order.
# We need to sort them by in-archive paths to ensure
# that archive contents are reproducible.
- for path in sorted(source.rglob("*"), key=str):
+ #
+ # NOTE: https://github.com/linkedin/shiv/issues/236
+ # this special rglob function can be replaced with "rglob('*', follow_symlinks=True)"
+ # when Python 3.13 becomes the lowest supported version
+ for path in sorted(rglob_follow_symlinks(source, "*"), key=str):
# Skip compiled files and directories (as they are not required to be present in the zip).
if path.suffix == ".pyc" or path.is_dir():
|
linkedin/shiv
|
eeec71f31a08203cffd184602eb6d10e30634126
|
diff --git a/test/test_builder.py b/test/test_builder.py
index 4d8a357..ec5af35 100644
--- a/test/test_builder.py
+++ b/test/test_builder.py
@@ -9,7 +9,7 @@ from zipapp import ZipAppError
import pytest
-from shiv.builder import create_archive, write_file_prefix
+from shiv.builder import create_archive, rglob_follow_symlinks, write_file_prefix
UGOX = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
@@ -39,6 +39,16 @@ class TestBuilder:
with pytest.raises(SystemExit):
tmp_write_prefix(f"/{'c' * 200}/python")
+ def test_rglob_follow_symlinks(self, tmp_path):
+ real_dir = tmp_path / 'real_dir'
+ real_dir.mkdir()
+ real_file = real_dir / 'real_file'
+ real_file.touch()
+ sym_dir = tmp_path / 'sym_dir'
+ sym_dir.symlink_to(real_dir)
+ sym_file = sym_dir / real_file.name
+ assert sorted(rglob_follow_symlinks(tmp_path, '*'), key=str) == [real_dir, real_file, sym_dir, sym_file]
+
def test_create_archive(self, sp, env):
with tempfile.TemporaryDirectory() as tmpdir:
target = Path(tmpdir, "test.zip")
|
Environments created with symlinked folders (by PDM with cache=True) are not correctly packed
Currently shiv uses `rglob('*')` to find all files in the site-packages directory; rglob does not follow symlinks (https://github.com/python/cpython/issues/77609). PDM uses folder symlinks when its cache is enabled, to optimize performance and save space when managing multiple environments. Such environments currently cannot be used to build a shiv zipapp, because shiv will not pack folders that are symlinks.
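A minimal repro of the premise (directory names are arbitrary):
```python
import tempfile
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
(tmp / "real_dir").mkdir()
(tmp / "real_dir" / "real_file").touch()
(tmp / "sym_dir").symlink_to(tmp / "real_dir")

# "sym_dir" itself is yielded, but "real_file" beneath it is not,
# so anything installed behind the symlink is silently skipped.
assert sorted(p.name for p in tmp.rglob("*")) == ["real_dir", "real_file", "sym_dir"]
```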
|
0.0
|
eeec71f31a08203cffd184602eb6d10e30634126
|
[
"test/test_builder.py::TestBuilder::test_file_prefix[/usr/bin/python-#!/usr/bin/python\\n]",
"test/test_builder.py::TestBuilder::test_file_prefix[/usr/bin/env",
"test/test_builder.py::TestBuilder::test_file_prefix[/some/other/path/python",
"test/test_builder.py::TestBuilder::test_binprm_error",
"test/test_builder.py::TestBuilder::test_rglob_follow_symlinks",
"test/test_builder.py::TestBuilder::test_create_archive",
"test/test_builder.py::TestBuilder::test_archive_permissions"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-07 00:05:30+00:00
|
bsd-2-clause
| 3,586 |
|
linkedin__shiv-27
|
diff --git a/src/shiv/constants.py b/src/shiv/constants.py
index 1b82477..33d742b 100644
--- a/src/shiv/constants.py
+++ b/src/shiv/constants.py
@@ -18,3 +18,4 @@ BLACKLISTED_ARGS: Dict[Tuple[str, ...], str] = {
("-d", "--download"): "Shiv needs to actually perform an install, not merely a download.",
("--user", "--root", "--prefix"): "Which conflicts with Shiv's internal use of '--target'.",
}
+DISTUTILS_CFG_NO_PREFIX = "[install]\nprefix="
diff --git a/src/shiv/pip.py b/src/shiv/pip.py
index 3be1339..9deab31 100644
--- a/src/shiv/pip.py
+++ b/src/shiv/pip.py
@@ -3,9 +3,10 @@ import os
import subprocess
import sys
+from pathlib import Path
from typing import Generator, List
-from .constants import PIP_REQUIRE_VIRTUALENV, PIP_INSTALL_ERROR
+from .constants import PIP_REQUIRE_VIRTUALENV, PIP_INSTALL_ERROR, DISTUTILS_CFG_NO_PREFIX
@contextlib.contextmanager
@@ -17,12 +18,28 @@ def clean_pip_env() -> Generator[None, None, None]:
"""
require_venv = os.environ.pop(PIP_REQUIRE_VIRTUALENV, None)
+ # based on
+ # https://github.com/python/cpython/blob/8cf4b34b3665b8bb39ea7111e6b5c3410899d3e4/Lib/distutils/dist.py#L333-L363
+ pydistutils = Path.home() / (".pydistutils.cfg" if os.name == "posix" else "pydistutils.cfg")
+ pydistutils_already_existed = pydistutils.exists()
+
+ if not pydistutils_already_existed:
+ # distutils doesn't support using --target if there's a config file
+ # specifying --prefix. Homebrew's Pythons include a distutils.cfg that
+ # breaks `pip install --target` with any non-wheel packages. We can
+ # work around that by creating a temporary ~/.pydistutils.cfg
+ # specifying an empty prefix.
+ pydistutils.write_text(DISTUTILS_CFG_NO_PREFIX)
+
try:
yield
finally:
if require_venv is not None:
os.environ[PIP_REQUIRE_VIRTUALENV] = require_venv
+ if not pydistutils_already_existed:
+ # remove the temporary ~/.pydistutils.cfg
+ pydistutils.unlink()
def install(interpreter_path: str, args: List[str]) -> None:
|
linkedin/shiv
|
3c5c81fdd2c060e540e76c5df52424fc92980f37
|
diff --git a/test/conftest.py b/test/conftest.py
index 9229abb..3454d5f 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,11 +1,23 @@
+import os
+
from pathlib import Path
import pytest
[email protected]
-def package_location():
- return Path(__file__).absolute().parent / 'package'
[email protected](params=[True, False], ids=['.', 'absolute-path'])
+def package_location(request):
+ package_location = Path(__file__).absolute().parent / 'package'
+
+ if request.param is True:
+ # test building from the current directory
+ cwd = os.getcwd()
+ os.chdir(package_location)
+ yield Path('.')
+ os.chdir(cwd)
+ else:
+ # test building an absolute path
+ yield package_location
@pytest.fixture
diff --git a/test/test_cli.py b/test/test_cli.py
index ad2f6bb..a38017c 100644
--- a/test/test_cli.py
+++ b/test/test_cli.py
@@ -45,11 +45,20 @@ class TestCLI:
# assert we got the correct reason
assert strip_header(result.output) == DISALLOWED_PIP_ARGS.format(arg=arg, reason=reason)
- def test_hello_world(self, tmpdir, runner, package_location):
+ # /usr/local/bin/python3.6 is a test for https://github.com/linkedin/shiv/issues/16
+ @pytest.mark.parametrize('interpreter', [None, Path('/usr/local/bin/python3.6')])
+ def test_hello_world(self, tmpdir, runner, package_location, interpreter):
+ if interpreter is not None and not interpreter.exists():
+ pytest.skip(f'Interpreter "{interpreter}" does not exist')
+
with tempfile.TemporaryDirectory(dir=tmpdir) as tmpdir:
output_file = Path(tmpdir, 'test.pyz')
- result = runner(['-e', 'hello:main', '-o', output_file.as_posix(), package_location.as_posix()])
+ args = ['-e', 'hello:main', '-o', output_file.as_posix(), package_location.as_posix()]
+ if interpreter is not None:
+ args = ['-p', interpreter.as_posix()] + args
+
+ result = runner(args)
# check that the command successfully completed
assert result.exit_code == 0
diff --git a/test/test_pip.py b/test/test_pip.py
new file mode 100644
index 0000000..aba1721
--- /dev/null
+++ b/test/test_pip.py
@@ -0,0 +1,48 @@
+import os
+
+from pathlib import Path
+
+import pytest
+
+from shiv.constants import PIP_REQUIRE_VIRTUALENV, DISTUTILS_CFG_NO_PREFIX
+from shiv.pip import clean_pip_env
+
+
[email protected]("pydistutils_path, os_name", [
+ ("pydistutils.cfg", "nt"),
+ (".pydistutils.cfg", "posix"),
+ (None, os.name),
+])
+def test_clean_pip_env(monkeypatch, tmpdir, pydistutils_path, os_name):
+ home = tmpdir.join("home").ensure(dir=True)
+ monkeypatch.setenv("HOME", home)
+
+ # patch os.name so distutils will use `pydistutils_path` for its config
+ monkeypatch.setattr(os, 'name', os.name)
+
+ if pydistutils_path:
+ pydistutils = Path.home() / pydistutils_path
+ pydistutils_contents = "foobar"
+ pydistutils.write_text(pydistutils_contents)
+ else:
+ pydistutils = Path.home() / ".pydistutils.cfg"
+ pydistutils_contents = None
+
+ before_env_var = "foo"
+ monkeypatch.setenv(PIP_REQUIRE_VIRTUALENV, before_env_var)
+
+ with clean_pip_env():
+ assert PIP_REQUIRE_VIRTUALENV not in os.environ
+
+ if not pydistutils_path:
+ # ~/.pydistutils.cfg was created
+ assert pydistutils.read_text() == DISTUTILS_CFG_NO_PREFIX
+ else:
+ # ~/.pydistutils.cfg was not modified
+ assert pydistutils.read_text() == pydistutils_contents
+
+ assert os.environ.get(PIP_REQUIRE_VIRTUALENV) == before_env_var
+
+ # If a temporary ~/.pydistutils.cfg was created, it was deleted. If
+ # ~/.pydistutils.cfg already existed, it still exists.
+ assert pydistutils.exists() == bool(pydistutils_path)
|
distutils.errors.DistutilsOptionError: must supply either home or prefix/exec-prefix -- not both
With shiv 0.0.14 from PyPI:
```console
$ shiv aws -c aws -p $(which python3.6) -o blergh
shiv! 🔪
Collecting aws
Collecting fabric>=1.6 (from aws)
Collecting boto (from aws)
Using cached https://files.pythonhosted.org/packages/bd/b7/a88a67002b1185ed9a8e8a6ef15266728c2361fcb4f1d02ea331e4c7741d/boto-2.48.0-py2.py3-none-any.whl
Collecting prettytable>=0.7 (from aws)
Collecting paramiko<3.0,>=1.10 (from fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/3e/db/cb7b6656e0e7387637ce850689084dc0b94b44df31cc52e5fc5c2c4fd2c1/paramiko-2.4.1-py2.py3-none-any.whl
Collecting pyasn1>=0.1.7 (from paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/ba/fe/02e3e2ee243966b143657fb8bd6bc97595841163b6d8c26820944acaec4d/pyasn1-0.4.2-py2.py3-none-any.whl
Collecting pynacl>=1.0.1 (from paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/74/8e/a6c0d340972d9e2f1a405aaa3f2460950b4c0337f92db0291a4355974529/PyNaCl-1.2.1-cp36-cp36m-macosx_10_6_intel.whl
Collecting bcrypt>=3.1.3 (from paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/7e/59/d48fd712941da1a5d6490964a37bb3de2e526965b6766273f6a7049ee590/bcrypt-3.1.4-cp36-cp36m-macosx_10_6_intel.whl
Collecting cryptography>=1.5 (from paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/40/87/acdcf84ce6d25a7db1c113f4b9b614fd8d707b7ab56fbf17cf18cd26a627/cryptography-2.2.2-cp34-abi3-macosx_10_6_intel.whl
Collecting cffi>=1.4.1 (from pynacl>=1.0.1->paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/8e/be/40b1bc2c3221acdefeb9dab6773d43cda7543ed0d8c8df8768f05af2d01e/cffi-1.11.5-cp36-cp36m-macosx_10_6_intel.whl
Collecting six (from pynacl>=1.0.1->paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl
Collecting idna>=2.1 (from cryptography>=1.5->paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/27/cc/6dd9a3869f15c2edfab863b992838277279ce92663d334df9ecf5106f5c6/idna-2.6-py2.py3-none-any.whl
Collecting asn1crypto>=0.21.0 (from cryptography>=1.5->paramiko<3.0,>=1.10->fabric>=1.6->aws)
Using cached https://files.pythonhosted.org/packages/ea/cd/35485615f45f30a510576f1a56d1e0a7ad7bd8ab5ed7cdc600ef7cd06222/asn1crypto-0.24.0-py2.py3-none-any.whl
Collecting pycparser (from cffi>=1.4.1->pynacl>=1.0.1->paramiko<3.0,>=1.10->fabric>=1.6->aws)
Installing collected packages: pyasn1, pycparser, cffi, six, pynacl, bcrypt, idna, asn1crypto, cryptography, paramiko, fabric, boto, prettytable, aws
Exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/pip/_internal/basecommand.py", line 228, in main
status = self.run(options, args)
File "/usr/local/lib/python3.6/site-packages/pip/_internal/commands/install.py", line 335, in run
use_user_site=options.use_user_site,
File "/usr/local/lib/python3.6/site-packages/pip/_internal/req/__init__.py", line 49, in install_given_reqs
**kwargs
File "/usr/local/lib/python3.6/site-packages/pip/_internal/req/req_install.py", line 748, in install
use_user_site=use_user_site, pycompile=pycompile,
File "/usr/local/lib/python3.6/site-packages/pip/_internal/req/req_install.py", line 961, in move_wheel_files
warn_script_location=warn_script_location,
File "/usr/local/lib/python3.6/site-packages/pip/_internal/wheel.py", line 216, in move_wheel_files
prefix=prefix,
File "/usr/local/lib/python3.6/site-packages/pip/_internal/locations.py", line 165, in distutils_scheme
i.finalize_options()
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/distutils/command/install.py", line 248, in finalize_options
"must supply either home or prefix/exec-prefix -- not both")
distutils.errors.DistutilsOptionError: must supply either home or prefix/exec-prefix -- not both
Pip install failed!
```
Here are the packages installed system-wide alongside shiv:
```
click==6.7
importlib-resources==0.5
pip==10.0.1
setuptools==39.1.0
shiv==0.0.14
wheel==0.31.0
```
OS X, Python 3.6.5 from Homebrew.
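The patch above works around this by noting that Homebrew's Python ships a `distutils.cfg` that sets a prefix, which conflicts with pip's internal use of `--target`; writing a temporary per-user config with an empty prefix masks it for the duration of the install. A sketch of the idea:
```python
import os
from pathlib import Path

# Per-user distutils config (".pydistutils.cfg" on POSIX, "pydistutils.cfg" otherwise).
cfg = Path.home() / (".pydistutils.cfg" if os.name == "posix" else "pydistutils.cfg")

if not cfg.exists():
    cfg.write_text("[install]\nprefix=")  # an empty prefix overrides Homebrew's distutils.cfg
    # ... run `pip install --target ...` here ...
    cfg.unlink()  # remove the temporary config afterwards
```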
|
0.0
|
3c5c81fdd2c060e540e76c5df52424fc92980f37
|
[
"test/test_cli.py::TestCLI::test_no_args",
"test/test_cli.py::TestCLI::test_no_outfile",
"test/test_cli.py::TestCLI::test_blacklisted_args[-t]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--target]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--editable]",
"test/test_cli.py::TestCLI::test_blacklisted_args[-d]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--download]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--user]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--root]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--prefix]",
"test/test_cli.py::TestCLI::test_hello_world[.-None]",
"test/test_cli.py::TestCLI::test_hello_world[absolute-path-None]",
"test/test_cli.py::TestCLI::test_interpreter",
"test/test_cli.py::TestCLI::test_real_interpreter",
"test/test_pip.py::test_clean_pip_env[pydistutils.cfg-nt]",
"test/test_pip.py::test_clean_pip_env[.pydistutils.cfg-posix]",
"test/test_pip.py::test_clean_pip_env[None-posix]"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-05-09 17:44:36+00:00
|
bsd-2-clause
| 3,587 |
|
linkedin__shiv-52
|
diff --git a/src/shiv/bootstrap/__init__.py b/src/shiv/bootstrap/__init__.py
index 82169bf..02488ba 100644
--- a/src/shiv/bootstrap/__init__.py
+++ b/src/shiv/bootstrap/__init__.py
@@ -83,6 +83,12 @@ def extract_site_packages(archive, target_path):
shutil.move(str(target_path_tmp), str(target_path))
+def _first_sitedir_index():
+ for index, part in enumerate(sys.path):
+ if Path(part).stem == 'site-packages':
+ return index
+
+
def bootstrap():
"""Actually bootstrap our shiv environment."""
@@ -99,18 +105,18 @@ def bootstrap():
if not site_packages.exists() or env.force_extract:
extract_site_packages(archive, site_packages.parent)
- preserved = sys.path[1:]
+ # get sys.path's length
+ length = len(sys.path)
- # truncate the sys.path so our package will be at the start,
- # and take precedence over anything else (eg: dist-packages)
- sys.path = sys.path[0:1]
+ # Find the first instance of an existing site-packages on sys.path
+ index = _first_sitedir_index() or length
# append site-packages using the stdlib blessed way of extending path
# so as to handle .pth files correctly
site.addsitedir(site_packages)
- # restore the previous sys.path entries after our package
- sys.path.extend(preserved)
+ # reorder to place our site-packages before any others found
+ sys.path = sys.path[:index] + sys.path[length:] + sys.path[index:length]
# do entry point import and call
if env.entry_point is not None and env.interpreter is None:
|
linkedin/shiv
|
6d00b754852f4f3e79d494d7577a029ecb72c1a1
|
diff --git a/test/test_bootstrap.py b/test/test_bootstrap.py
index 5ece54f..1f77034 100644
--- a/test/test_bootstrap.py
+++ b/test/test_bootstrap.py
@@ -12,7 +12,7 @@ import pytest
from unittest import mock
-from shiv.bootstrap import import_string, current_zipfile, cache_path
+from shiv.bootstrap import import_string, current_zipfile, cache_path, _first_sitedir_index
from shiv.bootstrap.environment import Environment
@@ -61,6 +61,13 @@ class TestBootstrap:
assert cache_path(mock_zip, Path.cwd(), uuid) == Path.cwd() / f"test_{uuid}"
+ def test_first_sitedir_index(self):
+ with mock.patch.object(sys, 'path', ['site-packages', 'dir', 'dir', 'dir']):
+ assert _first_sitedir_index() == 0
+
+ with mock.patch.object(sys, 'path', []):
+ assert _first_sitedir_index() is None
+
class TestEnvironment:
def test_overrides(self):
|
The change to sys.path potentially overrides standard library modules.
The change in #48 makes sure that shiv-packed content (packages) takes precedence over the vendored Python distribution packages or anything installed into the system Python's site-packages. Fair enough. We want to run what we packed into the shiv.
However, it can introduce a subtle bug. Example:
* an old deprecated 3rd party package such as *uuid* or *argparse* is pulled in transitively
* these packages are unmaintained and have not received any changes (uuid, for example, since Python 2.5)
* the standard library modules have been maintained and added new APIs (uuid did)
* since shiv's site-packages are placed on sys.path **before** the standard library path, an obsolete 3rd party backward-compatibility package (which is not **forward** compatible) will override the corresponding standard library module
* if any other package uses the new APIs from the affected package (e.g., new uuid APIs), the application will break because the old 3rd party package does not have these functions/methods.
I believe this requires changes to what was done in #48 to insert *shiv* site-packages *before* any other site-packages paths, but *after* the standard library path.
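The patch above implements exactly that ordering; condensed to a sketch (the site-packages path is a placeholder):
```python
import site
import sys
from pathlib import Path

def _first_sitedir_index():
    """Index of the first existing site-packages entry on sys.path, if any."""
    for index, part in enumerate(sys.path):
        if Path(part).stem == "site-packages":
            return index

length = len(sys.path)
index = _first_sitedir_index() or length        # no site-packages yet: append at the end
site.addsitedir("/path/to/shiv/site-packages")  # appends, and handles .pth files correctly
# Move the newly appended entries to just before the first pre-existing site-packages,
# i.e. after the standard library but ahead of dist-packages and friends.
sys.path = sys.path[:index] + sys.path[length:] + sys.path[index:length]
```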
|
0.0
|
6d00b754852f4f3e79d494d7577a029ecb72c1a1
|
[
"test/test_bootstrap.py::TestBootstrap::test_various_imports",
"test/test_bootstrap.py::TestBootstrap::test_is_zipfile",
"test/test_bootstrap.py::TestBootstrap::test_argv0_is_not_zipfile",
"test/test_bootstrap.py::TestBootstrap::test_cache_path",
"test/test_bootstrap.py::TestBootstrap::test_first_sitedir_index",
"test/test_bootstrap.py::TestEnvironment::test_overrides",
"test/test_bootstrap.py::TestEnvironment::test_serialize"
] |
[] |
{
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-08-15 21:32:17+00:00
|
bsd-2-clause
| 3,588 |
|
linkedin__shiv-70
|
diff --git a/README.md b/README.md
index 831be3b..656dfb2 100644
--- a/README.md
+++ b/README.md
@@ -69,8 +69,8 @@ pip install shiv
You can even create a pyz _of_ shiv _using_ shiv!
```sh
-python3 -m venv shiv
-source shiv/bin/activate
+python3 -m venv .
+source bin/activate
pip install shiv
shiv -c shiv -o shiv shiv
```
diff --git a/src/shiv/bootstrap/__init__.py b/src/shiv/bootstrap/__init__.py
index 6c2989b..52558a0 100644
--- a/src/shiv/bootstrap/__init__.py
+++ b/src/shiv/bootstrap/__init__.py
@@ -123,7 +123,7 @@ def bootstrap():
mod = import_string(env.entry_point)
try:
sys.exit(mod())
- except TypeError as e:
+ except TypeError:
# catch "<module> is not callable", which is thrown when the entry point's
# callable shares a name with it's parent module
# e.g. "from foo.bar import bar; bar()"
diff --git a/src/shiv/cli.py b/src/shiv/cli.py
index 27010cc..ec97dea 100644
--- a/src/shiv/cli.py
+++ b/src/shiv/cli.py
@@ -26,7 +26,7 @@ from .constants import (
NO_ENTRY_POINT,
)
-__version__ = '0.0.36'
+__version__ = "0.0.36"
# This is the 'knife' emoji
SHIV = u"\U0001F52A"
@@ -42,6 +42,7 @@ def find_entry_point(site_packages: Path, console_script: str) -> str:
:param site_packages: A path to a site-packages directory on disk.
:param console_script: A console_script string.
"""
+
config_parser = ConfigParser()
config_parser.read(site_packages.rglob("entry_points.txt"))
return config_parser["console_scripts"][console_script]
@@ -55,18 +56,52 @@ def copy_bootstrap(bootstrap_target: Path) -> None:
:param bootstrap_target: The temporary directory where we are staging pyz contents.
"""
+
for bootstrap_file in importlib_resources.contents(bootstrap):
if importlib_resources.is_resource(bootstrap, bootstrap_file):
with importlib_resources.path(bootstrap, bootstrap_file) as f:
shutil.copyfile(f.absolute(), bootstrap_target / f.name)
+def _interpreter_path(append_version: bool = False) -> str:
+ """A function to return the path to the current Python interpreter.
+
+ Even when inside a venv, this will return the interpreter the venv was created with.
+
+ """
+
+ base_dir = Path(getattr(sys, "real_prefix", sys.base_prefix)).resolve()
+ sys_exec = Path(sys.executable)
+ name = sys_exec.stem
+ suffix = sys_exec.suffix
+
+ if append_version:
+ name += str(sys.version_info.major)
+
+ name += suffix
+
+ try:
+ return str(next(iter(base_dir.rglob(name))))
+
+ except StopIteration:
+
+ if not append_version:
+ # If we couldn't find an interpreter, it's likely that we looked for
+ # "python" when we should've been looking for "python3"
+ # so we try again with append_version=True
+ return _interpreter_path(append_version=True)
+
+ # If we were still unable to find a real interpreter for some reason
+ # we fallback to the current runtime's interpreter
+ return sys.executable
+
+
@click.command(
context_settings=dict(
help_option_names=["-h", "--help", "--halp"], ignore_unknown_options=True
)
)
[email protected]_option(version=__version__, prog_name='shiv')
[email protected]_option(version=__version__, prog_name="shiv")
@click.option("--entry-point", "-e", default=None, help="The entry point to invoke.")
@click.option(
"--console-script", "-c", default=None, help="The console_script to invoke."
@@ -103,7 +138,8 @@ def main(
Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included!
"""
- quiet = "-q" in pip_args or '--quiet' in pip_args
+
+ quiet = "-q" in pip_args or "--quiet" in pip_args
if not quiet:
click.secho(" shiv! " + SHIV, bold=True)
@@ -158,8 +194,8 @@ def main(
# create the zip
builder.create_archive(
Path(working_path),
- target=Path(output_file),
- interpreter=python or sys.executable,
+ target=Path(output_file).expanduser(),
+ interpreter=python or _interpreter_path(),
main="_bootstrap:bootstrap",
compressed=compressed,
)
|
linkedin/shiv
|
801db3717d3957c0b7e59e9cbd10ac4f714a0dfb
|
diff --git a/test/test_cli.py b/test/test_cli.py
index 30cd78b..3e469cc 100644
--- a/test/test_cli.py
+++ b/test/test_cli.py
@@ -8,7 +8,7 @@ import pytest
from click.testing import CliRunner
-from shiv.cli import main
+from shiv.cli import main, _interpreter_path
from shiv.constants import DISALLOWED_PIP_ARGS, NO_PIP_ARGS_OR_SITE_PACKAGES, NO_OUTFILE, BLACKLISTED_ARGS
@@ -31,6 +31,11 @@ class TestCLI:
assert result.exit_code == 1
assert strip_header(result.output) == NO_OUTFILE
+ def test_find_interpreter(self):
+ interpreter = _interpreter_path()
+ assert Path(interpreter).exists()
+ assert Path(interpreter).is_file()
+
@pytest.mark.parametrize("arg", [arg for tup in BLACKLISTED_ARGS.keys() for arg in tup])
def test_blacklisted_args(self, runner, arg):
result = runner(['-o', 'tmp', arg])
|
The "create a pyz of shiv using shiv" example doesn't work
As written, the example leaves the shiv pyz file with a shebang pointing at the (presumably temporary) virtual environment used to build it.
The example should probably include a `-p "/usr/bin/env python3"` argument to specify an explicit interpreter.
Actually, given that shiv pyz files are self-contained, there's no real benefit to them ever having shebangs that refer to a virtualenv. Maybe it would be worth making the default shebang `/usr/bin/env python3`, or at least locating the "base" Python interpreter for a virtualenv, which can be done as
```python
Path(getattr(sys, 'real_prefix', sys.base_prefix)) / Path(sys.executable).relative_to(sys.prefix)
```
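For example, run from inside a venv this resolves back to the interpreter the venv was created from (the printed path is illustrative):
```python
import sys
from pathlib import Path

# real_prefix covers legacy virtualenv; base_prefix covers venv/pyvenv.
base = Path(getattr(sys, "real_prefix", sys.base_prefix))
real_interpreter = base / Path(sys.executable).relative_to(sys.prefix)
print(real_interpreter)  # e.g. /usr/local/bin/python3 rather than <venv>/bin/python3
```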
|
0.0
|
801db3717d3957c0b7e59e9cbd10ac4f714a0dfb
|
[
"test/test_cli.py::TestCLI::test_no_args",
"test/test_cli.py::TestCLI::test_no_outfile",
"test/test_cli.py::TestCLI::test_find_interpreter",
"test/test_cli.py::TestCLI::test_blacklisted_args[-t]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--target]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--editable]",
"test/test_cli.py::TestCLI::test_blacklisted_args[-d]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--download]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--user]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--root]",
"test/test_cli.py::TestCLI::test_blacklisted_args[--prefix]",
"test/test_cli.py::TestCLI::test_hello_world[.---compile-pyc]",
"test/test_cli.py::TestCLI::test_hello_world[.---no-compile-pyc]",
"test/test_cli.py::TestCLI::test_hello_world[absolute-path---compile-pyc]",
"test/test_cli.py::TestCLI::test_hello_world[absolute-path---no-compile-pyc]"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-25 04:53:00+00:00
|
bsd-2-clause
| 3,589 |
|
linkedin__shiv-94
|
diff --git a/src/shiv/bootstrap/__init__.py b/src/shiv/bootstrap/__init__.py
index 997f288..fc62175 100644
--- a/src/shiv/bootstrap/__init__.py
+++ b/src/shiv/bootstrap/__init__.py
@@ -73,8 +73,13 @@ def extract_site_packages(archive, target_path, compile_pyc, compile_workers=0,
:param ZipFile archive: The zipfile object we are bootstrapping from.
:param Path target_path: The path to extract our zip to.
"""
- target_path_tmp = Path(target_path.parent, target_path.stem + ".tmp")
- lock = Path(target_path.parent, target_path.stem + ".lock")
+ parent = target_path.parent
+ target_path_tmp = Path(parent, target_path.stem + ".tmp")
+ lock = Path(parent, target_path.stem + ".lock")
+
+ # If this is the first time that a pyz is being extracted, we'll need to create the ~/.shiv dir
+ if not parent.exists():
+ parent.mkdir(parents=True, exist_ok=True)
with FileLock(lock):
|
linkedin/shiv
|
7aba2d94b62c7e587ce5e26cf59464a1c9a3b0df
|
diff --git a/test/test_bootstrap.py b/test/test_bootstrap.py
index 95b7c47..10330bd 100644
--- a/test/test_bootstrap.py
+++ b/test/test_bootstrap.py
@@ -76,13 +76,18 @@ class TestBootstrap:
with mock.patch.object(sys, 'path', []):
assert _first_sitedir_index() is None
+ @pytest.mark.parametrize("nested", (False, True))
@pytest.mark.parametrize("compile_pyc", (False, True))
@pytest.mark.parametrize("force", (False, True))
- def test_extract_site_packages(self, tmpdir, zip_location, compile_pyc, force):
+ def test_extract_site_packages(self, tmpdir, zip_location, nested, compile_pyc, force):
zipfile = ZipFile(str(zip_location))
target = Path(tmpdir, "test")
+ if nested:
+ # we want to test for not-yet-created shiv root dirs
+ target = target / "nested" / "root"
+
if force:
# we want to make sure we overwrite if the target exists when using force
target.mkdir(parents=True, exist_ok=True)
|
FileNotFoundError during bootstrapping to acquire_nix lock
Attempting to follow the example given in the readme, using OSX 10.11.6:
```
$ mktmpenv --python=`which python3.6`
$ which python3
/Users/<username>/Virtualenvs/tmp-cf98a678da0c70e/bin/python3
$ python -V
Python 3.6.2
$ python3 -m pip download boto
[...] Saved ./boto-2.49.0-py2.py3-none-any.whl
$ python3 -m pip install shiv
[...]
$ shiv -o boto.pyz --find-links . --no-index boto
[...] Successfully installed boto-2.49.0
$ ./boto.pyz
Traceback (most recent call last):
File "/Users/<username>/.pyenv/versions/3.6.2/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/<username>/.pyenv/versions/3.6.2/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "boto.pyz/__main__.py", line 3, in <module>
File "boto.pyz/_bootstrap/__init__.py", line 127, in bootstrap
File "boto.pyz/_bootstrap/__init__.py", line 79, in extract_site_packages
File "boto.pyz/_bootstrap/filelock.py", line 71, in __enter__
File "boto.pyz/_bootstrap/filelock.py", line 39, in acquire_nix
FileNotFoundError: [Errno 2] No such file or directory: '/Users/<username>/.shiv/boto_cab7037d-ea57-4001-9b01-da4b73096d73.lock'
$ ls -al $HOME/.shiv
ls: /Users/<username>/.shiv: No such file or directory
$ mkdir $HOME/.shiv
$ ./boto.pyz
Python 3.6.2 [...]
>>>
```
It seems like the lock acquisition assumes the directory exists, but I can't see where that's explained in the documentation.
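The patch above fixes this by creating the parent directory before the lock file is touched; a sketch of the relevant step (the target name is illustrative):
```python
from pathlib import Path

target_path = Path.home() / ".shiv" / "boto_build-id"  # illustrative extraction dir
parent = target_path.parent

# First extraction on a fresh machine: ~/.shiv does not exist yet.
if not parent.exists():
    parent.mkdir(parents=True, exist_ok=True)

lock = parent / (target_path.stem + ".lock")  # now safe to create and acquire
```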
|
0.0
|
7aba2d94b62c7e587ce5e26cf59464a1c9a3b0df
|
[
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-False-True]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-True-True]"
] |
[
"test/test_bootstrap.py::TestBootstrap::test_import_string",
"test/test_bootstrap.py::TestBootstrap::test_is_zipfile",
"test/test_bootstrap.py::TestBootstrap::test_argv0_is_not_zipfile",
"test/test_bootstrap.py::TestBootstrap::test_cache_path",
"test/test_bootstrap.py::TestBootstrap::test_first_sitedir_index",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-False-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[False-True-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-False-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-False-True]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-True-False]",
"test/test_bootstrap.py::TestBootstrap::test_extract_site_packages[True-True-True]",
"test/test_bootstrap.py::TestBootstrap::test_extend_path[additional_paths0]",
"test/test_bootstrap.py::TestBootstrap::test_extend_path[additional_paths1]",
"test/test_bootstrap.py::TestEnvironment::test_overrides",
"test/test_bootstrap.py::TestEnvironment::test_roundtrip",
"test/test_bootstrap.py::TestEnvironment::test_lock"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-01-09 18:55:07+00:00
|
bsd-2-clause
| 3,590 |
|
linkml__linkml-1048
|
diff --git a/linkml/_version.py b/linkml/_version.py
new file mode 100644
index 00000000..666640ee
--- /dev/null
+++ b/linkml/_version.py
@@ -0,0 +1,7 @@
+from importlib.metadata import version, PackageNotFoundError
+
+try:
+ __version__ = version("linkml")
+except PackageNotFoundError:
+ # package is not installed
+ __version__ = "0.0.0"
diff --git a/linkml/utils/converter.py b/linkml/utils/converter.py
index b1acd164..75b0a3f6 100644
--- a/linkml/utils/converter.py
+++ b/linkml/utils/converter.py
@@ -1,5 +1,6 @@
import logging
import os
+import sys
from pathlib import Path
from typing import List
@@ -16,6 +17,8 @@ from linkml.utils.datautils import (_get_context, _get_format, _is_xsv,
dumpers_loaders, get_dumper, get_loader,
infer_index_slot, infer_root_class)
+from linkml._version import __version__
+
@click.command()
@click.option("--module", "-m", help="Path to python datamodel module")
@@ -61,6 +64,7 @@ from linkml.utils.datautils import (_get_context, _get_format, _is_xsv,
help="Infer missing slot values",
)
@click.option("--context", "-c", multiple=True, help="path to JSON-LD context file")
[email protected]_option(__version__, "-V", "--version")
@click.argument("input")
def cli(
input,
diff --git a/pyproject.toml b/pyproject.toml
index ff308548..d22658ae 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,6 +42,11 @@ packages = [
{ include = "linkml" }
]
+[tool.poetry-dynamic-versioning]
+enable = true
+vcs = "git"
+style = "pep440"
+
[tool.poetry.scripts]
gen-jsonld-context = "linkml.generators.jsonldcontextgen:cli"
gen-prefix-map = "linkml.generators.prefixmapgen:cli"
@@ -130,5 +135,5 @@ coverage = "^6.4.1"
docs = ["Sphinx", "sphinx-rtd-theme", "sphinxcontrib-mermaid", "furo"]
[build-system]
-requires = ["poetry-core>=1.0.0"]
-build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]
+build-backend = "poetry_dynamic_versioning.backend"
|
linkml/linkml
|
ce98424b34b76488f1f8c2fc955bbe15fb791ab5
|
diff --git a/tests/test_utils/test_converter.py b/tests/test_utils/test_converter.py
index 5666dcce..3b1d1cb8 100644
--- a/tests/test_utils/test_converter.py
+++ b/tests/test_utils/test_converter.py
@@ -58,3 +58,9 @@ class TestCommandLineInterface(unittest.TestCase):
self.assertEqual(p2["age_in_months"], 240)
self.assertEqual(p2["age_category"], "adult")
self.assertEqual(p2["full_name"], "first2 last2")
+
+ def test_version(self):
+ runner = CliRunner(mix_stderr=False)
+ result = runner.invoke(cli, ["--version"])
+ # self.assertEqual(0, result.exit_code) # fails! unclear why result.exit_code is 1 not 0
+ self.assertIn("version", result.stdout)
|
Add `--version` flag
As per https://clig.dev/ we should have a `--version` flag on the generators and other commands. This should display the linkml version (perhaps also the runtime version).
It is not obvious how to implement this:
```python
>>> import linkml
RDFLib Version: 5.0.0
>>> linkml.__version__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: module 'linkml' has no attribute '__version__'
```
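The patch above sidesteps the missing attribute by asking the installed distribution metadata instead; the new `linkml/_version.py` boils down to:
```python
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("linkml")
except PackageNotFoundError:
    __version__ = "0.0.0"  # package is not installed (e.g. running from a source tree)
```
Each command can then expose it via click, e.g. `@click.version_option(__version__, "-V", "--version")`.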
|
0.0
|
ce98424b34b76488f1f8c2fc955bbe15fb791ab5
|
[
"tests/test_utils/test_converter.py::TestCommandLineInterface::test_version"
] |
[
"tests/test_utils/test_converter.py::TestCommandLineInterface::test_help",
"tests/test_utils/test_converter.py::TestCommandLineInterface::test_infer_and_convert"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-26 09:09:43+00:00
|
cc0-1.0
| 3,591 |
|
linkml__linkml-1130
|
diff --git a/linkml/generators/jsonschemagen.py b/linkml/generators/jsonschemagen.py
index fe3c7ff5..788219b1 100644
--- a/linkml/generators/jsonschemagen.py
+++ b/linkml/generators/jsonschemagen.py
@@ -378,17 +378,17 @@ class JsonSchemaGenerator(Generator):
if slot.any_of is not None and len(slot.any_of) > 0:
if not slot_has_range_union:
- prop['anyOf'] = [self.get_subschema_for_slot(s, omit_type) for s in slot.any_of]
+ prop['anyOf'] = [self.get_subschema_for_slot(s, omit_type=True) for s in slot.any_of]
if slot.all_of is not None and len(slot.all_of) > 0:
- prop['allOf'] = [self.get_subschema_for_slot(s, omit_type) for s in slot.all_of]
+ prop['allOf'] = [self.get_subschema_for_slot(s, omit_type=True) for s in slot.all_of]
if slot.exactly_one_of is not None and len(slot.exactly_one_of) > 0:
- prop['oneOf'] = [self.get_subschema_for_slot(s, omit_type) for s in slot.exactly_one_of]
+ prop['oneOf'] = [self.get_subschema_for_slot(s, omit_type=True) for s in slot.exactly_one_of]
if slot.none_of is not None and len(slot.none_of) > 0:
prop['not'] = {
- 'anyOf': [self.get_subschema_for_slot(s, omit_type) for s in slot.none_of]
+ 'anyOf': [self.get_subschema_for_slot(s, omit_type=True) for s in slot.none_of]
}
return prop
|
linkml/linkml
|
b8a1c8a247059d64d24a2e4f2cbe19934de776a7
|
diff --git a/tests/test_generators/input/jsonschema_value_constraints.yaml b/tests/test_generators/input/jsonschema_value_constraints.yaml
new file mode 100644
index 00000000..50806f45
--- /dev/null
+++ b/tests/test_generators/input/jsonschema_value_constraints.yaml
@@ -0,0 +1,132 @@
+schema:
+ id: http://example.org/test_value_constraints
+ name: test_value_constraints
+
+ imports:
+ - https://w3id.org/linkml/types
+
+ slots:
+ string_constant:
+ range: string
+ equals_string: constant
+ string_pattern:
+ range: string
+ pattern: "pat{2,}ern[!]?"
+ integer_constant:
+ range: integer
+ equals_number: 10
+ integer_under:
+ range: integer
+ maximum_value: 10
+ integer_over:
+ range: integer
+ minimum_value: 10
+ string_any_constant:
+ range: string
+ any_of:
+ - equals_string: this
+ - equals_string: that
+ integer_big_or_small:
+ range: integer
+ any_of:
+ - maximum_value: 10
+ - minimum_value: 100
+ string_all_patterns:
+ range: string
+ all_of:
+ - pattern: Michael \S+
+ - pattern: \S+ Jordan
+ string_exactly_one_pattern:
+ range: string
+ exactly_one_of:
+ - pattern: Michael \S+
+ - pattern: \S+ Jordan
+ string_none_of:
+ range: string
+ none_of:
+ - equals_string: purple
+ - equals_string: green
+
+ classes:
+ Test:
+ tree_root: true
+ slots:
+ - string_constant
+ - string_pattern
+ - integer_constant
+ - integer_under
+ - integer_over
+ - string_any_constant
+ - integer_big_or_small
+ - string_all_patterns
+ - string_exactly_one_pattern
+ - string_none_of
+
+data_cases:
+ - data:
+ string_constant: constant
+ - data:
+ string_constant: wrong
+ error_message: "'constant' was expected"
+ - data:
+ string_pattern: pattttttern
+ - data:
+ string_pattern: pattern!
+ - data:
+ string_pattern: patern
+ error_message: does not match
+ - data:
+ integer_constant: 10
+ - data:
+ integer_constant: 9
+ error_message: 10 was expected
+ - data:
+ integer_under: 10
+ - data:
+ integer_under: 11
+ error_message: 11 is greater than the maximum of 10
+ - data:
+ integer_over: 10
+ - data:
+ integer_over: 9
+ error_message: 9 is less than the minimum of 10
+ - data:
+ string_any_constant: this
+ - data:
+ string_any_constant: that
+ - data:
+ string_any_constant: another
+ error_message: Failed validating 'anyOf'
+ - data:
+ integer_big_or_small: 9
+ - data:
+ integer_big_or_small: 101
+ - data:
+ integer_big_or_small: 50
+ error_message: Failed validating 'anyOf'
+ - data:
+ string_all_patterns: Michael Jeffrey Jordan
+ - data:
+ string_all_patterns: Air Jordan
+ error_message: does not match 'Michael \S+'
+ - data:
+ string_all_patterns: Michael J. Fox
+ error_message: does not match '\S+ Jordan'
+ - data:
+ string_exactly_one_pattern: Air Jordan
+ - data:
+ string_exactly_one_pattern: Michael J. Fox
+ - data:
+ string_exactly_one_pattern: Michael Jordan
+ error_message: is valid under each of
+ - data:
+ string_exactly_one_pattern: Scottie Pippen
+ error_message: is not valid under any of
+ - data:
+ string_none_of: orange
+ - data:
+ string_none_of: purple
+ error_message: should not be valid under
+ - data:
+ string_none_of: green
+ error_message: should not be valid under
diff --git a/tests/test_generators/test_jsonschemagen.py b/tests/test_generators/test_jsonschemagen.py
index 85c2a081..afab82e3 100644
--- a/tests/test_generators/test_jsonschemagen.py
+++ b/tests/test_generators/test_jsonschemagen.py
@@ -173,6 +173,26 @@ classes:
self.assertIn("id", json_schema["required"])
+ def test_value_constraints(self):
+ with open(env.input_path("jsonschema_value_constraints.yaml")) as f:
+ test_def = yaml.safe_load(f)
+
+ generator = JsonSchemaGenerator(yaml.dump(test_def["schema"]), stacktrace=True, not_closed=False)
+ json_schema = json.loads(generator.serialize())
+
+ for data_case in test_def.get('data_cases', []):
+ data = data_case['data']
+ with self.subTest(data=data):
+ if 'error_message' in data_case:
+ self.assertRaisesRegex(
+ jsonschema.ValidationError,
+ data_case['error_message'],
+ lambda: jsonschema.validate(data, json_schema),
+ )
+ else:
+ jsonschema.validate(data, json_schema)
+
+
def test_rules(self):
with open(RULES_CASES) as cases_file:
cases = yaml.safe_load(cases_file)
|
JSON Schema generation fails when a slot uses `any_of` to combine multiple value conditions
**Describe the bug**
The `gen-json-schema` command fails when the input schema contains a slot that uses `any_of` (or likely any other boolean operator) to combine multiple value conditions. The error is:
```
AttributeError: 'AnonymousSlotExpression' object has no attribute 'multivalued'
```
**To reproduce**
With the following schema:
```yaml
# test.yaml
id: https://example.com/test/
name: test
imports:
- linkml:types
prefixes:
linkml: https://w3id.org/linkml/
slots:
s:
range: string
any_of:
- equals_string: s1
- equals_string: s2
- equals_string: s3
classes:
Test:
slots:
- s
```
Run:
```shell
gen-json-schema test.yaml
```
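With the fix (passing `omit_type=True` when recursing into `any_of`/`all_of`/`exactly_one_of`/`none_of` subschemas), the anonymous expressions skip the type/`multivalued` derivation that blew up. The slot `s` should then serialize to roughly the following property (a hedged sketch, not verified output; mapping `equals_string` to `const` is inferred from the jsonschema error messages in the accompanying tests):
```python
# Hypothetical shape of the generated JSON Schema property for slot "s".
prop = {
    "type": "string",
    "anyOf": [{"const": "s1"}, {"const": "s2"}, {"const": "s3"}],
}
```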
|
0.0
|
b8a1c8a247059d64d24a2e4f2cbe19934de776a7
|
[
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_value_constraints"
] |
[
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_class_uri_any",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_jsonschema_integration",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_range_unions",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_top_class_identifier",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_type_inheritance"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-23 18:51:01+00:00
|
cc0-1.0
| 3,592 |
|
linkml__linkml-1226
|
diff --git a/linkml/generators/linkmlgen.py b/linkml/generators/linkmlgen.py
index df94c703..a03a58ce 100644
--- a/linkml/generators/linkmlgen.py
+++ b/linkml/generators/linkmlgen.py
@@ -35,8 +35,8 @@ class LinkmlGenerator(Generator):
def __post_init__(self):
# TODO: consider moving up a level
- self.schemaview = SchemaView(self.schema)
super().__post_init__()
+ self.schemaview = SchemaView(self.schema, merge_imports=self.mergeimports)
def materialize_classes(self) -> None:
"""Materialize class slots from schema as attribues, in place"""
|
linkml/linkml
|
c33ab57563d01123d2e0a937239607119f44ed57
|
diff --git a/tests/test_generators/test_linkmlgen.py b/tests/test_generators/test_linkmlgen.py
index eb7fa862..a9dd9488 100644
--- a/tests/test_generators/test_linkmlgen.py
+++ b/tests/test_generators/test_linkmlgen.py
@@ -25,7 +25,7 @@ class LinkMLGenTestCase(unittest.TestCase):
self.assertNotIn("activity", sv.all_classes(imports=False))
self.assertListEqual(["is_living"], list(sv.get_class("Person").attributes.keys()))
- gen = LinkmlGenerator(SCHEMA, format='yaml')
+ gen = LinkmlGenerator(SCHEMA, format='yaml', mergeimports=False)
out = gen.serialize()
# TODO: restore this when imports works for string inputs
#schema2 = YAMLGenerator(out).schema
@@ -39,12 +39,22 @@ class LinkMLGenTestCase(unittest.TestCase):
self.assertEqual(len(yobj["classes"]), len(sv.all_classes(imports=False)))
# self.assertNotIn("attributes", yobj["classes"]["Person"])
# test with material-attributes option
- gen2 = LinkmlGenerator(SCHEMA, format='yaml')
+ gen2 = LinkmlGenerator(SCHEMA, format='yaml', mergeimports=False)
gen2.materialize_attributes = True
out2 = gen2.serialize()
yobj2 = yaml.safe_load(out2)
self.assertEqual(len(yobj2["classes"]), len(sv.all_classes(imports=False)))
self.assertIn("attributes", yobj2["classes"]["Person"])
+ self.assertNotIn("activity", yobj2["classes"])
+ self.assertNotIn("agent", yobj2["classes"])
+
+ # turn on mergeimports option
+ gen3 = LinkmlGenerator(SCHEMA, format="yaml", mergeimports=True)
+ out3 = gen3.serialize()
+ yobj3 = yaml.safe_load(out3)
+ self.assertEqual(len(yobj3["classes"]), len(sv.all_classes(imports=True)))
+ self.assertIn("activity", yobj3["classes"])
+ self.assertIn("agent", yobj3["classes"])
# test that structured patterns are being expanded
# and populated into the pattern property on a class
|
gen-linkml CLI tool does not honor (default) --mergeimports parameter
`gen-linkml` advertises a `--mergeimports` parameter and even states that merging is the default behavior, as opposed to `--no-mergeimports`:
> --mergeimports / --no-mergeimports
Merge imports into source file
(default=mergeimports)
However, the output of running the MIxS schema through `gen-linkml` does not contain the merged imports — only the bare `imports:` list:
```shell
poetry run gen-linkml \
--format yaml \
--mergeimports \
--no-materialize-attributes model/schema/mixs.yaml
```
---
```yaml
name: MIxS
description: Minimal Information about any Sequence Standard
id: http://w3id.org/mixs
imports:
- linkml:types
- checklists
- core
- agriculture
- food_animal_and_animal_feed
- food_farm_environment
- food_food_production_facility
- food_human_foods
- symbiont_associated
- host_associated
- microbial_mat_biofilm
- miscellaneous_natural_or_artificial_environment
- plant_associated
- sediment
- soil
- wastewater_sludge
- water
- human_associated
- human_gut
- human_oral
- human_skin
- human_vaginal
- air
- built_environment
- hydrocarbon_resources_cores
- hydrocarbon_resources_fluids_swabs
prefixes:
linkml:
prefix_prefix: linkml
prefix_reference: https://w3id.org/linkml/
mixs.vocab:
prefix_prefix: mixs.vocab
prefix_reference: https://w3id.org/mixs/vocab/
MIXS:
prefix_prefix: MIXS
prefix_reference: https://w3id.org/mixs/terms/
MIGS:
prefix_prefix: MIGS
prefix_reference: https://w3id.org/mixs/migs/
default_prefix: mixs.vocab
```
|
0.0
|
c33ab57563d01123d2e0a937239607119f44ed57
|
[
"tests/test_generators/test_linkmlgen.py::LinkMLGenTestCase::test_generate"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-12 19:58:34+00:00
|
cc0-1.0
| 3,593 |
|
linkml__linkml-1335
|
diff --git a/linkml/generators/excelgen.py b/linkml/generators/excelgen.py
index fd697895..5fb493f8 100644
--- a/linkml/generators/excelgen.py
+++ b/linkml/generators/excelgen.py
@@ -1,144 +1,173 @@
import os
-from dataclasses import dataclass, field
-from typing import List, Optional, TextIO, Union
+
+from dataclasses import dataclass
+from typing import List
import click
-from linkml_runtime.linkml_model.meta import (ClassDefinition, EnumDefinition,
- PermissibleValue,
- PermissibleValueText,
- SchemaDefinition, SlotDefinition)
-from linkml_runtime.utils.formatutils import camelcase
-from openpyxl import Workbook, load_workbook
-from openpyxl.utils import get_column_letter
-from openpyxl.worksheet.datavalidation import DataValidation
-from linkml._version import __version__
+from linkml_runtime.utils.schemaview import SchemaView
from linkml.utils.generator import Generator, shared_arguments
+from linkml._version import __version__
+from linkml.utils.helpers import convert_to_snake_case
+from openpyxl import Workbook
+from openpyxl.worksheet.worksheet import Worksheet
+from openpyxl.worksheet.datavalidation import DataValidation
+from openpyxl.utils import get_column_letter
@dataclass
class ExcelGenerator(Generator):
- """This class is a blueprint for the generator module that is responsible
- for automatically creating Excel spreadsheets from the LinkML schema.
-
- :param schema: LinkML schema object
- :type schema: class:`SchemaDefinition`
- :param output: LinkML schema specification in YAML format
- :type output: str
- """
-
# ClassVars
- generator_name = os.path.splitext(os.path.basename(__file__))[0]
- generator_version = "0.0.1"
+ generatorname = os.path.basename(__file__)
+ generatorversion = "0.1.1"
valid_formats = ["xlsx"]
- uses_schemaloader = True
+ uses_schemaloader = False
requires_metamodel = False
- # ObjectVars
- sheet_name_cols: List[str] = field(default_factory=lambda: [])
- output: str = None
- workbook: Workbook = field(default_factory=lambda: Workbook())
- wb_name: str = None
- enum_dict: dict = field(default_factory=lambda: dict())
- """dictionary with slot types and possibles values for those types"""
-
- def _workbook_path(self, yaml_filename: str, wb_name: str = None):
- """Internal method that computes the path where the Excel workbook
- should be stored.
-
- :param yaml_filename: Name of provided LinkML schema
- :type yaml_filename: str
- :param wb_name: Prefix for the generated Excel spreadsheet name
- :type wb_name: str
+ def __post_init__(self) -> None:
+ super().__post_init__()
+ self.schemaview = SchemaView(self.schema)
+
+ def create_workbook(self, workbook_name: str) -> Workbook:
+ """
+ Creates an Excel workbook using the openpyxl library and returns it.
+
+ :param workbook_name: Name of the workbook to be created.
+ :return: An openpyxl Workbook object representing the newly created workbook.
+ """
+ workbook = Workbook()
+ workbook.title = workbook_name
+ return workbook
+
+ def get_workbook_name(self, workbook: Workbook) -> str:
"""
- # handle the case when an output filename is not provided
- if not wb_name:
- prefix, _ = os.path.splitext(os.path.basename(yaml_filename))
- prefix_root, prefix_ext = os.path.splitext(prefix)
+ Returns the name of the given workbook.
- if prefix_ext == ".yaml":
- prefix = prefix_root
+ :param workbook: The workbook whose name should be returned.
+ :return: Name of the workbook.
+ """
+ return workbook.title
- output_xlsx = (
- f"{prefix}_{self.generator_name}_{self.generator_version}.xlsx"
- )
+ def remove_worksheet_by_name(self, workbook: Workbook, worksheet_name: str) -> None:
+ """
+ Remove worksheet from workbook by name.
+ """
+ worksheet = workbook[worksheet_name]
+ workbook.remove(worksheet)
- return output_xlsx
+ def create_worksheet(self, workbook: Workbook, worksheet_name: str) -> Worksheet:
+ """
+ Creates an Excel worksheet with the given name in the given workbook.
- return wb_name
+ :param workbook: The workbook to which the worksheet should be added.
+ :param worksheet_name: Name of the worksheet to be created.
+ """
+ worksheet = workbook.create_sheet(worksheet_name)
+ workbook_name = self.get_workbook_name(workbook)
+ workbook.save(workbook_name)
- def __post_init__(self):
- super().__post_init__()
- self.wb_name = self._workbook_path(yaml_filename=self.schema, wb_name=self.output)
- self.workbook.remove(self.workbook["Sheet"])
+ return worksheet
- def _create_spreadsheet(self, ws_name: str, columns: List[str]) -> None:
- """Method to add worksheets to the Excel workbook.
+ def create_schema_worksheets(self, workbook: str) -> None:
+ """
+ Creates worksheets in a given Excel workbook based on the classes in the
+ schema.
- :param ws_name: Name of each of the worksheets
- :type ws_name: str
- :param columns: Columns that are relevant to each of the worksheets
- :type columns: List[str]
+ :param workbook: The workbook to which the worksheet should be added.
"""
- ws = self.workbook.create_sheet(ws_name)
- self.workbook.active = ws
- ws.append(columns)
- self.workbook.save(self.wb_name)
-
- def visit_class(self, cls: ClassDefinition) -> bool:
- """Overridden method to intercept classes from generator framework."""
- self._create_spreadsheet(ws_name=camelcase(cls.name), columns=cls.slots)
-
- return True
-
- def visit_enum(self, enum: EnumDefinition) -> bool:
- """Overridden method to intercept enums from generator framework."""
-
- def extract_permissible_text(pv):
- if type(pv) is str:
- return pv
- if type(pv) is PermissibleValue:
- return pv.text.code
- if type(pv) is PermissibleValueText:
- return pv
- raise ValueError(f"Invalid permissible value in enum {enum}: {pv}")
-
- permissible_values_texts = list(
- map(extract_permissible_text, enum.permissible_values or [])
- )
+ sv = self.schemaview
+ for cls_name, cls in sv.all_classes(imports=self.mergeimports).items():
+ if not cls.mixin and not cls.abstract:
+ self.create_worksheet(workbook, cls_name)
- self.enum_dict[enum.name] = permissible_values_texts
+ def add_columns_to_worksheet(
+ self, workbook: Workbook, worksheet_name: str, sheet_headings: List[str]
+ ) -> None:
+ """
+ Get a worksheet by name and add a column to it in an existing workbook.
- def visit_class_slot(
- self, cls: ClassDefinition, aliased_slot_name: str, slot: SlotDefinition
+ :param workbook: The workbook to which the worksheet should be added.
+ :param worksheet_name: Name of the worksheet to add the column to.
+ :param column_data: List of data to populate the column with.
+ """
+ # Get the worksheet by name
+ worksheet = workbook[worksheet_name]
+
+ # Add the headings to the worksheet
+ for i, heading in enumerate(sheet_headings):
+ worksheet.cell(row=1, column=i + 1, value=heading)
+
+ # Save the changes to the workbook
+ workbook_name = self.get_workbook_name(workbook)
+ workbook.save(workbook_name)
+
+ def column_enum_validation(
+ self,
+ workbook: Workbook,
+ worksheet_name: str,
+ column_name: str,
+ dropdown_values: List[str],
) -> None:
- """Overridden method to intercept classes and associated slots from generator
- framework."""
- self.workbook = load_workbook(self.wb_name)
+ """
+ Get worksheet by name and add a dropdown to a specific column in it
+ based on a list of values.
- if cls.name in self.workbook.sheetnames:
- if slot.range in self.enum_dict:
+ :param workbook: The workbook to which the worksheet should be added.
+ :param worksheet_name: Name of the worksheet to add the column dropdown to.
+ :param column_name: Name of the worksheet column to add the dropdown to.
+ :param dropdown_values: List of dropdown values to add to a column in a worksheet.
+ """
+ worksheet = workbook[worksheet_name]
- valid = ",".join(self.enum_dict[slot.range])
- valid = '"' + valid + '"'
+ column_list = [cell.value for cell in worksheet[1]]
+ column_number = column_list.index(column_name) + 1
+ column_letter = get_column_letter(column_number)
- ws = self.workbook[cls.name]
+ # Create the data validation object and set the dropdown values
+ dv = DataValidation(
+ type="list", formula1=f'"{",".join(dropdown_values)}"', allow_blank=True
+ )
- rows = ws.iter_rows(min_row=1, max_row=1) # returns a generator of rows
- first_row = next(rows) # get the first row
- headings = [
- c.value for c in first_row
- ] # extract the values from the cells
+ worksheet.add_data_validation(dv)
- idx = headings.index(slot.name)
- col_letter = get_column_letter(idx + 1)
+ dv.add(f"{column_letter}2:{column_letter}1048576")
- dv = DataValidation(type="list", formula1=valid, allow_blank=True)
- ws.add_data_validation(dv)
+ workbook_name = self.get_workbook_name(workbook)
+ workbook.save(workbook_name)
- dv.add(f"{col_letter}2:{col_letter}1048576")
+ def serialize(self, **kwargs) -> str:
+ output = (
+ convert_to_snake_case(self.schema.name) + ".xlsx"
+ if not self.output
+ else self.output
+ )
- self.workbook.save(self.wb_name)
+ workbook = self.create_workbook(output)
+ self.remove_worksheet_by_name(workbook, "Sheet")
+ self.create_schema_worksheets(workbook)
+
+ sv = self.schemaview
+ for cls_name, cls in sv.all_classes(imports=self.mergeimports).items():
+ if not cls.mixin and not cls.abstract:
+ slots = [
+ s.name
+ for s in sv.class_induced_slots(cls_name, imports=self.mergeimports)
+ ]
+ self.add_columns_to_worksheet(workbook, cls_name, slots)
+
+ enum_list = [
+ e_name for e_name, _ in sv.all_enums(imports=self.mergeimports).items()
+ ]
+ for cls_name, cls in sv.all_classes(imports=self.mergeimports).items():
+ if not cls.mixin and not cls.abstract:
+ for s in sv.class_induced_slots(cls_name, imports=self.mergeimports):
+ if s.range in enum_list:
+ pv_list = []
+ for pv_name, _ in sv.get_enum(
+ s.range
+ ).permissible_values.items():
+ pv_list.append(pv_name)
+ self.column_enum_validation(workbook, cls_name, s.name, pv_list)
@shared_arguments(ExcelGenerator)
diff --git a/linkml/generators/owlgen.py b/linkml/generators/owlgen.py
index e4d613db..6fc7c030 100644
--- a/linkml/generators/owlgen.py
+++ b/linkml/generators/owlgen.py
@@ -507,7 +507,7 @@ class OwlSchemaGenerator(Generator):
for k, v in el.__dict__.items():
if k in self.metamodel.schema.slots:
defining_slot = self.metamodel.schema.slots[k]
- if v is not None and "owl" in defining_slot.in_subset:
+ if v is not None and ("owl" in defining_slot.in_subset or "OwlProfile" in defining_slot.in_subset):
ve = v if isinstance(v, list) else [v]
for e in ve:
if (
diff --git a/linkml/utils/helpers.py b/linkml/utils/helpers.py
index e9b89987..1d9d348f 100644
--- a/linkml/utils/helpers.py
+++ b/linkml/utils/helpers.py
@@ -1,3 +1,5 @@
+import re
+
def remove_duplicates(lst):
"""Remove duplicate tuples from a list of tuples."""
return [t for t in (set(tuple(i) for i in lst))]
@@ -6,3 +8,7 @@ def remove_duplicates(lst):
def write_to_file(file_path, data, mode="w", encoding="utf-8"):
with open(file_path, mode, encoding=encoding) as f:
f.write(data)
+
+def convert_to_snake_case(str):
+ str = re.sub(r"(?<=[a-z])(?=[A-Z])|[^a-zA-Z]", " ", str).strip().replace(' ', '_')
+ return ''.join(str.lower())
diff --git a/linkml/utils/ifabsent_functions.py b/linkml/utils/ifabsent_functions.py
index 33ab4044..fa4aa7e5 100644
--- a/linkml/utils/ifabsent_functions.py
+++ b/linkml/utils/ifabsent_functions.py
@@ -72,11 +72,22 @@ default_library: List[
(r"float\(([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\)", False, lambda m, __, ___, ____: float(m[1])),
(r"date\((\d{4})-(\d{2})-(\d{2})\)", False, lambda m, __, ___, ____: f"datetime.date({m[1]}, {m[2]}, {m[3]})"),
(r"datetime\((\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})Z\)", False, lambda m, __, ___, ____: f"datetime.datetime({m[1]}, {m[2]}, {m[3]}, {m[4]}, {m[5]}, {m[6]})"),
- ("class_uri", False, lambda _, __, class_definition, ____: 'class_class_uri'),
- ("class_curie", False, lambda _, __, class_definition, ____: 'class_class_curie'),
- ("slot_uri", True, lambda _, loader, ___, slot_definition: f'slots.{slot_definition.name}.uri'),
- ("slot_curie", True, lambda _, loader, ___, slot_definition: f'slots.{slot_definition.name}.curie'),
- ("default_range", False, lambda _, loader, __, ____: f"{strval(loader.schema.default_range)}"),
+ # TODO: We have to make the real URI available before any of these can work
+ # ("class_uri", True, lambda _, loader, ___, ____: f'"{default_uri_for(loader)}" + camelcase(self.name)'),
+ # ("slot_uri", True, lambda _, loader, ___, ____: f'"{default_uri_for(loader)}" + underscore(self.alias if self.alias else self.name)'),
+ # ("class_curie", True, lambda _, loader, ___, ____: curie_for(loader, True)),
+ # ("slot_curie", True, lambda _, loader, ___, ____: curie_for(loader, False)),
+ ("class_uri", True, lambda _, loader, ___, ____: "None"),
+ ("slot_uri", True, lambda _, loader, ___, ____: "None"),
+ ("class_curie", True, lambda _, loader, ___, ____: "None"),
+ ("slot_curie", True, lambda _, loader, ___, ____: "None"),
+ # See: https://github.com/linkml/linkml/issues/1333
+ # ("class_uri", False, lambda _, __, class_definition, ____: 'class_class_uri'),
+ # ("class_curie", False, lambda _, __, class_definition, ____: 'class_class_curie'),
+ # ("slot_uri", True, lambda _, loader, ___, slot_definition: f'slots.{slot_definition.name}.uri'),
+ # ("slot_curie", True, lambda _, loader, ___, slot_definition: f'slots.{slot_definition.name}.curie'),
+ # ("default_range", False, lambda _, loader, __, ____: f"{strval(loader.schema.default_range)}"),
+ ("default_range", False, lambda _, __, ___, ____: "None"),
("bnode", False, lambda _, __, ___, ____: "bnode()"),
(r"string\((.*)\)", False, lambda m, __, ___, ____: strval(m[1])),
(r"uri\((.*)\)", False, lambda m, loader, _, __: uri_for(m[1], loader)),
|
linkml/linkml
|
d7b6ad51997e9dd9a49effba5b956cf57da24d20
|
diff --git a/tests/test_generators/test_docgen.py b/tests/test_generators/test_docgen.py
index 7764f94a..c6a5a79d 100644
--- a/tests/test_generators/test_docgen.py
+++ b/tests/test_generators/test_docgen.py
@@ -3,6 +3,7 @@ import os
import shutil
import tempfile
import unittest
+import yaml
from copy import copy
from typing import List
@@ -303,6 +304,16 @@ class DocGeneratorTestCase(unittest.TestCase):
"Example: Person",
after="## Examples",)
+ # checks correctness of the YAML representation of source schema
+ person_source = gen.yaml(gen.schemaview.get_class("Person"))
+ person_dict = yaml.load(person_source, Loader=yaml.Loader)
+ # consider the species name slot
+ # species name has the Person class repeated multiple times in domain_of
+ domain_of_species_name = person_dict["slot_usage"]["species name"]["domain_of"]
+ self.assertTrue(
+ len(set(domain_of_species_name)) == len(domain_of_species_name)
+ )
+
def test_docgen_no_mergeimports(self):
"""Tests when imported schemas are not folded into main schema"""
gen = DocGenerator(SCHEMA, mergeimports=False, no_types_dir=True)
diff --git a/tests/test_generators/test_excelgen.py b/tests/test_generators/test_excelgen.py
index eb307b3b..9644d24e 100644
--- a/tests/test_generators/test_excelgen.py
+++ b/tests/test_generators/test_excelgen.py
@@ -25,47 +25,47 @@ class ExcelGenTestCase(unittest.TestCase):
wb_obj = load_workbook(xlsx_filename)
# check the names of the created worksheets that are part of the workbook
- assert wb_obj.sheetnames == ["Employee", "Manager", "Organization"]
+ assert wb_obj.sheetnames == ["organization", "employee", "manager"]
# test case to check the column names in Employee worksheet
employee_cols_list = []
- max_col = wb_obj["Employee"].max_column
+ max_col = wb_obj["employee"].max_column
for i in range(1, max_col + 1):
- cell_obj = wb_obj["Employee"].cell(row=1, column=i)
+ cell_obj = wb_obj["employee"].cell(row=1, column=i)
employee_cols_list.append(cell_obj.value)
assert sorted(employee_cols_list) == [
"age in years",
"aliases",
- "employee_last name",
"first name",
"id",
+ "last name"
]
# test case to check the column names in Manager worksheet
manager_cols_list = []
- max_col = wb_obj["Manager"].max_column
+ max_col = wb_obj["manager"].max_column
for i in range(1, max_col + 1):
- cell_obj = wb_obj["Manager"].cell(row=1, column=i)
+ cell_obj = wb_obj["manager"].cell(row=1, column=i)
manager_cols_list.append(cell_obj.value)
assert sorted(manager_cols_list) == [
"age in years",
"aliases",
- "employee_last name",
"first name",
"has employees",
"id",
+ "last name",
]
# test case to check the column names in Organization worksheet
organization_cols_list = []
- max_col = wb_obj["Organization"].max_column
+ max_col = wb_obj["organization"].max_column
for i in range(1, max_col + 1):
- cell_obj = wb_obj["Organization"].cell(row=1, column=i)
+ cell_obj = wb_obj["organization"].cell(row=1, column=i)
organization_cols_list.append(cell_obj.value)
assert sorted(organization_cols_list) == ["has boss", "id", "name"]
diff --git a/tests/test_issues/test_issue_675.py b/tests/test_issues/test_issue_675.py
index 6a8acb02..e88ab330 100644
--- a/tests/test_issues/test_issue_675.py
+++ b/tests/test_issues/test_issue_675.py
@@ -1,5 +1,6 @@
import unittest
+from linkml.generators.pydanticgen import PydanticGenerator
from linkml_runtime.utils.compile_python import compile_python
from linkml.generators.pythongen import PythonGenerator
@@ -76,27 +77,77 @@ classes:
class IfAbsentTestCase(unittest.TestCase):
def test_ifabsent(self):
- print(PythonGenerator(model_txt).serialize())
+ """
+ Tests pythongenerator with ifabsent_functions.
+
+ See: https://github.com/linkml/linkml/issues/1333
+ """
+ # print(PythonGenerator(model_txt).serialize())
m = compile_python(PythonGenerator(model_txt).serialize())
sample = m.HighClass()
self.assertEqual(sample.bool_true_slot, True)
self.assertEqual(sample.bool_false_slot, False)
print("class_curie_slot fails")
- self.assertEqual(sample.class_curie_slot, m.HighClass.class_class_curie)
+ # self.assertEqual(sample.class_curie_slot, m.HighClass.class_class_curie)
+ self.assertIsNone(sample.class_curie_slot)
+ print("class_uri_slot fails")
+ # self.assertEqual(sample.class_uri_slot, m.HighClass.class_class_uri)
+ self.assertIsNone(sample.class_uri_slot)
+ print(m.HighClass.class_class_uri)
+ print("default_ns fails")
+ self.assertEqual(sample.default_ns_slot, 'ex')
+ print("default_range fails")
+ # self.assertEqual(sample.default_range_slot, 'string')
+ self.assertIsNone(sample.default_range_slot)
+ print("int(0) fails")
+ self.assertEqual(sample.int_0_slot, 0)
+ self.assertEqual(sample.int_42_slot, 42)
+ self.assertEqual(sample.neg_int_slot, -117243)
+ print("slot_curie fails")
+ # self.assertEqual(sample.slot_curie_slot, m.slots.slot_curie_slot.curie)
+ self.assertIsNone(sample.slot_curie_slot)
+ print("slot_uri fails")
+ # self.assertEqual(sample.slot_uri_slot, m.slots.slot_uri_slot.uri)
+ self.assertIsNone(sample.slot_uri_slot)
+ self.assertIsNone(sample.slot_curie_slot)
+ self.assertEqual(sample.string_slot, "s1")
+ self.assertEqual(sample.mt_string_slot, "")
+
+ @unittest.skip("TODO: https://github.com/linkml/linkml/issues/1334")
+ def test_ifabsent_pydantic(self):
+ """
+ Tests pydantic generator with ifabsent_functions.
+
+ See: https://github.com/linkml/linkml/issues/1334
+ """
+ print(PydanticGenerator(model_txt).serialize())
+ m = compile_python(PydanticGenerator(model_txt).serialize())
+ sample = m.HighClass()
+ self.assertEqual(sample.bool_true_slot, True)
+ self.assertEqual(sample.bool_false_slot, False)
+ print("class_curie_slot fails")
+ # self.assertEqual(sample.class_curie_slot, m.HighClass.class_class_curie)
+ self.assertIsNone(sample.class_curie_slot)
print("class_uri_slot fails")
- self.assertEqual(sample.class_uri_slot, m.HighClass.class_class_uri)
+ # self.assertEqual(sample.class_uri_slot, m.HighClass.class_class_uri)
+ self.assertIsNone(sample.class_uri_slot)
+ print(m.HighClass.class_class_uri)
print("default_ns fails")
self.assertEqual(sample.default_ns_slot, 'ex')
print("default_range fails")
- self.assertEqual(sample.default_range_slot, 'string')
+ # self.assertEqual(sample.default_range_slot, 'string')
+ self.assertIsNone(sample.default_range_slot)
print("int(0) fails")
self.assertEqual(sample.int_0_slot, 0)
self.assertEqual(sample.int_42_slot, 42)
self.assertEqual(sample.neg_int_slot, -117243)
print("slot_curie fails")
- self.assertEqual(sample.slot_curie_slot, m.slots.slot_curie_slot.curie)
+ # self.assertEqual(sample.slot_curie_slot, m.slots.slot_curie_slot.curie)
+ self.assertIsNone(sample.slot_curie_slot)
print("slot_uri fails")
- self.assertEqual(sample.slot_uri_slot, m.slots.slot_uri_slot.uri)
+ # self.assertEqual(sample.slot_uri_slot, m.slots.slot_uri_slot.uri)
+ self.assertIsNone(sample.slot_uri_slot)
+ self.assertIsNone(sample.slot_curie_slot)
self.assertEqual(sample.string_slot, "s1")
self.assertEqual(sample.mt_string_slot, "")
|
pythongen ifabsent generates incorrect values for class_curie and slot_curie
the metamodel has a slot class_uri, with an `ifabsent` defined as follows:
```yaml
classes:
ClassDefinition:
slots:
- ...
- class_uri
- ...
slots:
class_uri:
range: uriorcurie
description: URI of the class that provides a semantic interpretation of the element in a linked data context. The URI may come from any namespace and may be shared between schemas
ifabsent: class_curie
```
The documentation for `ifabsent` says:
```
* class_curie -- CURIE for the containing class
* class_uri -- URI for the containing class
```
this means I would expect
```python
>>> cls = ClassDefinition("Foo")
>>> print(cls.class_curie)
myschema:Foo
```
however, with the `ifabsent_function` changes in
- #1171.
meta.py now says:
```python
class ClassDefinition(...):
...
class_curie: ... = class_class_curie
```
so the actual behavior we get is:
```python
>>> cls = ClassDefinition("Foo")
>>> print(cls.class_curie)
linkml:ClassDefinition
```
I think we should revert to having the generated default be `None` for all x_uri slots.
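A sketch of the intended post-fix behavior (assuming meta.py is regenerated with the change above; `ClassDefinition` is the metamodel class from the issue):
```python
from linkml_runtime.linkml_model.meta import ClassDefinition

cls = ClassDefinition("Foo")

# Before the fix, the generated default pointed at the *metamodel's* own
# CURIE, i.e. "linkml:ClassDefinition".
# After reverting the generated default, an unset class_curie is None:
assert cls.class_curie is None
```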
|
0.0
|
d7b6ad51997e9dd9a49effba5b956cf57da24d20
|
[
"tests/test_generators/test_excelgen.py::ExcelGenTestCase::test_excel_generation",
"tests/test_issues/test_issue_675.py::IfAbsentTestCase::test_ifabsent"
] |
[
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_class_hierarchy_as_tuples",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_class_hierarchy_as_tuples_no_mergeimports",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_class_slots_inheritance",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_custom_directory",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_docgen",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_docgen_no_mergeimports",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_docgen_rank_ordering",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_fetch_slots_of_class",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_gen_metamodel",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_html",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_latex_generation",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_myst_dialect",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_use_slot_uris"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-11 22:36:18+00:00
|
cc0-1.0
| 3,594 |
|
linkml__linkml-1410
|
diff --git a/linkml/generators/jsonschemagen.py b/linkml/generators/jsonschemagen.py
index 3d21a670..4d6158f3 100644
--- a/linkml/generators/jsonschemagen.py
+++ b/linkml/generators/jsonschemagen.py
@@ -371,11 +371,34 @@ class JsonSchemaGenerator(Generator):
if slot_is_inlined:
# If inline we have to include redefined slots
if slot.multivalued:
- range_id_slot = self.schemaview.get_identifier_slot(slot.range, use_key=True)
+ range_id_slot, range_simple_dict_value_slot, range_required_slots = self._get_range_associated_slots(slot)
+ # if the range class has an ID and the slot is not inlined as a list, then we need to consider
+ # various inlined as dict formats
if range_id_slot is not None and not slot.inlined_as_list:
+ # At a minimum, the inlined dict can have keys (additionalProps) that are IDs
+ # and the values are the range class but possibly omitting the ID.
+ additionalProps = [JsonSchema.ref_for(reference, identifier_optional=True)]
+
+ # If the range can be collected as a simple dict, then we can also accept the value
+ # of that simple dict directly.
+ if range_simple_dict_value_slot is not None:
+ additionalProps.append(self.get_subschema_for_slot(range_simple_dict_value_slot))
+
+ # If the range has no required slots, then null is acceptable
+ if len(range_required_slots) == 0:
+ additionalProps.append(JsonSchema({"type": "null"}))
+
+ # If through the above logic we identified multiple acceptable forms, then wrap them
+ # in an "anyOf", otherwise just take the only acceptable form
+ if len(additionalProps) == 1:
+ additionalProps = additionalProps[0]
+ else:
+ additionalProps = JsonSchema({
+ "anyOf": additionalProps
+ })
prop = JsonSchema({
"type": "object",
- "additionalProperties": JsonSchema.ref_for(reference, identifier_optional=True)
+ "additionalProperties": additionalProps
})
self.top_level_schema.add_lax_def(reference, self.aliased_slot_name(range_id_slot))
else:
@@ -453,6 +476,26 @@ class JsonSchemaGenerator(Generator):
self.handle_class(class_definition)
return self.top_level_schema.to_json(sort_keys=True, indent=self.indent if self.indent > 0 else None)
+
+ def _get_range_associated_slots(self, slot: SlotDefinition) -> Tuple[Union[SlotDefinition, None], Union[SlotDefinition, None], Union[List[SlotDefinition], None]]:
+ range_class = self.schemaview.get_class(slot.range)
+ if range_class is None:
+ return None, None, None
+
+ range_class_id_slot = self.schemaview.get_identifier_slot(range_class.name, use_key=True)
+ if range_class_id_slot is None:
+ return None, None, None
+
+ non_id_slots = [
+ s for s in self.schemaview.class_induced_slots(range_class.name) if s.name != range_class_id_slot.name
+ ]
+ non_id_required_slots = [s for s in non_id_slots if s.required]
+
+ range_simple_dict_value_slot = None
+ if len(non_id_slots) == 1:
+ range_simple_dict_value_slot = non_id_slots[0]
+
+ return range_class_id_slot, range_simple_dict_value_slot, non_id_required_slots
@shared_arguments(JsonSchemaGenerator)
diff --git a/linkml/linter/cli.py b/linkml/linter/cli.py
index 7f5b9fbd..6bdaf3f3 100644
--- a/linkml/linter/cli.py
+++ b/linkml/linter/cli.py
@@ -53,6 +53,18 @@ def get_yaml_files(root: Path) -> Iterable[str]:
help="Report format.",
show_default=True,
)
[email protected](
+ "--validate",
+ is_flag=True,
+ default=False,
+ help="Validate the schema against the LinkML Metamodel before linting.",
+)
[email protected](
+ "--validate-only",
+ is_flag=True,
+ default=False,
+ help="Validate the schema against the LinkML Metamodel and then exit without checking linter rules.",
+)
@click.option("-v", "--verbose", is_flag=True)
@click.option(
"-o", "--output", type=click.File("w"), default="-", help="Report file name."
@@ -77,10 +89,12 @@ def main(
fix: bool,
config: str,
format: str,
+ validate: bool,
+ validate_only: bool,
output,
ignore_warnings: bool,
max_warnings: int,
- verbose: bool
+ verbose: bool,
):
"""Run linter on SCHEMA.
@@ -117,7 +131,9 @@ def main(
formatter.start_report()
for path in get_yaml_files(schema):
formatter.start_schema(path)
- report = linter.lint(path, fix=fix)
+ report = linter.lint(
+ path, fix=fix, validate_schema=validate, validate_only=validate_only
+ )
for problem in report:
if str(problem.level) is RuleLevel.error.text:
error_count += 1
diff --git a/linkml/linter/formatters/terminal_formatter.py b/linkml/linter/formatters/terminal_formatter.py
index 1c284c9f..a4833209 100644
--- a/linkml/linter/formatters/terminal_formatter.py
+++ b/linkml/linter/formatters/terminal_formatter.py
@@ -60,3 +60,5 @@ class TerminalFormatter(Formatter):
+ " "
+ plural("schema", problem_schemas)
)
+ else:
+ self.write(click.style("\u2713", fg="green") + " No problems found")
diff --git a/linkml/linter/linter.py b/linkml/linter/linter.py
index c4eb92e5..f4d3c8b5 100644
--- a/linkml/linter/linter.py
+++ b/linkml/linter/linter.py
@@ -1,15 +1,21 @@
import inspect
+import json
from copy import deepcopy
from dataclasses import dataclass
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, Iterable, Union
+import jsonschema
import yaml
+from jsonschema.exceptions import best_match
from linkml_runtime import SchemaView
from linkml_runtime.dumpers import yaml_dumper
from linkml_runtime.linkml_model import SchemaDefinition
+from linkml.generators.jsonschemagen import JsonSchemaGenerator
+
+from .. import LOCAL_METAMODEL_YAML_FILE
from .config.datamodel.config import Config, ExtendableConfigs, RuleLevel
@@ -29,6 +35,14 @@ def get_named_config(name: str) -> Dict[str, Any]:
return yaml.safe_load(config_file)
+@lru_cache
+def get_metamodel_validator() -> jsonschema.Validator:
+ meta_json_gen = JsonSchemaGenerator(LOCAL_METAMODEL_YAML_FILE, not_closed=False)
+ meta_json_schema = json.loads(meta_json_gen.serialize())
+ validator = jsonschema.Draft7Validator(meta_json_schema)
+ return validator
+
+
def merge_configs(original: dict, other: dict):
result = deepcopy(original)
for key, value in other.items():
@@ -39,6 +53,18 @@ def merge_configs(original: dict, other: dict):
return result
+def _format_path_component(value):
+ if isinstance(value, int):
+ return f"[{value}]"
+ return value
+
+
+def _format_path(path):
+ if not path:
+ return "<root>"
+ return " > ".join(_format_path_component(p) for p in path)
+
+
class Linter:
def __init__(self, config: Dict[str, Any] = {}) -> None:
default_config = deepcopy(get_named_config("default"))
@@ -61,17 +87,45 @@ class Linter:
]
)
+ def validate_schema(self, schema_path: str):
+ with open(schema_path) as schema_file:
+ schema = yaml.safe_load(schema_file)
+
+ validator = get_metamodel_validator()
+ for err in validator.iter_errors(schema):
+ best_err = best_match([err])
+ message = f"In {_format_path(best_err.absolute_path)}: {best_err.message}"
+ if best_err.context:
+ message += f" ({', '.join(e.message for e in best_err.context)})"
+ yield LinterProblem(
+ rule_name="valid-schema",
+ message=message,
+ level=RuleLevel(RuleLevel.error),
+ schema_source=schema,
+ )
+
def lint(
- self, schema=Union[str, SchemaDefinition], fix: bool = False
+ self,
+ schema: Union[str, SchemaDefinition],
+ fix: bool = False,
+ validate_schema: bool = False,
+ validate_only: bool = False,
) -> Iterable[LinterProblem]:
+ if (validate_schema or validate_only) and isinstance(schema, str):
+ yield from self.validate_schema(schema)
+
+ if validate_only:
+ return
+
try:
schema_view = SchemaView(schema)
except:
- yield LinterProblem(
- message="File is not a valid LinkML schema",
- level=RuleLevel(RuleLevel.error),
- schema_source=(schema if isinstance(schema, str) else None),
- )
+ if not validate_schema:
+ yield LinterProblem(
+ message="File is not a valid LinkML schema. Use --validate for more details.",
+ level=RuleLevel(RuleLevel.error),
+ schema_source=(schema if isinstance(schema, str) else None),
+ )
return
for rule_id, rule_config in self.config.rules.__dict__.items():
diff --git a/linkml/linter/rules.py b/linkml/linter/rules.py
index 9deef5d4..89f17b80 100644
--- a/linkml/linter/rules.py
+++ b/linkml/linter/rules.py
@@ -18,7 +18,6 @@ from .linter import LinterProblem
class LinterRule(ABC):
-
PATTERNS = {
"snake": re.compile(r"[a-z][_a-z0-9]+"),
"uppersnake": re.compile(r"[A-Z][_A-Z0-9]+"),
@@ -52,7 +51,6 @@ class LinterRule(ABC):
class NoEmptyTitleRule(LinterRule):
-
id = "no_empty_title"
def check(
@@ -71,7 +69,6 @@ class NoEmptyTitleRule(LinterRule):
class NoXsdIntTypeRule(LinterRule):
-
id = "no_xsd_int_type"
def check(self, schema_view: SchemaView, fix: bool = False):
@@ -86,7 +83,6 @@ class NoXsdIntTypeRule(LinterRule):
class PermissibleValuesFormatRule(LinterRule):
-
id = "permissible_values_format"
def check(
@@ -114,7 +110,6 @@ def _get_recommended_metamodel_slots() -> List[str]:
class RecommendedRule(LinterRule):
-
id = "recommended"
def __init__(self, config: RecommendedRuleConfig) -> None:
@@ -138,7 +133,6 @@ class RecommendedRule(LinterRule):
class TreeRootClassRule(LinterRule):
-
id = "tree_root_class"
def __init__(self, config: TreeRootClassRuleConfig) -> None:
@@ -224,7 +218,6 @@ class TreeRootClassRule(LinterRule):
class NoInvalidSlotUsageRule(LinterRule):
-
id = "no_invalid_slot_usage"
def check(
@@ -245,7 +238,6 @@ class NoInvalidSlotUsageRule(LinterRule):
class StandardNamingRule(LinterRule):
-
id = "standard_naming"
def __init__(self, config: StandardNamingConfig) -> None:
@@ -283,7 +275,6 @@ class StandardNamingRule(LinterRule):
class CanonicalPrefixesRule(LinterRule):
-
id = "canonical_prefixes"
def __init__(self, config: CanonicalPrefixesConfig) -> None:
|
linkml/linkml
|
503ab9b6575deae9edfe2444138c0143d87b217f
|
diff --git a/tests/test_generators/input/jsonschema_collection_forms.yaml b/tests/test_generators/input/jsonschema_collection_forms.yaml
new file mode 100644
index 00000000..c2ad0872
--- /dev/null
+++ b/tests/test_generators/input/jsonschema_collection_forms.yaml
@@ -0,0 +1,79 @@
+schema:
+ id: http://example.org/test_collection_forms
+ name: test_collection_forms
+ imports:
+ - https://w3id.org/linkml/types
+ default_range: string
+
+ slots:
+ key:
+ key: true
+ value:
+ value2:
+ key_value_pairs:
+ range: KeyValuePair
+ multivalued: true
+ inlined: true
+ more_than_one_non_key_slots:
+ range: MoreThanOneNonKeySlot
+ multivalued: true
+ inlined: true
+
+ classes:
+ Test:
+ tree_root: true
+ slots:
+ - key_value_pairs
+ - more_than_one_non_key_slots
+ KeyValuePair:
+ slots:
+ - key
+ - value
+ MoreThanOneNonKeySlot:
+ slots:
+ - key
+ - value
+ - value2
+
+data_cases:
+ - data:
+ key_value_pairs:
+ k1:
+ key: k1
+ value: v1
+ k2:
+ key: k2
+ value: v2
+ - data:
+ key_value_pairs:
+ k1:
+ value: v1
+ k2:
+ value: v2
+ - data:
+ key_value_pairs:
+ k1: v1
+ k2: v2
+ - data:
+ more_than_one_non_key_slots:
+ k1:
+ key: k1
+ value: v1
+ value2: v12
+ k2:
+ key: k2
+ value: v2
+ value2: v22
+ - data:
+ more_than_one_non_key_slots:
+ k1:
+ value: v1
+ value2: v12
+ k2:
+ value: v2
+ value2: v22
+ - data:
+ more_than_one_non_key_slots:
+ k1: v1
+ k2: v2
+ error_message: "not valid under any of the given schemas"
diff --git a/tests/test_generators/input/jsonschema_empty_inlined_as_dict_objects.yaml b/tests/test_generators/input/jsonschema_empty_inlined_as_dict_objects.yaml
new file mode 100644
index 00000000..d748ac2b
--- /dev/null
+++ b/tests/test_generators/input/jsonschema_empty_inlined_as_dict_objects.yaml
@@ -0,0 +1,60 @@
+schema:
+ id: http://example.org/test_empty_inlined_as_dict_objects
+ name: test_empty_inlined_as_dict_objects
+ imports:
+ - https://w3id.org/linkml/types
+ default_range: string
+
+ slots:
+ id:
+ key: true
+ s1:
+ s2:
+ required: true
+ no_non_key_required_slots:
+ range: HasNoNonKeyRequiredSlots
+ multivalued: true
+ inlined: true
+ non_key_required_slots:
+ range: HasNonKeyRequiredSlots
+ multivalued: true
+ inlined: true
+
+ classes:
+ Test:
+ tree_root: true
+ slots:
+ - no_non_key_required_slots
+ - non_key_required_slots
+ HasNoNonKeyRequiredSlots:
+ slots:
+ - id
+ - s1
+ HasNonKeyRequiredSlots:
+ slots:
+ - id
+ - s1
+ - s2
+
+data_cases:
+ - data:
+ no_non_key_required_slots:
+ id1:
+ s1: value1
+ id2:
+ s1: value2
+ - data:
+ no_non_key_required_slots:
+ id1:
+ id2:
+ - data:
+ non_key_required_slots:
+ id1:
+ s2: value1
+ id2:
+ s2: value2
+ - data:
+ non_key_required_slots:
+ id1:
+ id2:
+ error_message: "None is not of type 'object'"
diff --git a/tests/test_generators/test_jsonschemagen.py b/tests/test_generators/test_jsonschemagen.py
index 5c1100fd..62e93b75 100644
--- a/tests/test_generators/test_jsonschemagen.py
+++ b/tests/test_generators/test_jsonschemagen.py
@@ -229,6 +229,17 @@ class JsonSchemaTestCase(unittest.TestCase):
self.externalFileTest("jsonschema_multivalued_element_constraints.yaml")
+ def test_collection_forms(self):
+ """Tests that expanded, compact, and simple dicts can be validated"""
+
+ self.externalFileTest("jsonschema_collection_forms.yaml")
+
+ def test_empty_inlined_as_dict_objects(self):
+ """Tests that inlined objects with no non-key required slots can be null/empty"""
+
+ self.externalFileTest("jsonschema_empty_inlined_as_dict_objects.yaml")
+
+
# **********************************************************
#
# Utility methods
diff --git a/tests/test_issues/test_issue_129.py b/tests/test_issues/test_issue_129.py
index 5315e207..7d2336a8 100644
--- a/tests/test_issues/test_issue_129.py
+++ b/tests/test_issues/test_issue_129.py
@@ -39,11 +39,17 @@ class IssueJSONSchemaTypesTestCase(TestEnvironmentTestCase):
assert props["has_ds"]["items"]["$ref"] == "#/$defs/D"
# multi-valued, inlined (as dict) #411
- D_id_opt = props["has_ds2"]["additionalProperties"]["$ref"].replace(
+ D_id_any_of = props["has_ds2"]["additionalProperties"]["anyOf"]
+ D_id_with_ref = next(d for d in D_id_any_of if "$ref" in d)
+ assert D_id_with_ref
+ D_id_opt = D_id_with_ref["$ref"].replace(
"#/$defs/", ""
)
assert D_id_opt in defs
assert defs[D_id_opt]["required"] == []
+ # D has no required slots other than the id, so the inlined value can also be null
+ D_type_null = next(d for d in D_id_any_of if "type" in d and d.type == 'null')
+ assert D_type_null
# single-valued, non-inlined (foreign key)
assert props["parent"]["type"] == "string"
diff --git a/tests/test_linter/test_cli.py b/tests/test_linter/test_cli.py
index 09bfafd1..3e4494d0 100644
--- a/tests/test_linter/test_cli.py
+++ b/tests/test_linter/test_cli.py
@@ -236,3 +236,42 @@ slots:
self.assertIn("Class has name 'person'", result.stdout)
self.assertIn(str(schema_b), result.stdout)
self.assertIn("Slot has name 'a slot'", result.stdout)
+
+ def test_validate_schema(self):
+ with self.runner.isolated_filesystem():
+ with open(SCHEMA_FILE, "w") as f:
+ f.write("""
+id: http://example.org/test
+classes:
+ person:
+ description: a person
+""")
+
+ result = self.runner.invoke(main, ['--validate', SCHEMA_FILE])
+ self.assertEqual(result.exit_code, 2)
+ self.assertIn(
+ "error In <root>: 'name' is a required property (valid-schema)",
+ result.stdout,
+ )
+ self.assertIn(
+ "warning Class has name 'person' (standard_naming)",
+ result.stdout,
+ )
+
+ def test_validate_schema_only(self):
+ with self.runner.isolated_filesystem():
+ with open(SCHEMA_FILE, "w") as f:
+ f.write("""
+id: http://example.org/test
+classes:
+ person:
+ description: a person
+""")
+
+ result = self.runner.invoke(main, ['--validate-only', SCHEMA_FILE])
+ self.assertEqual(result.exit_code, 2)
+ self.assertIn(
+ "error In <root>: 'name' is a required property (valid-schema)",
+ result.stdout,
+ )
+ self.assertNotIn("(standard_naming)", result.stdout)
|
Add command line tool: linkml-schema-validate
Currently we get schema validation only indirectly, whenever any command in the gen-X suite is executed, but it would be good to have a more direct command, `linkml-schema-validate`
This would be a convenience wrapper for running `linkml-validate` on the schema, using the version of the metamodel distributed with linkml-runtime (there could be an option for passing an explicit metamodel version but then this becomes nearly identical to the generic linkml-validate)
We may still want to include some of the procedural checks that are performed by SchemaLoader right now, but ultimately these should all be translated to declarative form in the metamodel.
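A minimal sketch of the approach the patch above takes (it mirrors `get_metamodel_validator`; error reporting is simplified, and `my_schema.yaml` is a hypothetical input file):
```python
import json

import jsonschema
import yaml

from linkml import LOCAL_METAMODEL_YAML_FILE
from linkml.generators.jsonschemagen import JsonSchemaGenerator

# Generate JSON Schema for the LinkML metamodel itself...
meta_json_schema = json.loads(
    JsonSchemaGenerator(LOCAL_METAMODEL_YAML_FILE, not_closed=False).serialize()
)
validator = jsonschema.Draft7Validator(meta_json_schema)

# ...then validate the target schema file against it.
with open("my_schema.yaml") as f:  # hypothetical schema file
    schema = yaml.safe_load(f)

for err in validator.iter_errors(schema):
    print(err.message)
```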
|
0.0
|
503ab9b6575deae9edfe2444138c0143d87b217f
|
[
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_collection_forms",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_empty_inlined_as_dict_objects",
"tests/test_issues/test_issue_129.py::IssueJSONSchemaTypesTestCase::test_issue_types",
"tests/test_linter/test_cli.py::TestLinterCli::test_validate_schema",
"tests/test_linter/test_cli.py::TestLinterCli::test_validate_schema_only"
] |
[
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_class_uri_any",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_jsonschema_integration",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_multivalued_element_constraints",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_multivalued_slot_cardinality",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_range_unions",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_rules_in_non_root_class",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_top_class_identifier",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_type_inheritance",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_value_constraints",
"tests/test_linter/test_cli.py::TestLinterCli::test_config_extends_recommended",
"tests/test_linter/test_cli.py::TestLinterCli::test_directory_of_files",
"tests/test_linter/test_cli.py::TestLinterCli::test_exceeded_max_warnings_flag",
"tests/test_linter/test_cli.py::TestLinterCli::test_explicit_config_file",
"tests/test_linter/test_cli.py::TestLinterCli::test_ignore_warnings_flag",
"tests/test_linter/test_cli.py::TestLinterCli::test_implicit_config_file",
"tests/test_linter/test_cli.py::TestLinterCli::test_max_warnings_flag",
"tests/test_linter/test_cli.py::TestLinterCli::test_no_config",
"tests/test_linter/test_cli.py::TestLinterCli::test_no_schema_errors",
"tests/test_linter/test_cli.py::TestLinterCli::test_warning_exit_code"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-27 22:51:31+00:00
|
cc0-1.0
| 3,595 |
|
linkml__linkml-1411
|
diff --git a/linkml/generators/jsonschemagen.py b/linkml/generators/jsonschemagen.py
index 7ed11914..3d21a670 100644
--- a/linkml/generators/jsonschemagen.py
+++ b/linkml/generators/jsonschemagen.py
@@ -87,14 +87,11 @@ class JsonSchema(UserDict):
self['required'].append(canonical_name)
- def add_keyword(self, keyword: str, value: Any, applies_to_all_array_elements: bool = False):
+ def add_keyword(self, keyword: str, value: Any):
if value is None:
return
- if applies_to_all_array_elements and self.is_array:
- self['items'][keyword] = value
- else:
- self[keyword] = value
+ self[keyword] = value
@property
def is_array(self):
@@ -241,12 +238,23 @@ class JsonSchemaGenerator(Generator):
rule_subschemas.append(inverse_subschema)
if len(rule_subschemas) == 1:
- self.top_level_schema.update(rule_subschemas[0])
+ class_subschema.update(rule_subschemas[0])
elif len(rule_subschemas) > 1:
- self.top_level_schema["allOf"] = rule_subschemas
+ if "allOf" not in class_subschema:
+ class_subschema["allOf"] = []
+ class_subschema["allOf"].extend(rule_subschemas)
self.top_level_schema.add_def(cls.name, class_subschema)
+ if (
+ self.top_class is not None and camelcase(self.top_class) == camelcase(cls.name)
+ ) or (self.top_class is None and cls.tree_root):
+ for key, value in class_subschema.items():
+ # check this first to ensure we don't overwrite things like additionalProperties
+ # or description on the root. But we do want to copy over properties, required,
+ # if, then, etc.
+ if key not in self.top_level_schema:
+ self.top_level_schema[key] = value
def get_subschema_for_anonymous_class(self, cls: AnonymousClassExpression, properties_required: bool = False) -> Union[None, JsonSchema]:
if not cls:
@@ -320,6 +328,18 @@ class JsonSchemaGenerator(Generator):
typ = "string"
return (typ, fmt, reference)
+
+ def get_value_constraints_for_slot(self, slot: Union[AnonymousSlotExpression, None]) -> JsonSchema:
+ if slot is None:
+ return JsonSchema()
+
+ constraints = JsonSchema()
+ constraints.add_keyword('pattern', slot.pattern)
+ constraints.add_keyword('minimum', slot.minimum_value)
+ constraints.add_keyword('maximum', slot.maximum_value)
+ constraints.add_keyword('const', slot.equals_string)
+ constraints.add_keyword('const', slot.equals_number)
+ return constraints
def get_subschema_for_slot(self, slot: SlotDefinition, omit_type: bool = False) -> JsonSchema:
slot_has_range_union = slot.any_of is not None and len(slot.any_of) > 0 and all(s.range is not None for s in slot.any_of)
@@ -380,15 +400,19 @@ class JsonSchemaGenerator(Generator):
prop.add_keyword('description', slot.description)
- prop.add_keyword('pattern', slot.pattern, applies_to_all_array_elements=True)
- prop.add_keyword('minimum', slot.minimum_value, applies_to_all_array_elements=True)
- prop.add_keyword('maximum', slot.maximum_value, applies_to_all_array_elements=True)
- prop.add_keyword('const', slot.equals_string, applies_to_all_array_elements=True)
- prop.add_keyword('const', slot.equals_number, applies_to_all_array_elements=True)
+ own_constraints = self.get_value_constraints_for_slot(slot)
if prop.is_array:
+ all_element_constraints = self.get_value_constraints_for_slot(slot.all_members)
+ any_element_constraints = self.get_value_constraints_for_slot(slot.has_member)
prop.add_keyword('minItems', slot.minimum_cardinality)
prop.add_keyword('maxItems', slot.maximum_cardinality)
+ prop["items"].update(own_constraints)
+ prop["items"].update(all_element_constraints)
+ if any_element_constraints:
+ prop["contains"] = any_element_constraints
+ else:
+ prop.update(own_constraints)
if prop.is_object:
prop.add_keyword('minProperties', slot.minimum_cardinality)
@@ -420,11 +444,6 @@ class JsonSchemaGenerator(Generator):
prop = self.get_subschema_for_slot(slot)
subschema.add_property(aliased_slot_name, prop, slot_is_required)
- if (
- self.top_class is not None and camelcase(self.top_class) == camelcase(cls.name)
- ) or (self.top_class is None and cls.tree_root):
- self.top_level_schema.add_property(aliased_slot_name, prop, slot_is_required)
-
def serialize(self, **kwargs) -> str:
self.start_schema()
for enum_definition in self.schemaview.all_enums().values():
diff --git a/linkml/generators/pydanticgen.py b/linkml/generators/pydanticgen.py
index b685d45b..9171ab00 100644
--- a/linkml/generators/pydanticgen.py
+++ b/linkml/generators/pydanticgen.py
@@ -46,7 +46,8 @@ class ConfiguredBaseModel(WeakRefShimBaseModel,
validate_all = True,
underscore_attrs_are_private = True,
extra = {% if allow_extra %}'allow'{% else %}'forbid'{% endif %},
- arbitrary_types_allowed = True):
+ arbitrary_types_allowed = True,
+ use_enum_values = True):
pass
{% for e in enums.values() %}
|
linkml/linkml
|
a238ae038345d03a54e401e5713a310adf925e7e
|
diff --git a/tests/test_generators/input/jsonschema_multivalued_element_constraints.yaml b/tests/test_generators/input/jsonschema_multivalued_element_constraints.yaml
index a4c0bd89..0eaddd7f 100644
--- a/tests/test_generators/input/jsonschema_multivalued_element_constraints.yaml
+++ b/tests/test_generators/input/jsonschema_multivalued_element_constraints.yaml
@@ -17,8 +17,31 @@ schema:
multivalued: true
pattern: e.*
- # TODO: this should also contain test cases for has_member and all_members
- # See: https://github.com/linkml/linkml/issues/1107
+ int_list_with_all_members:
+ range: integer
+ multivalued: true
+ all_members:
+ minimum_value: 2
+ maximum_value: 5
+
+ int_list_with_has_member:
+ range: integer
+ multivalued: true
+ has_member:
+ minimum_value: 2
+ maximum_value: 5
+
+ string_list_with_all_members:
+ range: string
+ multivalued: true
+ all_members:
+ pattern: e.*
+
+ string_list_with_has_member:
+ range: string
+ multivalued: true
+ has_member:
+ pattern: e.*
classes:
Test:
@@ -26,6 +49,10 @@ schema:
slots:
- int_list
- string_list
+ - int_list_with_all_members
+ - int_list_with_has_member
+ - string_list_with_all_members
+ - string_list_with_has_member
json_schema:
properties:
int_list:
@@ -35,6 +62,20 @@ json_schema:
string_list:
items:
pattern: e.*
+ int_list_with_all_members:
+ items:
+ minimum: 2
+ maximum: 5
+ int_list_with_has_member:
+ contains:
+ minimum: 2
+ maximum: 5
+ string_list_with_all_members:
+ items:
+ pattern: e.*
+ string_list_with_has_member:
+ contains:
+ pattern: e.*
data_cases:
- data:
int_list: [2, 3, 4, 5]
@@ -53,3 +94,37 @@ data_cases:
- echo
- foxtrot
error_message: Failed validating 'pattern'
+ - data:
+ int_list_with_all_members: [2, 3, 4, 5]
+ - data:
+ int_list_with_all_members: [1, 2, 3]
+ error_message: Failed validating 'minimum'
+ - data:
+ int_list_with_has_member: [2, 3, 4, 5]
+ - data:
+ int_list_with_has_member: [0, 1, 2]
+ - data:
+ int_list_with_has_member: [6, 7, 8]
+ error_message: Failed validating 'contains'
+ - data:
+ string_list_with_all_members:
+ - echo
+ - elephant
+ - data:
+ string_list_with_all_members:
+ - echo
+ - foxtrot
+ error_message: Failed validating 'pattern'
+ - data:
+ string_list_with_has_member:
+ - echo
+ - elephant
+ - data:
+ string_list_with_has_member:
+ - echo
+ - foxtrot
+ - data:
+ string_list_with_has_member:
+ - foxtrot
+ - golf
+ error_message: Failed validating 'contains'
diff --git a/tests/test_generators/input/jsonschema_rules_in_non_root_class.yaml b/tests/test_generators/input/jsonschema_rules_in_non_root_class.yaml
new file mode 100644
index 00000000..b6abe7b6
--- /dev/null
+++ b/tests/test_generators/input/jsonschema_rules_in_non_root_class.yaml
@@ -0,0 +1,69 @@
+schema:
+ name: test_rules_in_non_root_class
+ id: http://example.org/test_rules_in_non_root_class
+ prefixes:
+ linkml: https://w3id.org/linkml/
+ imports:
+ - linkml:types
+ slots:
+ addresses:
+ range: Address
+ multivalued: true
+ inlined: true
+ inlined_as_list: true
+ street_address:
+ country:
+ postal_code:
+ telephone:
+
+ classes:
+ Address:
+ slots:
+ - street_address
+ - country
+ - postal_code
+ - telephone
+ rules:
+ - preconditions:
+ slot_conditions:
+ country:
+ any_of:
+ - equals_string: USA
+ - equals_string: USA_territory
+ postconditions:
+ slot_conditions:
+ postal_code:
+ pattern: "[0-9]{5}(-[0-9]{4})?"
+ telephone:
+ pattern: "^\\+1 "
+ AddressCollection:
+ slots:
+ - addresses
+ tree_root: true
+
+data_cases:
+ - data:
+ addresses:
+ - street_address: 123 main street
+ country: USA
+ postal_code: "12345"
+ telephone: "+1 555 555 1234"
+ - data:
+ addresses:
+ - street_address: 123 main street
+ country: USA_territory
+ postal_code: "12345"
+ telephone: "+1 555 555 1234"
+ - data:
+ addresses:
+ - street_address: 123 main street
+ country: USA
+ postal_code: "DK-1448"
+ telephone: "+1 555 555 1234"
+ error_message: "'DK-1448' does not match"
+ - data:
+ addresses:
+ - street_address: Asiatisk Plads 2
+ country: DEN
+ postal_code: "DK-1448"
+ telephone: "+45 5555 5555"
diff --git a/tests/test_generators/test_jsonschemagen.py b/tests/test_generators/test_jsonschemagen.py
index 463733e9..5c1100fd 100644
--- a/tests/test_generators/test_jsonschemagen.py
+++ b/tests/test_generators/test_jsonschemagen.py
@@ -201,6 +201,11 @@ class JsonSchemaTestCase(unittest.TestCase):
schema, case["json_schema"], case.get("data_cases", [])
)
+ def test_rules_in_non_root_class(self):
+ """Tests that rules are applied to slots in non-root classes. """
+
+ self.externalFileTest("jsonschema_rules_in_non_root_class.yaml")
+
def test_range_unions(self):
"""Tests various permutations of range unions.
diff --git a/tests/test_issues/test_linkml_issue_723.py b/tests/test_issues/test_linkml_issue_723.py
index db6db433..a4927b0a 100644
--- a/tests/test_issues/test_linkml_issue_723.py
+++ b/tests/test_issues/test_linkml_issue_723.py
@@ -277,7 +277,6 @@ class Issue723ExportCase(TestEnvironmentTestCase):
p3.roles = [mod.Role.ANALYST, mod.Role.INVESTIGATOR]
self.assertEqual(p, p3)
self.assertEqual(p.status, mod.VitalStatus.ALIVE)
- self.assertEqual(type(p.status), mod.VitalStatus)
self.assertEqual(p.roles, [mod.Role.ANALYST, mod.Role.INVESTIGATOR])
# test the "double wrap" code
p.status = mod.VitalStatus(mod.VitalStatus.ALIVE)
|
Support multivalued slot constraints in JSON Schema generator
The LinkML model slots `all_members`, `has_member`, `minimum_cardinality`, and `maximum_cardinality` all have analogous concepts in JSON Schema, but currently `JsonSchemaGenerator` does not do anything with them.
For example, this LinkML model:
```yaml
id: http://example.org/test
name: test
imports:
- https://w3id.org/linkml/types
slots:
s:
range: integer
multivalued: true
has_member:
minimum_value: 0
maximum_value: 999
classes:
C:
tree_root: true
slots:
- s
```
should produce JSON Schema with a `contains` subschema:
```javascript
"properties": {
"s": {
"items": {
"type": "integer"
},
// ↓↓↓ this is not included currently ↓↓↓
"contains": {
"minimum": 0,
"maximum": 999
},
// ↑↑↑ ↑↑↑
"type": "array"
}
},
```
`minimum_cardinality` and `maximum_cardinality` should map directly to `minItems` and `maxItems` respectively (https://json-schema.org/understanding-json-schema/reference/array.html#length)
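A sketch of that direct mapping as an illustrative helper (not the generator's actual code path; the attribute names follow the LinkML slot metamodel):
```python
def apply_cardinality(subschema: dict, slot) -> dict:
    """Map LinkML cardinality constraints onto a JSON Schema array."""
    if getattr(slot, "minimum_cardinality", None) is not None:
        subschema["minItems"] = slot.minimum_cardinality
    if getattr(slot, "maximum_cardinality", None) is not None:
        subschema["maxItems"] = slot.maximum_cardinality
    return subschema

# e.g. a multivalued slot with minimum_cardinality=1, maximum_cardinality=3
# yields {"type": "array", "minItems": 1, "maxItems": 3, ...}
```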
|
0.0
|
a238ae038345d03a54e401e5713a310adf925e7e
|
[
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_multivalued_element_constraints",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_rules_in_non_root_class"
] |
[
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_class_uri_any",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_jsonschema_integration",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_multivalued_slot_cardinality",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_range_unions",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_top_class_identifier",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_type_inheritance",
"tests/test_generators/test_jsonschemagen.py::JsonSchemaTestCase::test_value_constraints"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-28 16:47:44+00:00
|
cc0-1.0
| 3,596 |
|
linkml__linkml-1418
|
diff --git a/linkml/generators/README.md b/linkml/generators/README.md
index d518b47b..af044001 100644
--- a/linkml/generators/README.md
+++ b/linkml/generators/README.md
@@ -6,6 +6,7 @@
| gen-jsonld-context | [jsonldcontextgen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/jsonldcontextgen.py) | ContextGenerator | generate a JSON-LD @context block | [contextgen help](../../tests/test_scripts/output/gencontext/help) |
| gen-csv | [csvgen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/csvgen.py) | CsvGenerator | generate a csv summary | [csvgen help](../../tests/test_scripts/output/gencsv/help) |
| gen-graphviz | [dotgen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/dotgen.py) | DotGenerator | generate graphviz representation | [dotgen help](../../tests/test_scripts/output/gengraphviz/help) |
+| gen-golang | [golanggen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/golanggen.py) | GolangGenerator | generate Golang representation | [golanggen help](../../tests/test_scripts/output/gengolang/help) |
| gen-golr-views | [golrgen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/golrgen.py) | GolrSchemaGenerator | generate a GOLR(?) representation | [golrgen help](../../tests/test_scripts/output/genglor/help) |
| gen-graphql | [graphqlgen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/graphqlgen.py) | GraphqlGenerator | generate a graphql representation | [graphql help](../../tests/test_scripts/output/gengraphql/help) |
| gen-proto | [protogen.py](https://github.com/linkml/linkml/blob/main/linkml/generators/protogen.py) | ProtoGenerator | generate Protobuf Schema representation | [proto help](../../tests/test_scripts/output/genproto/help) |
diff --git a/linkml/generators/golanggen.py b/linkml/generators/golanggen.py
new file mode 100644
index 00000000..2f87a761
--- /dev/null
+++ b/linkml/generators/golanggen.py
@@ -0,0 +1,216 @@
+import logging
+import os
+from copy import deepcopy
+from dataclasses import dataclass
+from pathlib import Path
+from typing import (Callable, Dict, Iterator, List, Optional, Set, TextIO,
+ Tuple, Union)
+
+import click
+from jinja2 import Environment, FileSystemLoader, Template
+from linkml_runtime.dumpers import yaml_dumper
+from linkml_runtime.linkml_model.meta import (Annotation, ClassDefinition,
+ ClassDefinitionName, Definition,
+ DefinitionName, Element,
+ EnumDefinition, SchemaDefinition,
+ SlotDefinition,
+ SlotDefinitionName,
+ TypeDefinition)
+from linkml_runtime.utils.formatutils import camelcase, underscore
+from linkml_runtime.utils.schemaview import SchemaView
+
+from linkml._version import __version__
+from linkml.utils.generator import Generator, shared_arguments
+
+type_map = {
+ "str": "string",
+ "int": "int",
+ "Bool": "bool",
+ "float": "float64",
+    "XSDDate": "time.Time",
+}
+
+default_template = """
+{%- if '_' in view.schema.name -%}
+ {%- set package_name = view.schema.name[:view.schema.name.find('_')] -%}
+{%- else -%}
+ {%- set package_name = view.schema.name -%}
+{%- endif -%}
+package {{package_name}}
+
+{% for c in view.all_classes().values() -%}
+ {%- for sn in view.class_slots(c.name, direct=False) %}
+ {%- set s = view.induced_slot(sn, c.name) -%}
+ {%- if "time." in gen.range(s) -%}
+ {%- set usesTime = True %}
+ {%- else -%}
+ {%- set usesTime = False %}
+ {%- endif -%}
+ {%- endfor -%}
+{%- endfor -%}
+{%- if usesTime -%}
+import (
+    "time" // for time.Time
+)
+{%- endif -%}
+
+{% for c in view.all_classes().values() -%}
+{%- if c.description -%}
+/*
+ * {{c.description}}
+ */
+{%- endif -%}
+{% set parents = gen.parents(c) %}
+type {{gen.name(c)}} struct {
+ {%- if parents %}
+ /*
+ * parent types
+ */
+ {%- for p in parents %}
+ {{p}}
+ {%- endfor %}
+ {%- endif -%}
+ {%- for sn in view.class_slots(c.name, direct=False) %}
+ {%- set s = view.induced_slot(sn, c.name) -%}
+ {%- if s.description %}
+ /*
+ * {{s.description}}
+ */
+ {%- endif %}
+ {{gen.name(s)}} {{gen.range(s)}} `json:"{{gen.json_name(s)}}"`
+ {%- endfor %}
+}
+
+{% endfor %}
+"""
+
+
+@dataclass
+class GolangGenerator(Generator):
+ """
+ Generates Golang code from a schema
+ """
+
+ # ClassVars
+ generatorname = os.path.basename(__file__)
+ generatorversion = "0.1.0"
+ valid_formats = ["text"]
+ uses_schemaloader = False
+
+ def serialize(self) -> str:
+ """
+ Serialize a schema to Golang string
+ :return:
+ """
+ template_obj = Template(default_template)
+ out_str = template_obj.render(
+ gen=self, schema=self.schemaview.schema, view=self.schemaview
+ )
+ return out_str
+
+ def name(self, element: Element) -> str:
+ """
+ Returns the name of the element in its canonical form
+
+ :param element:
+ :return:
+ """
+ alias = element.name
+ if isinstance(element, SlotDefinition) and element.alias:
+ alias = element.alias
+ return camelcase(alias)
+
+ def json_name(self, element: Element) -> str:
+ """
+ Returns the name of the element in its JSON (snake-case) form
+
+ :param element:
+ :return:
+ """
+ alias = element.name
+ if isinstance(element, SlotDefinition) and element.alias:
+ alias = element.alias
+ return underscore(alias)
+
+ def classref(self, cls: ClassDefinition) -> Optional[str]:
+ """
+ Returns the class name for the class that holds a reference (foreign key) to members of this class
+
+ E.g. if a class Person has an identifier field called unique_id, then this will
+ return PersonUniqueId
+
+ :param cls:
+ :return: ref name, None if no identifier
+ """
+ id_slot = self.get_identifier_or_key_slot(cls.name)
+ if id_slot:
+ return f"{self.name(cls)}{camelcase(id_slot.name)}"
+ else:
+ return None
+
+ def get_identifier_or_key_slot(
+ self, cn: ClassDefinitionName
+ ) -> Optional[SlotDefinition]:
+ sv = self.schemaview
+ id_slot = sv.get_identifier_slot(cn)
+ if id_slot:
+ return id_slot
+ else:
+ for s in sv.class_induced_slots(cn):
+ if s.key:
+ return s
+ return None
+
+ def range(self, slot: SlotDefinition) -> str:
+ sv = self.schemaview
+ r = slot.range
+ if r in sv.all_classes():
+ rc = sv.get_class(r)
+ rc_ref = self.classref(rc)
+ rc_name = self.name(rc)
+ id_slot = self.get_identifier_or_key_slot(r)
+ if slot.multivalued:
+                if not id_slot or slot.inlined:
+                    # inlined (as a list or a dict) or no identifier: a slice of the class
+                    return f"[]{rc_name}"
+                else:
+                    # referenced by identifier: a slice of the reference type
+                    return f"[]{rc_ref}"
+ else:
+ if not id_slot or slot.inlined:
+ return rc_name
+ else:
+ return f"{rc_ref}"
+ else:
+ if r in sv.all_types():
+ t = sv.get_type(r)
+ if t.base and t.base in type_map:
+ return type_map[t.base]
+ else:
+ logging.warning(f"Unknown type.base: {t.name}")
+ return "string"
+
+ def parents(self, cls: ClassDefinition) -> List[ClassDefinitionName]:
+ if cls.is_a:
+ parents = [cls.is_a]
+ else:
+ parents = []
+ return [ClassDefinitionName(camelcase(p)) for p in parents + cls.mixins]
+
+
+@shared_arguments(GolangGenerator)
[email protected]_option(__version__, "-V", "--version")
[email protected]()
+def cli(yamlfile, **args):
+ """Generate Golang types
+
+ This very simple generator produces a Golang package named after the given
+ schema with structs that implement the classes in that schema.
+ """
+ gen = GolangGenerator(yamlfile, **args)
+ print(gen.serialize())
+
+
+if __name__ == "__main__":
+ cli()
diff --git a/linkml/validators/jsonschemavalidator.py b/linkml/validators/jsonschemavalidator.py
index 96c585e1..74f74c17 100644
--- a/linkml/validators/jsonschemavalidator.py
+++ b/linkml/validators/jsonschemavalidator.py
@@ -52,7 +52,8 @@ class JsonSchemaDataValidator(DataValidator):
if self.jsonschema_objs is None:
self.jsonschema_objs = {}
schema_id = self.schema.id if isinstance(self.schema, SchemaDefinition) else self.schema
- if schema_id not in self.jsonschema_objs:
+ cache_params = frozenset([schema_id, target_class.class_name])
+ if cache_params not in self.jsonschema_objs:
jsonschemastr = JsonSchemaGenerator(
self.schema,
mergeimports=True,
@@ -60,10 +61,10 @@ class JsonSchemaDataValidator(DataValidator):
not_closed=not_closed,
).serialize(not_closed=not_closed)
jsonschema_obj = json.loads(jsonschemastr)
- self.jsonschema_objs[schema_id] = jsonschema_obj
+ self.jsonschema_objs[cache_params] = jsonschema_obj
else:
logging.info(f"Using cached jsonschema for {schema_id}")
- jsonschema_obj = self.jsonschema_objs[schema_id]
+ jsonschema_obj = self.jsonschema_objs[cache_params]
return jsonschema.validate(inst_dict, schema=jsonschema_obj, format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER)
def validate_dict(
@@ -148,7 +149,7 @@ def cli(
outargs = {}
if datautils._is_xsv(input_format):
if index_slot is None:
- index_slot = infer_index_slot(sv, target_class)
+ index_slot = datautils.infer_index_slot(sv, target_class)
if index_slot is None:
raise Exception("--index-slot is required for CSV input")
inargs["index_slot"] = index_slot
diff --git a/pyproject.toml b/pyproject.toml
index 7077b38c..10b4ef33 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,6 +52,7 @@ gen-jsonld-context = "linkml.generators.jsonldcontextgen:cli"
gen-prefix-map = "linkml.generators.prefixmapgen:cli"
gen-csv = "linkml.generators.csvgen:cli"
gen-graphviz = "linkml.generators.dotgen:cli"
+gen-golang = "linkml.generators.golanggen:cli"
gen-golr-views = "linkml.generators.golrgen:cli"
gen-graphql = "linkml.generators.graphqlgen:cli"
gen-java = "linkml.generators.javagen:cli"
|
linkml/linkml
|
db3f98a024a3fec97f3687faae54b165e6cdf02d
|
diff --git a/tests/test_generators/test_golanggen.py b/tests/test_generators/test_golanggen.py
new file mode 100644
index 00000000..a5510f21
--- /dev/null
+++ b/tests/test_generators/test_golanggen.py
@@ -0,0 +1,28 @@
+import sys
+import unittest
+
+from linkml.generators.golanggen import GolangGenerator
+from tests.test_generators.environment import env
+
+SCHEMA = env.input_path("kitchen_sink.yaml")
+OUT = env.expected_path("kitchen_sink.go")
+
+
+class GolangGeneratorTestCase(unittest.TestCase):
+ def test_golanggen(self):
+        """golang"""
+ code = GolangGenerator(SCHEMA, mergeimports=True).serialize()
+ with open(OUT, "w") as stream:
+ stream.write(code)
+
+ def assert_in(s: str) -> None:
+ assert s.replace(" ", "") in code.replace(" ", "")
+
+ assert "package kitchen" in code
+ assert_in("type Person struct {")
+ assert_in("HasFamilialRelationships []FamilialRelationship")
+ assert_in("CodeSystems []CodeSystem")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/test_validation/input/kitchen_sink_inst_02.yaml b/tests/test_validation/input/kitchen_sink_inst_02.yaml
new file mode 100644
index 00000000..cee57ba9
--- /dev/null
+++ b/tests/test_validation/input/kitchen_sink_inst_02.yaml
@@ -0,0 +1,32 @@
+persons:
+ - id: P:003
+ name: hermoine granger
+ age_in_years: 33
+ - id: P:004
+ name: harry potter
+ has_employment_history:
+ - employed_at: ROR:1
+ started_at_time: 2023-01-01
+ is_current: true
+ has_familial_relationships:
+ - related_to: P:001
+ type: SIBLING_OF
+ has_medical_history:
+ - started_at_time: 2023-01-01
+ in_location: GEO:1234
+ diagnosis:
+ id: CODE:P1789
+ name: hypertension
+ procedure:
+ id: CODE:P1846
+ name: valve repair
+ addresses:
+ - street: 4 privet drive
+ city: surrey
+companies:
+ - id: ROR:1
+ name: hogwarts
+activities:
+ - id: A:1
+ started_at_time: 2023-01-01
+ was_associated_with: Agent:987
diff --git a/tests/test_validation/test_jsonschemavalidation.py b/tests/test_validation/test_jsonschemavalidation.py
index 54e18f12..6b2f9c80 100644
--- a/tests/test_validation/test_jsonschemavalidation.py
+++ b/tests/test_validation/test_jsonschemavalidation.py
@@ -1,27 +1,40 @@
import unittest
-from linkml_runtime.loaders import json_loader, yaml_loader
-from linkml_runtime.utils.schemaview import SchemaView
+from linkml_runtime.loaders import yaml_loader
from linkml.generators.pythongen import PythonGenerator
from linkml.validators import JsonSchemaDataValidator
from tests.test_validation.environment import env
SCHEMA = env.input_path("kitchen_sink.yaml")
-DATA = env.input_path("kitchen_sink_inst_01.yaml")
+INSTANCE_DATA_1 = env.input_path("kitchen_sink_inst_01.yaml")
+INSTANCE_DATA_2 = env.input_path("kitchen_sink_inst_02.yaml")
class JsonSchemaValidatorTestCase(unittest.TestCase):
def test_jsonschema_validation(self):
"""Validate data against a LinkML module using a json-schema validator"""
- print(f"TEST: Loading {SCHEMA}")
mod = PythonGenerator(SCHEMA).compile_module()
- obj = yaml_loader.load(source=DATA, target_class=mod.Dataset)
- # schema = SchemaView(SCHEMA).schema
+ obj1 = yaml_loader.load(source=INSTANCE_DATA_1, target_class=mod.Dataset)
v = JsonSchemaDataValidator(schema=SCHEMA)
- print(f"Validating: {obj}")
- results = v.validate_object(obj)
- print(results)
+ # check that jsonschema_objs dict cache is empty before validate_object()
+ # first call
+ self.assertIsNone(v.jsonschema_objs)
+ v.validate_object(obj1, target_class=mod.Dataset)
+
+ obj2 = yaml_loader.load(source=INSTANCE_DATA_2, target_class=mod.Dataset)
+ v.validate_object(obj2, target_class=mod.Dataset)
+
+ # check that the cache store is a dict
+ self.assertEqual(type(v.jsonschema_objs), dict)
+ # check that the cache store is not empty
+ self.assertGreater(len(v.jsonschema_objs.keys()), 0)
+ for f, j in v.jsonschema_objs.items():
+            # check that cache store keys are of type frozenset
+ self.assertEqual(type(f), frozenset)
+ # check that cache store values are dicts
+ self.assertEqual(type(j), dict)
+ self.assertGreater(len(j.keys()), 0)
if __name__ == "__main__":
|
Any interest in a Golang generator?
**Is your feature request related to a problem? Please describe.**
As a developer on a KBase-JGI co-development project, I'm working on a data transfer microservice in Go that incorporates some data types from KBase's Credit Engine. The Credit Engine articulates these types in a LinkML schema, and so it would be natural for me to generate Go code using this schema.
**Describe the solution you'd like**
I'm interested in adding a Golang generator to LinkML, and willing to do the work if I can ask some questions related to the generators that may not be handled in [your relevant documentation](https://linkml.io/linkml/howtos/port-linkml.html). I'm sure this effort can be justified in the context of the co-development project, since it's part of a greater interoperability strategy being pursued by KBase and JGI.
**How important is this feature?** Select from the options below:
• Medium - can do work without it; but it's important (e.g. to save time or for convenience)
The number of types I need for this project is small, so I can certainly write the code myself by inspecting the LinkML schema. But as we all know, maintenance is the elephant in the software room, and it would be nice to have a workflow that simplified updates to the KBase Credit Engine schema (and/or updates to LinkML and its schema features).
**When will use cases depending on this become relevant?** Select from the options below:
• Mid-term - 2-4 months
Again, the LinkML generator is for ease of maintenance, so my need is not urgent. But I do have some time in the short term to throw at this if there's no opposition from the project.
@ialarmedalien
|
0.0
|
db3f98a024a3fec97f3687faae54b165e6cdf02d
|
[
"tests/test_generators/test_golanggen.py::GolangGeneratorTestCase::test_golanggen",
"tests/test_validation/test_jsonschemavalidation.py::JsonSchemaValidatorTestCase::test_jsonschema_validation"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-02 18:48:16+00:00
|
cc0-1.0
| 3,597 |
|
linkml__linkml-1420
|
diff --git a/linkml/validators/jsonschemavalidator.py b/linkml/validators/jsonschemavalidator.py
index 96c585e1..74f74c17 100644
--- a/linkml/validators/jsonschemavalidator.py
+++ b/linkml/validators/jsonschemavalidator.py
@@ -52,7 +52,8 @@ class JsonSchemaDataValidator(DataValidator):
if self.jsonschema_objs is None:
self.jsonschema_objs = {}
schema_id = self.schema.id if isinstance(self.schema, SchemaDefinition) else self.schema
- if schema_id not in self.jsonschema_objs:
+ cache_params = frozenset([schema_id, target_class.class_name])
+ if cache_params not in self.jsonschema_objs:
jsonschemastr = JsonSchemaGenerator(
self.schema,
mergeimports=True,
@@ -60,10 +61,10 @@ class JsonSchemaDataValidator(DataValidator):
not_closed=not_closed,
).serialize(not_closed=not_closed)
jsonschema_obj = json.loads(jsonschemastr)
- self.jsonschema_objs[schema_id] = jsonschema_obj
+ self.jsonschema_objs[cache_params] = jsonschema_obj
else:
logging.info(f"Using cached jsonschema for {schema_id}")
- jsonschema_obj = self.jsonschema_objs[schema_id]
+ jsonschema_obj = self.jsonschema_objs[cache_params]
return jsonschema.validate(inst_dict, schema=jsonschema_obj, format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER)
def validate_dict(
@@ -148,7 +149,7 @@ def cli(
outargs = {}
if datautils._is_xsv(input_format):
if index_slot is None:
- index_slot = infer_index_slot(sv, target_class)
+ index_slot = datautils.infer_index_slot(sv, target_class)
if index_slot is None:
raise Exception("--index-slot is required for CSV input")
inargs["index_slot"] = index_slot
|
linkml/linkml
|
db3f98a024a3fec97f3687faae54b165e6cdf02d
|
diff --git a/tests/test_validation/input/kitchen_sink_inst_02.yaml b/tests/test_validation/input/kitchen_sink_inst_02.yaml
new file mode 100644
index 00000000..cee57ba9
--- /dev/null
+++ b/tests/test_validation/input/kitchen_sink_inst_02.yaml
@@ -0,0 +1,32 @@
+persons:
+ - id: P:003
+ name: hermoine granger
+ age_in_years: 33
+ - id: P:004
+ name: harry potter
+ has_employment_history:
+ - employed_at: ROR:1
+ started_at_time: 2023-01-01
+ is_current: true
+ has_familial_relationships:
+ - related_to: P:001
+ type: SIBLING_OF
+ has_medical_history:
+ - started_at_time: 2023-01-01
+ in_location: GEO:1234
+ diagnosis:
+ id: CODE:P1789
+ name: hypertension
+ procedure:
+ id: CODE:P1846
+ name: valve repair
+ addresses:
+ - street: 4 privet drive
+ city: surrey
+companies:
+ - id: ROR:1
+ name: hogwarts
+activities:
+ - id: A:1
+ started_at_time: 2023-01-01
+ was_associated_with: Agent:987
diff --git a/tests/test_validation/test_jsonschemavalidation.py b/tests/test_validation/test_jsonschemavalidation.py
index 54e18f12..6b2f9c80 100644
--- a/tests/test_validation/test_jsonschemavalidation.py
+++ b/tests/test_validation/test_jsonschemavalidation.py
@@ -1,27 +1,40 @@
import unittest
-from linkml_runtime.loaders import json_loader, yaml_loader
-from linkml_runtime.utils.schemaview import SchemaView
+from linkml_runtime.loaders import yaml_loader
from linkml.generators.pythongen import PythonGenerator
from linkml.validators import JsonSchemaDataValidator
from tests.test_validation.environment import env
SCHEMA = env.input_path("kitchen_sink.yaml")
-DATA = env.input_path("kitchen_sink_inst_01.yaml")
+INSTANCE_DATA_1 = env.input_path("kitchen_sink_inst_01.yaml")
+INSTANCE_DATA_2 = env.input_path("kitchen_sink_inst_02.yaml")
class JsonSchemaValidatorTestCase(unittest.TestCase):
def test_jsonschema_validation(self):
"""Validate data against a LinkML module using a json-schema validator"""
- print(f"TEST: Loading {SCHEMA}")
mod = PythonGenerator(SCHEMA).compile_module()
- obj = yaml_loader.load(source=DATA, target_class=mod.Dataset)
- # schema = SchemaView(SCHEMA).schema
+ obj1 = yaml_loader.load(source=INSTANCE_DATA_1, target_class=mod.Dataset)
v = JsonSchemaDataValidator(schema=SCHEMA)
- print(f"Validating: {obj}")
- results = v.validate_object(obj)
- print(results)
+ # check that jsonschema_objs dict cache is empty before validate_object()
+ # first call
+ self.assertIsNone(v.jsonschema_objs)
+ v.validate_object(obj1, target_class=mod.Dataset)
+
+ obj2 = yaml_loader.load(source=INSTANCE_DATA_2, target_class=mod.Dataset)
+ v.validate_object(obj2, target_class=mod.Dataset)
+
+ # check that the cache store is a dict
+ self.assertEqual(type(v.jsonschema_objs), dict)
+ # check that the cache store is not empty
+ self.assertGreater(len(v.jsonschema_objs.keys()), 0)
+ for f, j in v.jsonschema_objs.items():
+            # check that cache store keys are of type frozenset
+ self.assertEqual(type(f), frozenset)
+ # check that cache store values are dicts
+ self.assertEqual(type(j), dict)
+ self.assertGreater(len(j.keys()), 0)
if __name__ == "__main__":
|
jsonschema validation caching should be keyed on both schema and entry point
The changes made in https://github.com/linkml/linkml/pull/1363 consider only the _schema_ when implementing the caching logic.
However, the JSON Schema that the instance data needs to be validated against depends on two parameters: the schema itself and the entry point, i.e. the `target_class`.
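A toy sketch of the corrected keying, decoupled from the linkml classes; every name here is illustrative:
```python
from typing import Dict, FrozenSet

_cache: Dict[FrozenSet[str], dict] = {}

def get_jsonschema(schema_id: str, target_class: str) -> dict:
    # Key on BOTH parameters; keying on schema_id alone would hand back the
    # JSON Schema that was generated for a different entry point.
    key = frozenset([schema_id, target_class])
    if key not in _cache:
        # stand-in for the expensive JsonSchemaGenerator(...).serialize() call
        _cache[key] = {"$id": schema_id, "title": target_class}
    return _cache[key]
```
A `(schema_id, target_class)` tuple would work just as well as a key; `frozenset` simply mirrors what the patch above uses.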
|
0.0
|
db3f98a024a3fec97f3687faae54b165e6cdf02d
|
[
"tests/test_validation/test_jsonschemavalidation.py::JsonSchemaValidatorTestCase::test_jsonschema_validation"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-05-03 21:57:31+00:00
|
cc0-1.0
| 3,598 |
|
linkml__linkml-1543
|
diff --git a/linkml/validators/jsonschemavalidator.py b/linkml/validators/jsonschemavalidator.py
index 551db8a2..039ca65a 100644
--- a/linkml/validators/jsonschemavalidator.py
+++ b/linkml/validators/jsonschemavalidator.py
@@ -26,7 +26,7 @@ class HashableSchemaDefinition(SchemaDefinition):
@lru_cache(maxsize=None)
-def _generate_jsonschema(schema, top_class, closed):
+def _generate_jsonschema(schema, top_class, closed, include_range_class_descendants):
logging.debug("Generating JSON Schema")
not_closed = not closed
return JsonSchemaGenerator(
@@ -34,6 +34,7 @@ def _generate_jsonschema(schema, top_class, closed):
mergeimports=True,
top_class=top_class,
not_closed=not_closed,
+ include_range_class_descendants=include_range_class_descendants,
).generate()
@@ -49,6 +50,7 @@ class JsonSchemaDataValidator(DataValidator):
Implementation of DataValidator that wraps jsonschema validation
"""
+ include_range_class_descendants: bool = False
_hashable_schema: Union[str, HashableSchemaDefinition] = field(init=False, repr=False)
def __setattr__(self, __name: str, __value: Any) -> None:
@@ -104,7 +106,9 @@ class JsonSchemaDataValidator(DataValidator):
if len(roots) != 1:
raise ValueError(f"Cannot determine tree root: {roots}")
target_class_name = roots[0]
- jsonschema_obj = _generate_jsonschema(self._hashable_schema, target_class_name, closed)
+ jsonschema_obj = _generate_jsonschema(
+ self._hashable_schema, target_class_name, closed, self.include_range_class_descendants
+ )
validator = jsonschema.Draft7Validator(
jsonschema_obj, format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER
)
@@ -135,6 +139,14 @@ class JsonSchemaDataValidator(DataValidator):
default=False,
help="Exit after the first validation failure is found. If not specified all validation failures are reported.",
)
[email protected](
+ "--include-range-class-descendants/--no-range-class-descendants",
+ default=False,
+ show_default=False,
+ help="""
+When handling range constraints, include all descendants of the range class instead of just the range class
+""",
+)
@click.argument("input")
@click.version_option(__version__, "-V", "--version")
def cli(
@@ -145,6 +157,7 @@ def cli(
schema=None,
index_slot=None,
exit_on_first_failure=False,
+ include_range_class_descendants=False,
) -> None:
"""
Validates instance data
@@ -188,7 +201,9 @@ def cli(
if schema is None:
raise Exception("--schema must be passed in order to validate. Suppress with --no-validate")
- validator = JsonSchemaDataValidator(schema)
+ validator = JsonSchemaDataValidator(
+ schema, include_range_class_descendants=include_range_class_descendants
+ )
error_count = 0
for error in validator.iter_validate_dict(
data_as_dict, target_class_name=py_target_class.class_name
|
linkml/linkml
|
bb525c266dc120cb81db8c987f58418b0e50c33b
|
diff --git a/tests/test_validation/input/Person-02.yaml b/tests/test_validation/input/Person-02.yaml
new file mode 100644
index 00000000..6aa60c50
--- /dev/null
+++ b/tests/test_validation/input/Person-02.yaml
@@ -0,0 +1,15 @@
+id: P:004
+name: eventful life
+has_events:
+ - employed_at: ROR:1
+ started_at_time: "2019-01-01"
+ is_current: true
+ - started_at_time: "2023-01-01"
+ in_location: GEO:1234
+ diagnosis:
+ id: CODE:P1789
+ name: hypertension
+ procedure:
+ id: CODE:P1846
+ name: valve repair
+
diff --git a/tests/test_validation/input/kitchen_sink.yaml b/tests/test_validation/input/kitchen_sink.yaml
index ea0b5b7b..ac7aaeb5 100644
--- a/tests/test_validation/input/kitchen_sink.yaml
+++ b/tests/test_validation/input/kitchen_sink.yaml
@@ -74,6 +74,7 @@ classes:
- age in years
- addresses
- has birth event
+ - has events
slot_usage:
name:
pattern: "^\\S+ \\S+" ## do not do this in a real schema, people have all kinds of names
@@ -240,6 +241,10 @@ slots:
city:
has birth event:
range: BirthEvent
+ has events:
+ multivalued: True
+ range: Event
+ inlined_as_list: true
enums:
FamilialRelationshipType:
diff --git a/tests/test_validation/test_jsonschemavalidation.py b/tests/test_validation/test_jsonschemavalidation.py
index 30981d22..a7968595 100644
--- a/tests/test_validation/test_jsonschemavalidation.py
+++ b/tests/test_validation/test_jsonschemavalidation.py
@@ -16,6 +16,7 @@ SCHEMA = env.input_path("kitchen_sink.yaml")
DATASET_1 = env.input_path("Dataset-01.yaml")
DATASET_2 = env.input_path("Dataset-02.yaml")
PERSON_1 = env.input_path("Person-01.yaml")
+PERSON_2 = env.input_path("Person-02.yaml")
PERSON_INVALID_1 = env.input_path("Person-invalid-01.yaml")
@@ -48,6 +49,35 @@ class JsonSchemaValidatorTestCase(unittest.TestCase):
result = validator.validate_dict(obj, "Person")
self.assertIsNone(result)
+ with open(PERSON_2) as file:
+ obj = yaml.safe_load(file)
+
+ with self.assertRaises(JsonSchemaDataValidatorError) as ctx:
+ validator.validate_dict(obj, "Person")
+
+ with open(PERSON_INVALID_1) as file:
+ obj = yaml.safe_load(file)
+
+ with self.assertRaises(JsonSchemaDataValidatorError) as ctx:
+ validator.validate_dict(obj, "Person")
+
+ messages = ctx.exception.validation_messages
+ self.assertEqual(len(messages), 1)
+ self.assertIn("name", messages[0])
+
+ def test_validate_dict_including_descendants(self):
+ validator = JsonSchemaDataValidator(schema=SCHEMA, include_range_class_descendants=True)
+
+ with open(PERSON_1) as file:
+ obj = yaml.safe_load(file)
+ result = validator.validate_dict(obj, "Person")
+ self.assertIsNone(result)
+
+ with open(PERSON_2) as file:
+ obj = yaml.safe_load(file)
+ result = validator.validate_dict(obj, "Person")
+ self.assertIsNone(result)
+
with open(PERSON_INVALID_1) as file:
obj = yaml.safe_load(file)
|
Add `--include-range-class-descendants` option to "linkml-validate"
**Is your feature request related to a problem? Please describe.**
`linkml-validate` does not accept instances of subclasses of a class specified in a range as valid. Being used to the behavior of inheritance in OOP, which also applies to the generated Python code, I find this misleading and frustrating.
**Describe the solution you'd like**
IMO (from the point of view of an OOP programmer; this might be different for an "ontologist"), accepting children should be the default behavior. Since `gen-json-schema` already has the option `--include-range-class-descendants`, having such an option for `linkml-validate` would also be fine for me.
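At the Python level this could look like the following minimal sketch; the keyword argument mirrors the existing `gen-json-schema` flag, and the schema path is illustrative:
```python
from linkml.validators import JsonSchemaDataValidator

validator = JsonSchemaDataValidator(
    schema="personinfo.yaml",  # illustrative path
    include_range_class_descendants=True,
)
```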
**How important is this feature?** Select from the options below:
• Medium - As of now I have to ignore validation errors that appear due to this limitation of `linkml-validate`
**When will use cases depending on this become relevant?** Select from the options below:
• Short-term - 2-4 weeks
**Additional context**
I'm providing a PR to implement this feature.
|
0.0
|
bb525c266dc120cb81db8c987f58418b0e50c33b
|
[
"tests/test_validation/test_jsonschemavalidation.py::JsonSchemaValidatorTestCase::test_validate_dict_including_descendants"
] |
[
"tests/test_validation/test_jsonschemavalidation.py::JsonSchemaValidatorTestCase::test_jsonschema_caching",
"tests/test_validation/test_jsonschemavalidation.py::JsonSchemaValidatorTestCase::test_validate_dict",
"tests/test_validation/test_jsonschemavalidation.py::JsonSchemaValidatorTestCase::test_validate_object"
] |
{
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-18 07:17:50+00:00
|
cc0-1.0
| 3,599 |
|
linkml__linkml-1623
|
diff --git a/linkml/generators/jsonschemagen.py b/linkml/generators/jsonschemagen.py
index 9cf8aabc..f1440339 100644
--- a/linkml/generators/jsonschemagen.py
+++ b/linkml/generators/jsonschemagen.py
@@ -387,6 +387,14 @@ class JsonSchemaGenerator(Generator):
return JsonSchema()
constraints = JsonSchema()
+ if slot.range in self.schemaview.all_types().keys():
+ # types take lower priority
+ schema_type = self.schemaview.induced_type(slot.range)
+ constraints.add_keyword("pattern", schema_type.pattern)
+ constraints.add_keyword("minimum", schema_type.minimum_value)
+ constraints.add_keyword("maximum", schema_type.maximum_value)
+ constraints.add_keyword("const", schema_type.equals_string)
+ constraints.add_keyword("const", schema_type.equals_number)
constraints.add_keyword("pattern", slot.pattern)
constraints.add_keyword("minimum", slot.minimum_value)
constraints.add_keyword("maximum", slot.maximum_value)
diff --git a/linkml/utils/generator.py b/linkml/utils/generator.py
index c3e8e3d6..62d925bb 100644
--- a/linkml/utils/generator.py
+++ b/linkml/utils/generator.py
@@ -210,7 +210,6 @@ class Generator(metaclass=abc.ABCMeta):
# See https://github.com/linkml/linkml/issues/923 for discussion on how
# to simplify the overall framework
if isinstance(schema, Generator):
- logging.info("Instantiating generator with another generator is deprecated")
gen = schema
self.schema = gen.schema
self.synopsis = gen.synopsis
|
linkml/linkml
|
3e04645fc5faf1d9a3486d9ff2fac7c7d7ea54ab
|
diff --git a/tests/test_issues/input/issue_1371/test.schema.yaml b/tests/test_issues/input/issue_1371/test.schema.yaml
new file mode 100644
index 00000000..31df0a13
--- /dev/null
+++ b/tests/test_issues/input/issue_1371/test.schema.yaml
@@ -0,0 +1,26 @@
+# test.schema.yaml
+id: http://example.org/pattern
+name: pattern
+
+imports:
+ - https://w3id.org/linkml/types
+
+slots:
+ us_phone_number:
+ range: string
+ pattern: \d{3} \d{3} \d{4}
+ de_phone_number:
+ range: DePhoneNumber
+
+classes:
+ Test:
+ tree_root: true
+ slots:
+ - us_phone_number
+ - de_phone_number
+
+types:
+ DePhoneNumber:
+ uri: xsd:String
+ base: str
+ pattern: 0\d{3}-\d{8}
diff --git a/tests/test_issues/test_linkml_issue_1371.py b/tests/test_issues/test_linkml_issue_1371.py
new file mode 100644
index 00000000..3a543051
--- /dev/null
+++ b/tests/test_issues/test_linkml_issue_1371.py
@@ -0,0 +1,13 @@
+import json
+from pathlib import Path
+
+from linkml.generators import JsonSchemaGenerator
+
+SCHEMA = str(Path(__file__).parent / "input" / "issue_1371" / "test.schema.yaml")
+
+
+def test_json_schema():
+ jschema = json.loads(JsonSchemaGenerator(SCHEMA).serialize())
+ props = jschema["$defs"]["Test"]["properties"]
+ assert props["de_phone_number"]["pattern"] == r"0\d{3}-\d{8}"
+ assert props["us_phone_number"]["pattern"] == r"\d{3} \d{3} \d{4}"
|
LinkML Patterns should generate JSON Schema Patterns
**Describe the bug**
Specifying a [LinkML Pattern](https://linkml.io/linkml-model/docs/pattern/) in a **TypeDefinition**, for example, results in a `pattern` attribute in the generated JSON-LD, but it does not generate a [JSON Schema `pattern`](https://json-schema.org/understanding-json-schema/reference/string.html#regular-expressions).
**To reproduce**
Steps to reproduce the behavior:
1. Create a schema with a pattern in any of the [Applicable Classes](https://linkml.io/linkml-model/docs/pattern/#applicable-classes)
2. Generate JSON-LD
3. Confirm the `pattern` attribute appears
4. Generate JSON Schema
5. Confirm the `pattern` does NOT appear
**Expected behavior**
The resulting JSON Schema has the specified pattern.
**Additional context**
JSON-LD patterns cannot be validated with COTS software, whereas [JSON Schema `pattern` entries](https://json-schema.org/understanding-json-schema/reference/string.html#regular-expressions) can be validated with any JSON Schema validator.
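A minimal reproduction sketch, assuming a schema whose type carries a pattern; the path, type, and slot names are illustrative:
```python
import json

from linkml.generators import JsonSchemaGenerator

# Assumes "pattern_test.yaml" defines a type DePhoneNumber with
# pattern: 0\d{3}-\d{8}, used as the range of slot de_phone_number
# on a tree_root class Test.
jschema = json.loads(JsonSchemaGenerator("pattern_test.yaml").serialize())
props = jschema["$defs"]["Test"]["properties"]
# Expected, but currently missing for type-level patterns:
assert props["de_phone_number"]["pattern"] == r"0\d{3}-\d{8}"
```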
|
0.0
|
3e04645fc5faf1d9a3486d9ff2fac7c7d7ea54ab
|
[
"tests/test_issues/test_linkml_issue_1371.py::test_json_schema"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-14 00:54:17+00:00
|
cc0-1.0
| 3,600 |
|
linkml__linkml-1709
|
diff --git a/linkml/generators/docgen.py b/linkml/generators/docgen.py
index c7d04a89..0dee9da8 100644
--- a/linkml/generators/docgen.py
+++ b/linkml/generators/docgen.py
@@ -130,6 +130,9 @@ class DocGenerator(Generator):
directory: str = None
"""directory in which to write documents"""
+ index_name: str = "index"
+ """name of the index document"""
+
template_directory: str = None
"""directory for custom templates"""
@@ -188,7 +191,7 @@ class DocGenerator(Generator):
}
template = self._get_template("index")
out_str = template.render(gen=self, schema=sv.schema, schemaview=sv, **template_vars)
- self._write(out_str, directory, "index") # TODO: make configurable
+ self._write(out_str, directory, self.index_name)
if self._is_single_file_format(self.format):
logging.info(f"{self.format} is a single-page format, skipping non-index elements")
return
@@ -844,6 +847,7 @@ class DocGenerator(Generator):
required=True,
help="Folder to which document files are written",
)
[email protected]("--index-name", default="index", show_default=True, help="Name of the index document.")
@click.option("--dialect", help="Dialect or 'flavor' of Markdown used.")
@click.option(
"--diagram-type",
@@ -885,7 +889,7 @@ class DocGenerator(Generator):
)
@click.version_option(__version__, "-V", "--version")
@click.command()
-def cli(yamlfile, directory, dialect, template_directory, use_slot_uris, hierarchical_class_view, **args):
+def cli(yamlfile, directory, index_name, dialect, template_directory, use_slot_uris, hierarchical_class_view, **args):
"""Generate documentation folder from a LinkML YAML schema
Currently a default set of templates for markdown is provided (see the
@@ -912,6 +916,7 @@ def cli(yamlfile, directory, dialect, template_directory, use_slot_uris, hierarc
template_directory=template_directory,
use_slot_uris=use_slot_uris,
hierarchical_class_view=hierarchical_class_view,
+ index_name=index_name,
**args,
)
print(gen.serialize())
|
linkml/linkml
|
598376ce7f8c11bd3cf31f0ca7e3d5c34770021a
|
diff --git a/tests/test_generators/test_docgen.py b/tests/test_generators/test_docgen.py
index 01037a12..18b8e71d 100644
--- a/tests/test_generators/test_docgen.py
+++ b/tests/test_generators/test_docgen.py
@@ -4,6 +4,7 @@ Tests generation of markdown and similar documents
Note that docgen replaces markdowngen
"""
import logging
+import os
from collections import Counter
from copy import copy
from typing import List
@@ -426,6 +427,15 @@ def test_custom_directory(kitchen_sink_path, input_path, tmp_path):
assert_mdfile_contains(tmp_path / "Organization.md", "FAKE TEMPLATE")
+def test_gen_custom_named_index(kitchen_sink_path, tmp_path):
+ """Tests that the name of the index page can be customized"""
+ gen = DocGenerator(kitchen_sink_path, index_name="custom-index")
+ gen.serialize(directory=str(tmp_path))
+ assert_mdfile_contains(tmp_path / "custom-index.md", "# Kitchen Sink Schema")
+ # Additionally test that the default index.md has NOT been created
+ assert not os.path.exists(tmp_path / "index.md")
+
+
def test_html(kitchen_sink_path, input_path, tmp_path):
"""
Tests ability to specify a complete new set of templates in a different format
|
`gen-doc` should allow renaming the index page
**Is your feature request related to a problem? Please describe.**
The `gen-doc` tool automatically generates an index page for the documentation it produces, and that page is always named `index.html`. This is annoying, as most web servers are configured by default to return `index.html` to a client that asks for a directory index.
This means, for example, that when someone visits the [website for SSSOM](https://mapping-commons.github.io/sssom/) (which is partially generated by LinkML), the page the visitor lands on first is the LinkML index, which is not very informative – the visitor then needs to manually go to the [“home” page](https://mapping-commons.github.io/sssom/home/) to learn what SSSOM actually is.
**Describe the solution you'd like**
I would like a way to change the default behaviour of `gen-doc` so that the auto-generated index page may be named differently. For example, a command-line option as follows:
```sh
gen-doc --index-name schema-index ...
```
which would lead `gen-doc` to write the index in a file named `schema-index.html` instead of `index.html`.
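Equivalently at the Python level, assuming the generator grows a matching keyword argument (the schema path is illustrative):
```python
from linkml.generators.docgen import DocGenerator

gen = DocGenerator("personinfo.yaml", index_name="schema-index")  # illustrative path
gen.serialize(directory="docs")
```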
**How important is this feature?** Select from the options below:
• Low - it's an enhancement but not crucial for work
There are at least two possible workarounds:
* changing the configuration of the web server to use a different filename for the directory index (not always possible depending on where and how the website is hosted);
* forcibly rename the generated `index.html` to something else and replace it with the intended home page after the `gen-doc` run (would work everywhere but definitely not ideal).
**When will use cases depending on this become relevant?** Select from the options below:
• Short-term - 2-4 weeks
|
0.0
|
598376ce7f8c11bd3cf31f0ca7e3d5c34770021a
|
[
"tests/test_generators/test_docgen.py::test_gen_custom_named_index"
] |
[
"tests/test_generators/test_docgen.py::test_latex_generation",
"tests/test_generators/test_docgen.py::test_docgen",
"tests/test_generators/test_docgen.py::test_docgen_no_mergeimports",
"tests/test_generators/test_docgen.py::test_docgen_rank_ordering",
"tests/test_generators/test_docgen.py::test_gen_metamodel",
"tests/test_generators/test_docgen.py::test_myst_dialect",
"tests/test_generators/test_docgen.py::test_custom_directory",
"tests/test_generators/test_docgen.py::test_html",
"tests/test_generators/test_docgen.py::test_class_hierarchy_as_tuples",
"tests/test_generators/test_docgen.py::test_class_hierarchy_as_tuples_no_mergeimports",
"tests/test_generators/test_docgen.py::test_fetch_slots_of_class",
"tests/test_generators/test_docgen.py::test_class_slots_inheritance",
"tests/test_generators/test_docgen.py::test_use_slot_uris",
"tests/test_generators/test_docgen.py::test_hierarchical_class_view",
"tests/test_generators/test_docgen.py::test_uml_diagram_er",
"tests/test_generators/test_docgen.py::test_uml_diagram_classr"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-11 23:08:05+00:00
|
cc0-1.0
| 3,601 |
|
linkml__linkml-1772
|
diff --git a/linkml/generators/pydanticgen.py b/linkml/generators/pydanticgen.py
index c9ea5d6d..b933db57 100644
--- a/linkml/generators/pydanticgen.py
+++ b/linkml/generators/pydanticgen.py
@@ -108,7 +108,46 @@ class {{ e.name }}(str{% if e['values'] %}, Enum{% endif %}):
{% endfor %}
"""
### CLASSES ###
- template += """
+ if pydantic_ver == "1":
+ template += """
+{%- for c in schema.classes.values() %}
+class {{ c.name }}
+ {%- if class_isa_plus_mixins[c.name] -%}
+ ({{class_isa_plus_mixins[c.name]|join(', ')}})
+ {%- else -%}
+ (ConfiguredBaseModel)
+ {%- endif -%}
+ :
+ {% if c.description -%}
+ \"\"\"
+ {{ c.description }}
+ \"\"\"
+ {%- endif %}
+ {% for attr in c.attributes.values() if c.attributes -%}
+ {{attr.name}}: {{ attr.annotations['python_range'].value }} = Field(
+ {%- if predefined_slot_values[c.name][attr.name] -%}
+ {{ predefined_slot_values[c.name][attr.name] }}
+ {%- elif (attr.required or attr.identifier or attr.key) -%}
+ ...
+ {%- else -%}
+ None
+ {%- endif -%}
+ {%- if attr.title != None %}, title="{{attr.title}}"{% endif -%}
+ {%- if attr.description %}, description=\"\"\"{{attr.description}}\"\"\"{% endif -%}
+ {%- if attr.pattern %}, regex=\"{{attr.pattern}}\"{% endif -%}
+ {%- if attr.equals_number != None %}, le={{attr.equals_number}}, ge={{attr.equals_number}}
+ {%- else -%}
+ {%- if attr.minimum_value != None %}, ge={{attr.minimum_value}}{% endif -%}
+ {%- if attr.maximum_value != None %}, le={{attr.maximum_value}}{% endif -%}
+ {%- endif -%}
+ )
+ {% else -%}
+ None
+ {% endfor %}
+{% endfor %}
+"""
+ elif pydantic_ver == "2":
+ template += """
{%- for c in schema.classes.values() %}
class {{ c.name }}
{%- if class_isa_plus_mixins[c.name] -%}
@@ -133,6 +172,7 @@ class {{ c.name }}
{%- endif -%}
{%- if attr.title != None %}, title="{{attr.title}}"{% endif -%}
{%- if attr.description %}, description=\"\"\"{{attr.description}}\"\"\"{% endif -%}
+ {%- if attr.pattern %}, pattern=\"{{attr.pattern}}\"{% endif -%}
{%- if attr.equals_number != None %}, le={{attr.equals_number}}, ge={{attr.equals_number}}
{%- else -%}
{%- if attr.minimum_value != None %}, ge={{attr.minimum_value}}{% endif -%}
@@ -144,6 +184,7 @@ class {{ c.name }}
{% endfor %}
{% endfor %}
"""
+
### FWD REFS / REBUILD MODEL ###
if pydantic_ver == "1":
template += """
|
linkml/linkml
|
656d1719297b2a394273db13262ba76b1e0be118
|
diff --git a/tests/test_generators/test_pydanticgen.py b/tests/test_generators/test_pydanticgen.py
index a7ea9b5e..b0f984ed 100644
--- a/tests/test_generators/test_pydanticgen.py
+++ b/tests/test_generators/test_pydanticgen.py
@@ -326,3 +326,14 @@ def test_multiline_module(input_path):
)
assert 'INTERNAL "REORGANIZATION"' in gen.schema.enums["EmploymentEventType"].permissible_values
+
+
+def test_pydantic_pattern(kitchen_sink_path, tmp_path, input_path):
+ """Generate pydantic classes"""
+ gen = PydanticGenerator(kitchen_sink_path, package=PACKAGE)
+ code = gen.serialize()
+ module = compile_python(code, PACKAGE)
+ p1 = module.Person(id="01", name="John Doe")
+ assert p1.name == "John Doe"
+ with pytest.raises(ValidationError):
+ module.Person(id="01", name="x")
|
Add pattern/regexp validation for pydanticgen
pydantic supports regexes/patterns:
https://docs.pydantic.dev/latest/concepts/fields/#string-constraints
We should generate this in pydanticgen. Should this be optional, to avoid potentially expensive checks at object-creation time?
This issue should be closed by removing PYDANTICGEN from:
https://github.com/linkml/linkml/blob/5c0dfa25f65b7170768b52c94f67ec609afeb745/tests/test_compliance/test_pattern_compliance.py#L57-L59
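A sketch of what the generated model should enforce, written directly in pydantic rather than as generator output; the class and pattern are illustrative. Note that pydantic v2 spells the constraint `pattern=`, while v1 spelled it `regex=`:
```python
from pydantic import BaseModel, Field, ValidationError

class Person(BaseModel):
    # two whitespace-separated tokens
    name: str = Field(..., pattern=r"^\S+ \S+")

Person(name="John Doe")  # passes the pattern
try:
    Person(name="x")  # violates the pattern
except ValidationError as err:
    print(err)
```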
|
0.0
|
656d1719297b2a394273db13262ba76b1e0be118
|
[
"tests/test_generators/test_pydanticgen.py::test_pydantic_pattern"
] |
[
"tests/test_generators/test_pydanticgen.py::test_pydantic",
"tests/test_generators/test_pydanticgen.py::test_compile_pydantic",
"tests/test_generators/test_pydanticgen.py::test_pydantic_enums",
"tests/test_generators/test_pydanticgen.py::test_pydantic_any_of",
"tests/test_generators/test_pydanticgen.py::test_pydantic_inlining",
"tests/test_generators/test_pydanticgen.py::test_ifabsent",
"tests/test_generators/test_pydanticgen.py::test_multiline_module"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-08 20:41:52+00:00
|
apache-2.0
| 3,602 |
|
linkml__linkml-1795
|
diff --git a/linkml/generators/jsonschemagen.py b/linkml/generators/jsonschemagen.py
index c287696a..aa8dbc5d 100644
--- a/linkml/generators/jsonschemagen.py
+++ b/linkml/generators/jsonschemagen.py
@@ -181,6 +181,10 @@ class JsonSchemaGenerator(Generator):
super().__post_init__()
+ if self.top_class:
+ if self.schemaview.get_class(self.top_class) is None:
+ logging.warning(f"No class in schema named {self.top_class}")
+
def start_schema(self, inline: bool = False) -> JsonSchema:
self.inline = inline
|
linkml/linkml
|
2549522d967ea53a5a3974ad978d2ac85d166ccf
|
diff --git a/tests/test_generators/test_jsonschemagen.py b/tests/test_generators/test_jsonschemagen.py
index 6044035c..d7a7ff43 100644
--- a/tests/test_generators/test_jsonschemagen.py
+++ b/tests/test_generators/test_jsonschemagen.py
@@ -246,6 +246,11 @@ def test_empty_inlined_as_dict_objects(subtests, input_path):
external_file_test(subtests, input_path("jsonschema_empty_inlined_as_dict_objects.yaml"))
+def test_missing_top_class(input_path, caplog):
+ JsonSchemaGenerator(input_path("kitchen_sink.yaml"), top_class="NotARealClass")
+ assert "No class in schema named NotARealClass" in caplog.text
+
+
# **********************************************************
#
# Utility functions
|
gen-json-schema should warn or error if --top-class option does not correspond to class in schema
If I run `gen-json-schema` and the value I provide for the `--top-class` option does _not_ correspond to a class in the schema, I have made an obvious mistake that `gen-json-schema` can easily detect. I think it should either warn or error out in this situation.
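A sketch of the expected behaviour; the schema path and the bogus class name are illustrative:
```python
from linkml.generators import JsonSchemaGenerator

# Should at minimum log a warning such as:
#   "No class in schema named NotARealClass"
JsonSchemaGenerator("kitchen_sink.yaml", top_class="NotARealClass")
```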
|
0.0
|
2549522d967ea53a5a3974ad978d2ac85d166ccf
|
[
"tests/test_generators/test_jsonschemagen.py::test_missing_top_class"
] |
[
"tests/test_generators/test_jsonschemagen.py::test_jsonschema_integration"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-12-15 20:30:10+00:00
|
cc0-1.0
| 3,603 |
|
linkml__linkml-283
|
diff --git a/linkml/generators/jsonldcontextgen.py b/linkml/generators/jsonldcontextgen.py
index 343874ff..d7067140 100644
--- a/linkml/generators/jsonldcontextgen.py
+++ b/linkml/generators/jsonldcontextgen.py
@@ -165,42 +165,6 @@ class ContextGenerator(Generator):
if slot_def:
self.context_body[underscore(aliased_slot_name)] = slot_def
- def add_prefix(self, ncname: str) -> None:
- """ Add a prefix to the list of prefixes to emit
-
- @param ncname: name to add
- """
- if ncname not in self.namespaces:
- self.logger.warning(f"Unrecognized prefix: {ncname}")
- self.namespaces[ncname] = f"http://example.org/UNKNOWN/{ncname}/"
- self.emit_prefixes.add(self.namespaces._cased_key(ncname))
-
- def add_mappings(self, defn: Definition) -> None:
- """
- Process any mappings in defn, adding all of the mappings prefixes to the namespace map
- :param defn: Class or Slot Definition
- """
- self.add_id_prefixes(defn)
- mappings = defn.mappings + defn.related_mappings + defn.close_mappings + \
- defn.narrow_mappings + defn.broad_mappings + defn.exact_mappings
- for mapping in mappings:
- if '://' in mapping:
- mcurie = self.namespaces.curie_for(mapping)
- if mcurie is None:
- self.logger.warning(f"No namespace defined for URI: {mapping}")
- return # Absolute path - no prefix/name
- else:
- mapping = mcurie
- if ':' not in mapping or len(mapping.split(':')) != 2:
- raise ValueError(f"Definition {defn.name} - unrecognized mapping: {mapping}")
- ns = mapping.split(':')[0]
- self.add_prefix(ns)
-
- def add_id_prefixes(self, element: Element) -> None:
- for id_prefix in element.id_prefixes:
- self.add_prefix(id_prefix)
-
-
@shared_arguments(ContextGenerator)
@click.command()
@click.option("--base", help="Base URI for model")
diff --git a/linkml/generators/owlgen.py b/linkml/generators/owlgen.py
index 868b7f1a..74ff2a4e 100644
--- a/linkml/generators/owlgen.py
+++ b/linkml/generators/owlgen.py
@@ -39,6 +39,7 @@ class OwlSchemaGenerator(Generator):
SchemaLoader(METAMODEL_YAML_URI, base_dir=META_BASE_URI, importmap=kwargs.get('importmap', None),
mergeimports=self.merge_imports)
self.metamodel.resolve()
+ self.emit_prefixes: Set[str] = set()
self.top_value_uri: Optional[URIRef] = None
self.ontology_uri_suffix = ontology_uri_suffix
@@ -119,6 +120,7 @@ class OwlSchemaGenerator(Generator):
logging.warning(f'No URI for {m}')
def visit_class(self, cls: ClassDefinition) -> bool:
+ self.add_mappings(cls)
cls_uri = self._class_uri(cls.name)
self.add_metadata(cls, cls_uri)
self.graph.add((cls_uri, RDF.type, OWL.Class))
@@ -235,6 +237,7 @@ class OwlSchemaGenerator(Generator):
@param slot:
@return:
"""
+ self.add_mappings(slot)
# Note: We use the raw name in OWL and add a subProperty arc
slot_uri = self._prop_uri(slot.name)
self._add_element_properties(slot_uri, slot)
diff --git a/linkml/generators/prefixmapgen.py b/linkml/generators/prefixmapgen.py
index 7a4c04a9..0df2dd83 100644
--- a/linkml/generators/prefixmapgen.py
+++ b/linkml/generators/prefixmapgen.py
@@ -85,38 +85,8 @@ class PrefixGenerator(Generator):
# We don't bother to visit class slots - just all slots
return False
- def add_prefix(self, ncname: str) -> None:
- """ Add a prefix to the list of prefixes to emit
-
- @param ncname: name to add
- """
- if ncname not in self.namespaces:
- self.logger.warning(f"Unrecognized prefix: {ncname}")
- self.namespaces[ncname] = f"http://example.org/UNKNOWN/{ncname}/"
- self.emit_prefixes.add(ncname)
-
- def add_mappings(self, defn: Definition) -> None:
- """
- Process any mappings in defn, adding all of the mappings prefixes to the namespace map
- :param defn: Class or Slot Definition
- """
- self.add_id_prefixes(defn)
- for mapping in defn.mappings:
- if '://' in mapping:
- mcurie = self.namespaces.curie_for(mapping)
- self.logger.warning(f"No namespace defined for URI: {mapping}")
- if mcurie is None:
- return # Absolute path - no prefix/name
- else:
- mapping = mcurie
- if ':' not in mapping or len(mapping.split(':')) != 2:
- raise ValueError(f"Definition {defn.name} - unrecognized mapping: {mapping}")
- ns = mapping.split(':')[0]
- self.add_prefix(ns)
-
- def add_id_prefixes(self, element: Element) -> None:
- for id_prefix in element.id_prefixes:
- self.add_prefix(id_prefix)
+ def visit_slot(self, aliased_slot_name: str, slot: SlotDefinition) -> None:
+ self.add_mappings(slot)
@shared_arguments(PrefixGenerator)
diff --git a/linkml/generators/pythongen.py b/linkml/generators/pythongen.py
index 7e250e76..a748e558 100644
--- a/linkml/generators/pythongen.py
+++ b/linkml/generators/pythongen.py
@@ -69,28 +69,6 @@ class PythonGenerator(Generator):
if type_prefix:
self.emit_prefixes.add(type_prefix)
- def add_mappings(self, defn: Definition) -> None:
- """
- Process any mappings in defn, adding all of the mappings prefixes to the namespace map
- :param defn: Class or Slot Definition
- """
- self.add_id_prefixes(defn)
- for mapping in defn.mappings:
- if '://' in mapping:
- mcurie = self.namespaces.curie_for(mapping)
- self.logger.warning(f"No namespace defined for URI: {mapping}")
- if mcurie is None:
- return # Absolute path - no prefix/name
- else:
- mapping = mcurie
- if ':' not in mapping or len(mapping.split(':')) != 2:
- raise ValueError(f"Definition {defn.name} - unrecognized mapping: {mapping}")
- ns = mapping.split(':')[0]
- self.emit_prefixes.add(ns)
-
- def add_id_prefixes(self, element: Element) -> None:
- self.emit_prefixes.update(element.id_prefixes)
-
def gen_schema(self) -> str:
# The metamodel uses Enumerations to define itself, so don't import if we are generating the metamodel
enumimports = '' if self.genmeta else \
diff --git a/linkml/utils/generator.py b/linkml/utils/generator.py
index 52e58d76..cfd44f7e 100644
--- a/linkml/utils/generator.py
+++ b/linkml/utils/generator.py
@@ -9,7 +9,7 @@ from click import Command, Argument, Option
from linkml_runtime.linkml_model.meta import SchemaDefinition, ClassDefinition, SlotDefinition, ClassDefinitionName, \
TypeDefinition, Element, SlotDefinitionName, TypeDefinitionName, PrefixPrefixPrefix, ElementName, \
- SubsetDefinition, SubsetDefinitionName, EnumDefinition, EnumDefinitionName
+ SubsetDefinition, SubsetDefinitionName, EnumDefinition, EnumDefinitionName, Definition
from linkml_runtime.utils.formatutils import camelcase, underscore
from linkml.utils.mergeutils import alias_root
from linkml.utils.schemaloader import SchemaLoader
@@ -561,6 +561,46 @@ class Generator(metaclass=abc.ABCMeta):
return [slot for slot in [self.schema.slots[sn] for sn in cls.slots] if cls.name in slot.domain_of or
(set(cls.mixins).intersection(slot.domain_of))]
+ def add_mappings(self, defn: Definition) -> None:
+ """
+ Process any mappings in defn, adding all of the mappings prefixes to the namespace map
+ :param defn: Class or Slot Definition
+ """
+ self.add_id_prefixes(defn)
+ mappings = defn.mappings + defn.related_mappings + defn.close_mappings + \
+ defn.narrow_mappings + defn.broad_mappings + defn.exact_mappings
+ # see https://github.com/linkml/linkml/pull/283
+ #if isinstance(defn, ClassDefinition):
+ # mappings.append(defn.class_uri)
+ #if isinstance(defn, SlotDefinition):
+ # mappings.append(defn.slot_uri)
+ for mapping in mappings:
+ if '://' in mapping:
+ mcurie = self.namespaces.curie_for(mapping)
+ if mcurie is None:
+ self.logger.warning(f"No namespace defined for URI: {mapping}")
+ return # Absolute path - no prefix/name
+ else:
+ mapping = mcurie
+ if ':' not in mapping or len(mapping.split(':')) != 2:
+ raise ValueError(f"Definition {defn.name} - unrecognized mapping: {mapping}")
+ ns = mapping.split(':')[0]
+ self.add_prefix(ns)
+
+ def add_id_prefixes(self, element: Element) -> None:
+ for id_prefix in element.id_prefixes:
+ self.add_prefix(id_prefix)
+
+ def add_prefix(self, ncname: str) -> None:
+ """ Add a prefix to the list of prefixes to emit
+
+ @param ncname: name to add
+ """
+ if ncname not in self.namespaces:
+ self.logger.warning(f"Unrecognized prefix: {ncname}")
+ self.namespaces[ncname] = f"http://example.org/UNKNOWN/{ncname}/"
+ self.emit_prefixes.add(ncname)
+
def shared_arguments(g: Type[Generator]) -> Callable[[Command], Command]:
_LOG_LEVEL_STRINGS = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
|
linkml/linkml
|
08866026f4a0ae6977cabc9de1a984079873823b
|
diff --git a/tests/test_prefixes/input/prefixtest.yaml b/tests/test_prefixes/input/prefixtest.yaml
index facfb560..5fcf384b 100644
--- a/tests/test_prefixes/input/prefixtest.yaml
+++ b/tests/test_prefixes/input/prefixtest.yaml
@@ -45,6 +45,7 @@ classes:
Class:
class_uri: owl:Class
+
slots:
id:
diff --git a/tests/test_prefixes/output/prefixtest.prefixmap.json b/tests/test_prefixes/output/prefixtest.prefixmap.json
index f35e1783..2b519f98 100644
--- a/tests/test_prefixes/output/prefixtest.prefixmap.json
+++ b/tests/test_prefixes/output/prefixtest.prefixmap.json
@@ -1,7 +1,10 @@
{
+ "BFO": "http://purl.obolibrary.org/obo/BFO_",
"CL": "http://purl.obolibrary.org/obo/CL_",
"GO": "http://purl.obolibrary.org/obo/GO_",
+ "PR": "http://purl.obolibrary.org/obo/PR_",
"SIO": "http://semanticscience.org/resource/SIO_",
+ "SO": "http://purl.obolibrary.org/obo/SO_",
"biolink": "https://w3id.org/biolink/",
"dbont": "http://dbpedia.org/ontology/",
"dce": "http://purl.org/dc/elements/1.1/",
@@ -10,6 +13,8 @@
"owl": "http://www.w3.org/2002/07/owl#",
"pav": "http://purl.org/pav/",
"prefixtest": "https://w3id.org/linkml/tests/prefixtest/",
+ "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
+ "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"sdo": "http://schema.org/",
"wd": "https://www.wikidata.org/wiki/",
"Class": {
diff --git a/tests/test_prefixes/test_prefixes.py b/tests/test_prefixes/test_prefixes.py
index 021c7f8f..743f620c 100644
--- a/tests/test_prefixes/test_prefixes.py
+++ b/tests/test_prefixes/test_prefixes.py
@@ -47,10 +47,12 @@ class PrefixTestCase(unittest.TestCase):
with open(PM_OUTPUT, 'w') as stream:
stream.write(out)
expected = {
- # TODO: rdf, rdfs, BFO, ... should all be here
+ "BFO": "http://purl.obolibrary.org/obo/BFO_",
"CL": "http://purl.obolibrary.org/obo/CL_",
"GO": "http://purl.obolibrary.org/obo/GO_",
+ "PR": "http://purl.obolibrary.org/obo/PR_",
"SIO": "http://semanticscience.org/resource/SIO_",
+ "SO": "http://purl.obolibrary.org/obo/SO_",
"biolink": "https://w3id.org/biolink/",
"dbont": "http://dbpedia.org/ontology/",
"dce": "http://purl.org/dc/elements/1.1/",
@@ -59,6 +61,8 @@ class PrefixTestCase(unittest.TestCase):
"owl": "http://www.w3.org/2002/07/owl#",
"pav": "http://purl.org/pav/",
"prefixtest": "https://w3id.org/linkml/tests/prefixtest/",
+ "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
+ "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"sdo": "http://schema.org/",
"wd": "https://www.wikidata.org/wiki/"
}
|
Abstract add_mappings code into common generator superclass
`def add_mappings` is duplicated across different generators, with drift between the copies causing a variety of subtle issues relating to loss of prefix information.
See my comment here:
https://github.com/linkml/linkml/issues/192#issuecomment-883537162
The fix is simply to move `add_mappings` from the JSON-LD context generator into the parent `Generator` class, and to remove the duplicates from pythongen and prefixmapgen.
I hypothesize this will fix #163
|
0.0
|
08866026f4a0ae6977cabc9de1a984079873823b
|
[
"tests/test_prefixes/test_prefixes.py::PrefixTestCase::test_prefixmapgen"
] |
[
"tests/test_prefixes/test_prefixes.py::PrefixTestCase::test_jsonldcontext"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-20 23:52:09+00:00
|
cc0-1.0
| 3,604 |
|
linkml__linkml-781
|
diff --git a/docs/faq/python.md b/docs/faq/python.md
new file mode 100644
index 00000000..e834ae62
--- /dev/null
+++ b/docs/faq/python.md
@@ -0,0 +1,123 @@
+# FAQ: Python
+
+This section of the FAQ is for questions about writing Python code that uses the LinkML framework.
+
+## Why would I use LinkML in my Python code?
+
+You can use LinkML without writing a line of Python code, but if you
+are coding in Python there are a number of advantages to using parts
+of the LinkML toolchain.
+
+The primary advantage is to get the benefit of a Python object model
+derived from your schema. Not only does this model give you benefits
+such as avoiding boilerplate and enabling IDE support, you also get
+lots of "freebies" for things like loading/dumping to formats like YAML/JSON/RDF.
+
+See:
+
+- [python generator](https://linkml.io/linkml/generators/python.html)
+- [working with python](https://linkml.io/linkml/data/python.html)
+
+## Where do I find out more?
+
+- [working with python](https://linkml.io/linkml/data/python.html)
+- [developers guide](https://linkml.io/linkml/developers/index.html)
+
+## Which packages do I need to install?
+
+If you want to generate Python dataclasses, then you need the full [linkml](https://github.com/linkml/linkml) package.
+
+However, your code doesn't need this at *runtime*. Once you have generated your project files your only dependency will be on the more lightweight [linkml-runtime](https://github.com/linkml/linkml-runtime) package.
+
+*Note:* you don't need to install [linkml-model](https://github.com/linkml/linkml-model); the metamodel is included as part of the runtime
+
+## How do I get started?
+
+We recommend going through the tutorial, and in particular, [Part 5: Using Python](https://linkml.io/linkml/intro/tutorial05.html)
+
+## What is a loader or a dumper?
+
+- loaders allow data to be loaded *into* python objects from supported formats
+- dumpers allow data to be dumped *from* python objects into supported formats
+
+Example dumper code:
+
+```python
+from linkml_runtime.dumpers import yaml_dumper
+from personinfo import Person
+
+p1 = Person(id='ORCID:9876', full_name='Lex Luthor', aliases=["Bad Guy"])
+yaml_dumper.dump(p1, to_file='my-data.yaml')
+```
+
+Then to do the reverse:
+
+```python
+from linkml_runtime.loaders import yaml_loader
+
+p1 = yaml_loader.load('my-data.yaml', target_class=Person)
+```
+
+In contrast to dumpers, loaders need to know the class they are loading into, as
+there is insufficient information in the JSON/YAML alone.
+
+Each loader and dumper supports a pair of methods:
+
+- `load` and `loads` for loaders, for filehandles and strings respectively
+- `dump` and `dumps` for dumpers, for filehandles and strings respectively
+
+This follows the convention of other Python packages, such as the widely used `json` package.
+
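+For example, the string variants can round-trip an object without touching the filesystem (a small sketch; `json_loader` is assumed to be importable from `linkml_runtime.loaders`):
+
+```python
+from linkml_runtime.dumpers import json_dumper
+from linkml_runtime.loaders import json_loader
+
+json_str = json_dumper.dumps(p1)                        # object -> JSON string
+p2 = json_loader.loads(json_str, target_class=Person)   # JSON string -> object
+```
+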
+Note: dumpers shouldn't be confused with *generators* - dumpers are for exporting *data* that conforms to a LinkML schema, and generators are for converting an actual LinkML schema to another framework (e.g. json-schema)
+
+## What formats are supported?
+
+The core formats for LinkML data are:
+
+* json (see json_dumper and json_loader)
+* yaml (see yaml_dumper and yaml_loader)
+* rdf (see rdflib_dumper and rdflib_loader)
+
+csv is also partially supported, but this only works for certain "shapes" of data, due to the fact that nesting is not possible in CSVs.
+
+Data can also be exported to and imported from SQL databases, but this is not part of the loader/dumper framework
+
+## Why do I get a "not JSON serializable" message?
+
+This is probably because you are using the `dump` method from the `json` package, which expects dictionaries, not objects.
+
+You should use `json_dumper` instead.
+
+## What is the difference between gen-python and gen-pydantic?
+
+TODO
+
+## How do I work programmatically with RDF data?
+
+TODO
+
+## How do I work programmatically with SQL databases?
+
+TODO
+
+## Is LinkML compatible with RDFLib?
+
+TODO
+
+## Is LinkML compatible with SQL Alchemy?
+
+TODO
+
+## Is LinkML compatible with FastAPI?
+
+TODO
+
+## Is LinkML compatible with Flask?
+
+TODO
+
+## Is LinkML compatible with Python GraphQL frameworks?
+
+TODO
+
+
+
+
diff --git a/linkml/generators/docgen.py b/linkml/generators/docgen.py
index 2aa37fa6..4410383f 100644
--- a/linkml/generators/docgen.py
+++ b/linkml/generators/docgen.py
@@ -57,6 +57,7 @@ class DocGenerator(Generator):
template_mappings: Dict[str, str] = None
directory = None
template_directory = None
+ genmeta = False
def __init__(self, schema: Union[str, TextIO, SchemaDefinition],
directory: str = None,
@@ -81,6 +82,7 @@ class DocGenerator(Generator):
self.format = format
self.directory = directory
self.template_directory = template_directory
+ self.genmeta = genmeta
def serialize(self, directory: str = None) -> None:
"""
@@ -108,6 +110,8 @@ class DocGenerator(Generator):
self._write(out_str, directory, imported_schema.name)
template = self._get_template('class')
for cn, c in sv.all_classes().items():
+ if self._is_external(c):
+ continue
n = self.name(c)
out_str = template.render(gen=self,
element=c,
@@ -115,6 +119,8 @@ class DocGenerator(Generator):
self._write(out_str, directory, n)
template = self._get_template('slot')
for sn, s in sv.all_slots().items():
+ if self._is_external(s):
+ continue
n = self.name(s)
out_str = template.render(gen=self,
element=s,
@@ -122,6 +128,8 @@ class DocGenerator(Generator):
self._write(out_str, directory, n)
template = self._get_template('enum')
for en, e in sv.all_enums().items():
+ if self._is_external(e):
+ continue
n = self.name(e)
out_str = template.render(gen=self,
element=e,
@@ -129,6 +137,8 @@ class DocGenerator(Generator):
self._write(out_str, directory, n)
template = self._get_template('type')
for tn, t in sv.all_types().items():
+ if self._is_external(t):
+ continue
n = self.name(t)
out_str = template.render(gen=self,
element=t,
@@ -192,8 +202,6 @@ class DocGenerator(Generator):
env = Environment(loader=loader)
return env.get_template(base_file_name)
-
-
def name(self, element: Element) -> str:
"""
Returns the name of the element in its canonical form
@@ -230,7 +238,6 @@ class DocGenerator(Generator):
sc = element.from_schema
return f'[{curie}]({uri})'
-
def link(self, e: Union[Definition, DefinitionName]) -> str:
"""
Render an element as a hyperlink
@@ -242,7 +249,9 @@ class DocGenerator(Generator):
return 'NONE'
if not isinstance(e, Definition):
e = self.schemaview.get_element(e)
- if isinstance(e, ClassDefinition):
+ if self._is_external(e):
+ return self.uri_link(e)
+ elif isinstance(e, ClassDefinition):
return self._markdown_link(camelcase(e.name))
elif isinstance(e, EnumDefinition):
return self._markdown_link(camelcase(e.name))
@@ -253,6 +262,13 @@ class DocGenerator(Generator):
else:
return e.name
+ def _is_external(self, element: Element) -> bool:
+ # note: this is currently incomplete. See: https://github.com/linkml/linkml/issues/782
+ if element.from_schema == 'https://w3id.org/linkml/types' and not self.genmeta:
+ return True
+ else:
+ return False
+
def _markdown_link(self, n: str, subfolder: str = None) -> str:
if subfolder:
rel_path = f'{subfolder}/{n}'
diff --git a/linkml/generators/docgen/common_metadata.md.jinja2 b/linkml/generators/docgen/common_metadata.md.jinja2
index e7482dd7..1f5f4e06 100644
--- a/linkml/generators/docgen/common_metadata.md.jinja2
+++ b/linkml/generators/docgen/common_metadata.md.jinja2
@@ -19,4 +19,15 @@ Instances of this class *should* have identifiers with one of the following pref
{% for a in element.annotations -%}
| {{a}} | {{ element.annotations[a].value }} |
{% endfor %}
+{% endif %}
+
+{% if element.from_schema or element.imported_from %}
+### Schema Source
+
+{% if element.from_schema %}
+* from schema: {{ element.from_schema }}
+{% endif %}
+{% if element.imported_from %}
+* imported from: {{ element.imported_from }}
+{% endif %}
{% endif %}
\ No newline at end of file
diff --git a/linkml/generators/docgen/slot.md.jinja2 b/linkml/generators/docgen/slot.md.jinja2
index 5b15e7c4..dcb16015 100644
--- a/linkml/generators/docgen/slot.md.jinja2
+++ b/linkml/generators/docgen/slot.md.jinja2
@@ -22,7 +22,7 @@ URI: [{{ gen.uri(element) }}]({{ gen.uri(element) }})
## Properties
- * Range: {{element.range}}
+ * Range: {{gen.link(element.range)}}
{% if schemaview.usage_index().get(element.name) %}
| used by | used in | type | used |
diff --git a/linkml/utils/converter.py b/linkml/utils/converter.py
index 8fd7a3c4..56b04f98 100644
--- a/linkml/utils/converter.py
+++ b/linkml/utils/converter.py
@@ -5,6 +5,8 @@ from typing import List
import click
from linkml_runtime.linkml_model import Prefix
+from linkml_runtime.utils import inference_utils
+from linkml_runtime.utils.inference_utils import infer_all_slot_values
from linkml.utils import validation, datautils
from linkml_runtime.utils.compile_python import compile_python
@@ -43,6 +45,10 @@ from linkml.utils.datautils import dumpers_loaders, _get_format, get_loader, _ge
default=True,
show_default=True,
help="Validate against the schema")
[email protected]("--infer/--no-infer",
+ default=False,
+ show_default=True,
+ help="Infer missing slot values")
@click.option("--context", "-c",
multiple=True,
help="path to JSON-LD context file. Required for RDF input/output")
@@ -50,7 +56,7 @@ from linkml.utils.datautils import dumpers_loaders, _get_format, get_loader, _ge
def cli(input, module, target_class, context=None, output=None, input_format=None, output_format=None,
prefix: List = [],
target_class_from_path=None,
- schema=None, validate=None, index_slot=None) -> None:
+ schema=None, validate=None, infer=None, index_slot=None) -> None:
"""
Converts instance data to and from different LinkML Runtime serialization formats.
@@ -103,6 +109,9 @@ def cli(input, module, target_class, context=None, output=None, input_format=Non
inargs['index_slot'] = index_slot
inargs['schema'] = schema
obj = loader.load(source=input, target_class=py_target_class, **inargs)
+ if infer:
+ infer_config = inference_utils.Config(use_expressions=True, use_string_serialization=True)
+ infer_all_slot_values(obj, schemaview=sv, config=infer_config)
if validate:
if schema is None:
raise Exception('--schema must be passed in order to validate. Suppress with --no-validate')
|
linkml/linkml
|
b498d41f97b12bf98b1e2cb73bca52ced51c096d
|
diff --git a/tests/test_generators/test_docgen.py b/tests/test_generators/test_docgen.py
index e771bf45..d27b506d 100644
--- a/tests/test_generators/test_docgen.py
+++ b/tests/test_generators/test_docgen.py
@@ -32,8 +32,38 @@ class DocGeneratorTestCase(unittest.TestCase):
""" Tests basic document generator functionality """
gen = DocGenerator(SCHEMA, mergeimports=True, no_types_dir=True)
md = gen.serialize(directory=MD_DIR)
+ # test class docs
assert_mdfile_contains('Organization.md', 'Organization', after='Inheritance')
- # TODO: add more tests
+ assert_mdfile_contains('Organization.md', '[aliases](aliases.md)', after='Slots')
+ assert_mdfile_contains('Organization.md',
+ 'URI: [ks:Organization](https://w3id.org/linkml/tests/kitchen_sink/Organization)',
+ after='Class: Organization')
+ assert_mdfile_contains('Organization.md',
+ 'from_schema: https://w3id.org/linkml/tests/kitchen_sink',
+ after='Class: Organization')
+ assert_mdfile_contains('Organization.md',
+ 'slot_uri: skos:altLabel',
+ after='Induced')
+ # test type docs
+ assert_mdfile_contains('PhoneNumberType.md',
+ 'URI: http://www.w3.org/2001/XMLSchema#string',
+ after='PhoneNumberType')
+ # test enum docs
+ assert_mdfile_contains('EmploymentEventType.md',
+ 'codes for different kinds of employment/HR related events',
+ after='EmploymentEventType')
+ assert_mdfile_contains('EmploymentEventType.md',
+ 'PROMOTION | bizcodes:003 | promotion event',
+ after='Permissible Values')
+ # test slot docs
+ assert_mdfile_contains('aliases.md',
+ 'http://www.w3.org/2004/02/skos/core#altLabel',
+ after='aliases')
+ # test index docs
+ assert_mdfile_contains('aliases.md',
+ 'http://www.w3.org/2004/02/skos/core#altLabel',
+ after='aliases')
+
def test_custom_directory(self):
"""
diff --git a/tests/test_utils/input/data_example.yaml b/tests/test_utils/input/data_example.yaml
new file mode 100644
index 00000000..6cf30063
--- /dev/null
+++ b/tests/test_utils/input/data_example.yaml
@@ -0,0 +1,9 @@
+persons:
+ P:1:
+ first_name: first1
+ last_name: last1
+ age_in_years: 10
+ P:2:
+ first_name: first2
+ last_name: last2
+ age_in_months: 240
diff --git a/tests/test_utils/input/schema_with_inference.yaml b/tests/test_utils/input/schema_with_inference.yaml
new file mode 100644
index 00000000..c2751ba4
--- /dev/null
+++ b/tests/test_utils/input/schema_with_inference.yaml
@@ -0,0 +1,148 @@
+id: https://w3id.org/linkml/examples/inference
+title: inference example
+name: inference
+description: This demonstrates the use of inference
+license: https://creativecommons.org/publicdomain/zero/1.0/
+
+prefixes:
+ linkml: https://w3id.org/linkml/
+ ex: https://w3id.org/linkml/examples/inference/
+ skos: http://www.w3.org/2004/02/skos/core#
+ pav: http://purl.org/pav/
+ schema: http://schema.org/
+ sh: https://w3id.org/shacl/
+ P: http://example.org/
+
+default_prefix: ex
+default_range: string
+
+default_curi_maps:
+ - semweb_context
+
+imports:
+ - linkml:types
+
+
+#==================================
+# Slots #
+#==================================
+slots:
+ id:
+ identifier: true
+ name:
+ description: name
+ synonyms:
+ multivalued: true
+ full_name:
+ string_serialization: "{first_name} {last_name}"
+ first_name:
+ last_name:
+ age_in_years:
+ range: decimal
+ minimum_value: 0
+ maximum_value: 999
+ equals_expression: "{age_in_months} / 12"
+ age_in_months:
+ range: decimal
+ equals_expression: "{age_in_years} * 12"
+ is_juvenile:
+ range: boolean
+ equals_expression: "{age_in_years} < 18"
+ age_category:
+ range: AgeEnum
+ equals_expression: "case( ({age_in_years} < 2, 'infant'), ({age_in_years} < 18, 'juvenile'), ({age_in_years} > 18, 'adult') )"
+ prohibited:
+ equals_expression: "__import__('os').listdir()"
+ street:
+ city:
+ verbatim:
+ primary_address:
+ range: Address
+ description:
+ summary:
+
+
+#==================================
+# Classes #
+#==================================
+
+classes:
+ Term:
+ slots:
+ - id
+ - name
+ - synonyms
+
+ Person:
+ slots:
+ - id
+ - first_name
+ - last_name
+ - full_name
+ - age_in_years
+ - age_in_months
+ - primary_address
+ - description
+ - is_juvenile
+ - age_category
+ slot_usage:
+ description:
+ string_serialization: |-
+ {last_name}, {first_name}, {primary_address}
+ summary:
+ equals_expression: |-
+ {first_name} + {last_name} + ' ' + ('AGE: '+str(age_in_years) if age_in_years else 'NO AGE SPECIFIED')
+ rules:
+ preconditions:
+ slot_conditions:
+ primary_address:
+ postconditions:
+ slot_conditions:
+ description:
+ string_serialization: |-
+ {last_name}, {first_name}, {primary_address}
+ {primary_address.street}
+ {primary_address.city}
+
+ Evil:
+ slots:
+ - prohibited
+
+ Relationship:
+ attributes:
+ person1:
+ range: Person
+ inlined: true
+ person2:
+ range: Person
+ inlined: true
+ type:
+ description:
+ string_serialization: |-
+ "{person1.last_name}, {person1.first_name}" IS {type} "{person2.last_name}, {person2.first_name}"
+ description2:
+ string_serialization: |-
+ "{person1.full_name}" IS {type} "{person2.full_name}"
+
+ Address:
+ slots:
+ - street
+ - city
+ string_serialization: |-
+ {street}
+ {city}
+
+ Container:
+ tree_root: true
+ attributes:
+ persons:
+ range: Person
+ inlined: true
+ multivalued: true
+
+enums:
+ AgeEnum:
+ permissible_values:
+ infant:
+ juvenile:
+ adult:
diff --git a/tests/test_utils/test_converter.py b/tests/test_utils/test_converter.py
new file mode 100644
index 00000000..ee9a8b16
--- /dev/null
+++ b/tests/test_utils/test_converter.py
@@ -0,0 +1,53 @@
+import json
+import unittest
+
+from click.testing import CliRunner
+from tests.test_utils.environment import env
+from linkml.utils.converter import cli
+
+SCHEMA = env.input_path('schema_with_inference.yaml')
+DATA_IN = env.input_path('data_example.yaml')
+JSON_OUT = env.expected_path('data_example.out.json')
+YAML_OUT = env.expected_path('data_example.out.yaml')
+RDF_OUT = env.expected_path('data_example.out.ttl')
+
+
+class TestCommandLineInterface(unittest.TestCase):
+
+ def setUp(self) -> None:
+ runner = CliRunner(mix_stderr=False)
+ self.runner = runner
+
+ def test_help(self):
+ result = self.runner.invoke(cli, ['--help'])
+ out = result.stdout
+ err = result.stderr
+ #print(err)
+ self.assertIn('INPUT', out)
+ #self.assertEqual(0, result.exit_code)
+
+ def test_infer_and_convert(self):
+ """
+ Tests using the --infer option to add missing values, and also roundtripping
+ through yaml->json->yaml->rdf->json
+ """
+ result = self.runner.invoke(cli, ['--infer', '-s', SCHEMA, DATA_IN, '-o', JSON_OUT])
+ result = self.runner.invoke(cli, ['-s', SCHEMA, JSON_OUT, '-t', 'yaml', '-o', YAML_OUT])
+ result = self.runner.invoke(cli, ['-s', SCHEMA, YAML_OUT, '-t', 'rdf', '-o', RDF_OUT])
+ result = self.runner.invoke(cli, ['-s', SCHEMA, RDF_OUT, '-t', 'rdf', '-o', JSON_OUT])
+ with open(JSON_OUT) as file:
+ obj = json.load(file)
+ persons = obj['persons']
+ p1 = persons['P:1']
+ p2 = persons['P:2']
+ self.assertTrue(p1['is_juvenile'])
+ self.assertTrue('is_juvenile' not in p2)
+ self.assertEqual(p1['age_in_years'], 10)
+ self.assertEqual(p1['age_in_months'], 120)
+ self.assertEqual(p1['age_category'], "juvenile")
+ self.assertEqual(p1['full_name'], "first1 last1")
+ self.assertEqual(p2['age_in_years'], 20)
+ self.assertEqual(p2['age_in_months'], 240)
+ self.assertEqual(p2['age_category'], "adult")
+ self.assertEqual(p2['full_name'], "first2 last2")
+
|
add link to the range of a slot in the Jinja-based schema documentation
At the moment the range class of a slot is not linked to in the schema documentation. I guess it has to do with:
https://github.com/linkml/linkml/blob/1bbf442f5c0dab5b6a4eb3309ef25b95c74d0892/linkml/generators/docgen/slot.md.jinja2#L25
where it should actually be `{{gen.link(element.range)}}`. If I'm correct, I could attempt a PR.
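A quick, self-contained way to see the difference (a sketch with stand-in objects, not the actual docgen classes):

```python
from jinja2 import Template

class FakeGen:
    # stand-in for DocGenerator: turns an element name into a markdown link
    def link(self, name: str) -> str:
        return f"[{name}]({name}.md)"

class FakeElement:
    range = "Person"

broken = Template("* Range: {{ element.range }}")
fixed = Template("* Range: {{ gen.link(element.range) }}")

print(broken.render(element=FakeElement()))                 # * Range: Person
print(fixed.render(gen=FakeGen(), element=FakeElement()))   # * Range: [Person](Person.md)
```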
|
0.0
|
b498d41f97b12bf98b1e2cb73bca52ced51c096d
|
[
"tests/test_utils/test_converter.py::TestCommandLineInterface::test_infer_and_convert"
] |
[
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_custom_directory",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_docgen",
"tests/test_generators/test_docgen.py::DocGeneratorTestCase::test_html",
"tests/test_utils/test_converter.py::TestCommandLineInterface::test_help"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-17 10:05:42+00:00
|
cc0-1.0
| 3,605 |
|
linkml__linkml-828
|
diff --git a/linkml/utils/sqlutils.py b/linkml/utils/sqlutils.py
index f79ceeaf..290c0807 100644
--- a/linkml/utils/sqlutils.py
+++ b/linkml/utils/sqlutils.py
@@ -9,9 +9,10 @@ import inspect
import click
from linkml_runtime import SchemaView
-from linkml_runtime.linkml_model import SchemaDefinition
+from linkml_runtime.linkml_model import SchemaDefinition, PermissibleValue
import linkml_runtime.linkml_model.meta as metamodel
from linkml_runtime.utils.compile_python import compile_python
+from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from linkml_runtime.utils.formatutils import underscore
from linkml_runtime.utils.yamlutils import YAMLRoot
from linkml_runtime.utils.introspection import package_schemaview
@@ -182,6 +183,10 @@ class SQLStore:
return nu_obj
else:
return None
+ #elif isinstance(obj, PermissibleValue):
+ # return str(obj.text)
+ elif isinstance(obj, EnumDefinitionImpl):
+ return str(obj)
elif isinstance(obj, YAMLRoot):
typ = type(obj)
inst_args = {}
@@ -236,7 +241,9 @@ class SQLStore:
for n, nu_typ in inspect.getmembers(self.native_module):
# TODO: make more efficient
if n == typ.__name__:
+ #print(f'CREATING {nu_typ} FROM {inst_args}')
nu_obj = nu_typ(**inst_args)
+ #print(f'CREATED {nu_obj}')
return nu_obj
raise ValueError(f'Cannot find {typ.__name__} in {self.native_module}')
else:
|
linkml/linkml
|
cb2ec6bbd0ad69ab2fc26e9e91d7d6657bddd173
|
diff --git a/tests/test_data/input/personinfo.yaml b/tests/test_data/input/personinfo.yaml
index b732b5ee..16429479 100644
--- a/tests/test_data/input/personinfo.yaml
+++ b/tests/test_data/input/personinfo.yaml
@@ -232,6 +232,7 @@ slots:
maximum_value: 999
related_to:
type:
+ range: FamilialRelationshipType
street:
city:
mission_statement:
@@ -283,17 +284,17 @@ enums:
meaning: famrel:01
GenderType:
permissible_values:
- nonbinary man:
+ nonbinary_man:
meaning: GSSO:009254
- nonbinary woman:
+ nonbinary_woman:
meaning: GSSO:009253
- transgender woman:
+ transgender_woman:
meaning: GSSO:000384
- transgender man:
+ transgender_man:
meaning: GSSO:000372
- cisgender man:
+ cisgender_man:
meaning: GSSO:000371
- cisgender woman:
+ cisgender_woman:
meaning: GSSO:000385
DiagnosisType:
diff --git a/tests/test_data/input/personinfo_data01.yaml b/tests/test_data/input/personinfo_data01.yaml
index 8428e865..24fb18b9 100644
--- a/tests/test_data/input/personinfo_data01.yaml
+++ b/tests/test_data/input/personinfo_data01.yaml
@@ -28,6 +28,7 @@ persons:
name: prescribe cough medicine
- id: X:P2
name: person2
+ gender: cisgender_man
aliases:
- p2a1
- p2a2
diff --git a/tests/test_data/model/personinfo.py b/tests/test_data/model/personinfo.py
index 87628747..7256ea12 100644
--- a/tests/test_data/model/personinfo.py
+++ b/tests/test_data/model/personinfo.py
@@ -1,5 +1,5 @@
# Auto generated from personinfo.yaml by pythongen.py version: 0.9.0
-# Generation date: 2022-02-21T16:17:22
+# Generation date: 2022-06-01T12:12:50
# Schema: personinfo
#
# id: https://w3id.org/linkml/examples/personinfo
@@ -33,6 +33,8 @@ dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
GSSO = CurieNamespace('GSSO', 'http://purl.obolibrary.org/obo/GSSO_')
+ONT = CurieNamespace('ONT', 'http://example.org/ont/')
+X = CurieNamespace('X', 'http://example.org/data/')
FAMREL = CurieNamespace('famrel', 'https://example.org/FamilialRelations#')
LINKML = CurieNamespace('linkml', 'https://w3id.org/linkml/')
PERSONINFO = CurieNamespace('personinfo', 'https://w3id.org/linkml/examples/personinfo/')
@@ -40,7 +42,7 @@ PROV = CurieNamespace('prov', 'http://www.w3.org/ns/prov#')
RDF = CurieNamespace('rdf', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
RDFS = CurieNamespace('rdfs', 'http://www.w3.org/2000/01/rdf-schema#')
SCHEMA = CurieNamespace('schema', 'http://schema.org/')
-SKOS = CurieNamespace('skos', 'http://example.org/UNKNOWN/skos/')
+SKOS = CurieNamespace('skos', 'http://www.w3.org/2004/02/skos/core#')
XSD = CurieNamespace('xsd', 'http://www.w3.org/2001/XMLSchema#')
DEFAULT_ = PERSONINFO
@@ -76,6 +78,10 @@ class ProcedureConceptId(ConceptId):
pass
+class BiologicalSpecimenId(NamedThingId):
+ pass
+
+
@dataclass
class NamedThing(YAMLRoot):
"""
@@ -426,7 +432,7 @@ class Relationship(YAMLRoot):
started_at_time: Optional[Union[str, XSDDate]] = None
ended_at_time: Optional[Union[str, XSDDate]] = None
related_to: Optional[str] = None
- type: Optional[str] = None
+ type: Optional[Union[str, "FamilialRelationshipType"]] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self.started_at_time is not None and not isinstance(self.started_at_time, XSDDate):
@@ -438,8 +444,8 @@ class Relationship(YAMLRoot):
if self.related_to is not None and not isinstance(self.related_to, str):
self.related_to = str(self.related_to)
- if self.type is not None and not isinstance(self.type, str):
- self.type = str(self.type)
+ if self.type is not None and not isinstance(self.type, FamilialRelationshipType):
+ self.type = FamilialRelationshipType(self.type)
super().__post_init__(**kwargs)
@@ -550,6 +556,26 @@ class WithLocation(YAMLRoot):
super().__post_init__(**kwargs)
+@dataclass
+class BiologicalSpecimen(NamedThing):
+ _inherited_slots: ClassVar[List[str]] = []
+
+ class_class_uri: ClassVar[URIRef] = PERSONINFO.BiologicalSpecimen
+ class_class_curie: ClassVar[str] = "personinfo:BiologicalSpecimen"
+ class_name: ClassVar[str] = "biological specimen"
+ class_model_uri: ClassVar[URIRef] = PERSONINFO.BiologicalSpecimen
+
+ id: Union[str, BiologicalSpecimenId] = None
+
+ def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
+ if self._is_empty(self.id):
+ self.MissingRequiredField("id")
+ if not isinstance(self.id, BiologicalSpecimenId):
+ self.id = BiologicalSpecimenId(self.id)
+
+ super().__post_init__(**kwargs)
+
+
@dataclass
class Container(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = []
@@ -593,31 +619,23 @@ class FamilialRelationshipType(EnumDefinitionImpl):
class GenderType(EnumDefinitionImpl):
+ nonbinary_man = PermissibleValue(text="nonbinary_man",
+ meaning=GSSO["009254"])
+ nonbinary_woman = PermissibleValue(text="nonbinary_woman",
+ meaning=GSSO["009253"])
+ transgender_woman = PermissibleValue(text="transgender_woman",
+ meaning=GSSO["000384"])
+ transgender_man = PermissibleValue(text="transgender_man",
+ meaning=GSSO["000372"])
+ cisgender_man = PermissibleValue(text="cisgender_man",
+ meaning=GSSO["000371"])
+ cisgender_woman = PermissibleValue(text="cisgender_woman",
+ meaning=GSSO["000385"])
+
_defn = EnumDefinition(
name="GenderType",
)
- @classmethod
- def _addvals(cls):
- setattr(cls, "nonbinary man",
- PermissibleValue(text="nonbinary man",
- meaning=GSSO["009254"]) )
- setattr(cls, "nonbinary woman",
- PermissibleValue(text="nonbinary woman",
- meaning=GSSO["009253"]) )
- setattr(cls, "transgender woman",
- PermissibleValue(text="transgender woman",
- meaning=GSSO["000384"]) )
- setattr(cls, "transgender man",
- PermissibleValue(text="transgender man",
- meaning=GSSO["000372"]) )
- setattr(cls, "cisgender man",
- PermissibleValue(text="cisgender man",
- meaning=GSSO["000371"]) )
- setattr(cls, "cisgender woman",
- PermissibleValue(text="cisgender woman",
- meaning=GSSO["000385"]) )
-
class DiagnosisType(EnumDefinitionImpl):
_defn = EnumDefinition(
@@ -680,7 +698,7 @@ slots.related_to = Slot(uri=PERSONINFO.related_to, name="related_to", curie=PERS
model_uri=PERSONINFO.related_to, domain=None, range=Optional[str])
slots.type = Slot(uri=PERSONINFO.type, name="type", curie=PERSONINFO.curie('type'),
- model_uri=PERSONINFO.type, domain=None, range=Optional[str])
+ model_uri=PERSONINFO.type, domain=None, range=Optional[Union[str, "FamilialRelationshipType"]])
slots.street = Slot(uri=PERSONINFO.street, name="street", curie=PERSONINFO.curie('street'),
model_uri=PERSONINFO.street, domain=None, range=Optional[str])
diff --git a/tests/test_data/test_sqlite.py b/tests/test_data/test_sqlite.py
index 24897c2c..5fb1a9cc 100644
--- a/tests/test_data/test_sqlite.py
+++ b/tests/test_data/test_sqlite.py
@@ -11,7 +11,7 @@ from sqlalchemy.orm import sessionmaker
from linkml.utils.schema_builder import SchemaBuilder
from linkml.utils.sqlutils import SQLStore
-from tests.test_data.model.personinfo import Container, Person
+from tests.test_data.model.personinfo import Container, Person, FamilialRelationship, GenderType, FamilialRelationshipType
import tests.test_data.model.personinfo
from tests.test_data.environment import env
from tests.utils.dict_comparator import compare_yaml, compare_objs
@@ -33,6 +33,17 @@ class SQLiteStoreTest(unittest.TestCase):
- :meth:`SQLStore.load`
"""
+ def test_enums(self):
+ """
+ Tests that enum objects can be constructed inlined.
+
+ See https://github.com/linkml/linkml/issues/817
+ """
+ r = FamilialRelationship(type='SIBLING_OF', related_to='x')
+ p = Person(id='x', gender=GenderType(GenderType.cisgender_man))
+ self.assertEqual(type(p.gender), GenderType)
+ c = Container(persons=[p])
+
def test_sqlite_store(self):
"""
tests a complete end-to-end example with a dump-load cycle
@@ -54,6 +65,14 @@ class SQLiteStoreTest(unittest.TestCase):
q = session.query(endpoint.module.Person)
all_objs = q.all()
self.assertEqual(2, len(all_objs))
+ for p in all_objs:
+ print(p)
+ for rel in p.has_familial_relationships:
+ print(rel)
+ print(rel.type)
+ q = session.query(endpoint.module.FamilialRelationship)
+ for r in q.all():
+ print(r)
# step 4: test loading from SQLStore
# 4a: first test load_all, diff to original data should be empty
x = endpoint.load_all(target_class=Container)
@@ -80,8 +99,8 @@ class SQLiteStoreTest(unittest.TestCase):
endpoint.compile()
# step 2: load data from file and store in SQLStore
container: SchemaDefinition = yaml_loader.load(SCHEMA, target_class=SchemaDefinition)
- endpoint.dump(container)
-
+ schema_instance = SchemaDefinition(id='test', name='test')
+ endpoint.dump(schema_instance)
def test_mixin(self):
b = SchemaBuilder()
|
linkml-sqldb dump can't handle slots with enum ranges
```
Traceback (most recent call last):
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/bin/linkml-sqldb", line 8, in <module>
sys.exit(main())
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 336, in dump
endpoint.dump(obj)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 142, in dump
nu_obj = self.to_sqla(element)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 189, in to_sqla
v2 = self.to_sqla(v)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 170, in to_sqla
nu_obj = [self.to_sqla(x) for x in obj]
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 170, in <listcomp>
nu_obj = [self.to_sqla(x) for x in obj]
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 189, in to_sqla
v2 = self.to_sqla(v)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 189, in to_sqla
v2 = self.to_sqla(v)
File "/Users/MAM/Library/Caches/pypoetry/virtualenvs/linkml-project-template-d6UeGt0V-py3.9/lib/python3.9/site-packages/linkml/utils/sqlutils.py", line 198, in to_sqla
raise ValueError(f'Cannot find {typ.__name__} in {self.module}')
ValueError: Cannot find PermissibleValue in <module 'test'>
```
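The failure comes from the generic object-conversion branch trying to treat a `PermissibleValue` like an ordinary class. A minimal sketch of the kind of special-casing needed, using a plain `Enum` as a stand-in for the generated `EnumDefinitionImpl`:

```python
from enum import Enum

class GenderType(Enum):  # stand-in for a generated EnumDefinitionImpl
    cisgender_man = "cisgender_man"

def to_sqla(obj):
    if isinstance(obj, Enum):          # handle enums before the generic branch
        return str(obj.value)          # store the text value in the database
    if isinstance(obj, list):
        return [to_sqla(x) for x in obj]
    return obj

print(to_sqla(GenderType.cisgender_man))  # -> cisgender_man
```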
|
0.0
|
cb2ec6bbd0ad69ab2fc26e9e91d7d6657bddd173
|
[
"tests/test_data/test_sqlite.py::SQLiteStoreTest::test_sqlite_store"
] |
[
"tests/test_data/test_sqlite.py::SQLiteStoreTest::test_enums",
"tests/test_data/test_sqlite.py::SQLiteStoreTest::test_mixin"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-06-07 23:41:45+00:00
|
cc0-1.0
| 3,606 |
|
linkml__linkml-992
|
diff --git a/linkml/linter/cli.py b/linkml/linter/cli.py
index 07483e43..1d9718b4 100644
--- a/linkml/linter/cli.py
+++ b/linkml/linter/cli.py
@@ -51,6 +51,7 @@ def get_yaml_files(root: Path) -> Iterable[str]:
help="Report format.",
show_default=True,
)
[email protected]("-v", "--verbose", is_flag=True)
@click.option(
"-o", "--output", type=click.File("w"), default="-", help="Report file name."
)
@@ -76,6 +77,7 @@ def main(
output,
ignore_warnings: bool,
max_warnings: int,
+ verbose: bool
):
"""Run linter on SCHEMA.
@@ -99,7 +101,7 @@ def main(
linter = Linter(config_dict)
if format == "terminal":
- formatter = TerminalFormatter(output)
+ formatter = TerminalFormatter(output, verbose)
elif format == "markdown":
formatter = MarkdownFormatter(output)
elif format == "json":
diff --git a/linkml/linter/formatters/terminal_formatter.py b/linkml/linter/formatters/terminal_formatter.py
index 4dac4ebf..1c284c9f 100644
--- a/linkml/linter/formatters/terminal_formatter.py
+++ b/linkml/linter/formatters/terminal_formatter.py
@@ -14,17 +14,20 @@ def plural(word: str, count: int):
class TerminalFormatter(Formatter):
- def __init__(self, file: Optional[IO[Any]] = None) -> None:
+ def __init__(self, file: Optional[IO[Any]] = None, verbose: bool = False) -> None:
super().__init__(file)
+ self.verbose = verbose
self.problem_counts = defaultdict(int)
self.current_schema = None
def start_schema(self, name: str):
self.current_schema = name
+ if self.verbose:
+ self.write(click.style(name, underline=True))
def handle_problem(self, problem: LinterProblem):
key = self.current_schema
- if key not in self.problem_counts:
+ if not self.verbose and key not in self.problem_counts:
self.write(click.style(key, underline=True))
self.problem_counts[key] += 1
@@ -39,7 +42,7 @@ class TerminalFormatter(Formatter):
self.write(formatted)
def end_schema(self):
- if self.current_schema in self.problem_counts:
+ if self.verbose or self.current_schema in self.problem_counts:
self.write("")
def end_report(self):
|
linkml/linkml
|
a4d277de1b25fb00f0a4df5a3cc0256accf323d3
|
diff --git a/tests/test_linter/test_formatters.py b/tests/test_linter/test_formatters.py
index 98376a14..ed9ad92a 100644
--- a/tests/test_linter/test_formatters.py
+++ b/tests/test_linter/test_formatters.py
@@ -31,6 +31,9 @@ def populate_report(formatter):
)
formatter.end_schema()
+ formatter.start_schema("no_problems.yaml")
+ formatter.end_schema()
+
formatter.start_schema("b.yaml")
formatter.handle_problem(
LinterProblem(
@@ -64,6 +67,26 @@ b.yaml
"""
self.assertEqual(output.getvalue().strip(), expected.strip())
+ def test_terminal_formatter_verbose(self):
+ output = io.StringIO()
+ formatter = TerminalFormatter(file=output, verbose=True)
+ populate_report(formatter)
+
+ expected = """
+a.yaml
+ error this is an error (rule_1)
+ warning this is a warning (rule_2)
+
+no_problems.yaml
+
+b.yaml
+ error this is another error (rule_3)
+
+✖ Found 3 problems in 2 schemas
+"""
+ self.assertEqual(output.getvalue().strip(), expected.strip())
+
+
def test_markdown_formatter(self):
output = io.StringIO()
formatter = MarkdownFormatter(file=output)
@@ -74,7 +97,7 @@ b.yaml
| | Count |
|----------------------|-------|
-| Schemas Checked | 2 |
+| Schemas Checked | 3 |
| Schemas with Error | 2 |
| Schemas with Warning | 1 |
| Total Errors | 2 |
|
linkml-lint command should have a verbose flag
The `linkml-lint` command should have a verbose flag. If provided, it should list all the files checked, even if they have no issues.
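A minimal sketch of how such a flag could look with click (the `lint` helper here is a hypothetical stub, not the actual linkml implementation):

```python
import click

def lint(path: str) -> list:
    return []  # stub: the real linter would return a list of problems

@click.command()
@click.option("-v", "--verbose", is_flag=True,
              help="List every schema checked, even ones with no issues.")
@click.argument("schemas", nargs=-1)
def main(schemas, verbose):
    for path in schemas:
        problems = lint(path)
        if verbose or problems:        # verbose mode always names the file
            click.echo(click.style(path, underline=True))
        for problem in problems:
            click.echo(f"  {problem}")

if __name__ == "__main__":
    main()
```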
|
0.0
|
a4d277de1b25fb00f0a4df5a3cc0256accf323d3
|
[
"tests/test_linter/test_formatters.py::TestFormatters::test_terminal_formatter_verbose"
] |
[
"tests/test_linter/test_formatters.py::TestFormatters::test_json_formatter",
"tests/test_linter/test_formatters.py::TestFormatters::test_markdown_formatter",
"tests/test_linter/test_formatters.py::TestFormatters::test_terminal_formatter",
"tests/test_linter/test_formatters.py::TestFormatters::test_tsv_formatter"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-05 16:37:50+00:00
|
cc0-1.0
| 3,607 |
|
linkml__linkml-994
|
diff --git a/linkml/linter/rules.py b/linkml/linter/rules.py
index d3893001..9deef5d4 100644
--- a/linkml/linter/rules.py
+++ b/linkml/linter/rules.py
@@ -1,10 +1,10 @@
-from functools import lru_cache
import re
from abc import ABC, abstractmethod
+from functools import lru_cache
from typing import Callable, Iterable, List
from linkml_runtime.linkml_model import (ClassDefinition, ClassDefinitionName,
- SlotDefinition)
+ Element, SlotDefinition)
from linkml_runtime.utils.schemaview import SchemaView
from prefixmaps.io.parser import load_multi_context
@@ -45,6 +45,11 @@ class LinterRule(ABC):
def uncamel(n: str) -> str:
return LinterRule.PATTERNS._uncamel.sub(" ", n)
+ @staticmethod
+ def format_element(element: Element):
+ class_name = element.__class__.__name__.replace("Definition", "")
+ return f"{class_name} '{element.name}'"
+
class NoEmptyTitleRule(LinterRule):
@@ -60,7 +65,7 @@ class NoEmptyTitleRule(LinterRule):
e.title = title
if e.title is None:
problem = LinterProblem(
- message=f"{type(e).__name__} '{e.name}' has no title"
+ message=f"{self.format_element(e)} has no title"
)
yield problem
@@ -70,12 +75,14 @@ class NoXsdIntTypeRule(LinterRule):
id = "no_xsd_int_type"
def check(self, schema_view: SchemaView, fix: bool = False):
- for type_name, type_definition in schema_view.all_types(imports=False).items():
+ for type_definition in schema_view.all_types(imports=False).values():
if type_definition.uri == "xsd:int":
if fix:
type_definition.uri = "xsd:integer"
else:
- yield LinterProblem(f"Type '{type_name}' has uri xsd:int")
+ yield LinterProblem(
+ f"{self.format_element(type_definition)} has uri xsd:int"
+ )
class PermissibleValuesFormatRule(LinterRule):
@@ -86,11 +93,11 @@ class PermissibleValuesFormatRule(LinterRule):
self, schema_view: SchemaView, fix: bool = False
) -> Iterable[LinterProblem]:
pattern = self.PATTERNS.get(self.config.format, re.compile(self.config.format))
- for enum_name, enum_def in schema_view.all_enums(imports=False).items():
+ for enum_def in schema_view.all_enums(imports=False).values():
for value in enum_def.permissible_values.keys():
if pattern.fullmatch(value) is None:
yield LinterProblem(
- f"Enum '{enum_name}' has permissible value '{value}'"
+ f"{self.format_element(enum_def)} has permissible value '{value}'"
)
@@ -123,11 +130,10 @@ class RecommendedRule(LinterRule):
if element_name in self.config.exclude:
continue
for meta_slot_name, meta_slot_value in vars(element_definition).items():
- meta_class_name = type(element_definition).class_name
- key = f"{meta_class_name}__{meta_slot_name}"
+ key = f"{element_definition.class_name}__{meta_slot_name}"
if key in recommended_meta_slots and not meta_slot_value:
yield LinterProblem(
- f"{meta_class_name} '{element_name}' does not have recommended slot '{meta_slot_name}'"
+ f"{self.format_element(element_definition)} does not have recommended slot '{meta_slot_name}'"
)
@@ -272,7 +278,7 @@ class StandardNamingRule(LinterRule):
for permissible_value_name in enum_definition.permissible_values.keys():
if permissible_value_pattern.fullmatch(permissible_value_name) is None:
yield LinterProblem(
- f"Permissible value of enum '{enum_name}' has name '{permissible_value_name}'"
+ f"Permissible value of {self.format_element(enum_definition)} has name '{permissible_value_name}'"
)
|
linkml/linkml
|
3ea865ec1fcb7ea2c4f34f5291cab81f127285d7
|
diff --git a/tests/test_linter/test_cli.py b/tests/test_linter/test_cli.py
index ee4cb89d..5a95a434 100644
--- a/tests/test_linter/test_cli.py
+++ b/tests/test_linter/test_cli.py
@@ -64,7 +64,7 @@ class TestLinterCli(unittest.TestCase):
result = self.runner.invoke(main, [SCHEMA_FILE])
self.assertEqual(result.exit_code, 1)
self.assertIn(
- "warning class_definition 'Adult' does not have recommended slot 'description' (recommended)",
+ "warning Class 'Adult' does not have recommended slot 'description' (recommended)",
result.stdout,
)
self.assertIn(
diff --git a/tests/test_linter/test_rule_no_empty_title.py b/tests/test_linter/test_rule_no_empty_title.py
new file mode 100644
index 00000000..201a06ee
--- /dev/null
+++ b/tests/test_linter/test_rule_no_empty_title.py
@@ -0,0 +1,31 @@
+import unittest
+
+from linkml_runtime import SchemaView
+
+from linkml.linter.config.datamodel.config import RuleConfig, RuleLevel
+from linkml.linter.rules import NoEmptyTitleRule
+from linkml.utils.schema_builder import SchemaBuilder
+
+
+class TestRuleNoEmptyTitle(unittest.TestCase):
+ def test_elements_with_empty_title(self):
+ builder = SchemaBuilder()
+ builder.add_class("AClass")
+ builder.add_slot("a_slot")
+ builder.add_enum("AnEnum")
+ builder.add_type("a_type")
+ builder.add_class("WithTitle", title="With title")
+
+ schema_view = SchemaView(builder.schema)
+ config = RuleConfig(level=RuleLevel.error.text)
+
+ rule = NoEmptyTitleRule(config)
+ problems = list(rule.check(schema_view))
+
+ self.assertEqual(len(problems), 4)
+
+ messages = [p.message for p in problems]
+ self.assertIn("Class 'AClass' has no title", messages)
+ self.assertIn("Slot 'a_slot' has no title", messages)
+ self.assertIn("Enum 'AnEnum' has no title", messages)
+ self.assertIn("Type 'a_type' has no title", messages)
diff --git a/tests/test_linter/test_rule_recommended.py b/tests/test_linter/test_rule_recommended.py
index df0d29bc..6958fa9f 100644
--- a/tests/test_linter/test_rule_recommended.py
+++ b/tests/test_linter/test_rule_recommended.py
@@ -28,15 +28,15 @@ class TestRecommendedRule(unittest.TestCase):
messages = [p.message for p in problems]
self.assertIn(
- "class_definition 'MyClass' does not have recommended slot 'description'",
+ "Class 'MyClass' does not have recommended slot 'description'",
messages,
)
self.assertIn(
- "slot_definition 'my_slot' does not have recommended slot 'description'",
+ "Slot 'my_slot' does not have recommended slot 'description'",
messages,
)
self.assertIn(
- "enum_definition 'MyEnum' does not have recommended slot 'description'",
+ "Enum 'MyEnum' does not have recommended slot 'description'",
messages,
)
@@ -74,7 +74,7 @@ class TestRecommendedRule(unittest.TestCase):
messages = [p.message for p in problems]
self.assertIn(
- "slot_definition 'my_slot' does not have recommended slot 'description'",
+ "Slot 'my_slot' does not have recommended slot 'description'",
messages,
)
@@ -95,10 +95,10 @@ class TestRecommendedRule(unittest.TestCase):
messages = [p.message for p in problems]
self.assertIn(
- "class_definition 'MyClass' does not have recommended slot 'description'",
+ "Class 'MyClass' does not have recommended slot 'description'",
messages,
)
self.assertIn(
- "enum_definition 'MyEnum' does not have recommended slot 'description'",
+ "Enum 'MyEnum' does not have recommended slot 'description'",
messages,
)
diff --git a/tests/test_linter/test_rule_standard_naming.py b/tests/test_linter/test_rule_standard_naming.py
index a2831c3c..bfc1665d 100644
--- a/tests/test_linter/test_rule_standard_naming.py
+++ b/tests/test_linter/test_rule_standard_naming.py
@@ -47,18 +47,18 @@ class TestStandardNamingRule(unittest.TestCase):
self.assertIn("Slot has name 'BadSlot'", messages)
self.assertIn("Slot has name 'worse slot'", messages)
self.assertIn(
- "Permissible value of enum 'GoodEnumWithBadPV' has name 'Bad_PV'", messages
+ "Permissible value of Enum 'GoodEnumWithBadPV' has name 'Bad_PV'", messages
)
self.assertIn(
- "Permissible value of enum 'GoodEnumUpperPV' has name 'GOOD_UPPER_PV'",
+ "Permissible value of Enum 'GoodEnumUpperPV' has name 'GOOD_UPPER_PV'",
messages,
)
self.assertIn(
- "Permissible value of enum 'GoodEnumUpperPV' has name 'GREAT_UPPER_PV'",
+ "Permissible value of Enum 'GoodEnumUpperPV' has name 'GREAT_UPPER_PV'",
messages,
)
self.assertIn(
- "Permissible value of enum 'GoodEnumBadUpperPV' has name 'GOOD_UPPER_PV'",
+ "Permissible value of Enum 'GoodEnumBadUpperPV' has name 'GOOD_UPPER_PV'",
messages,
)
self.assertIn("Enum has name 'bad_enum'", messages)
@@ -79,25 +79,25 @@ class TestStandardNamingRule(unittest.TestCase):
self.assertIn("Slot has name 'BadSlot'", messages)
self.assertIn("Slot has name 'worse slot'", messages)
self.assertIn(
- "Permissible value of enum 'GoodEnum' has name 'good_lower_pv'", messages
+ "Permissible value of Enum 'GoodEnum' has name 'good_lower_pv'", messages
)
self.assertIn(
- "Permissible value of enum 'GoodEnum' has name 'great_lower_pv'", messages
+ "Permissible value of Enum 'GoodEnum' has name 'great_lower_pv'", messages
)
self.assertIn(
- "Permissible value of enum 'GoodEnumWithBadPV' has name 'good_lower_pv'",
+ "Permissible value of Enum 'GoodEnumWithBadPV' has name 'good_lower_pv'",
messages,
)
self.assertIn(
- "Permissible value of enum 'GoodEnumWithBadPV' has name 'Bad_PV'", messages
+ "Permissible value of Enum 'GoodEnumWithBadPV' has name 'Bad_PV'", messages
)
self.assertIn(
- "Permissible value of enum 'GoodEnumBadUpperPV' has name 'bad_pv'", messages
+ "Permissible value of Enum 'GoodEnumBadUpperPV' has name 'bad_pv'", messages
)
self.assertIn("Enum has name 'bad_enum'", messages)
self.assertIn(
- "Permissible value of enum 'bad_enum' has name 'good_lower_pv'", messages
+ "Permissible value of Enum 'bad_enum' has name 'good_lower_pv'", messages
)
self.assertIn(
- "Permissible value of enum 'bad_enum' has name 'great_lower_pv'", messages
+ "Permissible value of Enum 'bad_enum' has name 'great_lower_pv'", messages
)
|
linter is inconsistent in how it mentions meta model elements
For example, in the `message` column of the TSV output, one may see either of these styles:
- SlotDefinition 'source_file_size' has no title
- slot_definition 'associated_part' does not have recommended slot 'description'
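One way to make the messages consistent is a single formatting helper shared by all rules, as in the patch above; a condensed, self-contained sketch:

```python
class SlotDefinition:  # stand-in for linkml_runtime.linkml_model.SlotDefinition
    def __init__(self, name: str):
        self.name = name

def format_element(element) -> str:
    # "SlotDefinition" -> "Slot", "ClassDefinition" -> "Class", etc.
    class_name = element.__class__.__name__.replace("Definition", "")
    return f"{class_name} '{element.name}'"

print(format_element(SlotDefinition("source_file_size")))  # Slot 'source_file_size'
```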
|
0.0
|
3ea865ec1fcb7ea2c4f34f5291cab81f127285d7
|
[
"tests/test_linter/test_cli.py::TestLinterCli::test_no_config",
"tests/test_linter/test_rule_no_empty_title.py::TestRuleNoEmptyTitle::test_elements_with_empty_title",
"tests/test_linter/test_rule_recommended.py::TestRecommendedRule::test_exclude",
"tests/test_linter/test_rule_recommended.py::TestRecommendedRule::test_include",
"tests/test_linter/test_rule_recommended.py::TestRecommendedRule::test_missing_descriptions",
"tests/test_linter/test_rule_standard_naming.py::TestStandardNamingRule::test_standard_naming_lower_pv",
"tests/test_linter/test_rule_standard_naming.py::TestStandardNamingRule::test_standard_naming_upper_pv"
] |
[
"tests/test_linter/test_cli.py::TestLinterCli::test_config_extends_recommended",
"tests/test_linter/test_cli.py::TestLinterCli::test_directory_of_files",
"tests/test_linter/test_cli.py::TestLinterCli::test_explicit_config_file",
"tests/test_linter/test_cli.py::TestLinterCli::test_implicit_config_file",
"tests/test_linter/test_cli.py::TestLinterCli::test_no_schema_errors",
"tests/test_linter/test_rule_recommended.py::TestRecommendedRule::test_present_descriptions"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-05 20:39:01+00:00
|
cc0-1.0
| 3,608 |
|
linkml__linkml-runtime-183
|
diff --git a/linkml_runtime/utils/schema_as_dict.py b/linkml_runtime/utils/schema_as_dict.py
index c590936..edaabca 100644
--- a/linkml_runtime/utils/schema_as_dict.py
+++ b/linkml_runtime/utils/schema_as_dict.py
@@ -29,12 +29,20 @@ def _remove_names(obj: Any, parent: Optional[str]) -> Any:
Also compacts representation of prefixes
+ It also takes care of the edge case:
+
+ .. code-block:: yaml
+
+ slots:
+ name:
+ ...
+
:param obj: dictionary object to recursively transform
:param parent: key for parent dict
:return:
"""
if isinstance(obj, dict):
- return {k: _remove_names(v, k) for k, v in obj.items() if k != 'name' or parent is None}
+ return {k: _remove_names(v, k) for k, v in obj.items() if k != 'name' or parent is None or parent == 'slots'}
elif isinstance(obj, list):
return [_remove_names(x, parent) for x in obj]
else:
|
linkml/linkml-runtime
|
3a1a0107a36006df7a95c2b0a18fc4894c928f33
|
diff --git a/tests/test_utils/input/kitchen_sink_noimports.yaml b/tests/test_utils/input/kitchen_sink_noimports.yaml
index 384bfcc..4464533 100644
--- a/tests/test_utils/input/kitchen_sink_noimports.yaml
+++ b/tests/test_utils/input/kitchen_sink_noimports.yaml
@@ -215,6 +215,8 @@ classes:
class_uri: prov:Agent
rank: 1
+ EmptyClass:
+
slots:
employed at:
range: Company
@@ -344,3 +346,7 @@ enums:
PARENT_OF:
CHILD_OF:
DiagnosisType:
+ OtherEnum:
+ permissible_values:
+ a:
+ b:
diff --git a/tests/test_utils/test_schema_as_dict.py b/tests/test_utils/test_schema_as_dict.py
index f3b8218..0965df5 100644
--- a/tests/test_utils/test_schema_as_dict.py
+++ b/tests/test_utils/test_schema_as_dict.py
@@ -24,6 +24,8 @@ class SchemaAsDictTestCase(unittest.TestCase):
tests schema_as_dict, see https://github.com/linkml/linkml/issues/100
"""
view = SchemaView(SCHEMA_NO_IMPORTS)
+ all_slots = view.all_slots()
+ self.assertIn('name', all_slots)
logging.debug(view.schema.id)
ystr = schema_as_yaml_dump(view.schema)
with open(CLEAN_SCHEMA, 'w') as stream:
@@ -41,6 +43,7 @@ class SchemaAsDictTestCase(unittest.TestCase):
for e in elt_dict.values():
for pv in e.get('permissible_values', {}).values():
assert 'text' not in pv
+ self.assertIn('name', obj['slots'])
if __name__ == '__main__':
unittest.main()
diff --git a/tests/test_utils/test_schemaview.py b/tests/test_utils/test_schemaview.py
index a08c780..62eae1f 100644
--- a/tests/test_utils/test_schemaview.py
+++ b/tests/test_utils/test_schemaview.py
@@ -218,8 +218,9 @@ class SchemaViewTestCase(unittest.TestCase):
ordered_c = []
for c in classes.values():
ordered_c.append(c.name)
- assert "HasAliases" == ordered_c[0]
- assert "agent" == ordered_c[-1]
+ self.assertEqual("HasAliases", ordered_c[0])
+ self.assertEqual("EmptyClass", ordered_c[-1])
+ self.assertEqual("agent", ordered_c[-2])
def test_all_slots_ordered_lexical(self):
view = SchemaView(SCHEMA_NO_IMPORTS)
|
schema_as_dict is over eager and eliminates "name" slots
schema_as_dict will compact the direct yaml representation of a schema so that `name` keys are not repeated in an element when they are already used as keys - but this is over-eager and also removes slots that are themselves named `name`
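The fix in the patch is a one-line guard that keeps `name` keys under the top-level `slots` mapping; a self-contained sketch:

```python
def _remove_names(obj, parent=None):
    # Drop redundant `name` entries, except at the top level and for keys of
    # the `slots` mapping, where a slot may legitimately be called "name".
    if isinstance(obj, dict):
        return {k: _remove_names(v, k)
                for k, v in obj.items()
                if k != "name" or parent is None or parent == "slots"}
    if isinstance(obj, list):
        return [_remove_names(x, parent) for x in obj]
    return obj

schema = {"slots": {"name": {"name": "name", "range": "string"}}}
print(_remove_names(schema))  # -> {'slots': {'name': {'range': 'string'}}}
```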
|
0.0
|
3a1a0107a36006df7a95c2b0a18fc4894c928f33
|
[
"tests/test_utils/test_schema_as_dict.py::SchemaAsDictTestCase::test_as_dict"
] |
[
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_classes_ordered_lexical",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_classes_ordered_no_ordered_by",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_classes_ordered_rank",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_slots_ordered_lexical",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_slots_ordered_rank",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_caching",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_imports",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_merge_imports",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_metamodel_in_schemaview",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_rollup_rolldown",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_schemaview",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_slot_inheritance",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_traversal"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-06-07 19:57:55+00:00
|
cc0-1.0
| 3,609 |
|
linkml__linkml-runtime-204
|
diff --git a/.gitignore b/.gitignore
index 2c2b194..b84a58f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -140,3 +140,4 @@ Pipfile.lock
# No Pycharm
.idea/
+.vscode
diff --git a/linkml_runtime/utils/csvutils.py b/linkml_runtime/utils/csvutils.py
index 884f0b0..36fa11e 100644
--- a/linkml_runtime/utils/csvutils.py
+++ b/linkml_runtime/utils/csvutils.py
@@ -1,3 +1,4 @@
+import logging
from json_flattener import KeyConfig, GlobalConfig, Serializer
from json_flattener.flattener import CONFIGMAP
from linkml_runtime.linkml_model.meta import SlotDefinitionName, SchemaDefinition, \
@@ -14,8 +15,11 @@ def get_configmap(schemaview: SchemaView, index_slot: SlotDefinitionName) -> CON
:param index_slot: key that indexes the top level object
:return: mapping between top level keys and denormalization configurations
"""
+ slot = None
if index_slot is not None and schemaview is not None:
slot = schemaview.get_slot(index_slot)
+
+ if slot is not None:
if slot.range is not None and slot.range in schemaview.all_classes():
cm = {}
for sn in schemaview.class_slots(slot.range):
@@ -24,9 +28,9 @@ def get_configmap(schemaview: SchemaView, index_slot: SlotDefinitionName) -> CON
cm[sn] = config
return cm
else:
- logging.warn(f'Index slot range not to class: {slot.range}')
+ logging.warning(f'Index slot range not to class: {slot.range}')
else:
- logging.warn(f'Index slot or schema not specified')
+ logging.warning(f'Index slot or schema not specified')
return {}
def _get_key_config(schemaview: SchemaView, tgt_cls: ClassDefinitionName, sn: SlotDefinitionName, sep='_'):
diff --git a/linkml_runtime/utils/pattern.py b/linkml_runtime/utils/pattern.py
index 9da2225..6045a2c 100644
--- a/linkml_runtime/utils/pattern.py
+++ b/linkml_runtime/utils/pattern.py
@@ -1,83 +1,88 @@
+from functools import lru_cache
import re
from typing import Dict
-
+# We might want to deprecate this method in favor of PatternResolver in the future
def generate_patterns(schema_view) -> Dict[str, str]:
"""Generates a dictionary of slot patterns corresponding to
the structured patterns in the settings.
-
:param schema_view: SchemaView object with LinkML YAML
already loaded
:return generated_patterns: dictionary with the
expanded structured patterns
"""
- # fetch settings from schema_view
- settings_dict = schema_view.schema.settings
-
- # dictionary of key and string value of settings dict
- format_spec = {}
-
- for k, setting in settings_dict.items():
-
- # create spec dictionary with keys that will replace
- # substrings in the structured pattern syntax
- format_spec[k] = setting.setting_value
+ resolver = PatternResolver(schema_view)
# dictionary with structured patterns in the key and
# expanded, or materialized patterns as values
generated_patterns = {}
- # regular expression capturing the various use cases
- # for the optionally dot separated, curly braces bound, pattern syntax
- var_name = re.compile("{([a-z0-9_-]+([\.-_ ][a-z0-9]+)*)}", re.IGNORECASE)
-
for _, slot_defn in schema_view.all_slots().items():
if slot_defn.structured_pattern:
struct_pat = slot_defn.structured_pattern
-
pattern = struct_pat.syntax
+ generated_patterns[pattern] = resolver.resolve(pattern)
- # compute pattern from structured patterns
- # and format_spec dictionary
-
-
- # apply the regex to the pattern and look for matches
- matches = var_name.finditer(pattern)
-
- reversed = []
- for item in matches:
- # Detect double set brackets
- match_string = None
- if (
- item.start() > 0
- and item.end() < len(pattern)
- and pattern[item.start() - 1] == "{"
- and pattern[item.end()] == "}"
- ):
- match_string = item.group(1)
-
- elif item.group(1) in format_spec:
- match_string = str(format_spec[item.group(1)])
-
- if match_string:
- reversed.insert(
- 0,
- {
- "string": match_string,
- "start": item.start(),
- "end": item.end(),
- },
- )
-
- converted = pattern
- for item in reversed:
- converted = (
- converted[: item["start"]]
- + item["string"]
- + converted[item["end"] :]
+ return generated_patterns
+
+
+class PatternResolver():
+
+ # regular expression capturing the various use cases
+ # for the optionally dot separated, curly braces bound, pattern syntax
+ var_name = re.compile("{([a-z0-9_-]+([\.-_ ][a-z0-9]+)*)}", re.IGNORECASE)
+
+ def __init__(self, schema_view):
+ # fetch settings from schema_view
+ settings_dict = schema_view.schema.settings
+
+ # dictionary of key and string value of settings dict
+ self.format_spec = {}
+
+ for k, setting in settings_dict.items():
+
+ # create spec dictionary with keys that will replace
+ # substrings in the structured pattern syntax
+ self.format_spec[k] = setting.setting_value
+
+ @lru_cache()
+ def resolve(self, pattern: str) -> str:
+ # apply the regex to the pattern and look for matches
+ matches = self.var_name.finditer(pattern)
+
+ reversed = []
+ for item in matches:
+ # Detect double set brackets
+ match_string = None
+ if (
+ item.start() > 0
+ and item.end() < len(pattern)
+ and pattern[item.start() - 1] == "{"
+ and pattern[item.end()] == "}"
+ ):
+ match_string = item.group(1)
+
+ elif item.group(1) in self.format_spec:
+ match_string = str(self.format_spec[item.group(1)])
+
+ if match_string:
+ reversed.insert(
+ 0,
+ {
+ "string": match_string,
+ "start": item.start(),
+ "end": item.end(),
+ },
)
- generated_patterns[pattern] = converted
+ converted = pattern
+ for item in reversed:
+ converted = (
+ converted[: item["start"]]
+ + item["string"]
+ + converted[item["end"] :]
+ )
+
+ return converted
- return generated_patterns
diff --git a/linkml_runtime/utils/schemaview.py b/linkml_runtime/utils/schemaview.py
index 4cd440e..7ce48f1 100644
--- a/linkml_runtime/utils/schemaview.py
+++ b/linkml_runtime/utils/schemaview.py
@@ -9,7 +9,7 @@ from typing import Mapping, Tuple, Type
from linkml_runtime.utils.namespaces import Namespaces
from deprecated.classic import deprecated
from linkml_runtime.utils.context_utils import parse_import_map, map_import
-from linkml_runtime.utils.pattern import generate_patterns
+from linkml_runtime.utils.pattern import PatternResolver
from linkml_runtime.linkml_model.meta import *
from enum import Enum
logger = logging.getLogger(__name__)
@@ -1485,12 +1485,22 @@ class SchemaView(object):
into regular expressions based on composite patterns
provided in the settings dictionary.
"""
- patterns_dict = generate_patterns(self)
-
- for _, slot_defn in self.all_slots().items():
- if slot_defn.structured_pattern:
-
- pattern = slot_defn.structured_pattern.syntax
-
- if pattern in patterns_dict:
- slot_defn.pattern = patterns_dict[pattern]
+ resolver = PatternResolver(self)
+
+ def materialize_pattern_into_slot_definition(slot_definition: SlotDefinition) -> None:
+ if not slot_definition.structured_pattern:
+ return
+ pattern = slot_definition.structured_pattern.syntax
+ slot_definition.pattern = resolver.resolve(pattern)
+
+ for slot_definition in self.all_slots().values():
+ materialize_pattern_into_slot_definition(slot_definition)
+
+ for class_definition in self.all_classes().values():
+ if class_definition.slot_usage:
+ for slot_definition in class_definition.slot_usage.values():
+ materialize_pattern_into_slot_definition(slot_definition)
+
+ if class_definition.attributes:
+ for slot_definition in class_definition.attributes.values():
+ materialize_pattern_into_slot_definition(slot_definition)
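A note on the refactor above: `PatternResolver.resolve` collects regex matches left-to-right but applies the substitutions right-to-left, so that replacing text of a different length never invalidates the start/end offsets of earlier matches. A minimal standalone sketch of that technique (the `spec` values are illustrative, mirroring the test expectations, not the library API):

```python
import re

# illustrative settings, standing in for SchemaView's settings dictionary
spec = {"float": r"\d+[\.\d+]", "unit.weight": "(kg|g|lbs|stone)"}
var_name = re.compile(r"{([a-z0-9_-]+([\.\-_ ][a-z0-9]+)*)}", re.IGNORECASE)

def resolve(pattern: str) -> str:
    # collect matches front to back, substitute back to front so earlier
    # offsets stay valid while the string changes length
    replacements = []
    for m in var_name.finditer(pattern):
        if m.group(1) in spec:
            replacements.insert(0, (m.start(), m.end(), spec[m.group(1)]))
    out = pattern
    for start, end, text in replacements:
        out = out[:start] + text + out[end:]
    return out

assert resolve("{float} {unit.weight}") == r"\d+[\.\d+] (kg|g|lbs|stone)"
```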
|
linkml/linkml-runtime
|
7188ab550ffce1c44492613239808e4d99748c96
|
diff --git a/tests/test_utils/input/pattern-example.yaml b/tests/test_utils/input/pattern-example.yaml
index 544f1ed..f40b1d8 100644
--- a/tests/test_utils/input/pattern-example.yaml
+++ b/tests/test_utils/input/pattern-example.yaml
@@ -32,6 +32,7 @@ settings:
unit.length: "(centimeter|meter|inch)"
unit.weight: "(kg|g|lbs|stone)"
email: "\\S+@\\S+{\\.\\w}+"
+ hyphenated_name: "\\S+-\\S+"
#==================================
# Classes #
@@ -45,6 +46,23 @@ classes:
- height
- email
+ FancyPersonInfo:
+ is_a: PersonInfo
+ slot_usage:
+ name:
+ structured_pattern:
+ syntax: "\\S+ {hyphenated_name}"
+ interpolated: true
+ partial_match: false
+
+ ClassWithAttributes:
+ attributes:
+ weight:
+ structured_pattern:
+ syntax: "{float} {unit.weight}"
+ interpolated: true
+ partial_match: false
+
#==================================
# Slots #
#==================================
diff --git a/tests/test_utils/test_pattern.py b/tests/test_utils/test_pattern.py
index 8c48e02..81df8c0 100644
--- a/tests/test_utils/test_pattern.py
+++ b/tests/test_utils/test_pattern.py
@@ -4,13 +4,12 @@ from tests.test_utils.environment import env
from linkml_runtime.utils.schemaview import SchemaView
-from linkml_runtime.utils.pattern import generate_patterns
+from linkml_runtime.utils.pattern import PatternResolver, generate_patterns
class PatternTestCase(unittest.TestCase):
def test_generate_patterns(self):
"""Test method that consolidates composite patterns."""
-
sv = SchemaView(env.input_path("pattern-example.yaml"))
# actual result returned from call to generate_patterns()
@@ -24,5 +23,14 @@ class PatternTestCase(unittest.TestCase):
self.assertDictEqual(actual_dict, expected_dict)
+ def test_pattern_resolver(self):
+ sv = SchemaView(env.input_path("pattern-example.yaml"))
+
+ resolver = PatternResolver(sv)
+
+ self.assertEqual(resolver.resolve("{float} {unit.length}"), "\\d+[\\.\\d+] (centimeter|meter|inch)")
+ self.assertEqual(resolver.resolve("{float} {unit.weight}"), "\\d+[\\.\\d+] (kg|g|lbs|stone)")
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/tests/test_utils/test_schemaview.py b/tests/test_utils/test_schemaview.py
index 3b3e175..9e24b7c 100644
--- a/tests/test_utils/test_schemaview.py
+++ b/tests/test_utils/test_schemaview.py
@@ -15,6 +15,7 @@ from tests.test_utils import INPUT_DIR
SCHEMA_NO_IMPORTS = os.path.join(INPUT_DIR, 'kitchen_sink_noimports.yaml')
SCHEMA_WITH_IMPORTS = os.path.join(INPUT_DIR, 'kitchen_sink.yaml')
+SCHEMA_WITH_STRUCTURED_PATTERNS = os.path.join(INPUT_DIR, "pattern-example.yaml")
yaml_loader = YAMLLoader()
@@ -542,7 +543,7 @@ class SchemaViewTestCase(unittest.TestCase):
self.assertListEqual(actual_result, expected_result)
def test_materialize_patterns(self):
- sv = SchemaView(os.path.join(INPUT_DIR, "pattern-example.yaml"))
+ sv = SchemaView(SCHEMA_WITH_STRUCTURED_PATTERNS)
sv.materialize_patterns()
@@ -552,6 +553,24 @@ class SchemaViewTestCase(unittest.TestCase):
self.assertEqual(height_slot.pattern, "\d+[\.\d+] (centimeter|meter|inch)")
self.assertEqual(weight_slot.pattern, "\d+[\.\d+] (kg|g|lbs|stone)")
+ def test_materialize_patterns_slot_usage(self):
+ sv = SchemaView(SCHEMA_WITH_STRUCTURED_PATTERNS)
+
+ sv.materialize_patterns()
+
+ name_slot_usage = sv.get_class("FancyPersonInfo").slot_usage['name']
+
+ self.assertEqual(name_slot_usage.pattern, "\\S+ \\S+-\\S+")
+
+ def test_materialize_patterns_attribute(self):
+ sv = SchemaView(SCHEMA_WITH_STRUCTURED_PATTERNS)
+
+ sv.materialize_patterns()
+
+ weight_attribute = sv.get_class('ClassWithAttributes').attributes['weight']
+
+ self.assertEqual(weight_attribute.pattern, "\d+[\.\d+] (kg|g|lbs|stone)")
+
if __name__ == '__main__':
unittest.main()
|
`SchemaView.materialize_patterns()` does not account for `slot_usage`
If I have a schema that contains the following:
```yaml
# test.yaml
settings:
  generated_id: "[a-z0-9]{6,}"

classes:
  MyClass:
    slots:
      - id

slots:
  id:
    structured_pattern:
      syntax: "prefix:{generated_id}"
```
And I call the `materialize_patterns` method on a `SchemaView` instance for that schema, I see that the `pattern` meta-slot is populated as expected:
```python
>>> from linkml_runtime import SchemaView
>>> view = SchemaView('test.yaml')
>>> view.schema.slots['id'].pattern is None
True
>>> view.materialize_patterns()
>>> view.schema.slots['id'].pattern
'prefix:[a-z0-9]{6,}'
```
However, if the `structured_pattern` is instead set on the `slot_usage` of a class:
```yaml
# test2.yaml
settings:
  generated_id: "[a-z0-9]{6,}"

classes:
  MyClass:
    slots:
      - id
  FancyIdClass:
    is_a: MyClass
    slot_usage:
      id:
        structured_pattern:
          syntax: "prefix:{generated_id}"

slots:
  id:
```
Calling `materialize_patterns` on the `SchemaView` instance doesn't seem to have any effect. I suppose I would expect the `pattern` to be materialized on the `slot_usage` definition instead in this case.
```python
>>> view = SchemaView('test2.yaml')
>>> view.schema.classes['FancyIdClass'].slot_usage['id'].pattern is None
True
>>> view.materialize_patterns()
>>> # I think I would expect this to not be None at this point
>>> view.schema.classes['FancyIdClass'].slot_usage['id'].pattern is None
True
```
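With the fix above applied, the second example should behave as the reporter expects (the value below is inferred from the `generated_id` setting, not captured from a run):

```python
>>> view = SchemaView('test2.yaml')
>>> view.materialize_patterns()
>>> view.schema.classes['FancyIdClass'].slot_usage['id'].pattern
'prefix:[a-z0-9]{6,}'
```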
|
0.0
|
7188ab550ffce1c44492613239808e4d99748c96
|
[
"tests/test_utils/test_pattern.py::PatternTestCase::test_generate_patterns",
"tests/test_utils/test_pattern.py::PatternTestCase::test_pattern_resolver",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_classes_ordered_lexical",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_classes_ordered_no_ordered_by",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_classes_ordered_rank",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_slots_ordered_lexical",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_all_slots_ordered_rank",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_caching",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_get_classes_by_slot",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_imports",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_materialize_patterns",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_materialize_patterns_attribute",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_materialize_patterns_slot_usage",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_merge_imports",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_metamodel_in_schemaview",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_rollup_rolldown",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_schemaview",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_slot_inheritance",
"tests/test_utils/test_schemaview.py::SchemaViewTestCase::test_traversal"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-04 16:52:09+00:00
|
cc0-1.0
| 3,610 |
|
linkml__linkml-runtime-43
|
diff --git a/linkml_runtime/utils/formatutils.py b/linkml_runtime/utils/formatutils.py
index d1ced86..f43877e 100644
--- a/linkml_runtime/utils/formatutils.py
+++ b/linkml_runtime/utils/formatutils.py
@@ -133,8 +133,9 @@ def remove_empty_items(obj: Any, hide_protected_keys: bool = False, inside: bool
:return: copy of obj with empty items removed or None if obj itself is "empty"
"""
if is_list(obj):
+ # for discussion of logic, see: https://github.com/linkml/linkml-runtime/issues/42
obj_list = [e for e in [remove_empty_items(l, hide_protected_keys=hide_protected_keys, inside=True)
- for l in as_json_obj(obj)] if not is_empty(e)]
+ for l in obj if l != '_root'] if not is_empty(e)]
return obj_list if not inside or not is_empty(obj_list) else None
elif is_dict(obj):
obj_dict = {k: v for k, v in [(k2, remove_empty_items(v2, hide_protected_keys=hide_protected_keys, inside=True))
@@ -146,7 +147,7 @@ def remove_empty_items(obj: Any, hide_protected_keys: bool = False, inside: bool
enum_text = list(obj_dict.values())[0].get('text', None)
if enum_text is not None:
return enum_text
- if hide_protected_keys and len(obj_dict) == 1 and list(obj_dict.keys())[0].startswith('_'):
+ if hide_protected_keys and len(obj_dict) == 1 and str(list(obj_dict.keys())[0]).startswith('_'):
inner_element = list(obj_dict.values())[0]
if isinstance(inner_element, dict):
obj_dict = inner_element
|
linkml/linkml-runtime
|
325ad3df708560b092c5140ee091c5db37eace6a
|
diff --git a/tests/test_loaders_dumpers/models/books_normalized.py b/tests/test_loaders_dumpers/models/books_normalized.py
index 2315655..b06d654 100644
--- a/tests/test_loaders_dumpers/models/books_normalized.py
+++ b/tests/test_loaders_dumpers/models/books_normalized.py
@@ -1,5 +1,5 @@
# Auto generated from books_normalized.yaml by pythongen.py version: 0.9.0
-# Generation date: 2021-07-27 12:45
+# Generation date: 2021-09-10 16:59
# Schema: example
#
# id: https://w3id.org/example
@@ -65,7 +65,7 @@ class CreativeWork(YAMLRoot):
id: Union[str, CreativeWorkId] = None
name: Optional[str] = None
- genres: Optional[Union[str, List[str]]] = empty_list()
+ genres: Optional[Union[Union[str, "GenreEnum"], List[Union[str, "GenreEnum"]]]] = empty_list()
creator: Optional[Union[dict, "Author"]] = None
summary: Optional[str] = None
reviews: Optional[Union[Union[dict, "Review"], List[Union[dict, "Review"]]]] = empty_list()
@@ -81,14 +81,14 @@ class CreativeWork(YAMLRoot):
if not isinstance(self.genres, list):
self.genres = [self.genres] if self.genres is not None else []
- self.genres = [v if isinstance(v, str) else str(v) for v in self.genres]
+ self.genres = [v if isinstance(v, GenreEnum) else GenreEnum(v) for v in self.genres]
if self.creator is not None and not isinstance(self.creator, Author):
self.creator = Author(**as_dict(self.creator))
if not isinstance(self.genres, list):
self.genres = [self.genres] if self.genres is not None else []
- self.genres = [v if isinstance(v, str) else str(v) for v in self.genres]
+ self.genres = [v if isinstance(v, GenreEnum) else GenreEnum(v) for v in self.genres]
if self.summary is not None and not isinstance(self.summary, str):
self.summary = str(self.summary)
@@ -139,7 +139,7 @@ class BookSeries(CreativeWork):
id: Union[str, BookSeriesId] = None
books: Optional[Union[Dict[Union[str, BookId], Union[dict, Book]], List[Union[dict, Book]]]] = empty_dict()
- genres: Optional[Union[str, List[str]]] = empty_list()
+ genres: Optional[Union[Union[str, "GenreEnum"], List[Union[str, "GenreEnum"]]]] = empty_list()
price: Optional[float] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
@@ -152,7 +152,7 @@ class BookSeries(CreativeWork):
if not isinstance(self.genres, list):
self.genres = [self.genres] if self.genres is not None else []
- self.genres = [v if isinstance(v, str) else str(v) for v in self.genres]
+ self.genres = [v if isinstance(v, GenreEnum) else GenreEnum(v) for v in self.genres]
if self.price is not None and not isinstance(self.price, float):
self.price = float(self.price)
@@ -170,7 +170,7 @@ class Author(YAMLRoot):
class_model_uri: ClassVar[URIRef] = EXAMPLE.Author
name: Optional[str] = None
- genres: Optional[Union[str, List[str]]] = empty_list()
+ genres: Optional[Union[Union[str, "GenreEnum"], List[Union[str, "GenreEnum"]]]] = empty_list()
from_country: Optional[Union[str, CountryName]] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
@@ -179,7 +179,7 @@ class Author(YAMLRoot):
if not isinstance(self.genres, list):
self.genres = [self.genres] if self.genres is not None else []
- self.genres = [v if isinstance(v, str) else str(v) for v in self.genres]
+ self.genres = [v if isinstance(v, GenreEnum) else GenreEnum(v) for v in self.genres]
if self.from_country is not None and not isinstance(self.from_country, CountryName):
self.from_country = CountryName(self.from_country)
@@ -286,7 +286,7 @@ slots.creator = Slot(uri=EXAMPLE.creator, name="creator", curie=EXAMPLE.curie('c
model_uri=EXAMPLE.creator, domain=None, range=Optional[Union[dict, Author]])
slots.genres = Slot(uri=EXAMPLE.genres, name="genres", curie=EXAMPLE.curie('genres'),
- model_uri=EXAMPLE.genres, domain=None, range=Optional[Union[str, List[str]]])
+ model_uri=EXAMPLE.genres, domain=None, range=Optional[Union[Union[str, "GenreEnum"], List[Union[str, "GenreEnum"]]]])
slots.from_country = Slot(uri=EXAMPLE.from_country, name="from_country", curie=EXAMPLE.curie('from_country'),
model_uri=EXAMPLE.from_country, domain=None, range=Optional[Union[str, CountryName]])
diff --git a/tests/test_loaders_dumpers/models/books_normalized.yaml b/tests/test_loaders_dumpers/models/books_normalized.yaml
index 0348bf5..c379eec 100644
--- a/tests/test_loaders_dumpers/models/books_normalized.yaml
+++ b/tests/test_loaders_dumpers/models/books_normalized.yaml
@@ -78,8 +78,7 @@ slots:
examples:
- value: Ian M Banks
genres:
- #range: genre_enum
- range: string
+ range: genre_enum
multivalued: true
from_country:
range: country
diff --git a/tests/test_loaders_dumpers/test_csv_loader_dumper.py b/tests/test_loaders_dumpers/test_csv_loader_dumper.py
index 5286e18..720264f 100644
--- a/tests/test_loaders_dumpers/test_csv_loader_dumper.py
+++ b/tests/test_loaders_dumpers/test_csv_loader_dumper.py
@@ -1,13 +1,18 @@
import os
import unittest
import json
+import logging
-from linkml_runtime.dumpers import json_dumper
+from jsonasobj2 import as_json_obj, JsonObj
+
+from linkml_runtime.dumpers import json_dumper, yaml_dumper
from linkml_runtime.loaders import yaml_loader
+from linkml_runtime.utils.formatutils import remove_empty_items, is_empty
from linkml_runtime.utils.schemaview import SchemaView
from linkml_runtime.dumpers import csv_dumper
from linkml_runtime.loaders import csv_loader
-from tests.test_loaders_dumpers.models.books_normalized import Shop, Book
+from linkml_runtime.utils.yamlutils import as_json_object
+from tests.test_loaders_dumpers.models.books_normalized import Shop, Book, GenreEnum, BookSeries
ROOT = os.path.abspath(os.path.dirname(__file__))
@@ -27,24 +32,57 @@ def _json(obj) -> str:
class CSVGenTestCase(unittest.TestCase):
+ def test_object_model(self):
+ book = Book(id='B1', genres=['fantasy'], creator={})
+ print(book.genres)
+ print(type(book.genres[0]))
+ logging.debug(as_json_obj(book.genres[0]))
+ assert str(book.genres[0]) == 'fantasy'
+ assert book.genres[0].code.text == 'fantasy'
+ processed = remove_empty_items(book.genres)
+ print(f'PR={processed}')
+ assert processed[0] == 'fantasy'
+ series = BookSeries(id='S1')
+ series.books.append(book)
+ schemaview = SchemaView(SCHEMA)
+ shop = Shop()
+ shop.all_book_series.append(book)
+ #csvstr = csv_dumper.dumps(shop, index_slot='all_book_series', schemaview=schemaview)
+ #logging.debug(csvstr)
+
def test_csvgen_roundtrip(self):
schemaview = SchemaView(SCHEMA)
data = yaml_loader.load(DATA, target_class=Shop)
csv_dumper.dump(data, to_file=OUTPUT, index_slot='all_book_series', schemaview=schemaview)
roundtrip = csv_loader.load(OUTPUT, target_class=Shop, index_slot='all_book_series', schemaview=schemaview)
- print(json_dumper.dumps(roundtrip))
+ logging.debug(json_dumper.dumps(roundtrip))
+ logging.debug(f'COMPARE 1: {roundtrip}')
+ logging.debug(f'COMPARE 2: {data}')
assert roundtrip == data
def test_csvgen_unroundtrippable(self):
schemaview = SchemaView(SCHEMA)
#schema = YAMLGenerator(SCHEMA).schema
data = yaml_loader.load(DATA2, target_class=Shop)
- #print(json_dumper.dumps(data))
- #print(csv_dumper.dumps(data, index_slot='all_book_series', schema=schema))
+ logging.debug(data.all_book_series[0])
+ logging.debug(data.all_book_series[0].genres[0])
+ assert str(data.all_book_series[0].genres[0]) == 'fantasy'
+ logging.debug(yaml_dumper.dumps(data))
+ logging.debug(json_dumper.dumps(data))
+ processed = remove_empty_items(data)
+ logging.debug(f'PROC {processed["all_book_series"]}')
+ asj = as_json_object(processed, None)
+ logging.debug(f'ASJ {asj["all_book_series"]}')
+ reconstituted_json = json.loads(json_dumper.dumps(data))
+ s0 = reconstituted_json['all_book_series'][0]
+ logging.debug(s0)
+ logging.debug(json_dumper.dumps(data))
+ #logging.debug(csv_dumper.dumps(data, index_slot='all_book_series', schema=schema))
csv_dumper.dump(data, to_file=OUTPUT2, index_slot='all_book_series', schemaview=schemaview)
+ #assert False
roundtrip = csv_loader.load(OUTPUT2, target_class=Shop, index_slot='all_book_series', schemaview=schemaview)
- print(json_dumper.dumps(roundtrip))
- #assert roundtrip == data
+ logging.debug(json_dumper.dumps(roundtrip))
+ assert roundtrip == data
diff --git a/tests/test_utils/test_formatutils.py b/tests/test_utils/test_formatutils.py
index a5e8c69..191e053 100644
--- a/tests/test_utils/test_formatutils.py
+++ b/tests/test_utils/test_formatutils.py
@@ -127,6 +127,8 @@ class FormatUtilsTestCase(unittest.TestCase):
self.assertTrue(is_empty(thing), msg=f"{thing} should clock in as empty")
for thing in non_empty_things:
self.assertFalse(is_empty(thing))
+ obj = JsonObj([])
+ assert is_empty(obj)
def test_remove_empty_items(self):
""" Test the various remove empty items paths """
|
jsonasobj2.as_json_obj turns enums into empty dicts
```python
book = Book(id='B1', genres=['fantasy'])
print(book.genres)
print(type(book.genres[0]))
print(as_json_obj(book.genres[0]))
```
out:
```
[(text='fantasy')]
<class 'tests.test_loaders_dumpers.models.books_normalized.GenreEnum'>
{}
```
This is a problem because `remove_empty_items` calls `as_json_obj` when converting lists, so enum values are flattened to `{}` and then dropped as empty.
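The fix above stops routing lists through `as_json_obj`, so enum members keep their text instead of collapsing to `{}`. A short sketch mirroring the new `test_object_model` test (assumes the `books_normalized` test model is importable):

```python
from linkml_runtime.utils.formatutils import remove_empty_items
from tests.test_loaders_dumpers.models.books_normalized import Book

book = Book(id='B1', genres=['fantasy'], creator={})
# the enum value survives list cleaning instead of becoming {} and vanishing
assert remove_empty_items(book.genres)[0] == 'fantasy'
```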
|
0.0
|
325ad3df708560b092c5140ee091c5db37eace6a
|
[
"tests/test_loaders_dumpers/test_csv_loader_dumper.py::CSVGenTestCase::test_object_model"
] |
[
"tests/test_utils/test_formatutils.py::FormatUtilsTestCase::test_empty_functions",
"tests/test_utils/test_formatutils.py::FormatUtilsTestCase::test_enumerations_case",
"tests/test_utils/test_formatutils.py::FormatUtilsTestCase::test_formats",
"tests/test_utils/test_formatutils.py::FormatUtilsTestCase::test_linestuff",
"tests/test_utils/test_formatutils.py::FormatUtilsTestCase::test_remove_empty_items"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-11 00:43:33+00:00
|
cc0-1.0
| 3,611 |
|
linode__linode_api4-python-188
|
diff --git a/.travis.yml b/.travis.yml
index 9c5621d..ebfe9eb 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,6 +3,7 @@ python:
- "3.6"
- "3.7"
- "3.8"
+ - "3.9"
dist: xenial
install:
- python setup.py install
diff --git a/linode_api4/linode_client.py b/linode_api4/linode_client.py
index b776ffd..eae0a99 100644
--- a/linode_api4/linode_client.py
+++ b/linode_api4/linode_client.py
@@ -218,6 +218,10 @@ class LinodeGroup(Group):
default to True if the Instance is deployed from an Image
or Backup.
:type booted: bool
+ :param tags: A list of tags to apply to the new instance. If any of the
+ tags included do not exist, they will be created as part of
+ this operation.
+ :type tags: list[str]
:returns: A new Instance object, or a tuple containing the new Instance and
the generated password.
@@ -1015,7 +1019,7 @@ class ObjectStorageGroup(Group):
class LinodeClient:
- def __init__(self, token, base_url="https://api.linode.com/v4", user_agent=None):
+ def __init__(self, token, base_url="https://api.linode.com/v4", user_agent=None, page_size=None):
"""
The main interface to the Linode API.
@@ -1031,11 +1035,17 @@ class LinodeClient:
application. Setting this is not necessary, but some
applications may desire this behavior.
:type user_agent: str
+ :param page_size: The default size to request pages at. If not given,
+ the API's default page size is used. Valid values
+ can be found in the API docs, but at time of writing
+ are between 25 and 500.
+ :type page_size: int
"""
self.base_url = base_url
self._add_user_agent = user_agent
self.token = token
self.session = requests.Session()
+ self.page_size = page_size
#: Access methods related to Linodes - see :any:`LinodeGroup` for
#: more information
@@ -1163,7 +1173,12 @@ class LinodeClient:
return j
def _get_objects(self, endpoint, cls, model=None, parent_id=None, filters=None):
- response_json = self.get(endpoint, model=model, filters=filters)
+ # handle non-default page sizes
+ call_endpoint = endpoint
+ if self.page_size is not None:
+ call_endpoint += "?page_size={}".format(self.page_size)
+
+ response_json = self.get(call_endpoint, model=model, filters=filters)
if not "data" in response_json:
raise UnexpectedResponseError("Problem with response!", json=response_json)
@@ -1306,6 +1321,10 @@ class LinodeClient:
:type domain: str
:param master: Whether this is a master (defaults to true)
:type master: bool
+ :param tags: A list of tags to apply to the new domain. If any of the
+ tags included do not exist, they will be created as part of
+ this operation.
+ :type tags: list[str]
:returns: The new Domain object.
:rtype: Domain
@@ -1439,6 +1458,10 @@ class LinodeClient:
:type linode: Instance or int
:param size: The size, in GB, of the new Volume. Defaults to 20.
:type size: int
+ :param tags: A list of tags to apply to the new volume. If any of the
+ tags included do not exist, they will be created as part of
+ this operation.
+ :type tags: list[str]
:returns: The new Volume.
:rtype: Volume
diff --git a/linode_api4/paginated_list.py b/linode_api4/paginated_list.py
index 8d09d03..c6b51b6 100644
--- a/linode_api4/paginated_list.py
+++ b/linode_api4/paginated_list.py
@@ -37,7 +37,8 @@ class PaginatedList(object):
self.page_size = len(page)
self.max_pages = max_pages
self.lists = [ None for _ in range(0, self.max_pages) ]
- self.lists[0] = page
+ if self.lists:
+ self.lists[0] = page
self.list_cls = type(page[0]) if page else None # TODO if this is None that's bad
self.objects_parent_id = parent_id
self.cur = 0 # for being a generator
@@ -84,7 +85,7 @@ class PaginatedList(object):
return "PaginatedList ({} items)".format(self.total_items)
def _load_page(self, page_number):
- j = self.client.get("/{}?page={}".format(self.page_endpoint, page_number+1),
+ j = self.client.get("/{}?page={}&page_size={}".format(self.page_endpoint, page_number+1, self.page_size),
filters=self.query_filters)
if j['pages'] != self.max_pages or j['results'] != len(self):
diff --git a/setup.py b/setup.py
index 0d25334..b8739d9 100755
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,7 @@ setup(
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
- version='3.0.2',
+ version='3.0.4',
description='The official python SDK for Linode API v4',
long_description=long_description,
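A minimal usage sketch of the new `page_size` option (the token is a placeholder; per the docstring, valid values are currently between 25 and 500):

```python
from linode_api4 import LinodeClient

# the first page and every subsequent page request 500 results at a time
client = LinodeClient("my-api-token", page_size=500)
for instance in client.linode.instances():
    print(instance.label)
```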
|
linode/linode_api4-python
|
d83e5c6774e2cccd6a46d802081ac55842f583eb
|
diff --git a/test/paginated_list_test.py b/test/paginated_list_test.py
index 0a411de..77643a5 100644
--- a/test/paginated_list_test.py
+++ b/test/paginated_list_test.py
@@ -1,4 +1,5 @@
from unittest import TestCase
+from unittest.mock import MagicMock, call
from linode_api4.paginated_list import PaginatedList
@@ -76,3 +77,52 @@ class PaginationSlicingTest(TestCase):
Tests that backwards indexing works as expected
"""
self.assertEqual(self.normal_list[10:5], self.paginated_list[10:5])
+
+
+class TestModel():
+ """
+ This is a test model class used to simulate an actual model that would be
+ returned by the API
+ """
+ @classmethod
+ def make_instance(*args, **kwargs):
+ return TestModel()
+
+
+class PageLoadingTest(TestCase):
+ def test_page_size_in_request(self):
+ """
+ Tests that the correct page_size is added to requests when loading subsequent pages
+ """
+
+ for i in (25, 100, 500):
+ # these are the pages we're sending in to the mocked list
+ first_page = [ TestModel() for x in range(i) ]
+ second_page = {
+ "data": [{"id": 1}],
+ "pages": 2,
+ "page": 2,
+ "results": i + 1,
+ }
+
+ # our mock client to intercept the requests and return the mocked info
+ client = MagicMock()
+ client.get = MagicMock(return_value=second_page)
+
+ # let's do it!
+ p = PaginatedList(client, "/test", page=first_page, max_pages=2, total_items=i+1)
+ p[i] # load second page
+
+ # and we called the next page URL with the correct page_size
+ assert client.get.call_args == call("//test?page=2&page_size={}".format(i), filters=None)
+
+ def test_no_pages(self):
+ """
+ Tests that this library correctly handles paginated lists with no data, such
+ as if a paginated endpoint is given a filter that matches nothing.
+ """
+ client = MagicMock()
+
+ p = PaginatedList(client, "/test", page=[], max_pages=0, total_items=0)
+
+ assert(len(p) == 0)
|
Undocumented valid argument "tags" in client.instance_create() and undocumented requirements for client.tag_create()
As per the title, documentation entries are missing for two things:
- The "tags" parameter of the instance_create() function of the LinodeClient() class
- The user-permission requirements of the tag_create() function
This led to a lot of trial and error on my part when I tried to create a new instance with a new tag in a _restricted account_.
The docs clearly say that the [client.tags() function requires an unrestricted account](https://linode-api4.readthedocs.io/en/latest/linode_api4/linode_client.html#linode_api4.LinodeClient.tags). However, the [entry for client.tag_create()](https://linode-api4.readthedocs.io/en/latest/linode_api4/linode_client.html#linode_api4.LinodeClient.tag_create) does not, and the call raises an error when made by a restricted user.
# Example
When trying to create a new tag and apply it to all or any instances:
```
client = LinodeClient(settings.get("API_key"))
instance = client.instance_create(.....)
client.tag_create(label="random", instances=instance)
```
This results in a 403 response from the API:
```
Traceback (most recent call last):
File "./deploy.py", line 53, in <module>
main()
File "./deploy.py", line 31, in main
tags = client.tag_create(label="random")
File "/usr/local/lib/python3.7/dist-packages/linode_api4/linode_client.py", line 1408, in tag_create
result = self.post('/tags', data=params)
File "/usr/local/lib/python3.7/dist-packages/linode_api4/linode_client.py", line 1185, in post
return self._api_call(*args, method=self.session.post, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/linode_api4/linode_client.py", line 1156, in _api_call
raise ApiError(error_msg, status=response.status_code, json=j)
linode_api4.errors.ApiError: 403: Unauthorized;
```
# Counterexample
This is not to say that new instances cannot be tagged: the alternative is to specify the tag when creating a new instance, but this is missing from the documentation.
Here's some example code that has worked for me:
```
[...]
client = LinodeClient(settings.get("API_key"))
new_linode, new_password = client.linode.instance_create(settings.get("type"),
settings.get("region"),
tags=['random'],
image=deployment_image,
authorized_keys=settings.get("ssh").get("key"),
label=label,
booted=True)
```
However, this is *not documented* in the [entry for the instance_create() function](https://linode-api4.readthedocs.io/en/latest/linode_api4/linode_client.html#linode_api4.linode_client.LinodeGroup.instance_create)
# Suggestions
1) Add a note to the tag_create() class [documentation entry](https://linode-api4.readthedocs.io/en/latest/linode_api4/linode_client.html#linode_api4.LinodeClient.tag_create) notifying developers of the restriction (similar to how the [client.tags() entry](https://linode-api4.readthedocs.io/en/latest/linode_api4/linode_client.html#linode_api4.LinodeClient.tags) does)
2) Add the parameter "tags" to the supported list of arguments in the client.instance_create() [documentation](https://linode-api4.readthedocs.io/en/latest/linode_api4/linode_client.html#linode_api4.linode_client.LinodeGroup.instance_create) and [source](https://linode-api4.readthedocs.io/en/latest/_modules/linode_api4/linode_client.html#LinodeGroup.instance_create)
Edit: grammar and spelling
|
0.0
|
d83e5c6774e2cccd6a46d802081ac55842f583eb
|
[
"test/paginated_list_test.py::PageLoadingTest::test_no_pages",
"test/paginated_list_test.py::PageLoadingTest::test_page_size_in_request"
] |
[
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_backward_indexing",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_bad_index",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_bad_negative_index",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_boundless",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_negative",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_no_lower_bound",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_no_upper_bound",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_normal",
"test/paginated_list_test.py::PaginationSlicingTest::test_slice_unsupported_step"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-12 12:59:31+00:00
|
bsd-3-clause
| 3,612 |
|
linw1995__data_extractor-10
|
diff --git a/data_extractor/abc.py b/data_extractor/abc.py
index 84f6dd4..b41f71e 100644
--- a/data_extractor/abc.py
+++ b/data_extractor/abc.py
@@ -4,19 +4,29 @@ Abstract Base Classes.
# Standard Library
import warnings
-from abc import ABC, abstractmethod
-from typing import Any
+from abc import abstractmethod
+from typing import Any, Dict, List, Tuple
+# Local Folder
+from .utils import sentinel
-class __Sentinel:
- def __repr__(self) -> str:
- return "sentinel"
+class ComplexExtractorMeta(type):
+ """
+ Complex Extractor Meta Class.
+ """
+
+ def __init__(cls, name: str, bases: Tuple[type], attr_dict: Dict[str, Any]):
+ super().__init__(name, bases, attr_dict)
+ field_names: List[str] = []
+ for key, attr in attr_dict.items():
+ if isinstance(type(attr), ComplexExtractorMeta):
+ field_names.append(key)
-sentinel = __Sentinel()
+ cls._field_names = field_names
-class AbstractExtractor(ABC):
+class AbstractExtractor(metaclass=ComplexExtractorMeta):
def __init__(self, expr: str):
self.expr = expr
@@ -30,6 +40,8 @@ class AbstractExtractor(ABC):
"""
raise NotImplementedError
+
+class ExtractFirstMixin(AbstractExtractor):
def extract_first(self, element: Any, default: Any = sentinel) -> Any:
"""
Extract the first data or subelement from `extract` method call result.
@@ -43,11 +55,13 @@ class AbstractExtractor(ABC):
if not rv:
if default is sentinel:
- raise ValueError(f"Invalid {self!r}")
+ from .exceptions import ExtractError
+
+ raise ExtractError(self, element)
return default
return rv[0]
-__all__ = ("AbstractExtractor", "sentinel")
+__all__ = ("AbstractExtractor", "ComplexExtractorMeta", "ExtractFirstMixin")
diff --git a/data_extractor/exceptions.py b/data_extractor/exceptions.py
index ede9879..af6934e 100644
--- a/data_extractor/exceptions.py
+++ b/data_extractor/exceptions.py
@@ -1,8 +1,14 @@
"""
Exceptions.
"""
+# Standard Library
+import reprlib
+
+from typing import Any
+
# Local Folder
from .abc import AbstractExtractor
+from .utils import LazyStr
class ExprError(Exception):
@@ -15,7 +21,31 @@ class ExprError(Exception):
self.exc = exc
def __repr__(self) -> str:
- return f"{self.__class__.__name__}({self.extractor!r}, {self.exc!r})"
+ return f"{self.__class__.__name__}({self.extractor!r}, exc={self.exc!r})"
+
+
+class ExtractError(Exception):
+ """
+ ExtractError thrown by extractor extracting data.
+ """
+
+ def __init__(self, extractor: AbstractExtractor, element: Any):
+ super().__init__(LazyStr(func=lambda: self._trace_repr))
+ self.element = element
+ self.extractors = [extractor]
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({self.extractors[0]!r}, element={reprlib.repr(self.element)})"
+
+ def _append(self, extractor: AbstractExtractor) -> None:
+ self.extractors.append(extractor)
+
+ @property
+ def _trace_repr(self) -> str:
+ return f"{self.__repr__()}\n" + "\n".join(
+ " " * idx + "|-" + repr(extractor)
+ for idx, extractor in enumerate([*self.extractors[::-1], self.element])
+ )
-__all__ = ("ExprError",)
+__all__ = ("ExprError", "ExtractError")
diff --git a/data_extractor/item.py b/data_extractor/item.py
index 9a4c0f6..f6de68e 100644
--- a/data_extractor/item.py
+++ b/data_extractor/item.py
@@ -4,28 +4,15 @@ Complex Extractor for data extracting.
# Standard Library
import warnings
-from typing import Any, Dict, Iterator, List, Tuple
+from typing import Any, Iterator
# Local Folder
-from .abc import AbstractExtractor, sentinel
+from .abc import AbstractExtractor
+from .exceptions import ExtractError
+from .utils import sentinel
-class FieldMeta(type):
- """
- Complex Extractor Meta Class.
- """
-
- def __init__(cls, name: str, bases: Tuple[type], attr_dict: Dict[str, Any]):
- super().__init__(name, bases, attr_dict)
- field_names: List[str] = []
- for key, attr in attr_dict.items():
- if isinstance(type(attr), FieldMeta):
- field_names.append(key)
-
- cls._field_names = field_names
-
-
-class Field(metaclass=FieldMeta):
+class Field(AbstractExtractor):
"""
Extract data by cooperating with extractor.
"""
@@ -44,7 +31,7 @@ class Field(metaclass=FieldMeta):
self.is_many = is_many
def __repr__(self) -> str:
- return f"{self.__class__.__name__}(extractor={self.extractor!r}, default={self.default!r}, is_many={self.is_many})"
+ return f"{self.__class__.__name__}({self.extractor!r}, default={self.default!r}, is_many={self.is_many})"
def extract(self, element: Any) -> Any:
"""
@@ -65,7 +52,7 @@ class Field(metaclass=FieldMeta):
if not rv:
if self.default is sentinel:
- raise ValueError(f"Invalid {self!r}")
+ raise ExtractError(self, element)
return self.default
@@ -83,7 +70,11 @@ class Item(Field):
def _extract(self, element: Any) -> Any:
rv = {}
for field in self.field_names():
- rv[field] = getattr(self, field).extract(element)
+ try:
+ rv[field] = getattr(self, field).extract(element)
+ except ExtractError as exc:
+ exc._append(extractor=self)
+ raise exc
return rv
@@ -96,4 +87,4 @@ class Item(Field):
yield name
-__all__ = ("Field", "FieldMeta", "Item")
+__all__ = ("Field", "Item")
diff --git a/data_extractor/json.py b/data_extractor/json.py
index b5c2fa5..38aaaae 100644
--- a/data_extractor/json.py
+++ b/data_extractor/json.py
@@ -8,10 +8,10 @@ from typing import Any
import jsonpath_rw
# Local Folder
-from .abc import AbstractExtractor
+from .abc import ExtractFirstMixin
-class JSONExtractor(AbstractExtractor):
+class JSONExtractor(ExtractFirstMixin):
"""
Use JSONPath expression for JSON data extracting.
diff --git a/data_extractor/lxml.py b/data_extractor/lxml.py
index 4942444..dd7c74e 100644
--- a/data_extractor/lxml.py
+++ b/data_extractor/lxml.py
@@ -9,11 +9,11 @@ from lxml.etree import XPathEvalError
from lxml.etree import _Element as Element
# Local Folder
-from .abc import AbstractExtractor
+from .abc import ExtractFirstMixin
from .exceptions import ExprError
-class CSSExtractor(AbstractExtractor):
+class CSSExtractor(ExtractFirstMixin):
"""
Use CSS Selector for XML or HTML data subelements extracting.
@@ -27,7 +27,7 @@ class CSSExtractor(AbstractExtractor):
return element.cssselect(self.expr)
-class TextCSSExtractor(AbstractExtractor):
+class TextCSSExtractor(ExtractFirstMixin):
"""
Use CSS Selector for XML or HTML data subelements' text extracting.
@@ -41,7 +41,7 @@ class TextCSSExtractor(AbstractExtractor):
return [ele.text for ele in CSSExtractor(self.expr).extract(element)]
-class AttrCSSExtractor(AbstractExtractor):
+class AttrCSSExtractor(ExtractFirstMixin):
"""
Use CSS Selector for XML or HTML data subelements' attribute value extracting.
@@ -66,7 +66,7 @@ class AttrCSSExtractor(AbstractExtractor):
]
-class XPathExtractor(AbstractExtractor):
+class XPathExtractor(ExtractFirstMixin):
"""
Use XPath for XML or HTML data extracting.
diff --git a/data_extractor/utils.py b/data_extractor/utils.py
new file mode 100644
index 0000000..eabc2a0
--- /dev/null
+++ b/data_extractor/utils.py
@@ -0,0 +1,27 @@
+# Standard Library
+from typing import Callable
+
+
+class __Sentinel:
+ """ Singleton """
+
+ def __repr__(self) -> str:
+ return "sentinel"
+
+
+sentinel = __Sentinel()
+
+
+class LazyStr:
+ """
+ Lazy String
+ """
+
+ def __init__(self, func: Callable[[], str]):
+ self.func = func
+
+ def __str__(self) -> str:
+ return self.func()
+
+
+__all__ = ("LazyStr", "sentinel")
|
linw1995/data_extractor
|
14cc0c09b4fda68586793b95e7dd8309c29b3718
|
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..6e6c8e9
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,22 @@
+# Third Party Library
+import pytest
+
+
[email protected]
+def json0():
+ return {
+ "data": {
+ "users": [
+ {"id": 0, "name": "Vang Stout", "gender": "female"},
+ {"id": 1, "name": "Jeannie Gaines", "gender": "male"},
+ {"id": 2, "name": "Guzman Hunter", "gender": "female"},
+ {"id": 3, "name": "Janine Gross"},
+ {"id": 4, "name": "Clarke Patrick", "gender": "male"},
+ {"id": 5, "name": "Whitney Mcfadden"},
+ ],
+ "start": 0,
+ "size": 5,
+ "total": 100,
+ },
+ "status": 0,
+ }
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
new file mode 100644
index 0000000..decde7e
--- /dev/null
+++ b/tests/test_exceptions.py
@@ -0,0 +1,49 @@
+# Standard Library
+import textwrap
+
+# Third Party Library
+import pytest
+
+# First Party Library
+from data_extractor.exceptions import ExtractError
+from data_extractor.item import Field, Item
+from data_extractor.json import JSONExtractor
+
+
+def test_exception_trace(json0):
+ data = json0
+
+ class User(Item):
+ uid = Field(JSONExtractor("id"))
+ name = Field(JSONExtractor("name"))
+ gender = Field(JSONExtractor("gender"))
+
+ class UserResponse(Item):
+ start = Field(JSONExtractor("start"), default=0)
+ size = Field(JSONExtractor("size"))
+ total = Field(JSONExtractor("total"))
+ data = User(JSONExtractor("users[*]"), is_many=True)
+
+ extractor = UserResponse(JSONExtractor("data"))
+ with pytest.raises(ExtractError) as catch:
+ extractor.extract(data)
+
+ exc = catch.value
+ assert len(exc.extractors) == 3
+ assert exc.extractors[0] is User.gender
+ assert exc.extractors[1] is UserResponse.data
+ assert exc.extractors[2] is extractor
+ assert exc.element == {"id": 3, "name": "Janine Gross"}
+
+ assert (
+ str(exc.args[0])
+ == textwrap.dedent(
+ """
+ ExtractError(Field(JSONExtractor('gender'), default=sentinel, is_many=False), element={'id': 3, 'name': 'Janine Gross'})
+ |-UserResponse(JSONExtractor('data'), default=sentinel, is_many=False)
+ |-User(JSONExtractor('users[*]'), default=sentinel, is_many=True)
+ |-Field(JSONExtractor('gender'), default=sentinel, is_many=False)
+ |-{'id': 3, 'name': 'Janine Gross'}
+ """
+ ).strip()
+ )
diff --git a/tests/test_item.py b/tests/test_item.py
index c8af9e5..e6b518b 100644
--- a/tests/test_item.py
+++ b/tests/test_item.py
@@ -5,6 +5,7 @@ from pathlib import Path
import pytest
# First Party Library
+from data_extractor.exceptions import ExtractError
from data_extractor.item import Field, Item
from data_extractor.json import JSONExtractor
from data_extractor.lxml import CSSExtractor, TextCSSExtractor, XPathExtractor
@@ -81,8 +82,14 @@ def test_field_extract_with_default(element0, Extractor, expr, expect):
ids=repr,
)
def test_field_extract_without_default(element0, Extractor, expr):
- with pytest.raises(ValueError):
- Field(Extractor(expr)).extract(element0)
+ extractor = Field(Extractor(expr))
+ with pytest.raises(ExtractError) as catch:
+ extractor.extract(element0)
+
+ exc = catch.value
+ assert len(exc.extractors) == 1
+ assert exc.extractors[0] is extractor
+ assert exc.element is element0
def test_field_parameters_conflict():
@@ -167,8 +174,15 @@ def element2():
def test_item_extract_failure_when_last_field_missing(element2, Article0):
- with pytest.raises(ValueError):
- Article0(CSSExtractor("li.article"), is_many=True).extract(element2)
+ extractor = Article0(CSSExtractor("li.article"), is_many=True)
+ with pytest.raises(ExtractError) as catch:
+ extractor.extract(element2)
+
+ exc = catch.value
+ assert len(exc.extractors) == 2
+ assert exc.extractors[0] is Article0.content
+ assert exc.extractors[1] is extractor
+ assert exc.element is element2.xpath("//li[@class='article'][2]")[0]
def test_item_extract_success_without_is_many_when_last_field_missing(
@@ -258,23 +272,8 @@ def test_complex_item_extract_xml_data():
}
-def test_complex_item_extract_json_data():
- data = {
- "data": {
- "users": [
- {"id": 0, "name": "Vang Stout", "gender": "female"},
- {"id": 1, "name": "Jeannie Gaines", "gender": "male"},
- {"id": 2, "name": "Guzman Hunter", "gender": "female"},
- {"id": 3, "name": "Janine Gross"},
- {"id": 4, "name": "Clarke Patrick", "gender": "male"},
- {"id": 5, "name": "Whitney Mcfadden"},
- ],
- "start": 0,
- "size": 5,
- "total": 100,
- },
- "status": 0,
- }
+def test_complex_item_extract_json_data(json0):
+ data = json0
class User(Item):
uid = Field(JSONExtractor("id"))
diff --git a/tests/test_lxml.py b/tests/test_lxml.py
index 28ff66a..058d36b 100644
--- a/tests/test_lxml.py
+++ b/tests/test_lxml.py
@@ -4,7 +4,7 @@ import pytest
from lxml.etree import XPathEvalError
# First Party Library
-from data_extractor.exceptions import ExprError
+from data_extractor.exceptions import ExprError, ExtractError
from data_extractor.lxml import AttrCSSExtractor, TextCSSExtractor, XPathExtractor
@@ -87,9 +87,14 @@ def test_extract_first(element, Extractor, expr, expect):
)
def test_extract_first_without_default(element, Extractor, expr):
extractor = Extractor(expr)
- with pytest.raises(ValueError):
+ with pytest.raises(ExtractError) as catch:
extractor.extract_first(element)
+ exc = catch.value
+ assert len(exc.extractors) == 1
+ assert exc.extractors[0] is extractor
+ assert exc.element is element
+
@pytest.mark.parametrize(
"expr,attr,expect",
@@ -128,18 +133,24 @@ def test_attr_css_extract_first(element, expr, attr, expect):
)
def test_attr_css_extract_first_without_default(element, expr, attr):
extractor = AttrCSSExtractor(expr=expr, attr=attr)
- with pytest.raises(ValueError):
+ with pytest.raises(ExtractError) as catch:
extractor.extract_first(element)
+ exc = catch.value
+ assert len(exc.extractors) == 1
+ assert exc.extractors[0] is extractor
+ assert exc.element is element
+
@pytest.mark.parametrize("expr", ["///", "/text(", ""])
def test_invalid_xpath_expr(element, expr):
extractor = XPathExtractor(expr)
- with pytest.raises(ExprError) as exc_info:
+ with pytest.raises(ExprError) as catch:
extractor.extract(element)
- assert exc_info.value.extractor is extractor
- assert isinstance(exc_info.value.exc, XPathEvalError)
+ exc = catch.value
+ assert exc.extractor is extractor
+ assert isinstance(exc.exc, XPathEvalError)
def test_xpath_result_not_list(element):
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..3310399
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,16 @@
+# First Party Library
+from data_extractor.utils import LazyStr
+
+
+def test_lazy_str():
+ string = ""
+
+ def func():
+ nonlocal string
+ return string
+
+ ls = LazyStr(func=func)
+ assert str(ls) == ""
+
+ string = "abc"
+ assert str(ls) == "abc"
|
Make it easy to trace exceptions thrown by a complex extractor while extracting data.
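For context, this is the kind of trace the new `ExtractError` produces (reproduced from the test above):

```
ExtractError(Field(JSONExtractor('gender'), default=sentinel, is_many=False), element={'id': 3, 'name': 'Janine Gross'})
|-UserResponse(JSONExtractor('data'), default=sentinel, is_many=False)
|-User(JSONExtractor('users[*]'), default=sentinel, is_many=True)
|-Field(JSONExtractor('gender'), default=sentinel, is_many=False)
|-{'id': 3, 'name': 'Janine Gross'}
```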
|
0.0
|
14cc0c09b4fda68586793b95e7dd8309c29b3718
|
[
"tests/test_exceptions.py::test_exception_trace",
"tests/test_item.py::test_field_extract[<class",
"tests/test_item.py::test_field_extract_with_is_many[<class",
"tests/test_item.py::test_field_extract_with_default[<class",
"tests/test_item.py::test_field_extract_without_default[<class",
"tests/test_item.py::test_field_parameters_conflict",
"tests/test_item.py::test_field_xpath_extract_result_not_list",
"tests/test_item.py::test_field_xpath_extract_result_not_list_conflict_with_is_many",
"tests/test_item.py::test_item_extract",
"tests/test_item.py::test_item_extract_without_is_many",
"tests/test_item.py::test_item_extract_failure_when_last_field_missing",
"tests/test_item.py::test_item_extract_success_without_is_many_when_last_field_missing",
"tests/test_item.py::test_complex_item_extract_xml_data",
"tests/test_item.py::test_complex_item_extract_json_data",
"tests/test_lxml.py::test_extract[<class",
"tests/test_lxml.py::test_extract_first[<class",
"tests/test_lxml.py::test_extract_first_without_default[<class",
"tests/test_lxml.py::test_attr_css_extract['span.class_a'-'class'-['class_a']]",
"tests/test_lxml.py::test_attr_css_extract['span.class_b'-'class'-['class_b']]",
"tests/test_lxml.py::test_attr_css_extract['span'-'class'-['class_a',",
"tests/test_lxml.py::test_attr_css_extract['span'-'notexists'-[]]",
"tests/test_lxml.py::test_attr_css_extract['notexists'-'class'-[]]",
"tests/test_lxml.py::test_attr_css_extract_first['span.class_a'-'class'-'class_a']",
"tests/test_lxml.py::test_attr_css_extract_first['span.class_b'-'class'-'class_b']",
"tests/test_lxml.py::test_attr_css_extract_first['span'-'class'-'class_a']",
"tests/test_lxml.py::test_attr_css_extract_first['span'-'notexists'-'default']",
"tests/test_lxml.py::test_attr_css_extract_first['notexists'-'class'-'default']",
"tests/test_lxml.py::test_attr_css_extract_first_without_default['span'-'notexists']",
"tests/test_lxml.py::test_attr_css_extract_first_without_default['notexists'-'class']",
"tests/test_lxml.py::test_invalid_xpath_expr[///]",
"tests/test_lxml.py::test_invalid_xpath_expr[/text(]",
"tests/test_lxml.py::test_invalid_xpath_expr[]",
"tests/test_lxml.py::test_xpath_result_not_list",
"tests/test_utils.py::test_lazy_str"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-23 15:10:48+00:00
|
mit
| 3,613 |
|
linw1995__data_extractor-18
|
diff --git a/data_extractor/abc.py b/data_extractor/abc.py
index 0c2434e..115b15e 100644
--- a/data_extractor/abc.py
+++ b/data_extractor/abc.py
@@ -3,6 +3,7 @@
====================================
"""
# Standard Library
+import inspect
import warnings
from abc import abstractmethod
@@ -20,8 +21,41 @@ class ComplexExtractorMeta(type):
def __init__(cls, name: str, bases: Tuple[type], attr_dict: Dict[str, Any]):
super().__init__(name, bases, attr_dict)
field_names: List[str] = []
+
+ __init_args = inspect.getfullargspec(getattr(cls, "__init__")).args
+
for key, attr in attr_dict.items():
if isinstance(type(attr), ComplexExtractorMeta):
+ if key in __init_args:
+ frame = inspect.currentframe()
+ assert (
+ frame is not None
+ ), "If running in an implementation without Python stack frame support this function returns None."
+ try:
+ outer_frame = frame.f_back
+
+ filename = outer_frame.f_code.co_filename
+ firstlineno = outer_frame.f_lineno
+ lines, _ = inspect.findsource(outer_frame)
+
+ for lineno, line in enumerate(lines[firstlineno:], start=1):
+ if line.strip().startswith(key):
+ break
+ else: # pragma: no cover
+ assert False, "This line is never executed."
+
+ lineno += firstlineno
+ index = inspect.indentsize(line)
+ finally:
+ del outer_frame
+ del frame
+
+ line = line.strip()
+ raise SyntaxError(
+ f"{line!r} overwriten the parameter {key!r} of '{name}.__init__' method.",
+ (filename, lineno, index, line),
+ )
+
field_names.append(key)
cls._field_names = field_names
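The metaclass locates the offending attribute by stepping one frame up (into the class body being executed) and scanning the module source for the attribute name. A simplified standalone sketch of that frame/source lookup, independent of data_extractor:

```python
import inspect

def find_definition_line(name: str):
    """Best-effort lookup of the caller's source line that defines `name`."""
    frame = inspect.currentframe()
    try:
        outer = frame.f_back
        filename = outer.f_code.co_filename
        firstlineno = outer.f_lineno
        lines, _ = inspect.findsource(outer)
        for lineno, line in enumerate(lines[firstlineno:], start=firstlineno + 1):
            if line.strip().startswith(name):
                return filename, lineno, line.strip()
    finally:
        # drop frame references explicitly to avoid reference cycles
        del outer, frame
```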
|
linw1995/data_extractor
|
ee4cc11a17b617461cf93168bbcd81fbbc14db07
|
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
index decde7e..7f8cbe9 100644
--- a/tests/test_exceptions.py
+++ b/tests/test_exceptions.py
@@ -1,4 +1,5 @@
# Standard Library
+import inspect
import textwrap
# Third Party Library
@@ -8,6 +9,7 @@ import pytest
from data_extractor.exceptions import ExtractError
from data_extractor.item import Field, Item
from data_extractor.json import JSONExtractor
+from data_extractor.lxml import XPathExtractor
def test_exception_trace(json0):
@@ -47,3 +49,17 @@ def test_exception_trace(json0):
"""
).strip()
)
+
+
+def test_field_name_overwrite_item_parameter():
+ with pytest.raises(SyntaxError) as catch:
+
+ class Parameter(Item):
+ name = Field(XPathExtractor("./span[@class='name']"))
+ default = Field(XPathExtractor("./span[@class='default']"))
+
+ exc = catch.value
+ assert exc.filename == __file__
+ assert exc.lineno == inspect.currentframe().f_lineno - 4
+ assert exc.offset == 12
+ assert exc.text == "default = Field(XPathExtractor(\"./span[@class='default']\"))"
|
An `Item` field whose name matches one of `Item.__init__`'s parameters is silently overwritten by that parameter.
```python3
class Parameter(Item):
    name = Field(XPathExtractor("./span[@class='name']"))
    default = Field(XPathExtractor("./span[@class='default']"))

extractor = Parameter(XPathExtractor("./li[@class='parameter']"), default=None)
```
the `default=None` argument overwrites the `default = Field(XPathExtractor("./span[@class='default']"))` field defined on the Item.
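The mechanics are plain attribute shadowing: `Field.__init__` assigns `self.default = default`, and that instance attribute then hides the class-level `default = Field(...)` that `Item._extract` would otherwise find via `getattr`. A stripped-down illustration:

```python
class Base:
    def __init__(self, default=None):
        self.default = default  # instance attribute

class Parameter(Base):
    default = "pretend this is a Field"  # class attribute

p = Parameter(default=None)
# getattr resolves the instance attribute first, silently hiding the field
assert p.default is None
```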
|
0.0
|
ee4cc11a17b617461cf93168bbcd81fbbc14db07
|
[
"tests/test_exceptions.py::test_field_name_overwrite_item_parameter"
] |
[
"tests/test_exceptions.py::test_exception_trace"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-17 16:24:58+00:00
|
mit
| 3,614 |
|
linw1995__data_extractor-19
|
diff --git a/data_extractor/abc.py b/data_extractor/abc.py
index 115b15e..5c3df63 100644
--- a/data_extractor/abc.py
+++ b/data_extractor/abc.py
@@ -52,7 +52,10 @@ class ComplexExtractorMeta(type):
line = line.strip()
raise SyntaxError(
- f"{line!r} overwriten the parameter {key!r} of '{name}.__init__' method.",
+ (
+ f"{line!r} overwriten the parameter {key!r} of '{name}.__init__' method. "
+ f"Please using the optional parameter name={key!r} in {attr!r} to avoid overwriting parameter name."
+ ),
(filename, lineno, index, line),
)
diff --git a/data_extractor/item.py b/data_extractor/item.py
index c3dcbd0..0156388 100644
--- a/data_extractor/item.py
+++ b/data_extractor/item.py
@@ -18,6 +18,7 @@ class Field(AbstractExtractor):
Extract data by cooperating with extractor.
:param extractor: The object for data extracting base on :class:`data_extractor.abc.SimpleExtractor`.
+ :param name: Optional parameter for special field name.
:param default: Default value when not found. Default: :data:`data_extractor.utils.sentinel`.
:param is_many: Indicate the data which extractor extracting is more than one.
@@ -28,6 +29,7 @@ class Field(AbstractExtractor):
def __init__(
self,
extractor: SimpleExtractorBase,
+ name: str = None,
default: Any = sentinel,
is_many: bool = False,
):
@@ -38,11 +40,22 @@ class Field(AbstractExtractor):
raise ValueError(f"Can't both set default={default} and is_many=True")
self.extractor = extractor
+ self.name = name
self.default = default
self.is_many = is_many
def __repr__(self) -> str:
- return f"{self.__class__.__name__}({self.extractor!r}, default={self.default!r}, is_many={self.is_many})"
+ args = [f"{self.extractor!r}"]
+ if self.name is not None:
+ args.append(f"name={self.name!r}")
+
+ if self.default is not sentinel:
+ args.append(f"default={self.default!r}")
+
+ if self.is_many:
+ args.append(f"is_many={self.is_many!r}")
+
+ return f"{self.__class__.__name__}({', '.join(args)})"
def extract(self, element: Any) -> Any:
"""
@@ -88,7 +101,11 @@ class Item(Field):
rv = {}
for field in self.field_names():
try:
- rv[field] = getattr(self, field).extract(element)
+ extractor = getattr(self, field)
+ if extractor.name is not None:
+ field = extractor.name
+
+ rv[field] = extractor.extract(element)
except ExtractError as exc:
exc._append(extractor=self)
raise exc
|
linw1995/data_extractor
|
99a4a7f9ca7908d9e15399a8cfb96a50af7a1f01
|
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
index 7f8cbe9..747a944 100644
--- a/tests/test_exceptions.py
+++ b/tests/test_exceptions.py
@@ -1,5 +1,4 @@
# Standard Library
-import inspect
import textwrap
# Third Party Library
@@ -9,7 +8,6 @@ import pytest
from data_extractor.exceptions import ExtractError
from data_extractor.item import Field, Item
from data_extractor.json import JSONExtractor
-from data_extractor.lxml import XPathExtractor
def test_exception_trace(json0):
@@ -17,7 +15,7 @@ def test_exception_trace(json0):
class User(Item):
uid = Field(JSONExtractor("id"))
- name = Field(JSONExtractor("name"))
+ username = Field(JSONExtractor("name"), name="name")
gender = Field(JSONExtractor("gender"))
class UserResponse(Item):
@@ -41,25 +39,11 @@ def test_exception_trace(json0):
str(exc.args[0])
== textwrap.dedent(
"""
- ExtractError(Field(JSONExtractor('gender'), default=sentinel, is_many=False), element={'id': 3, 'name': 'Janine Gross'})
- |-UserResponse(JSONExtractor('data'), default=sentinel, is_many=False)
- |-User(JSONExtractor('users[*]'), default=sentinel, is_many=True)
- |-Field(JSONExtractor('gender'), default=sentinel, is_many=False)
+ ExtractError(Field(JSONExtractor('gender')), element={'id': 3, 'name': 'Janine Gross'})
+ |-UserResponse(JSONExtractor('data'))
+ |-User(JSONExtractor('users[*]'), is_many=True)
+ |-Field(JSONExtractor('gender'))
|-{'id': 3, 'name': 'Janine Gross'}
"""
).strip()
)
-
-
-def test_field_name_overwrite_item_parameter():
- with pytest.raises(SyntaxError) as catch:
-
- class Parameter(Item):
- name = Field(XPathExtractor("./span[@class='name']"))
- default = Field(XPathExtractor("./span[@class='default']"))
-
- exc = catch.value
- assert exc.filename == __file__
- assert exc.lineno == inspect.currentframe().f_lineno - 4
- assert exc.offset == 12
- assert exc.text == "default = Field(XPathExtractor(\"./span[@class='default']\"))"
diff --git a/tests/test_item.py b/tests/test_item.py
index 8181efb..13fe5a7 100644
--- a/tests/test_item.py
+++ b/tests/test_item.py
@@ -1,4 +1,6 @@
# Standard Library
+import inspect
+
from pathlib import Path
# Third Party Library
@@ -277,7 +279,7 @@ def test_complex_item_extract_json_data(json0):
class User(Item):
uid = Field(JSONExtractor("id"))
- name = Field(JSONExtractor("name"))
+ username = Field(JSONExtractor("name"), name="name")
gender = Field(JSONExtractor("gender"), default=None)
class UserResponse(Item):
@@ -312,3 +314,64 @@ def test_misplacing():
with pytest.raises(ValueError):
Field(extractor=ComplexExtractor(extractor=JSONExtractor("users[*]")))
+
+
+def test_field_name_overwrite_item_parameter():
+ with pytest.raises(SyntaxError) as catch:
+
+ class User(Item):
+ uid = Field(JSONExtractor("id"))
+ name = Field(JSONExtractor("name"))
+
+ exc = catch.value
+ assert exc.filename == __file__
+ assert exc.lineno == inspect.currentframe().f_lineno - 4
+ assert exc.offset == 12
+ assert exc.text == 'name = Field(JSONExtractor("name"))'
+
+
+def test_avoid_field_name_overwriting_item_parameter(json0):
+ data = json0
+
+ with pytest.raises(SyntaxError):
+
+ class User(Item):
+ uid = Field(JSONExtractor("id"))
+ name = Field(JSONExtractor("name"))
+
+ class User(Item): # noqa
+ uid = Field(JSONExtractor("id"))
+ username = Field(JSONExtractor("name"), name="name")
+
+ assert User(JSONExtractor("data.users[*]")).extract(data) == {
+ "uid": 0,
+ "name": "Vang Stout",
+ }
+
+
+def test_special_field_name(json0):
+ data = json0
+
+ class User(Item):
+ uid = Field(JSONExtractor("id"))
+ username = Field(JSONExtractor("name"), name="user.name")
+
+ assert User(JSONExtractor("data.users[*]")).extract(data) == {
+ "uid": 0,
+ "user.name": "Vang Stout",
+ }
+
+
+def test_special_field_name_in_the_nested_class_definition(json0):
+ data = json0
+
+ class User(Item):
+ uid = Field(JSONExtractor("id"))
+ username = Field(JSONExtractor("name"), name="name")
+
+ class UserResponse(Item):
+ _ = User(JSONExtractor("users[*]"), name="data")
+
+ first_row = {"uid": 0, "name": "Vang Stout"}
+ assert User(JSONExtractor("data.users[*]")).extract(data) == first_row
+ assert UserResponse(JSONExtractor("data")).extract(data) == {"data": first_row}
|
Add an optional `name` parameter to `Field` and `Item` for setting a custom field name in the extracted output.
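A usage sketch mirroring the new tests: `name=` both sidesteps attribute names that would collide with `__init__` parameters and permits output keys that aren't valid Python identifiers (`data` here stands for the `json0` fixture used in the tests):

```python
from data_extractor.item import Field, Item
from data_extractor.json import JSONExtractor

class User(Item):
    uid = Field(JSONExtractor("id"))
    # "name" itself would shadow the __init__ parameter, so alias the key
    username = Field(JSONExtractor("name"), name="user.name")

User(JSONExtractor("data.users[*]")).extract(data)
# -> {"uid": 0, "user.name": "Vang Stout"}
```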
|
0.0
|
99a4a7f9ca7908d9e15399a8cfb96a50af7a1f01
|
[
"tests/test_exceptions.py::test_exception_trace",
"tests/test_item.py::test_complex_item_extract_json_data",
"tests/test_item.py::test_field_name_overwrite_item_parameter",
"tests/test_item.py::test_avoid_field_name_overwriting_item_parameter",
"tests/test_item.py::test_special_field_name",
"tests/test_item.py::test_special_field_name_in_the_nested_class_definition"
] |
[
"tests/test_item.py::test_field_extract[<class",
"tests/test_item.py::test_field_extract_with_is_many[<class",
"tests/test_item.py::test_field_extract_with_default[<class",
"tests/test_item.py::test_field_extract_without_default[<class",
"tests/test_item.py::test_field_parameters_conflict",
"tests/test_item.py::test_field_xpath_extract_result_not_list",
"tests/test_item.py::test_field_xpath_extract_result_not_list_conflict_with_is_many",
"tests/test_item.py::test_item_extract",
"tests/test_item.py::test_item_extract_without_is_many",
"tests/test_item.py::test_item_extract_failure_when_last_field_missing",
"tests/test_item.py::test_item_extract_success_without_is_many_when_last_field_missing",
"tests/test_item.py::test_complex_item_extract_xml_data",
"tests/test_item.py::test_misplacing"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-18 15:21:16+00:00
|
mit
| 3,615 |
|
linw1995__data_extractor-22
|
diff --git a/data_extractor/abc.py b/data_extractor/abc.py
index f26a4a2..865a7c3 100644
--- a/data_extractor/abc.py
+++ b/data_extractor/abc.py
@@ -26,7 +26,10 @@ class ComplexExtractorMeta(type):
for key, attr in attr_dict.items():
if isinstance(type(attr), ComplexExtractorMeta):
- if key in __init_args:
+ if key in __init_args[1:]:
+ # Item's attribute overwrites the 'Item.__init__' parameters except first parameter.
+ args = [] # type: List[Any]
+ exc_args = None
frame = inspect.currentframe()
assert (
frame is not None
@@ -36,28 +39,54 @@ class ComplexExtractorMeta(type):
filename = outer_frame.f_code.co_filename
firstlineno = outer_frame.f_lineno
- lines, _ = inspect.findsource(outer_frame)
+ firstline_idx = firstlineno - 1
+ lines = None
+ try:
+ lines, _ = inspect.findsource(outer_frame)
+ except OSError:
+ # can't get the source code from python repl
+ pass
+
+ if lines is not None:
+ start_index = inspect.indentsize(lines[firstline_idx])
+ for lineno, line in enumerate(
+ lines[firstline_idx + 1 :], start=firstlineno + 1
+ ):
+ # iterate line in the code block body
+ cur_index = inspect.indentsize(line)
+ if cur_index <= start_index:
+ # reach end of the code block, use code block firstlineno as SyntaxError.lineno
+ line = lines[firstline_idx]
+ lineno = firstlineno
+ break
+
+ if line.lstrip().startswith(key):
+ # find the line as SyntaxError.text
+ break
+
+ else:
+ # reach EOF, use code block firstlineno as SyntaxError.lineno
+ line = lines[firstline_idx]
+ lineno = firstlineno
+
+ offset = inspect.indentsize(line)
+ line = line.strip()
+ exc_args = (filename, lineno, offset, line)
+ else:
+ line = f"{key}={attr!r}"
- for lineno, line in enumerate(lines[firstlineno:], start=1):
- if line.strip().startswith(key):
- break
- else: # pragma: no cover
- assert False, "This line is never executed."
-
- lineno += firstlineno
- index = inspect.indentsize(line)
finally:
del outer_frame
del frame
- line = line.strip()
- raise SyntaxError(
- (
- f"{line!r} overwriten the parameter {key!r} of '{name}.__init__' method. "
- f"Please using the optional parameter name={key!r} in {attr!r} to avoid overwriting parameter name."
- ),
- (filename, lineno, index, line),
+ args.append(
+ f"{line!r} overwriten the parameter {key!r} of '{name}.__init__' method. "
+ f"Please using the optional parameter name={key!r} in {attr!r} to avoid overwriting parameter name."
)
+ if exc_args is not None:
+ args.append(exc_args)
+
+ raise SyntaxError(*args)
field_names.append(key)
|
linw1995/data_extractor
|
6315f8913b2776832d4e6aab25e02c47194b1d00
|
diff --git a/tests/test_item.py b/tests/test_item.py
index 13fe5a7..801bd6e 100644
--- a/tests/test_item.py
+++ b/tests/test_item.py
@@ -1,5 +1,6 @@
# Standard Library
import inspect
+import linecache
from pathlib import Path
@@ -316,7 +317,7 @@ def test_misplacing():
Field(extractor=ComplexExtractor(extractor=JSONExtractor("users[*]")))
-def test_field_name_overwrite_item_parameter():
+def test_field_name_overwrite_item_parameter_common():
with pytest.raises(SyntaxError) as catch:
class User(Item):
@@ -330,6 +331,99 @@ def test_field_name_overwrite_item_parameter():
assert exc.text == 'name = Field(JSONExtractor("name"))'
+def test_field_name_overwrite_item_parameter_oneline():
+ with pytest.raises(SyntaxError) as catch:
+ # fmt: off
+ class Parameter(Item): name = Field(XPathExtractor("./span[@class='name']")) # noqa
+ # fmt: on
+
+ exc = catch.value
+ assert exc.filename == __file__
+ assert exc.lineno == inspect.currentframe().f_lineno - 5
+ assert exc.offset == 8
+ assert (
+ exc.text
+ == "class Parameter(Item): name = Field(XPathExtractor(\"./span[@class='name']\")) # noqa"
+ )
+
+
+def test_field_name_overwrite_item_parameter_type_creation():
+ with pytest.raises(SyntaxError) as catch:
+ # fmt: off
+ type("Parameter", (Item,), {"name": Field(XPathExtractor("./span[@class='name']"))})
+ # fmt: on
+
+ exc = catch.value
+ assert exc.filename == __file__
+ assert exc.lineno == inspect.currentframe().f_lineno - 5
+ assert exc.offset == 8
+ assert (
+ exc.text
+ == 'type("Parameter", (Item,), {"name": Field(XPathExtractor("./span[@class=\'name\']"))})'
+ )
+
+
+source_codes = [
+ 'type("Parameter", (Item,), {"name": Field(XPathExtractor("./span[@class=\'name\']"))})',
+ "class Parameter(Item): name = Field(XPathExtractor(\"./span[@class='name']\")) # noqa",
+ """class User(Item):
+ uid = Field(JSONExtractor("id")); name = Field(JSONExtractor("name"))
+ """,
+ """
+class User(Item):
+ uid = Field(JSONExtractor("id"))
+ name = Field(JSONExtractor("name"))
+ """,
+]
+
+
[email protected]("source_code", source_codes)
+def test_field_name_overwrite_item_parameter_in_repl(source_code):
+ with pytest.raises(SyntaxError) as catch:
+ exec(source_code)
+
+ exc = catch.value
+ assert exc.filename is None
+ assert exc.lineno is None
+ assert exc.offset is None
+ assert exc.text is None
+
+
[email protected]("source_code", source_codes[:-1])
+def test_field_name_overwrite_item_parameter_oneline_in_script(source_code, tmp_path):
+ tmp_file = tmp_path / "foo.py"
+ tmp_file.write_text(source_code)
+ tmp_file = str(tmp_file)
+ linecache.updatecache(tmp_file)
+
+ with pytest.raises(SyntaxError) as catch:
+ exec(compile(source_code, tmp_file, "exec"))
+
+ exc = catch.value
+ assert exc.filename == tmp_file
+ assert exc.lineno == 1
+ assert exc.offset == 0
+ assert exc.text == source_code.split("\n")[0].strip()
+
+
+def test_field_name_overwrite_item_parameter_common_in_script(tmp_path):
+ source_code = source_codes[-1]
+
+ tmp_file = tmp_path / "foo.py"
+ tmp_file.write_text(source_code)
+ tmp_file = str(tmp_file)
+ linecache.updatecache(tmp_file)
+
+ with pytest.raises(SyntaxError) as catch:
+ exec(compile(source_code, tmp_file, "exec"))
+
+ exc = catch.value
+ assert exc.filename == tmp_file
+ assert exc.lineno == 4
+ assert exc.offset == 4
+ assert exc.text == 'name = Field(JSONExtractor("name"))'
+
+
def test_avoid_field_name_overwriting_item_parameter(json0):
data = json0
|
Can't locate SyntaxError.text from source code.
```python3
class Parameter(Item): name = Field(XPathExtractor("./span[@class='name']"))
```
```python3
class User(Item):
uid = Field(JSONExtractor("id")); name = Field(JSONExtractor("name"))
```
```python3
Parameter = type(
"Parameter", (Item,), {"name": Field(XPathExtractor("./span[@class='name']"))}
)
```
This line raises an `AssertionError` when the code above is executed.
https://github.com/linw1995/data_extractor/blob/99a4a7f9ca7908d9e15399a8cfb96a50af7a1f01/data_extractor/abc.py#L45
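For context, a minimal sketch (not part of the issue) of why source lookup also has to tolerate REPL/`exec` code, as the patch above does — there is no cached source behind a fake filename, so `inspect.findsource` raises `OSError`:

```python
code = '''
import inspect
frame = inspect.currentframe()
try:
    inspect.findsource(frame)   # "<repl>" has no backing file
except OSError as exc:
    print("no source available:", exc)
'''
# Compiling with a fake filename mimics code typed into the REPL.
exec(compile(code, "<repl>", "exec"))
```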
|
0.0
|
6315f8913b2776832d4e6aab25e02c47194b1d00
|
[
"tests/test_item.py::test_field_name_overwrite_item_parameter_oneline",
"tests/test_item.py::test_field_name_overwrite_item_parameter_type_creation",
"tests/test_item.py::test_field_name_overwrite_item_parameter_in_repl[type(\"Parameter\",",
"tests/test_item.py::test_field_name_overwrite_item_parameter_in_repl[class",
"tests/test_item.py::test_field_name_overwrite_item_parameter_in_repl[\\nclass",
"tests/test_item.py::test_field_name_overwrite_item_parameter_oneline_in_script[type(\"Parameter\",",
"tests/test_item.py::test_field_name_overwrite_item_parameter_oneline_in_script[class"
] |
[
"tests/test_item.py::test_field_extract[<class",
"tests/test_item.py::test_field_extract_with_is_many[<class",
"tests/test_item.py::test_field_extract_with_default[<class",
"tests/test_item.py::test_field_extract_without_default[<class",
"tests/test_item.py::test_field_parameters_conflict",
"tests/test_item.py::test_field_xpath_extract_result_not_list",
"tests/test_item.py::test_field_xpath_extract_result_not_list_conflict_with_is_many",
"tests/test_item.py::test_item_extract",
"tests/test_item.py::test_item_extract_without_is_many",
"tests/test_item.py::test_item_extract_failure_when_last_field_missing",
"tests/test_item.py::test_item_extract_success_without_is_many_when_last_field_missing",
"tests/test_item.py::test_complex_item_extract_xml_data",
"tests/test_item.py::test_complex_item_extract_json_data",
"tests/test_item.py::test_misplacing",
"tests/test_item.py::test_field_name_overwrite_item_parameter_common",
"tests/test_item.py::test_field_name_overwrite_item_parameter_common_in_script",
"tests/test_item.py::test_avoid_field_name_overwriting_item_parameter",
"tests/test_item.py::test_special_field_name",
"tests/test_item.py::test_special_field_name_in_the_nested_class_definition"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-05-20 16:29:20+00:00
|
mit
| 3,616 |
|
lipoja__URLExtract-134
|
diff --git a/urlextract/urlextract_core.py b/urlextract/urlextract_core.py
index 0f5bc0a..8f7d5c8 100644
--- a/urlextract/urlextract_core.py
+++ b/urlextract/urlextract_core.py
@@ -488,6 +488,9 @@ class URLExtract(CacheFile):
# URL should not start with two backslashes
if complete_url.startswith("//"):
complete_url = complete_url[2:]
+ # URL should not start with unreserved characters
+ if complete_url.startswith(("-", ".", "~", "_")):
+ complete_url = complete_url[1:]
if not self._is_domain_valid(
complete_url, tld, check_dns=check_dns, with_schema_only=with_schema_only
):
|
lipoja/URLExtract
|
6bc2f30fb09ec8eac1fb9c2f1b1435174d827f99
|
diff --git a/tests/unit/test_find_urls.py b/tests/unit/test_find_urls.py
index 03b6c17..d776bbd 100644
--- a/tests/unit/test_find_urls.py
+++ b/tests/unit/test_find_urls.py
@@ -37,8 +37,8 @@ import pytest
["https://example.com/what.com"],
),
(
- "https://i2.wp.com/siliconfilter.com/2011/06/example.jpg",
- ["https://i2.wp.com/siliconfilter.com/2011/06/example.jpg"],
+ "* test link -https://www.example.com",
+ ["https://www.example.com"],
),
(
"https://www.test.org/paper/apostrophe'in-url",
|
Does not extract a URL that has a leading special character
Hi,
I am trying to extract the URL from the following text, but it is not grabbing the link because of the leading hyphen.
```python
EncodedText = "* test link -https://www.google.com"
extractor = URLExtract()
urls = extractor.find_urls(EncodedText)
print(urls)
```
Output:
```
[]
```
Do you have any idea how to fix this?
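For reference, a minimal sketch of the fix merged above: before validating the candidate, strip a single leading unreserved character (`-`, `.`, `~`, `_`) from the completed URL. The variable names here are illustrative, not URLExtract's internals:

```python
# Candidate as found in the text, with the hyphen glued to the scheme.
complete_url = "-https://www.example.com"

# A URL should not start with an unreserved character.
if complete_url.startswith(("-", ".", "~", "_")):
    complete_url = complete_url[1:]

print(complete_url)  # https://www.example.com
```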
|
0.0
|
6bc2f30fb09ec8eac1fb9c2f1b1435174d827f99
|
[
"tests/unit/test_find_urls.py::test_find_urls[*"
] |
[
"tests/unit/test_find_urls.py::test_find_urls[Let's",
"tests/unit/test_find_urls.py::test_find_urls[Dot",
"tests/unit/test_find_urls.py::test_find_urls[URL",
"tests/unit/test_find_urls.py::test_find_urls[ukrainian",
"tests/unit/test_find_urls.py::test_find_urls[<a",
"tests/unit/test_find_urls.py::test_find_urls[https://bladomain.com/bla/?cid=74530889&h=bladomain.com-expected7]",
"tests/unit/test_find_urls.py::test_find_urls[Hey",
"tests/unit/test_find_urls.py::test_find_urls[https://www.test.org/paper/apostrophe'in-url-expected10]",
"tests/unit/test_find_urls.py::test_find_urls[http://aa.com/b.html",
"tests/unit/test_find_urls.py::test_find_urls[http://0.0.0.0/a.io-expected12]",
"tests/unit/test_find_urls.py::test_find_urls[http://123.56.234.210/struts_action.do-expected13]",
"tests/unit/test_find_urls.py::test_find_urls[<script",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.cOM-expected0]",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.COM-expected1]",
"tests/unit/test_find_urls.py::test_find_urls_unique[http://unique.com",
"tests/unit/test_find_urls.py::test_find_urls_unique[Get",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Let's",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Some",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Let's",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Without"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-21 18:11:49+00:00
|
mit
| 3,617 |
|
lipoja__URLExtract-135
|
diff --git a/urlextract/urlextract_core.py b/urlextract/urlextract_core.py
index 8f7d5c8..dec043c 100644
--- a/urlextract/urlextract_core.py
+++ b/urlextract/urlextract_core.py
@@ -577,6 +577,10 @@ class URLExtract(CacheFile):
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
+ # authority can't start with @
+ if url_parts.authority.startswith('@'):
+ return False
+
# if URI contains user info and schema was automatically added
# the url is probably an email
if url_parts.getuserinfo() and added_schema:
|
lipoja/URLExtract
|
1159482fca5c2db82a987b68c44d172135741d1f
|
diff --git a/tests/unit/test_extract_email.py b/tests/unit/test_extract_email.py
index d8c12e6..77352dd 100644
--- a/tests/unit/test_extract_email.py
+++ b/tests/unit/test_extract_email.py
@@ -34,6 +34,7 @@ def test_extract_email_disabled(urlextract, text, expected):
[
("Do not extract emails by default [email protected]", ["[email protected]"]),
("<[email protected]>", ["[email protected]"]),
+ ("whitespace @address.net>", []),
("Given URIs are not mail [email protected]/asdasd [email protected]:1234", []),
("Given URIs are not mail [email protected]?not [email protected]#not", []),
],
diff --git a/tests/unit/test_find_urls.py b/tests/unit/test_find_urls.py
index d776bbd..783ca0c 100644
--- a/tests/unit/test_find_urls.py
+++ b/tests/unit/test_find_urls.py
@@ -57,6 +57,7 @@ import pytest
"<script src='//www.example.com/somejsfile.js'>",
["www.example.com/somejsfile.js"],
),
+ ("bad.email @address.net>", ['bad.email']),
],
)
def test_find_urls(urlextract, text, expected):
|
should not grab email fragments
```
>>> from urlextract import URLExtract
>>> extractor = URLExtract()
>>> extractor.find_urls("@gmail.com")
['@gmail.com']
>>> extractor.find_urls("bad.email @gmail.com")
['bad.email', '@gmail.com']
```
It should not be grabbing `@gmail.com` at all.
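A minimal sketch (assuming `uritools`' splitting behavior) of the check added above: once a schema is prepended, an email fragment parses with an authority that starts with `@`, so rejecting that shape filters these out:

```python
import uritools

# '@gmail.com' becomes 'http://@gmail.com' after the automatic schema.
parts = uritools.urisplit("http://@gmail.com")
print(parts.authority)                  # '@gmail.com'
print(parts.authority.startswith("@"))  # True -> reject as an invalid domain
```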
|
0.0
|
1159482fca5c2db82a987b68c44d172135741d1f
|
[
"tests/unit/test_extract_email.py::test_extract_email_enabled[whitespace",
"tests/unit/test_find_urls.py::test_find_urls[bad.email"
] |
[
"tests/unit/test_extract_email.py::test_extract_email_disabled[URI",
"tests/unit/test_extract_email.py::test_extract_email_disabled[<[email protected]>-expected1]",
"tests/unit/test_extract_email.py::test_extract_email_disabled[Do",
"tests/unit/test_extract_email.py::test_extract_email_enabled[Do",
"tests/unit/test_extract_email.py::test_extract_email_enabled[<[email protected]>-expected1]",
"tests/unit/test_extract_email.py::test_extract_email_enabled[Given",
"tests/unit/test_find_urls.py::test_find_urls[Let's",
"tests/unit/test_find_urls.py::test_find_urls[Dot",
"tests/unit/test_find_urls.py::test_find_urls[URL",
"tests/unit/test_find_urls.py::test_find_urls[ukrainian",
"tests/unit/test_find_urls.py::test_find_urls[<a",
"tests/unit/test_find_urls.py::test_find_urls[https://bladomain.com/bla/?cid=74530889&h=bladomain.com-expected7]",
"tests/unit/test_find_urls.py::test_find_urls[Hey",
"tests/unit/test_find_urls.py::test_find_urls[*",
"tests/unit/test_find_urls.py::test_find_urls[https://www.test.org/paper/apostrophe'in-url-expected10]",
"tests/unit/test_find_urls.py::test_find_urls[http://aa.com/b.html",
"tests/unit/test_find_urls.py::test_find_urls[http://0.0.0.0/a.io-expected12]",
"tests/unit/test_find_urls.py::test_find_urls[http://123.56.234.210/struts_action.do-expected13]",
"tests/unit/test_find_urls.py::test_find_urls[<script",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.cOM-expected0]",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.COM-expected1]",
"tests/unit/test_find_urls.py::test_find_urls_unique[http://unique.com",
"tests/unit/test_find_urls.py::test_find_urls_unique[Get",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Let's",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Some",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Let's",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Without"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-22 12:45:57+00:00
|
mit
| 3,618 |
|
lipoja__URLExtract-138
|
diff --git a/urlextract/urlextract_core.py b/urlextract/urlextract_core.py
index 5fb3ee7..6d70d2c 100644
--- a/urlextract/urlextract_core.py
+++ b/urlextract/urlextract_core.py
@@ -578,7 +578,7 @@ class URLExtract(CacheFile):
# <scheme>://<authority>/<path>?<query>#<fragment>
# authority can't start with @
- if url_parts.authority.startswith('@'):
+ if url_parts.authority and url_parts.authority.startswith('@'):
return False
# if URI contains user info and schema was automatically added
|
lipoja/URLExtract
|
1f872e8126b66f369ba3be8f4933890b3193c688
|
diff --git a/tests/unit/test_find_urls.py b/tests/unit/test_find_urls.py
index 783ca0c..90250e6 100644
--- a/tests/unit/test_find_urls.py
+++ b/tests/unit/test_find_urls.py
@@ -58,6 +58,7 @@ import pytest
["www.example.com/somejsfile.js"],
),
("bad.email @address.net>", ['bad.email']),
+ ('[[ "$(giturl)" =~ ^https://gitlab.com ]] echo "found" || echo "didnt', []),
],
)
def test_find_urls(urlextract, text, expected):
|
urlextract without authority causes AttributeError
Hi, ever since 1.7.0 (in particular, it looks like #135), some URLs cause an `AttributeError` because the authority is `None`:
on 1.6.0:
```
In [1]: text = '[[ "$(giturl)" =~ ^https://gitlab.com ]] echo "found" || echo "didnt'
In [2]: import urlextract
In [3]: u = urlextract.URLExtract()
In [4]: list(u.gen_urls(text))
Out[4]: []
```
(I am not talking about this not finding the URL, just about this throwing an error)
on 1.7.0:
```
In [1]: text = '[[ "$(giturl)" =~ ^https://gitlab.com ]] echo "found" || echo "didnt'
In [2]: import urlextract
...:
In [3]: u = urlextract.URLExtract()
...:
In [4]: list(u.gen_urls(text))
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In [4], line 1
----> 1 list(u.gen_urls(text))
File ~/.local/lib/python3.10/site-packages/urlextract/urlextract_core.py:792, in URLExtract.gen_urls(self, text, check_dns, get_indices, with_schema_only)
790 validated = self._validate_tld_match(text, tld, offset + tld_pos)
791 if tld_pos != -1 and validated:
--> 792 tmp_url = self._complete_url(
793 text,
794 offset + tld_pos,
795 tld,
796 check_dns=check_dns,
797 with_schema_only=with_schema_only,
798 )
800 if tmp_url:
801 # do not search for TLD in already extracted URL
802 tld_pos_url = self._get_tld_pos(tmp_url, tld)
File ~/.local/lib/python3.10/site-packages/urlextract/urlextract_core.py:494, in URLExtract._complete_url(self, text, tld_pos, tld, check_dns, with_schema_only)
492 if complete_url.startswith(("-", ".", "~", "_")):
493 complete_url = complete_url[1:]
--> 494 if not self._is_domain_valid(
495 complete_url, tld, check_dns=check_dns, with_schema_only=with_schema_only
496 ):
497 return ""
499 return complete_url
File ~/.local/lib/python3.10/site-packages/urlextract/urlextract_core.py:581, in URLExtract._is_domain_valid(self, url, tld, check_dns, with_schema_only)
577 url_parts = uritools.urisplit(url)
578 # <scheme>://<authority>/<path>?<query>#<fragment>
579
580 # authority can't start with @
--> 581 if url_parts.authority.startswith('@'):
582 return False
584 # if URI contains user info and schema was automatically added
585 # the url is probably an email
AttributeError: 'NoneType' object has no attribute 'startswith'
```
I believe we would need to check whether `url_parts.authority` is `None` before checking for `@`.
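A minimal sketch (assuming `uritools`' splitting behavior) of why the guard in the patch above is needed: for a candidate with no `//`, `urisplit` has no authority component at all, so `.authority` is `None` rather than a string:

```python
import uritools

parts = uritools.urisplit("gitlab.com")  # no '//', so no authority component
print(parts.authority)  # None

# Guarding on truthiness first avoids calling .startswith on None:
if parts.authority and parts.authority.startswith("@"):
    print("reject")
```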
|
0.0
|
1f872e8126b66f369ba3be8f4933890b3193c688
|
[
"tests/unit/test_find_urls.py::test_find_urls[[["
] |
[
"tests/unit/test_find_urls.py::test_find_urls[Let's",
"tests/unit/test_find_urls.py::test_find_urls[Dot",
"tests/unit/test_find_urls.py::test_find_urls[URL",
"tests/unit/test_find_urls.py::test_find_urls[ukrainian",
"tests/unit/test_find_urls.py::test_find_urls[<a",
"tests/unit/test_find_urls.py::test_find_urls[https://bladomain.com/bla/?cid=74530889&h=bladomain.com-expected7]",
"tests/unit/test_find_urls.py::test_find_urls[Hey",
"tests/unit/test_find_urls.py::test_find_urls[*",
"tests/unit/test_find_urls.py::test_find_urls[https://www.test.org/paper/apostrophe'in-url-expected10]",
"tests/unit/test_find_urls.py::test_find_urls[http://aa.com/b.html",
"tests/unit/test_find_urls.py::test_find_urls[http://0.0.0.0/a.io-expected12]",
"tests/unit/test_find_urls.py::test_find_urls[http://123.56.234.210/struts_action.do-expected13]",
"tests/unit/test_find_urls.py::test_find_urls[<script",
"tests/unit/test_find_urls.py::test_find_urls[bad.email",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.cOM-expected0]",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.COM-expected1]",
"tests/unit/test_find_urls.py::test_find_urls_unique[http://unique.com",
"tests/unit/test_find_urls.py::test_find_urls_unique[Get",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Let's",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Some",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Let's",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Without"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-25 18:50:16+00:00
|
mit
| 3,619 |
|
lipoja__URLExtract-140
|
diff --git a/urlextract/urlextract_core.py b/urlextract/urlextract_core.py
index b6b3767..8283024 100644
--- a/urlextract/urlextract_core.py
+++ b/urlextract/urlextract_core.py
@@ -760,8 +760,11 @@ class URLExtract(CacheFile):
url_parts = uritools.urisplit(tpm_url)
host = str(url_parts.gethost())
- offset = url.find(host)
- return host.rfind(tld) + offset
+ # `host` is always returned in lowercase,
+ # so make sure `url` & `tld` must also be lowercase,
+ # otherwise the `find()` may fail.
+ offset = url.lower().find(host)
+ return host.rfind(tld.lower()) + offset
# TODO: move type assertion to be Generator based
# found https://stackoverflow.com/a/38423388/14669675
|
lipoja/URLExtract
|
d2071f4a0f497898ae4d763d43de4c380897470f
|
diff --git a/tests/unit/test_find_urls.py b/tests/unit/test_find_urls.py
index 90250e6..9ee46eb 100644
--- a/tests/unit/test_find_urls.py
+++ b/tests/unit/test_find_urls.py
@@ -112,10 +112,10 @@ def test_find_urls_unique(urlextract, text, expected):
"text, expected",
[
(
- "Let's have URL http://janlipovsky.cz and a second URL https://example.com/@eon01/asdsd-dummy it's over.",
+ "Let's have URL http://janlipovsky.cz and a second URL https://Example.Com/@eon01/asdsd-dummy it's over.",
[
("http://janlipovsky.cz", (15, 36)),
- ("https://example.com/@eon01/asdsd-dummy", (54, 92)),
+ ("https://Example.Com/@eon01/asdsd-dummy", (54, 92)),
],
),
(
diff --git a/tests/unit/test_get_tld_pos.py b/tests/unit/test_get_tld_pos.py
new file mode 100644
index 0000000..64997cd
--- /dev/null
+++ b/tests/unit/test_get_tld_pos.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+This file contains pytests for _get_tld_pos() method of URLExtract
+
+.. Licence MIT
+.. codeauthor:: Jan Lipovský <[email protected]>, janlipovsky.cz
+"""
+import pytest
+
+
[email protected](
+ "url, tld, expected",
+ [
+ ("httpbin.org/status/200", ".org", 7),
+ ("https://httpbin.org/status/200", ".org", 15),
+ ("caseInsensitive.cOM", ".cOM", 15),
+ ("https://caseInsensitive.COM/status/200", ".COM", 23),
+ ],
+)
+def test_get_ltd_pos(urlextract, url, tld, expected):
+ """
+ Testing _get_tld_pos returning index
+
+ :param fixture urlextract: fixture holding URLExtract object
+ :param str url: URL in which tld should be located
+ :param str tld: TLD we want to find
+ :param int expected: index of tld that has be found in url
+ """
+ assert urlextract._get_tld_pos(url, tld) == expected
|
Wrong indices with uppercase characters in domain name
I am getting wrong indices when the domain name of a URL contains uppercase characters.
To reproduce:
```
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("www.Google.com", get_indices=True)
print(urls[0])
urls = extractor.find_urls("www.google.com", get_indices=True)
print(urls[0])
```
Output is:
```
('www.Google.com', (1, 15))
('www.google.com', (0, 14))
```
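The root cause is a case mismatch: `uritools` returns the host lowercased, so `str.find` on the original-case URL misses, which corrupts the computed offset. A minimal sketch (names illustrative):

```python
url = "www.Google.com"
host = "www.google.com"  # uritools' gethost() lowercases the host

print(url.find(host))          # -1: not found, so the offset is wrong
print(url.lower().find(host))  # 0: matches after lowercasing, as in the fix
```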
|
0.0
|
d2071f4a0f497898ae4d763d43de4c380897470f
|
[
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Let's",
"tests/unit/test_get_tld_pos.py::test_get_ltd_pos[caseInsensitive.cOM-.cOM-15]",
"tests/unit/test_get_tld_pos.py::test_get_ltd_pos[https://caseInsensitive.COM/status/200-.COM-23]"
] |
[
"tests/unit/test_find_urls.py::test_find_urls[Let's",
"tests/unit/test_find_urls.py::test_find_urls[Dot",
"tests/unit/test_find_urls.py::test_find_urls[URL",
"tests/unit/test_find_urls.py::test_find_urls[ukrainian",
"tests/unit/test_find_urls.py::test_find_urls[<a",
"tests/unit/test_find_urls.py::test_find_urls[https://bladomain.com/bla/?cid=74530889&h=bladomain.com-expected7]",
"tests/unit/test_find_urls.py::test_find_urls[Hey",
"tests/unit/test_find_urls.py::test_find_urls[*",
"tests/unit/test_find_urls.py::test_find_urls[https://www.test.org/paper/apostrophe'in-url-expected10]",
"tests/unit/test_find_urls.py::test_find_urls[http://aa.com/b.html",
"tests/unit/test_find_urls.py::test_find_urls[http://0.0.0.0/a.io-expected12]",
"tests/unit/test_find_urls.py::test_find_urls[http://123.56.234.210/struts_action.do-expected13]",
"tests/unit/test_find_urls.py::test_find_urls[<script",
"tests/unit/test_find_urls.py::test_find_urls[bad.email",
"tests/unit/test_find_urls.py::test_find_urls[[[",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.cOM-expected0]",
"tests/unit/test_find_urls.py::test_find_urls_case_insensitive[http://caseInsensitive.COM-expected1]",
"tests/unit/test_find_urls.py::test_find_urls_unique[http://unique.com",
"tests/unit/test_find_urls.py::test_find_urls_unique[Get",
"tests/unit/test_find_urls.py::test_find_urls_with_indices[Some",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Let's",
"tests/unit/test_find_urls.py::test_find_urls_schema_only[Without",
"tests/unit/test_get_tld_pos.py::test_get_ltd_pos[httpbin.org/status/200-.org-7]",
"tests/unit/test_get_tld_pos.py::test_get_ltd_pos[https://httpbin.org/status/200-.org-15]"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-04 08:58:02+00:00
|
mit
| 3,620 |
|
litl__backoff-161
|
diff --git a/backoff/_sync.py b/backoff/_sync.py
index ecc592d..1c54ecc 100644
--- a/backoff/_sync.py
+++ b/backoff/_sync.py
@@ -28,11 +28,8 @@ def retry_predicate(target, wait_gen, predicate,
@functools.wraps(target)
def retry(*args, **kwargs):
-
- # update variables from outer function args
- nonlocal max_tries, max_time
- max_tries = _maybe_call(max_tries)
- max_time = _maybe_call(max_time)
+ max_tries_value = _maybe_call(max_tries)
+ max_time_value = _maybe_call(max_time)
tries = 0
start = datetime.datetime.now()
@@ -50,9 +47,9 @@ def retry_predicate(target, wait_gen, predicate,
ret = target(*args, **kwargs)
if predicate(ret):
- max_tries_exceeded = (tries == max_tries)
+ max_tries_exceeded = (tries == max_tries_value)
max_time_exceeded = (max_time is not None and
- elapsed >= max_time)
+ elapsed >= max_time_value)
if max_tries_exceeded or max_time_exceeded:
_call_handlers(on_giveup, **details, value=ret)
@@ -86,11 +83,8 @@ def retry_exception(target, wait_gen, exception,
@functools.wraps(target)
def retry(*args, **kwargs):
-
- # update variables from outer function args
- nonlocal max_tries, max_time
- max_tries = _maybe_call(max_tries)
- max_time = _maybe_call(max_time)
+ max_tries_value = _maybe_call(max_tries)
+ max_time_value = _maybe_call(max_time)
tries = 0
start = datetime.datetime.now()
@@ -109,9 +103,9 @@ def retry_exception(target, wait_gen, exception,
try:
ret = target(*args, **kwargs)
except exception as e:
- max_tries_exceeded = (tries == max_tries)
+ max_tries_exceeded = (tries == max_tries_value)
max_time_exceeded = (max_time is not None and
- elapsed >= max_time)
+ elapsed >= max_time_value)
if giveup(e) or max_tries_exceeded or max_time_exceeded:
_call_handlers(on_giveup, **details)
|
litl/backoff
|
92e7b6e910accee30118a6f3d2cef4c62caa38f5
|
diff --git a/tests/test_backoff.py b/tests/test_backoff.py
index e6b3657..4e70fec 100644
--- a/tests/test_backoff.py
+++ b/tests/test_backoff.py
@@ -548,6 +548,29 @@ def test_on_exception_callable_max_tries(monkeypatch):
assert len(log) == 3
+def test_on_exception_callable_max_tries_reads_every_time(monkeypatch):
+ monkeypatch.setattr('time.sleep', lambda x: None)
+
+ lookups = []
+ def lookup_max_tries():
+ lookups.append(True)
+ return 3
+
+ @backoff.on_exception(backoff.constant,
+ ValueError,
+ max_tries=lookup_max_tries)
+ def exceptor():
+ raise ValueError()
+
+ with pytest.raises(ValueError):
+ exceptor()
+
+ with pytest.raises(ValueError):
+ exceptor()
+
+ assert len(lookups) == 2
+
+
def test_on_exception_callable_gen_kwargs():
def lookup_foo():
diff --git a/tests/test_backoff_async.py b/tests/test_backoff_async.py
index d8219fd..341a6b8 100644
--- a/tests/test_backoff_async.py
+++ b/tests/test_backoff_async.py
@@ -571,6 +571,31 @@ async def test_on_exception_callable_max_tries(monkeypatch):
assert len(log) == 3
[email protected]
+async def test_on_exception_callable_max_tries_reads_every_time(monkeypatch):
+ monkeypatch.setattr('asyncio.sleep', _await_none)
+
+ lookups = []
+ def lookup_max_tries():
+ lookups.append(True)
+ return 3
+
+ @backoff.on_exception(backoff.constant,
+ ValueError,
+ max_tries=lookup_max_tries)
+ def exceptor():
+ raise ValueError()
+
+ with pytest.raises(ValueError):
+ exceptor()
+
+ with pytest.raises(ValueError):
+ exceptor()
+
+ assert len(lookups) == 2
+
+
+
@pytest.mark.asyncio
async def test_on_exception_callable_gen_kwargs():
|
Behavior change when reading max_tries and max_time from callables
Hi there!
Using a callable to read `max_tries` and `max_time` was really useful for querying an application config object.
Every time the function was called, the value would be read again and so it remained configurable.
After updating to version 2+, we realized that `max_tries` and `max_time` are read from the callable once and then cached for every subsequent call of the function. We saw this when some unit tests verifying that the application reloads the config correctly broke.
This was a real use case for us, and we're pinned to the previous version, as we cannot recreate every object that has a decorated function on every config reload, so we'd consider this a regression of this nice functionality :)
```diff
-backoff==1.10.0
+backoff==2.0.1
```
This minimal example reproduces the problem, the output shows the difference when running in those two versions:
```python
import backoff

CONFIG = {'max_tries': 1}

def _from_config():
    print('Reading max_tries: ', CONFIG)
    return CONFIG['max_tries']

@backoff.on_exception(
    backoff.expo,
    Exception,
    max_tries=_from_config,
    on_giveup=lambda details: print('Total tries', details['tries']),
)
def function():
    print('Calling function')
    raise Exception('boom!')

print('First attempt', CONFIG)
try:
    function()
except:
    pass

print('----')
CONFIG['max_tries'] = 2  # Config changed

print('Second attempt', CONFIG)
try:
    function()
except:
    pass
```
The output for `1.10.0` shows that the configuration is read again on every function call:
```
First attempt {'max_tries': 1}
Reading max_tries: {'max_tries': 1}
Calling function
Total tries 1
----
Second attempt {'max_tries': 2}
Reading max_tries: {'max_tries': 2}
Calling function
Calling function
Total tries 2
```
While on `2.0.1` this is only made once and then cached for every other execution:
```
First attempt {'max_tries': 1}
Reading max_tries: {'max_tries': 1}
Calling function
Total tries 1
----
Second attempt {'max_tries': 2}
Calling function
Total tries 1
```
As you can see, on the second attempt the function uses the cached `max_tries` and does not read it again.
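For reference, a standalone sketch (independent of backoff's internals) contrasting the two closure patterns: rebinding the closed-over name with `nonlocal` caches the first read, while evaluating into a local re-reads the callable on every call, which is what the fix above does:

```python
def _maybe_call(value):
    return value() if callable(value) else value

def make_cached(max_tries):
    def retry():
        nonlocal max_tries
        max_tries = _maybe_call(max_tries)  # first call replaces the callable
        return max_tries
    return retry

def make_fresh(max_tries):
    def retry():
        return _maybe_call(max_tries)       # callable is consulted every time
    return retry

CONFIG = {'max_tries': 1}
cached = make_cached(lambda: CONFIG['max_tries'])
fresh = make_fresh(lambda: CONFIG['max_tries'])
print(cached(), fresh())  # 1 1
CONFIG['max_tries'] = 2
print(cached(), fresh())  # 1 2 -- the cached variant never sees the update
```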
|
0.0
|
92e7b6e910accee30118a6f3d2cef4c62caa38f5
|
[
"tests/test_backoff.py::test_on_exception_callable_max_tries_reads_every_time",
"tests/test_backoff_async.py::test_on_exception_callable_max_tries_reads_every_time"
] |
[
"tests/test_backoff.py::test_on_predicate",
"tests/test_backoff.py::test_on_predicate_max_tries",
"tests/test_backoff.py::test_on_predicate_max_time",
"tests/test_backoff.py::test_on_exception",
"tests/test_backoff.py::test_on_exception_tuple",
"tests/test_backoff.py::test_on_exception_max_tries",
"tests/test_backoff.py::test_on_exception_constant_iterable",
"tests/test_backoff.py::test_on_exception_success_random_jitter",
"tests/test_backoff.py::test_on_exception_success_full_jitter",
"tests/test_backoff.py::test_on_exception_success",
"tests/test_backoff.py::test_on_exception_giveup[True]",
"tests/test_backoff.py::test_on_exception_giveup[False]",
"tests/test_backoff.py::test_on_exception_giveup_predicate",
"tests/test_backoff.py::test_on_predicate_success",
"tests/test_backoff.py::test_on_predicate_giveup",
"tests/test_backoff.py::test_on_predicate_iterable_handlers",
"tests/test_backoff.py::test_on_exception_success_0_arg_jitter",
"tests/test_backoff.py::test_on_predicate_success_0_arg_jitter",
"tests/test_backoff.py::test_on_exception_callable_max_tries",
"tests/test_backoff.py::test_on_exception_callable_gen_kwargs",
"tests/test_backoff.py::test_on_predicate_in_thread",
"tests/test_backoff.py::test_on_predicate_constant_iterable",
"tests/test_backoff.py::test_on_exception_in_thread",
"tests/test_backoff.py::test_on_exception_logger_default",
"tests/test_backoff.py::test_on_exception_logger_none",
"tests/test_backoff.py::test_on_exception_logger_user",
"tests/test_backoff.py::test_on_exception_logger_user_str",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-50]",
"tests/test_backoff_async.py::test_on_predicate",
"tests/test_backoff_async.py::test_on_predicate_max_tries",
"tests/test_backoff_async.py::test_on_exception",
"tests/test_backoff_async.py::test_on_exception_tuple",
"tests/test_backoff_async.py::test_on_exception_max_tries",
"tests/test_backoff_async.py::test_on_exception_constant_iterable",
"tests/test_backoff_async.py::test_on_exception_success_random_jitter",
"tests/test_backoff_async.py::test_on_exception_success_full_jitter",
"tests/test_backoff_async.py::test_on_exception_success",
"tests/test_backoff_async.py::test_on_exception_giveup[True]",
"tests/test_backoff_async.py::test_on_exception_giveup[False]",
"tests/test_backoff_async.py::test_on_exception_giveup_predicate",
"tests/test_backoff_async.py::test_on_exception_giveup_coro",
"tests/test_backoff_async.py::test_on_predicate_success",
"tests/test_backoff_async.py::test_on_predicate_giveup",
"tests/test_backoff_async.py::test_on_predicate_iterable_handlers",
"tests/test_backoff_async.py::test_on_predicate_constant_iterable",
"tests/test_backoff_async.py::test_on_exception_success_0_arg_jitter",
"tests/test_backoff_async.py::test_on_predicate_success_0_arg_jitter",
"tests/test_backoff_async.py::test_on_exception_callable_max_tries",
"tests/test_backoff_async.py::test_on_exception_callable_gen_kwargs",
"tests/test_backoff_async.py::test_on_exception_coro_cancelling",
"tests/test_backoff_async.py::test_on_predicate_on_regular_function_without_event_loop",
"tests/test_backoff_async.py::test_on_exception_on_regular_function_without_event_loop"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-07 10:30:27+00:00
|
mit
| 3,621 |
|
litl__backoff-172
|
diff --git a/README.rst b/README.rst
index d80f98f..720c3a8 100644
--- a/README.rst
+++ b/README.rst
@@ -288,7 +288,9 @@ In the case of the ``on_exception`` decorator, all ``on_backoff`` and
``on_giveup`` handlers are called from within the except block for the
exception being handled. Therefore exception info is available to the
handler functions via the python standard library, specifically
-``sys.exc_info()`` or the ``traceback`` module.
+``sys.exc_info()`` or the ``traceback`` module. The exception is also
+available at the *exception* key in the `details` dict passed to the
+handlers.
Asynchronous code
-----------------
diff --git a/backoff/_async.py b/backoff/_async.py
index bf01642..0ca7084 100644
--- a/backoff/_async.py
+++ b/backoff/_async.py
@@ -156,7 +156,7 @@ def retry_exception(target, wait_gen, exception,
elapsed >= max_time_value)
if giveup_result or max_tries_exceeded or max_time_exceeded:
- await _call_handlers(on_giveup, **details)
+ await _call_handlers(on_giveup, **details, exception=e)
if raise_on_giveup:
raise
return None
@@ -165,10 +165,10 @@ def retry_exception(target, wait_gen, exception,
seconds = _next_wait(wait, e, jitter, elapsed,
max_time_value)
except StopIteration:
- await _call_handlers(on_giveup, **details)
+ await _call_handlers(on_giveup, **details, exception=e)
raise e
- await _call_handlers(on_backoff, **details, wait=seconds)
+ await _call_handlers(on_backoff, **details, wait=seconds, exception=e)
# Note: there is no convenient way to pass explicit event
# loop to decorator, so here we assume that either default
diff --git a/backoff/_sync.py b/backoff/_sync.py
index 61b93dd..f0bd4a1 100644
--- a/backoff/_sync.py
+++ b/backoff/_sync.py
@@ -109,7 +109,7 @@ def retry_exception(target, wait_gen, exception,
elapsed >= max_time_value)
if giveup(e) or max_tries_exceeded or max_time_exceeded:
- _call_handlers(on_giveup, **details)
+ _call_handlers(on_giveup, **details, exception=e)
if raise_on_giveup:
raise
return None
@@ -118,10 +118,10 @@ def retry_exception(target, wait_gen, exception,
seconds = _next_wait(wait, e, jitter, elapsed,
max_time_value)
except StopIteration:
- _call_handlers(on_giveup, **details)
+ _call_handlers(on_giveup, **details, exception=e)
raise e
- _call_handlers(on_backoff, **details, wait=seconds)
+ _call_handlers(on_backoff, **details, wait=seconds, exception=e)
time.sleep(seconds)
else:
|
litl/backoff
|
24ae1b9d5e29b2770944d431f9e0cca98481f412
|
diff --git a/tests/test_backoff.py b/tests/test_backoff.py
index 25b6eb4..cd33b63 100644
--- a/tests/test_backoff.py
+++ b/tests/test_backoff.py
@@ -299,7 +299,9 @@ def test_on_exception_success():
for i in range(2):
details = backoffs[i]
elapsed = details.pop('elapsed')
+ exception = details.pop('exception')
assert isinstance(elapsed, float)
+ assert isinstance(exception, ValueError)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
@@ -345,7 +347,9 @@ def test_on_exception_giveup(raise_on_giveup):
details = giveups[0]
elapsed = details.pop('elapsed')
+ exception = details.pop('exception')
assert isinstance(elapsed, float)
+ assert isinstance(exception, ValueError)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': exceptor._target,
@@ -517,7 +521,9 @@ def test_on_exception_success_0_arg_jitter(monkeypatch):
for i in range(2):
details = backoffs[i]
elapsed = details.pop('elapsed')
+ exception = details.pop('exception')
assert isinstance(elapsed, float)
+ assert isinstance(exception, ValueError)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
diff --git a/tests/test_backoff_async.py b/tests/test_backoff_async.py
index d2947ad..226ef08 100644
--- a/tests/test_backoff_async.py
+++ b/tests/test_backoff_async.py
@@ -255,7 +255,9 @@ async def test_on_exception_success():
for i in range(2):
details = log['backoff'][i]
elapsed = details.pop('elapsed')
+ exception = details.pop('exception')
assert isinstance(elapsed, float)
+ assert isinstance(exception, ValueError)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
@@ -302,7 +304,9 @@ async def test_on_exception_giveup(raise_on_giveup):
details = log['giveup'][0]
elapsed = details.pop('elapsed')
+ exception = details.pop('exception')
assert isinstance(elapsed, float)
+ assert isinstance(exception, ValueError)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': exceptor._target,
@@ -521,7 +525,9 @@ async def test_on_exception_success_0_arg_jitter(monkeypatch):
for i in range(2):
details = log['backoff'][i]
elapsed = details.pop('elapsed')
+ exception = details.pop('exception')
assert isinstance(elapsed, float)
+ assert isinstance(exception, ValueError)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
|
[Feature Suggestion] - Exception in _call_handlers
It would be great for handlers to know which exception (from the decorator's exception tuple) was thrown in `backoff.on_exception`. Maybe the exception could be passed as an extra kwarg in `_call_handlers`. What do you guys think?
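A hypothetical handler once this lands (assuming the exception is exposed under an `exception` key in the details dict, as the patch above does):

```python
import backoff

def on_backoff(details):
    # 'exception' is the instance that triggered this retry.
    exc = details.get("exception")
    print(f"retry {details['tries']} after {details['wait']:0.1f}s: {exc!r}")

@backoff.on_exception(backoff.expo, ValueError, max_tries=3,
                      on_backoff=on_backoff)
def flaky():
    raise ValueError("boom")
```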
|
0.0
|
24ae1b9d5e29b2770944d431f9e0cca98481f412
|
[
"tests/test_backoff.py::test_on_exception_success",
"tests/test_backoff.py::test_on_exception_giveup[True]",
"tests/test_backoff.py::test_on_exception_giveup[False]",
"tests/test_backoff.py::test_on_exception_success_0_arg_jitter",
"tests/test_backoff_async.py::test_on_exception_success",
"tests/test_backoff_async.py::test_on_exception_giveup[True]",
"tests/test_backoff_async.py::test_on_exception_giveup[False]",
"tests/test_backoff_async.py::test_on_exception_success_0_arg_jitter"
] |
[
"tests/test_backoff.py::test_on_predicate",
"tests/test_backoff.py::test_on_predicate_max_tries",
"tests/test_backoff.py::test_on_predicate_max_time",
"tests/test_backoff.py::test_on_predicate_max_time_callable",
"tests/test_backoff.py::test_on_exception",
"tests/test_backoff.py::test_on_exception_tuple",
"tests/test_backoff.py::test_on_exception_max_tries",
"tests/test_backoff.py::test_on_exception_max_tries_callable",
"tests/test_backoff.py::test_on_exception_constant_iterable",
"tests/test_backoff.py::test_on_exception_success_random_jitter",
"tests/test_backoff.py::test_on_exception_success_full_jitter",
"tests/test_backoff.py::test_on_exception_giveup_predicate",
"tests/test_backoff.py::test_on_predicate_success",
"tests/test_backoff.py::test_on_predicate_giveup",
"tests/test_backoff.py::test_on_predicate_iterable_handlers",
"tests/test_backoff.py::test_on_predicate_success_0_arg_jitter",
"tests/test_backoff.py::test_on_exception_callable_max_tries",
"tests/test_backoff.py::test_on_exception_callable_max_tries_reads_every_time",
"tests/test_backoff.py::test_on_exception_callable_gen_kwargs",
"tests/test_backoff.py::test_on_predicate_in_thread",
"tests/test_backoff.py::test_on_predicate_constant_iterable",
"tests/test_backoff.py::test_on_exception_in_thread",
"tests/test_backoff.py::test_on_exception_logger_default",
"tests/test_backoff.py::test_on_exception_logger_none",
"tests/test_backoff.py::test_on_exception_logger_user",
"tests/test_backoff.py::test_on_exception_logger_user_str",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-10-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-10-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-20-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-20-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-30-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-30-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-40-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-40-50]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-10]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-10]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-20]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-20]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-30]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-30]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-40]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-40]",
"tests/test_backoff.py::test_event_log_levels[_on_predicate_factory-50-50]",
"tests/test_backoff.py::test_event_log_levels[_on_exception_factory-50-50]",
"tests/test_backoff_async.py::test_on_predicate",
"tests/test_backoff_async.py::test_on_predicate_max_tries",
"tests/test_backoff_async.py::test_on_predicate_max_tries_callable",
"tests/test_backoff_async.py::test_on_exception",
"tests/test_backoff_async.py::test_on_exception_tuple",
"tests/test_backoff_async.py::test_on_exception_max_tries",
"tests/test_backoff_async.py::test_on_exception_max_tries_callable",
"tests/test_backoff_async.py::test_on_exception_constant_iterable",
"tests/test_backoff_async.py::test_on_exception_success_random_jitter",
"tests/test_backoff_async.py::test_on_exception_success_full_jitter",
"tests/test_backoff_async.py::test_on_exception_giveup_predicate",
"tests/test_backoff_async.py::test_on_exception_giveup_coro",
"tests/test_backoff_async.py::test_on_predicate_success",
"tests/test_backoff_async.py::test_on_predicate_giveup",
"tests/test_backoff_async.py::test_on_predicate_iterable_handlers",
"tests/test_backoff_async.py::test_on_predicate_constant_iterable",
"tests/test_backoff_async.py::test_on_predicate_success_0_arg_jitter",
"tests/test_backoff_async.py::test_on_exception_callable_max_tries",
"tests/test_backoff_async.py::test_on_exception_callable_max_tries_reads_every_time",
"tests/test_backoff_async.py::test_on_exception_callable_gen_kwargs",
"tests/test_backoff_async.py::test_on_exception_coro_cancelling",
"tests/test_backoff_async.py::test_on_predicate_on_regular_function_without_event_loop",
"tests/test_backoff_async.py::test_on_exception_on_regular_function_without_event_loop"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-19 18:04:24+00:00
|
mit
| 3,622 |
|
lmc2179__bayesian_bootstrap-20
|
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..18c6a84
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,208 @@
+
+# Created by https://www.toptal.com/developers/gitignore/api/macos,python,vim
+# Edit at https://www.toptal.com/developers/gitignore?templates=macos,python,vim
+
+### macOS ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Vim ###
+# Swap
+[._]*.s[a-v][a-z]
+!*.svg # comment out if you don't need vector files
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+
+# End of https://www.toptal.com/developers/gitignore/api/macos,python,vim
diff --git a/README.md b/README.md
index 8a85aa1..286cf45 100644
--- a/README.md
+++ b/README.md
@@ -115,7 +115,7 @@ Users interested in accessing the base models can do so via the `base_models_` a
Interested in contributing? We'd love to have your help! Please keep the following in mind:
-* Bug fixes are welcome! Make sure you reference the issue number that is being resolved, and that all test cases in `tests` pass on both Python 2.7 and 3.4/3.5.
+* Bug fixes are welcome! Make sure you reference the issue number that is being resolved, and that all test cases in `tests` pass.
* New features are welcome as well! Any new features should include docstrings and unit tests in the `tests` directory.
diff --git a/bayesian_bootstrap/__init__.py b/bayesian_bootstrap/__init__.py
index 7b32346..db31a25 100644
--- a/bayesian_bootstrap/__init__.py
+++ b/bayesian_bootstrap/__init__.py
@@ -275,9 +275,7 @@ def central_credible_interval(samples, alpha=0.05):
Returns: Left and right interval bounds (tuple)
"""
- tail_size = int(round(len(samples) * (alpha / 2)))
- samples_sorted = sorted(samples)
- return samples_sorted[tail_size], samples_sorted[-tail_size - 1]
+ return np.quantile(samples, alpha / 2), np.quantile(samples, 1 - alpha / 2)
def highest_density_interval(samples, alpha=0.05):
diff --git a/requirements.txt b/requirements.txt
index 2ad3a69..1b9ae72 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,4 @@
-numpy
-scipy
-pandas
-scikit-learn
-tqdm
\ No newline at end of file
+numpy>=1.22.1
+scipy>=1.7.3
+scikit-learn>=1.0.2
+tqdm>=4.62.3
diff --git a/setup.py b/setup.py
index 6a90645..7abd06f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,18 +1,22 @@
from distutils.core import setup
+
+with open("./requirements.txt") as f:
+ REQUIRES = [line.strip() for line in f]
+
setup(
name = "bayesian_bootstrap",
packages = ["bayesian_bootstrap"],
- version = "1.0.5",
+ version = "1.1.0",
description = "Bayesian Bootstrapping for statistics and regression models",
author = "Louis Cialdella",
author_email = "[email protected]",
url = "https://github.com/lmc2179/bayesian_bootstrap",
download_url = "https://github.com/lmc2179/bayesian_bootstrap/archive/master.zip",
keywords = ["statistics", "bayesian", "machine learning", "bootstrap", "bayes", "probability", "inference"],
+ install_requires=REQUIRES,
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 2",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Libraries :: Python Modules",
@@ -23,4 +27,4 @@ setup(
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
long_description = """bayesian_bootstrap is a package for Bayesian bootstrapping in Python. For more information about this package and its usage, visit https://github.com/lmc2179/bayesian_bootstrap."""
-)
\ No newline at end of file
+)
|
lmc2179/bayesian_bootstrap
|
ce52d8330ec3f8fc450f437e6c293814a7a2990d
|
diff --git a/bayesian_bootstrap/tests/test_bootstrap.py b/bayesian_bootstrap/tests/test_bootstrap.py
index 565cd4e..2a2a5fd 100644
--- a/bayesian_bootstrap/tests/test_bootstrap.py
+++ b/bayesian_bootstrap/tests/test_bootstrap.py
@@ -1,7 +1,6 @@
import unittest
import numpy as np
import scipy
-import random
import bayesian_bootstrap as bb
from bayesian_bootstrap import (
mean,
@@ -14,6 +13,8 @@ from bayesian_bootstrap import (
)
from sklearn.linear_model import LinearRegression
+RNG = np.random.default_rng(1337) # repeatable pseudorandomness
+
class TestMoments(unittest.TestCase):
def test_mean(self):
@@ -23,18 +24,18 @@ class TestMoments(unittest.TestCase):
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
def test_variance(self):
- X = np.random.uniform(-1, 1, 500)
+ X = RNG.uniform(-1, 1, 500)
posterior_samples = var(X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
def test_self_covar(self):
- X = np.random.uniform(-1, 1, 500)
+ X = RNG.uniform(-1, 1, 500)
posterior_samples = covar(X, X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), np.var(X), delta=0.05)
def test_covar(self):
- X = np.random.uniform(-1, 1, 500)
- Y = np.random.uniform(-1, 1, 500)
+ X = RNG.uniform(-1, 1, 500)
+ Y = RNG.uniform(-1, 1, 500)
posterior_samples = covar(X, Y, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.05)
@@ -48,25 +49,25 @@ class TestMoments(unittest.TestCase):
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
def test_var_resample(self):
- X = np.random.uniform(-1, 1, 500)
+ X = RNG.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=True)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
- X = np.random.uniform(-1, 1, 500)
+ X = RNG.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=False)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
class TestIntervals(unittest.TestCase):
def test_central_credible_interval(self):
- l, r = central_credible_interval(self._shuffle(list(range(10))), alpha=0.2)
- self.assertEqual(l, 1)
- self.assertEqual(r, 8)
- l, r = central_credible_interval(self._shuffle(list(range(10))), alpha=0.19)
- self.assertEqual(l, 1)
- self.assertEqual(r, 8)
- l, r = central_credible_interval(self._shuffle(list(range(20))), alpha=0.1)
- self.assertEqual(l, 1)
- self.assertEqual(r, 18)
+ l, r = central_credible_interval(self._shuffle(range(10)), alpha=0.2)
+ self.assertEqual(l, 0.9)
+ self.assertEqual(r, 8.1)
+ l, r = central_credible_interval(self._shuffle(range(10)), alpha=0.19)
+ self.assertEqual(l, 0.855)
+ self.assertEqual(r, 8.145)
+ l, r = central_credible_interval(self._shuffle(range(20)), alpha=0.1)
+ self.assertAlmostEqual(l, 0.95)
+ self.assertEqual(r, 18.05)
def test_hpdi(self):
l, r = highest_density_interval(self._shuffle([0, 10, 1] + [1.1] * 7), alpha=0.2)
@@ -78,14 +79,14 @@ class TestIntervals(unittest.TestCase):
def _shuffle(self, x):
x = list(x)
- random.shuffle(x)
+ RNG.shuffle(x)
return x
class TestRegression(unittest.TestCase):
def test_parameter_estimation_resampling_low_memory(self):
- X = np.random.uniform(0, 4, 1000)
- y = X + np.random.normal(0, 1, 1000)
+ X = RNG.uniform(0, 4, 1000)
+ y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=True)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
@@ -107,8 +108,8 @@ class TestRegression(unittest.TestCase):
self.assertGreater(r, 0)
def test_parameter_estimation_resampling(self):
- X = np.random.uniform(0, 4, 1000)
- y = X + np.random.normal(0, 1, 1000)
+ X = RNG.uniform(0, 4, 1000)
+ y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=False)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
@@ -130,8 +131,8 @@ class TestRegression(unittest.TestCase):
self.assertGreater(r, 0)
def test_parameter_estimation_bayes(self):
- X = np.random.uniform(0, 4, 1000)
- y = X + np.random.normal(0, 1, 1000)
+ X = RNG.uniform(0, 4, 1000)
+ y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=False)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
@@ -153,8 +154,8 @@ class TestRegression(unittest.TestCase):
self.assertGreater(r, 0)
def test_parameter_estimation_bayes_low_memory(self):
- X = np.random.uniform(0, 4, 1000)
- y = X + np.random.normal(0, 1, 1000)
+ X = RNG.uniform(0, 4, 1000)
+ y = X + RNG.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=True)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
@@ -182,12 +183,10 @@ def test_pearsonr():
assert np.mean(bb.pearsonr(x, y, 10000)) == 1
assert np.mean(bb.pearsonr(x, -y, 10000)) == -1
- np.random.seed(1337)
x = [0, 1, 3, 6]
y = [1, 2, 5, 7]
assert np.isclose(np.mean(bb.pearsonr(x, y, 10000)), scipy.stats.pearsonr(x, y)[0], atol=0.001)
- np.random.seed(1337)
x = np.linspace(-10, 10, 10000)
y = np.abs(x)
assert np.isclose(scipy.stats.pearsonr(x, y)[0], np.mean(bb.pearsonr(x, y, 1000)), atol=0.001)
|
Use the np.quantile function for the central credible interval
[np.quantile](https://numpy.org/devdocs/reference/generated/numpy.quantile.html) offers the same functionality, but also interpolates between samples.
https://github.com/lmc2179/bayesian_bootstrap/blob/529758efd7a4993b3b446e9681934cb1ab50fb53/bayesian_bootstrap/bootstrap.py#L227-L238
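A minimal sketch of the two approaches side by side; the data and `alpha` below are illustrative, not taken from the project:
```python
import numpy as np

samples = np.random.default_rng(0).normal(size=1000)
alpha = 0.05

# tail-trimming approach: sort, then drop the extreme alpha/2 fraction per side
tail = int(round(len(samples) * alpha / 2))
s = sorted(samples)
l_old, r_old = s[tail], s[-tail - 1]

# np.quantile yields essentially the same interval, but interpolates
# between adjacent order statistics instead of picking one of them
l_new, r_new = np.quantile(samples, alpha / 2), np.quantile(samples, 1 - alpha / 2)
```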
|
0.0
|
ce52d8330ec3f8fc450f437e6c293814a7a2990d
|
[
"bayesian_bootstrap/tests/test_bootstrap.py::TestIntervals::test_central_credible_interval"
] |
[
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_covar",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_mean",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_mean_resample",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_self_covar",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_var_resample",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_variance",
"bayesian_bootstrap/tests/test_bootstrap.py::TestIntervals::test_hpdi",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_bayes",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_bayes_low_memory",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_resampling",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_resampling_low_memory",
"bayesian_bootstrap/tests/test_bootstrap.py::test_pearsonr"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-25 20:26:00+00:00
|
mit
| 3,623 |
|
lmc2179__bayesian_bootstrap-9
|
diff --git a/README.md b/README.md
index 3d45649..05e754b 100644
--- a/README.md
+++ b/README.md
@@ -124,6 +124,7 @@ Interested in contributing? We'd love to have your help! Please keep the followi
Credit for past contributions:
* [roya0045](https://github.com/roya0045) implemented the original version of the low-memory optimizations.
+* [JulianWgs](https://github.com/JulianWgs) implemented the Bayesian machine learning model using weight distributions instead of resampling.
# Further reading
diff --git a/bayesian_bootstrap/bootstrap.py b/bayesian_bootstrap/bootstrap.py
index 70c7fe5..1b5af80 100644
--- a/bayesian_bootstrap/bootstrap.py
+++ b/bayesian_bootstrap/bootstrap.py
@@ -124,17 +124,21 @@ def bayesian_bootstrap_regression(X, y, statistic, n_replications, resample_size
else:
weights = np.random.dirichlet([1] * len(X), n_replications)
for w in weights:
- resample_i = np.random.choice(range(len(X_arr)), p=w, size=resample_size)
- resample_X = X_arr[resample_i]
- resample_y = y_arr[resample_i]
- s = statistic(resample_X, resample_y)
+ if resample_size is None:
+ s = statistic(X, y, w)
+ else:
+ resample_i = np.random.choice(range(len(X_arr)), p=w, size=resample_size)
+ resample_X = X_arr[resample_i]
+ resample_y = y_arr[resample_i]
+ s = statistic(resample_X, resample_y)
samples.append(s)
return samples
+
class BayesianBootstrapBagging(object):
"""A bootstrap aggregating model using the bayesian bootstrap. Similar to scikit-learn's BaggingRegressor."""
- def __init__(self, base_learner, n_replications, resample_size, low_mem=False):
+ def __init__(self, base_learner, n_replications, resample_size=None, low_mem=False):
"""Initialize the base learners of the ensemble.
Parameter base_learner: A scikit-learn like estimator. This object should implement a fit() and predict()
@@ -161,12 +165,18 @@ class BayesianBootstrapBagging(object):
Returns: Fitted model
"""
- self.base_models_ = bayesian_bootstrap_regression(X,
- y,
- lambda X, y: deepcopy(self.base_learner).fit(X, y),
- self.n_replications,
- self.resample_size,
- low_mem=self.memo)
+ if self.resample_size is None:
+ statistic = lambda X, y, w: deepcopy(self.base_learner).fit(X, y, w)
+ else:
+ statistic = lambda X, y: deepcopy(self.base_learner).fit(X, y)
+ self.base_models_ = bayesian_bootstrap_regression(
+ X,
+ y,
+ statistic,
+ self.n_replications,
+ self.resample_size,
+ low_mem=self.memo
+ )
return self
def predict(self, X):
|
lmc2179/bayesian_bootstrap
|
54a11c222db5e30cb8b9f8e55ffe13bd182f3d33
|
diff --git a/bayesian_bootstrap/tests/test_bootstrap.py b/bayesian_bootstrap/tests/test_bootstrap.py
index e817c81..91bf78d 100644
--- a/bayesian_bootstrap/tests/test_bootstrap.py
+++ b/bayesian_bootstrap/tests/test_bootstrap.py
@@ -36,7 +36,7 @@ class TestMoments(unittest.TestCase):
posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100,low_mem=False)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
-
+
def test_var_resample(self):
X = np.random.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=True)
@@ -72,7 +72,7 @@ class TestIntervals(unittest.TestCase):
return x
class TestRegression(unittest.TestCase):
- def test_parameter_estimation_low_memory(self):
+ def test_parameter_estimation_resampling_low_memory(self):
X = np.random.uniform(0, 4, 1000)
y = X + np.random.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=True)
@@ -96,7 +96,7 @@ class TestRegression(unittest.TestCase):
self.assertGreater(r, 0)
- def test_parameter_estimation(self):
+ def test_parameter_estimation_resampling(self):
X = np.random.uniform(0, 4, 1000)
y = X + np.random.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=False)
@@ -117,7 +117,54 @@ class TestRegression(unittest.TestCase):
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
- self.assertGreater(r, 0)
+ self.assertGreater(r, 0)
+
+ def test_parameter_estimation_bayes(self):
+ X = np.random.uniform(0, 4, 1000)
+ y = X + np.random.normal(0, 1, 1000)
+ m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=False)
+ m.fit(X.reshape(-1, 1), y)
+ coef_samples = [b.coef_ for b in m.base_models_]
+ intercept_samples = [b.intercept_ for b in m.base_models_]
+ self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
+ l, r = central_credible_interval(coef_samples, alpha=0.05)
+ self.assertLess(l, 1)
+ self.assertGreater(r, 1)
+ l, r = highest_density_interval(coef_samples, alpha=0.05)
+ self.assertLess(l, 1)
+ self.assertGreater(r, 1)
+ self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
+ l, r = central_credible_interval(intercept_samples, alpha=0.05)
+ self.assertLess(l, 0)
+ self.assertGreater(r, 0)
+ self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
+ l, r = highest_density_interval(intercept_samples, alpha=0.05)
+ self.assertLess(l, 0)
+ self.assertGreater(r, 0)
+
+ def test_parameter_estimation_bayes_low_memory(self):
+ X = np.random.uniform(0, 4, 1000)
+ y = X + np.random.normal(0, 1, 1000)
+ m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=True)
+ m.fit(X.reshape(-1, 1), y)
+ coef_samples = [b.coef_ for b in m.base_models_]
+ intercept_samples = [b.intercept_ for b in m.base_models_]
+ self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
+ l, r = central_credible_interval(coef_samples, alpha=0.05)
+ self.assertLess(l, 1)
+ self.assertGreater(r, 1)
+ l, r = highest_density_interval(coef_samples, alpha=0.05)
+ self.assertLess(l, 1)
+ self.assertGreater(r, 1)
+ self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
+ l, r = central_credible_interval(intercept_samples, alpha=0.05)
+ self.assertLess(l, 0)
+ self.assertGreater(r, 0)
+ self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
+ l, r = highest_density_interval(intercept_samples, alpha=0.05)
+ self.assertLess(l, 0)
+ self.assertGreater(r, 0)
+
if __name__ == '__main__':
unittest.main()
|
Implementation of weight bootstrapping
Hello,
First of all: this is a great package!
Now to my question: why is resampling the only Bayesian bootstrapping method offered? Many sklearn regressors implement a sample-weight keyword in the `fit` function and could therefore use Bayesian bootstrapping with weights instead of resampling. This approach should be much faster and less resource-hungry, since only the size of the original training data matters.
Also, I'm interested in implementing such a feature :)
Greetings!
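A rough sketch of the weighted idea, assuming the base learner accepts `sample_weight` (the data and estimator below are placeholders):
```python
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.uniform(0, 4, (1000, 1))
y = X.ravel() + rng.normal(0, 1, 1000)

# one posterior draw: Dirichlet weights over the original points,
# passed straight to fit() instead of resampling rows
w = rng.dirichlet(np.ones(len(X)))
model = LinearRegression().fit(X, y, sample_weight=w)
```
Each posterior replication then refits on the same data with fresh Dirichlet weights, so memory stays bounded by the original training set.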
|
0.0
|
54a11c222db5e30cb8b9f8e55ffe13bd182f3d33
|
[
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_bayes",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_bayes_low_memory"
] |
[
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_covar",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_mean",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_mean_resample",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_self_covar",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_var_resample",
"bayesian_bootstrap/tests/test_bootstrap.py::TestMoments::test_variance",
"bayesian_bootstrap/tests/test_bootstrap.py::TestIntervals::test_central_credible_interval",
"bayesian_bootstrap/tests/test_bootstrap.py::TestIntervals::test_hpdi",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_resampling",
"bayesian_bootstrap/tests/test_bootstrap.py::TestRegression::test_parameter_estimation_resampling_low_memory"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-29 15:35:51+00:00
|
mit
| 3,624 |
|
lmfit__lmfit-py-487
|
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 9a5c7d1a..235a919b 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -826,6 +826,8 @@ class Parameter(object):
if val is None:
self._expr_ast = None
if val is not None and self._expr_eval is not None:
+ self._expr_eval.error = []
+ self._expr_eval.error_msg = None
self._expr_ast = self._expr_eval.parse(val)
check_ast_errors(self._expr_eval)
self._expr_deps = get_ast_names(self._expr_ast)
|
lmfit/lmfit-py
|
6c87262fcfd3c361b197c6769852f76366113246
|
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 73604e47..7344241d 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -212,6 +212,23 @@ class TestParameters(unittest.TestCase):
pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1)
assert_(isclose(pars['c4'].value, 0.2))
+ def test_invalid_expr_exceptions(self):
+ "test if an exception is raised for invalid expressions (GH486)"""
+ p1 = Parameters()
+ p1.add('t', 2.0, min=0.0, max=5.0)
+ p1.add('x', 10.0)
+ with self.assertRaises(SyntaxError):
+ p1.add('y', expr='x*t + sqrt(t)/')
+ assert(len(p1['y']._expr_eval.error) > 0)
+ p1.add('y', expr='x*t + sqrt(t)/3.0')
+ p1['y'].set(expr='x*3.0 + t**2')
+ assert('x*3' in p1['y'].expr)
+ assert(len(p1['y']._expr_eval.error) == 0)
+ with self.assertRaises(SyntaxError):
+ p1['y'].set(expr='t+')
+ assert(len(p1['y']._expr_eval.error) > 0)
+ assert_almost_equal(p1['y'].value, 34.0)
+
if __name__ == '__main__':
unittest.main()
|
asteval errors are not flushed after raising
#### Description
With parameter.set(expr="foo"), if the expression raises an exception in asteval, the error is stored in parameter._expr_eval.error. Now, the function https://github.com/lmfit/lmfit-py/blob/6c87262fcfd3c361b197c6769852f76366113246/lmfit/parameter.py#L18-L21 gets called from parameter.__set_expression(val) and checks whether this error list is non-empty; if it is, the exception is raised.
However, if you then try to set the expression for this parameter again (to a VALID expression), Parameter._expr_eval.error will still contain the first exception, so check_ast_errors() will always raise an Exception. This should not be the expected behaviour, for example if lmfit is used from a GUI and the user tries to enter a valid expression after first entering an invalid one (where the exception was caught).
I will submit a PR that flushes parameter._expr_eval.error before a new expression is parsed, so that check_ast_errors() only sees errors from the current expression.
###### Version information
Python: 3.6.5 (default, Apr 1 2018, 05:46:30)
[GCC 7.3.0]
lmfit: 0.9.11, scipy: 1.1.0, numpy: 1.15.0, asteval: 0.9.12, uncertainties: 3.0.2, six: 1.11.0
###### Minimal example
```python
import lmfit
def set_constraints(paramname, expr):
# params[paramname]._expr_eval.error.clear() # this would be the dirty fix
try:
params[paramname].set(expr=expr)
print("expr was valid")
except SyntaxError:
params[paramname].set(expr="")
print("expr was not valid")
model = lmfit.models.PseudoVoigtModel()
params = model.make_params()
set_constraints("amplitude", expr="sigma * 2")
set_constraints("amplitude", expr="fail *")
set_constraints("amplitude", expr="sigma * 2")
```
produces:
```python
expr was valid
expr was not valid
expr was not valid
```
If you uncomment the "dirty fix", it produces
```python
expr was valid
expr was not valid
expr was valid
```
|
0.0
|
6c87262fcfd3c361b197c6769852f76366113246
|
[
"tests/test_parameters.py::TestParameters::test_invalid_expr_exceptions"
] |
[
"tests/test_parameters.py::TestParameters::test_add_many_params",
"tests/test_parameters.py::TestParameters::test_copy",
"tests/test_parameters.py::TestParameters::test_copy_function",
"tests/test_parameters.py::TestParameters::test_deepcopy",
"tests/test_parameters.py::TestParameters::test_dumps_loads_parameters",
"tests/test_parameters.py::TestParameters::test_expr_and_constraints_GH265",
"tests/test_parameters.py::TestParameters::test_expr_was_evaluated",
"tests/test_parameters.py::TestParameters::test_expr_with_bounds",
"tests/test_parameters.py::TestParameters::test_isclose",
"tests/test_parameters.py::TestParameters::test_pickle_parameter",
"tests/test_parameters.py::TestParameters::test_pickle_parameters",
"tests/test_parameters.py::TestParameters::test_set_symtable"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-07-29 12:46:35+00:00
|
bsd-3-clause
| 3,625 |
|
lmfit__lmfit-py-821
|
diff --git a/doc/constraints.rst b/doc/constraints.rst
index dbe439e3..cc82bb98 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -13,7 +13,7 @@ highly desirable to place mathematical constraints on parameter values.
For example, one might want to require that two Gaussian peaks have the
same width, or have amplitudes that are constrained to add to some value.
Of course, one could rewrite the objective or model function to place such
-requirements, but this is somewhat error prone, and limits the flexibility
+requirements, but this is somewhat error-prone, and limits the flexibility
so that exploring constraints becomes laborious.
To simplify the setting of constraints, Parameters can be assigned a
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 60e66548..50a05c8c 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -129,7 +129,7 @@ Choosing Different Fitting Methods
By default, the `Levenberg-Marquardt
<https://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm>`_ algorithm is
used for fitting. While often criticized, including the fact it finds a
-*local* minima, this approach has some distinct advantages. These include
+*local* minimum, this approach has some distinct advantages. These include
being fast, and well-behaved for most curve-fitting needs, and making it
easy to estimate uncertainties for and correlations between pairs of fit
variables, as discussed in :ref:`fit-results-label`.
@@ -476,7 +476,7 @@ be used to abort a fit.
:type resid: numpy.ndarray
:param args: Positional arguments. Must match ``args`` argument to :func:`minimize`
:param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`
- :return: Residual array (generally ``data-model``) to be minimized in the least-squares sense.
+ :return: Iteration abort flag.
:rtype: None for normal behavior, any value like ``True`` to abort the fit.
diff --git a/doc/model.rst b/doc/model.rst
index b38254cd..3400cc15 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -378,7 +378,7 @@ Parameters if the supplied default value was a valid number (but not
.. jupyter-execute::
def decay2(t, tau, N=10, check_positive=False):
- if check_small:
+ if check_positive:
arg = abs(t)/max(1.e-9, abs(tau))
else:
arg = t/tau
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index 761d7e2a..7ecdc42d 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -1,7 +1,7 @@
"""Basic model lineshapes and distribution functions."""
-from numpy import (arctan, copysign, cos, exp, isclose, isnan, log, log1p, pi,
- real, sin, sqrt, where)
+from numpy import (arctan, copysign, cos, exp, isclose, isnan, log, log1p,
+ maximum, minimum, pi, real, sin, sqrt, where)
from scipy.special import betaln as betalnfcn
from scipy.special import erf, erfc
from scipy.special import gamma as gamfcn
@@ -431,13 +431,11 @@ def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
if form == 'erf':
out = 0.5*(1 + erf(out))
elif form == 'logistic':
- out = (1. - 1./(1. + exp(out)))
+ out = 1. - 1./(1. + exp(out))
elif form in ('atan', 'arctan'):
out = 0.5 + arctan(out)/pi
elif form == 'linear':
- out = out + 0.5
- out[out < 0] = 0.0
- out[out > 1] = 1.0
+ out = minimum(1, maximum(0, out + 0.5))
else:
msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
"of 'erf', 'logistic', 'atan', 'arctan', or 'linear'.")
@@ -471,15 +469,11 @@ def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
if form == 'erf':
out = 0.5*(erf(arg1) + erf(arg2))
elif form == 'logistic':
- out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
+ out = 1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2))
elif form in ('atan', 'arctan'):
out = (arctan(arg1) + arctan(arg2))/pi
elif form == 'linear':
- arg1[arg1 < 0] = 0.0
- arg1[arg1 > 1] = 1.0
- arg2[arg2 > 0] = 0.0
- arg2[arg2 < -1] = -1.0
- out = arg1 + arg2
+ out = 0.5*(minimum(1, maximum(-1, arg1)) + minimum(1, maximum(-1, arg2)))
else:
msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
"of 'erf', 'logistic', 'atan', 'arctan', or 'linear'.")
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index e9c3c0b7..11d874dc 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -710,6 +710,7 @@ class Minimizer:
# and which are defined expressions.
result.var_names = [] # note that this *does* belong to self...
result.init_vals = []
+ result._init_vals_internal = []
result.params.update_constraints()
result.nfev = 0
result.call_kws = {}
@@ -725,7 +726,8 @@ class Minimizer:
par.vary = False
if par.vary:
result.var_names.append(name)
- result.init_vals.append(par.setup_bounds())
+ result._init_vals_internal.append(par.setup_bounds())
+ result.init_vals.append(par.value)
par.init_value = par.value
if par.name is None:
@@ -953,11 +955,12 @@ class Minimizer:
"""
result = self.prepare_fit(params=params)
result.method = method
- variables = result.init_vals
+ variables = result._init_vals_internal
params = result.params
self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
fmin_kws = dict(method=method, options={'maxiter': 2*self.max_nfev})
+ # fmin_kws = dict(method=method, options={'maxfun': 2*self.max_nfev})
fmin_kws.update(self.kws)
if 'maxiter' in kws:
@@ -1661,7 +1664,7 @@ class Minimizer:
result = self.prepare_fit(params=params)
result.method = 'leastsq'
result.nfev -= 2 # correct for "pre-fit" initialization/checks
- variables = result.init_vals
+ variables = result._init_vals_internal
# note we set the max number of function evaluations here, and send twice that
# value to the solver so it essentially never stops on its own
@@ -1779,7 +1782,7 @@ class Minimizer:
basinhopping_kws.update(self.kws)
basinhopping_kws.update(kws)
- x0 = result.init_vals
+ x0 = result._init_vals_internal
result.call_kws = basinhopping_kws
try:
ret = scipy_basinhopping(self.penalty, x0, **basinhopping_kws)
@@ -2072,7 +2075,7 @@ class Minimizer:
ampgo_kws.update(self.kws)
ampgo_kws.update(kws)
- values = result.init_vals
+ values = result._init_vals_internal
result.method = f"ampgo, with {ampgo_kws['local']} as local solver"
result.call_kws = ampgo_kws
try:
@@ -2212,8 +2215,7 @@ class Minimizer:
result.method = 'dual_annealing'
self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
- da_kws = dict(maxiter=1000, local_search_options={},
- initial_temp=5230.0, restart_temp_ratio=2e-05,
+ da_kws = dict(initial_temp=5230.0, restart_temp_ratio=2e-05,
visit=2.62, accept=-5.0, maxfun=2*self.max_nfev,
seed=None, no_local_search=False, callback=None,
x0=None)
diff --git a/lmfit/models.py b/lmfit/models.py
index 7c2d3d1d..090c2dce 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -1480,7 +1480,7 @@ class StepModel(Model):
& f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha + 1/2)}]} \\
& f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
& f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
- & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 + e^{\alpha}} ]
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A \left[1 - \frac{1}{1 + e^{\alpha}} \right]
\end{eqnarray*}
where :math:`\alpha = (x - \mu)/{\sigma}`.
@@ -1535,10 +1535,10 @@ class RectangleModel(Model):
:nowrap:
\begin{eqnarray*}
- &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0, \alpha_2)}]} \} \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(-1, \alpha_1)}]} + \min{[1, \max{(-1, \alpha_2)}]} \}/2 \\
&f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} ]
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A \left[{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)\right]/2 \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A \left[1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} \right]
\end{eqnarray*}
|
lmfit/lmfit-py
|
99952a223dfad467fb36c20c5f1e535cd3832f71
|
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
index f377672c..3419de91 100644
--- a/tests/test_NIST_Strd.py
+++ b/tests/test_NIST_Strd.py
@@ -118,7 +118,7 @@ options:
--------
-m name of fitting method. One of:
leastsq, nelder, powell, lbfgsb, bfgs,
- tnc, cobyla, slsqp, cg, newto-cg
+ tnc, cobyla, slsqp, cg, newton-cg
leastsq (Levenberg-Marquardt) is the default
"""
return usage
@@ -183,115 +183,114 @@ def RunNIST_Model(model):
out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False)
out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False)
assert(out1 or out2)
- return out1 or out2
def test_Bennett5():
- return RunNIST_Model('Bennett5')
+ RunNIST_Model('Bennett5')
def test_BoxBOD():
- return RunNIST_Model('BoxBOD')
+ RunNIST_Model('BoxBOD')
def test_Chwirut1():
- return RunNIST_Model('Chwirut1')
+ RunNIST_Model('Chwirut1')
def test_Chwirut2():
- return RunNIST_Model('Chwirut2')
+ RunNIST_Model('Chwirut2')
def test_DanWood():
- return RunNIST_Model('DanWood')
+ RunNIST_Model('DanWood')
def test_ENSO():
- return RunNIST_Model('ENSO')
+ RunNIST_Model('ENSO')
def test_Eckerle4():
- return RunNIST_Model('Eckerle4')
+ RunNIST_Model('Eckerle4')
def test_Gauss1():
- return RunNIST_Model('Gauss1')
+ RunNIST_Model('Gauss1')
def test_Gauss2():
- return RunNIST_Model('Gauss2')
+ RunNIST_Model('Gauss2')
def test_Gauss3():
- return RunNIST_Model('Gauss3')
+ RunNIST_Model('Gauss3')
def test_Hahn1():
- return RunNIST_Model('Hahn1')
+ RunNIST_Model('Hahn1')
def test_Kirby2():
- return RunNIST_Model('Kirby2')
+ RunNIST_Model('Kirby2')
def test_Lanczos1():
- return RunNIST_Model('Lanczos1')
+ RunNIST_Model('Lanczos1')
def test_Lanczos2():
- return RunNIST_Model('Lanczos2')
+ RunNIST_Model('Lanczos2')
def test_Lanczos3():
- return RunNIST_Model('Lanczos3')
+ RunNIST_Model('Lanczos3')
def test_MGH09():
- return RunNIST_Model('MGH09')
+ RunNIST_Model('MGH09')
def test_MGH10():
- return RunNIST_Model('MGH10')
+ RunNIST_Model('MGH10')
def test_MGH17():
- return RunNIST_Model('MGH17')
+ RunNIST_Model('MGH17')
def test_Misra1a():
- return RunNIST_Model('Misra1a')
+ RunNIST_Model('Misra1a')
def test_Misra1b():
- return RunNIST_Model('Misra1b')
+ RunNIST_Model('Misra1b')
def test_Misra1c():
- return RunNIST_Model('Misra1c')
+ RunNIST_Model('Misra1c')
def test_Misra1d():
- return RunNIST_Model('Misra1d')
+ RunNIST_Model('Misra1d')
def test_Nelson():
- return RunNIST_Model('Nelson')
+ RunNIST_Model('Nelson')
def test_Rat42():
- return RunNIST_Model('Rat42')
+ RunNIST_Model('Rat42')
def test_Rat43():
- return RunNIST_Model('Rat43')
+ RunNIST_Model('Rat43')
def test_Roszman1():
- return RunNIST_Model('Roszman1')
+ RunNIST_Model('Roszman1')
def test_Thurber():
- return RunNIST_Model('Thurber')
+ RunNIST_Model('Thurber')
if __name__ == '__main__':
diff --git a/tests/test_lineshapes.py b/tests/test_lineshapes.py
index 8e5a7bee..1d28aad8 100644
--- a/tests/test_lineshapes.py
+++ b/tests/test_lineshapes.py
@@ -73,13 +73,8 @@ def test_x_float_value(lineshape):
if par_name != 'x']:
fnc_args.append(sig.parameters[par].default)
- if lineshape in ('step', 'rectangle'):
- msg = r"'float' object does not support item assignment"
- with pytest.raises(TypeError, match=msg):
- fnc_output = func(*fnc_args)
- else:
- fnc_output = func(*fnc_args)
- assert isinstance(fnc_output, float)
+ fnc_output = func(*fnc_args)
+ assert isinstance(fnc_output, float)
rising_form = ['erf', 'logistic', 'atan', 'arctan', 'linear', 'unknown']
@@ -111,6 +106,18 @@ def test_form_argument_step_rectangle(form, lineshape):
assert len(fnc_output) == len(xvals)
[email protected]('form', rising_form)
[email protected]('lineshape', ['step', 'rectangle'])
+def test_value_step_rectangle(form, lineshape):
+ """Test values at mu1/mu2 for step- and rectangle-functions."""
+ func = getattr(lmfit.lineshapes, lineshape)
+ # at position mu1 we should be at A/2
+ assert_almost_equal(func(0), 0.5)
+ # for a rectangular shape we have the same at mu2
+ if lineshape == 'rectangle':
+ assert_almost_equal(func(1), 0.5)
+
+
thermal_form = ['bose', 'maxwell', 'fermi', 'Bose-Einstein', 'unknown']
|
Incorrect value in MinimizerResult.init_values
#### First Time Issue Code
Yes, I read the instructions and I am sure this is a GitHub Issue.
#### Description
Using the [simple example] from the documentation and printing `result.init_values` (or `result.init_vals`) as in the MWE below reveals an incorrect initial value for `amp`. It should be `10` but it is `10.954451150103322`.
[simple example]: https://lmfit.github.io/lmfit-py/parameters.html#simple-example
###### A Minimal, Complete, and Verifiable example
```python
import numpy as np
from lmfit import Minimizer, Parameters, report_fit
# create data to be fitted
x = np.linspace(0, 15, 301)
np.random.seed(2021)
data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) +
np.random.normal(size=x.size, scale=0.2))
# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
"""Model a decaying sine wave and subtract data."""
amp = params['amp']
shift = params['shift']
omega = params['omega']
decay = params['decay']
model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay)
return model - data
# create a set of Parameters
params = Parameters()
params.add('amp', value=10, min=0)
params.add('decay', value=0.1)
params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
params.add('omega', value=3.0)
# do fit, here with the default leastsq algorithm
minner = Minimizer(fcn2min, params, fcn_args=(x, data))
result = minner.minimize()
# calculate final result
final = data + result.residual
# write error report
report_fit(result)
# now print the inital values
print(result.init_values)
```
This gives the output
```
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 59
# data points = 301
# variables = 4
chi-square = 12.1867036
reduced chi-square = 0.04103267
Akaike info crit = -957.236198
Bayesian info crit = -942.407756
[[Variables]]
amp: 5.03087926 +/- 0.04005805 (0.80%) (init = 10)
decay: 0.02495454 +/- 4.5395e-04 (1.82%) (init = 0.1)
shift: -0.10264934 +/- 0.01022298 (9.96%) (init = 0)
omega: 2.00026304 +/- 0.00326184 (0.16%) (init = 3)
[[Correlations]] (unreported correlations are < 0.100)
C(shift, omega) = -0.785
C(amp, decay) = 0.584
C(amp, shift) = -0.118
{'amp': 10.954451150103322, 'decay': 0.1, 'shift': 0.0, 'omega': 3.0}
```
`report_fit` correctly reports an initial value of `10` for `amp` but `result.init_values` does not.
###### Version information
```
Python: 3.9.13 | packaged by conda-forge | (main, May 27 2022, 16:50:36) [MSC v.1929 64 bit (AMD64)]
lmfit: 1.0.3, scipy: 1.9.3, numpy: 1.23.4,asteval: 0.9.27, uncertainties: 3.1.7
```
|
0.0
|
99952a223dfad467fb36c20c5f1e535cd3832f71
|
[
"tests/test_lineshapes.py::test_x_float_value[step]",
"tests/test_lineshapes.py::test_x_float_value[rectangle]",
"tests/test_lineshapes.py::test_value_step_rectangle[step-erf]",
"tests/test_lineshapes.py::test_value_step_rectangle[step-logistic]",
"tests/test_lineshapes.py::test_value_step_rectangle[step-atan]",
"tests/test_lineshapes.py::test_value_step_rectangle[step-arctan]",
"tests/test_lineshapes.py::test_value_step_rectangle[step-linear]",
"tests/test_lineshapes.py::test_value_step_rectangle[step-unknown]",
"tests/test_lineshapes.py::test_value_step_rectangle[rectangle-erf]",
"tests/test_lineshapes.py::test_value_step_rectangle[rectangle-logistic]",
"tests/test_lineshapes.py::test_value_step_rectangle[rectangle-atan]",
"tests/test_lineshapes.py::test_value_step_rectangle[rectangle-arctan]",
"tests/test_lineshapes.py::test_value_step_rectangle[rectangle-linear]",
"tests/test_lineshapes.py::test_value_step_rectangle[rectangle-unknown]"
] |
[
"tests/test_NIST_Strd.py::test_Bennett5",
"tests/test_NIST_Strd.py::test_BoxBOD",
"tests/test_NIST_Strd.py::test_Chwirut1",
"tests/test_NIST_Strd.py::test_Chwirut2",
"tests/test_NIST_Strd.py::test_DanWood",
"tests/test_NIST_Strd.py::test_ENSO",
"tests/test_NIST_Strd.py::test_Eckerle4",
"tests/test_NIST_Strd.py::test_Gauss1",
"tests/test_NIST_Strd.py::test_Gauss2",
"tests/test_NIST_Strd.py::test_Gauss3",
"tests/test_NIST_Strd.py::test_Hahn1",
"tests/test_NIST_Strd.py::test_Kirby2",
"tests/test_NIST_Strd.py::test_Lanczos1",
"tests/test_NIST_Strd.py::test_Lanczos2",
"tests/test_NIST_Strd.py::test_Lanczos3",
"tests/test_NIST_Strd.py::test_MGH09",
"tests/test_NIST_Strd.py::test_MGH10",
"tests/test_NIST_Strd.py::test_MGH17",
"tests/test_NIST_Strd.py::test_Misra1a",
"tests/test_NIST_Strd.py::test_Misra1b",
"tests/test_NIST_Strd.py::test_Misra1c",
"tests/test_NIST_Strd.py::test_Misra1d",
"tests/test_NIST_Strd.py::test_Nelson",
"tests/test_NIST_Strd.py::test_Rat42",
"tests/test_NIST_Strd.py::test_Rat43",
"tests/test_NIST_Strd.py::test_Roszman1",
"tests/test_NIST_Strd.py::test_Thurber",
"tests/test_lineshapes.py::test_not_zero[1-1.0]",
"tests/test_lineshapes.py::test_not_zero[-1--1.0]",
"tests/test_lineshapes.py::test_not_zero[0-1e-15]",
"tests/test_lineshapes.py::test_not_zero[0--1e-15]",
"tests/test_lineshapes.py::test_not_zero[value4-1.0]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[gaussian]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[gaussian2d]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[lorentzian]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[voigt]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[pvoigt]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[moffat]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[pearson4]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[pearson7]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[breit_wigner]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[damped_oscillator]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[dho]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[logistic]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[lognormal]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[students_t]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[expgaussian]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[doniach]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[skewed_gaussian]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[skewed_voigt]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[thermal_distribution]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[step]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[rectangle]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[exponential]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[powerlaw]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[linear]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[parabolic]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[sine]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[expsine]",
"tests/test_lineshapes.py::test_no_ZeroDivisionError_and_finite_output[split_lorentzian]",
"tests/test_lineshapes.py::test_x_float_value[gaussian]",
"tests/test_lineshapes.py::test_x_float_value[gaussian2d]",
"tests/test_lineshapes.py::test_x_float_value[lorentzian]",
"tests/test_lineshapes.py::test_x_float_value[voigt]",
"tests/test_lineshapes.py::test_x_float_value[pvoigt]",
"tests/test_lineshapes.py::test_x_float_value[moffat]",
"tests/test_lineshapes.py::test_x_float_value[pearson4]",
"tests/test_lineshapes.py::test_x_float_value[pearson7]",
"tests/test_lineshapes.py::test_x_float_value[breit_wigner]",
"tests/test_lineshapes.py::test_x_float_value[damped_oscillator]",
"tests/test_lineshapes.py::test_x_float_value[dho]",
"tests/test_lineshapes.py::test_x_float_value[logistic]",
"tests/test_lineshapes.py::test_x_float_value[lognormal]",
"tests/test_lineshapes.py::test_x_float_value[students_t]",
"tests/test_lineshapes.py::test_x_float_value[expgaussian]",
"tests/test_lineshapes.py::test_x_float_value[doniach]",
"tests/test_lineshapes.py::test_x_float_value[skewed_gaussian]",
"tests/test_lineshapes.py::test_x_float_value[skewed_voigt]",
"tests/test_lineshapes.py::test_x_float_value[thermal_distribution]",
"tests/test_lineshapes.py::test_x_float_value[exponential]",
"tests/test_lineshapes.py::test_x_float_value[powerlaw]",
"tests/test_lineshapes.py::test_x_float_value[linear]",
"tests/test_lineshapes.py::test_x_float_value[parabolic]",
"tests/test_lineshapes.py::test_x_float_value[sine]",
"tests/test_lineshapes.py::test_x_float_value[expsine]",
"tests/test_lineshapes.py::test_x_float_value[split_lorentzian]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[step-erf]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[step-logistic]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[step-atan]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[step-arctan]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[step-linear]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[step-unknown]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[rectangle-erf]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[rectangle-logistic]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[rectangle-atan]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[rectangle-arctan]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[rectangle-linear]",
"tests/test_lineshapes.py::test_form_argument_step_rectangle[rectangle-unknown]",
"tests/test_lineshapes.py::test_form_argument_thermal_distribution[bose]",
"tests/test_lineshapes.py::test_form_argument_thermal_distribution[maxwell]",
"tests/test_lineshapes.py::test_form_argument_thermal_distribution[fermi]",
"tests/test_lineshapes.py::test_form_argument_thermal_distribution[Bose-Einstein]",
"tests/test_lineshapes.py::test_form_argument_thermal_distribution[unknown]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-31 16:14:53+00:00
|
bsd-3-clause
| 3,626 |
|
lmmentel__mendeleev-116
|
diff --git a/mendeleev/electronegativity.py b/mendeleev/electronegativity.py
index 4fdcb53..62d47cc 100644
--- a/mendeleev/electronegativity.py
+++ b/mendeleev/electronegativity.py
@@ -3,7 +3,7 @@ Electronegativity scale formulas.
"""
import math
-from typing import List
+from typing import List, Union
import numpy as np
@@ -98,17 +98,15 @@ def martynov_batsanov(ionization_energies: List[float]) -> float:
def mulliken(
ionization_energy: float,
electron_affinity: float,
- missing_is_zero: bool = False,
- allow_negative_ea: bool = False,
-) -> float:
+) -> Union[float, None]:
r"""
- Return the absolute electronegativity (Mulliken scale), calculated as
+ Return the absolute electronegativity (Mulliken scale).
Args:
ionization_energy: ionization energy
electron_affinity: electron affinity
- missing_is_zero: missing values are substituted with zero
- allow_negative_ea: if `True` negative EA values will be allowed
+
+ The value of electronegativity is calculated as:
.. math::
@@ -120,18 +118,14 @@ def mulliken(
- :math:`A` is the electron affinity
"""
- if ionization_energy is not None:
- if (
- electron_affinity is not None
- and electron_affinity < 0.0
- and allow_negative_ea
- ):
- return (ionization_energy + electron_affinity) * 0.5
- elif electron_affinity is not None or missing_is_zero:
- return ionization_energy * 0.5
- else:
+ if ionization_energy is None:
return None
+ if electron_affinity is None:
+ return ionization_energy * 0.5
+
+ return (ionization_energy + electron_affinity) * 0.5
+
def nagle(nvalence: int, polarizability: float) -> float:
"""
diff --git a/mendeleev/models.py b/mendeleev/models.py
index f6130bc..89646c7 100644
--- a/mendeleev/models.py
+++ b/mendeleev/models.py
@@ -627,23 +627,23 @@ class Element(Base):
def electronegativity_mulliken(
self,
charge: int = 0,
- missing_is_zero: bool = False,
- allow_negative_ea: bool = False,
) -> float:
r"""
- Return the absolute electronegativity (Mulliken scale), calculated as
+ Return the absolute electronegativity (Mulliken scale).
Args:
charge: charge of the ion
- missing_is_zero: missing values are substituted with zero
- allow_negative_ea: if `True` negative EA values will be allowed
+
+ The value of electronegativity is calculated as:
.. math::
- \chi = \frac{I + A}{2}
+ \chi = \frac{I + A}{2}
+
+ where:
- where :math:`I` is the ionization energy and :math:`A` is the electron
- affinity
+ - :math:`I` is the ionization energy,
+ - :math:`A` is the electron affinity
"""
if charge == 0:
@@ -654,9 +654,7 @@ class Element(Base):
ea = self.ionenergies.get(charge, None)
else:
raise ValueError(f"Charge has to be a non-negative integer, got: {charge}")
- return mulliken(
- ip, ea, missing_is_zero=missing_is_zero, allow_negative_ea=allow_negative_ea
- )
+ return mulliken(ip, ea)
def electronegativity_nagle(self) -> float:
"Nagle's electronegativity"
|
lmmentel/mendeleev
|
9e9a594e9097ed4fe377f28c77c718d0fb61a3b8
|
diff --git a/tests/test_electronegativity.py b/tests/test_electronegativity.py
index 5080ffd..07935d2 100644
--- a/tests/test_electronegativity.py
+++ b/tests/test_electronegativity.py
@@ -1,6 +1,7 @@
import pytest
from mendeleev import Element
+from mendeleev.electronegativity import mulliken
def test_scales_exception():
@@ -8,3 +9,11 @@ def test_scales_exception():
e = Element()
with pytest.raises(ValueError):
e.electronegativity(scale="unknown")
+
+
+def test_mulliken():
+
+ assert mulliken(None, None) is None
+ assert mulliken(None, 1.0) is None
+ assert mulliken(2.0, None) == pytest.approx(1.0)
+ assert mulliken(2.0, 1.0) == pytest.approx(1.5)
|
Calculation of Mulliken Electronegativity
Hi!
I realized that during Mulliken electronegativity calculations, electron affinity is not taken into account. When I checked the source code, I think this is related to last if condition. Can you please check if there is an issue, or do I miss something?
mendeleev version: 0.13.1
python version: 3.10
platform: google colab
Below I also share a code snippet for only one element, but I get the same issue for other elements as well.
## To Reproduce
Steps to reproduce the behavior:
```
from mendeleev import element
Ni = element('Ni')
print('Mulliken with function:', Ni.electronegativity('mulliken'))
print('Mulliken with manual calculation:', (Ni.ionenergies[1]+Ni.electron_affinity)/2)
print('Half of first ionization energy:', (Ni.ionenergies[1])/2)
```
The output is:
```
Mulliken with function: 3.8199385
Mulliken with manual calculation: 4.3979385
Half of first ionization energy: 3.8199385
```
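With the patched `mulliken` helper, the function and the manual calculation agree. A quick sanity check, using Ni's first ionization energy and electron affinity back-calculated from the output above:
```python
from mendeleev.electronegativity import mulliken

ip, ea = 7.639877, 1.156   # Ni, in eV, inferred from the printed values
print(mulliken(ip, ea))    # ~4.3979385, i.e. the manual (I + A)/2
print(mulliken(ip, None))  # 3.8199385, falls back to I/2 when EA is missing
```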
|
0.0
|
9e9a594e9097ed4fe377f28c77c718d0fb61a3b8
|
[
"tests/test_electronegativity.py::test_mulliken"
] |
[
"tests/test_electronegativity.py::test_scales_exception"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-07 19:35:31+00:00
|
mit
| 3,627 |
|
locustio__locust-1380
|
diff --git a/locust/main.py b/locust/main.py
index e5bcfb95..ab9dfc19 100644
--- a/locust/main.py
+++ b/locust/main.py
@@ -6,6 +6,7 @@ import signal
import socket
import sys
import time
+import resource
import gevent
@@ -153,6 +154,14 @@ def main():
# list() call is needed to consume the dict_view object in Python 3
user_classes = list(user_classes.values())
+ try:
+ if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 10000:
+ # Increasing the limit to 10000 within a running process should work on at least MacOS.
+ # It does not work on all OS:es, but we should be no worse off for trying.
+ resource.setrlimit(resource.RLIMIT_NOFILE, [10000, resource.RLIM_INFINITY])
+ except:
+        logger.warning("System open file limit setting is not high enough for load testing, and the OS wouldn't allow locust to increase it by itself. See https://docs.locust.io/en/stable/installation.html#increasing-maximum-number-of-open-files-limit for more info.")
+
# create locust Environment
environment = create_environment(user_classes, options, events=locust.events)
diff --git a/locust/user/task.py b/locust/user/task.py
index e474cba8..7e3d0933 100644
--- a/locust/user/task.py
+++ b/locust/user/task.py
@@ -218,25 +218,19 @@ class TaskSet(object, metaclass=TaskSetMeta):
if not set on the TaskSet.
"""
- user = None
- """Will refer to the root User class instance when the TaskSet has been instantiated"""
-
- parent = None
- """
- Will refer to the parent TaskSet, or User, class instance when the TaskSet has been
- instantiated. Useful for nested TaskSet classes.
- """
+ _user = None
+ _parent = None
def __init__(self, parent):
self._task_queue = []
self._time_start = time()
if isinstance(parent, TaskSet):
- self.user = parent.user
+ self._user = parent.user
else:
- self.user = parent
+ self._user = parent
- self.parent = parent
+ self._parent = parent
# if this class doesn't have a min_wait, max_wait or wait_function defined, copy it from Locust
if not self.min_wait:
@@ -246,9 +240,21 @@ class TaskSet(object, metaclass=TaskSetMeta):
if not self.wait_function:
self.wait_function = self.user.wait_function
+
+
+ @property
+ def user(self):
+ """:py:class:`User <locust.User>` instance that this TaskSet was created by"""
+ return self._user
+
+ @property
+ def parent(self):
+ """Parent TaskSet instance of this TaskSet (or :py:class:`User <locust.User>` if this is not a nested TaskSet)"""
+ return self._parent
+
def on_start(self):
"""
- Called when a User starts executing (enters) this TaskSet
+ Called when a User starts executing this TaskSet
"""
pass
@@ -392,8 +398,7 @@ class TaskSet(object, metaclass=TaskSetMeta):
@property
def client(self):
"""
- Reference to the :py:attr:`client <locust.User.client>` attribute of the root
- User instance.
+ Shortcut to the client :py:attr:`client <locust.User.client>` attribute of this TaskSet's :py:class:`User <locust.User>`
"""
return self.user.client
|
locustio/locust
|
06266b265c295d01231a756bb16a0baebc86392c
|
diff --git a/locust/test/test_locust_class.py b/locust/test/test_locust_class.py
index 92960c0a..dbf58d43 100644
--- a/locust/test/test_locust_class.py
+++ b/locust/test/test_locust_class.py
@@ -383,6 +383,25 @@ class TestTaskSet(LocustTestCase):
self.assertTrue(isinstance(parents["sub"], RootTaskSet))
self.assertTrue(isinstance(parents["subsub"], SubTaskSet))
+ def test_user_is_read_only(self):
+ class MyTaskSet(TaskSet):
+ raised_attribute_error = False
+ @task
+ def overwrite_user(self):
+ try:
+ self.user = "whatever"
+ except AttributeError:
+ MyTaskSet.raised_attribute_error = True
+ raise StopUser()
+
+ class MyUser(User):
+ wait_time = constant(0)
+ host = ""
+ tasks = [MyTaskSet]
+
+ MyUser(Environment()).run()
+ self.assertTrue(MyTaskSet.raised_attribute_error)
+
class TestLocustClass(LocustTestCase):
def test_locust_wait(self):
|
SequentialTaskSet is broken when using local class members (headless mode)
### Describe the bug
Trying to execute a SequentialTaskSet in headless mode, store result's from first task, and use it in second task
### Expected behavior
Execute first task, retrieve json, store it as dict in SequentialTaskSet object, execute second task and access the variable.
### Actual behavior
Execute first task, json is retrieved, stored. In second task upon trying to access the stored data, I get following exceptions:
1.) AttributeError: 'dict' object has no attribute '_state'
2.) AttributeError: 'dict' object has no attribute 'environment'
### Steps to reproduce
Start the provided mockserver.py (Flask) on localhost:5000, then run the provided locust file (sequence_test.py) with the command lines specified below.
### Environment
- OS:
Ubuntu 18.04.4 LTS running in WSL
Linux PSALA059 4.4.0-18990-Microsoft #1-Microsoft Sat Sep 21 17:04:00 PST 2019 x86_64 x86_64 x86_64 GNU/Linux
- Python version:
3.6.9
- Locust version:
locust 1.0.1
- Locust command line that you ran:
run master locust:
` locust -f sequence_test.py --host=http://localhost:5000 --master --expect-workers=1 --headless -u 1 -r 1 -t 1h`
run worker locust:
`locust -f sequence_test.py --worker --master-host=127.0.0.1 --host=http://localhost:5000`
- Locust file contents (anonymized if necessary):
[locust-1.0.1-sequence-bug.zip](https://github.com/locustio/locust/files/4641968/locust-1.0.1-sequence-bug.zip)
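The zip contents are not reproduced here; the following is a minimal sketch of the pattern this report points at, assuming the stored attribute was named `user` (endpoints and class names are hypothetical). The accompanying patch makes `TaskSet.user` a read-only property precisely so that such an assignment fails with a clear AttributeError instead of corrupting locust's internal state:
```python
from locust import HttpUser, SequentialTaskSet, task, constant

class Sequence(SequentialTaskSet):
    @task
    def first(self):
        # storing the parsed response under the name "user" silently
        # overwrites the TaskSet's built-in reference to its User instance
        self.user = self.client.get("/data").json()

    @task
    def second(self):
        # locust internals now find a dict where a User is expected, e.g.
        # AttributeError: 'dict' object has no attribute '_state'
        self.client.post("/submit", json=self.user)

class MyUser(HttpUser):
    host = "http://localhost:5000"
    wait_time = constant(1)
    tasks = [Sequence]
```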
|
0.0
|
06266b265c295d01231a756bb16a0baebc86392c
|
[
"locust/test/test_locust_class.py::TestTaskSet::test_user_is_read_only"
] |
[
"locust/test/test_locust_class.py::TestTaskSet::test_on_start",
"locust/test/test_locust_class.py::TestTaskSet::test_on_start_interrupt",
"locust/test/test_locust_class.py::TestTaskSet::test_on_stop_interrupt",
"locust/test/test_locust_class.py::TestTaskSet::test_on_stop_interrupt_reschedule",
"locust/test/test_locust_class.py::TestTaskSet::test_on_stop_when_locust_stops",
"locust/test/test_locust_class.py::TestTaskSet::test_parent_attribute",
"locust/test/test_locust_class.py::TestTaskSet::test_schedule_task",
"locust/test/test_locust_class.py::TestTaskSet::test_schedule_task_bound_method",
"locust/test/test_locust_class.py::TestTaskSet::test_sub_taskset",
"locust/test/test_locust_class.py::TestTaskSet::test_sub_taskset_tasks_decorator",
"locust/test/test_locust_class.py::TestTaskSet::test_task_decorator_on_taskset",
"locust/test/test_locust_class.py::TestTaskSet::test_task_decorator_ratio",
"locust/test/test_locust_class.py::TestTaskSet::test_task_decorator_with_or_without_argument",
"locust/test/test_locust_class.py::TestTaskSet::test_task_ratio",
"locust/test/test_locust_class.py::TestTaskSet::test_tasks_missing_gives_user_friendly_exception",
"locust/test/test_locust_class.py::TestTaskSet::test_tasks_on_abstract_locust",
"locust/test/test_locust_class.py::TestTaskSet::test_tasks_on_locust",
"locust/test/test_locust_class.py::TestTaskSet::test_taskset_inheritance",
"locust/test/test_locust_class.py::TestTaskSet::test_taskset_on_abstract_locust",
"locust/test/test_locust_class.py::TestTaskSet::test_wait_function",
"locust/test/test_locust_class.py::TestLocustClass::test_deprecated_locust_class",
"locust/test/test_locust_class.py::TestLocustClass::test_locust_forced_stop",
"locust/test/test_locust_class.py::TestLocustClass::test_locust_graceful_stop",
"locust/test/test_locust_class.py::TestLocustClass::test_locust_on_start",
"locust/test/test_locust_class.py::TestLocustClass::test_locust_on_stop",
"locust/test/test_locust_class.py::TestLocustClass::test_locust_start",
"locust/test/test_locust_class.py::TestLocustClass::test_locust_wait",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_basic_auth",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_delete",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_get",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_get_absolute_url",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_head",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_post",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_put",
"locust/test/test_locust_class.py::TestWebLocustClass::test_client_request_headers",
"locust/test/test_locust_class.py::TestWebLocustClass::test_get_request",
"locust/test/test_locust_class.py::TestWebLocustClass::test_locust_client_error",
"locust/test/test_locust_class.py::TestWebLocustClass::test_log_request_name_argument",
"locust/test/test_locust_class.py::TestWebLocustClass::test_redirect_url_original_path_as_name",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response_allow_404",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response_connection_error_fail",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response_connection_error_success",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response_http_fail",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response_http_manual_fail",
"locust/test/test_locust_class.py::TestCatchResponse::test_catch_response_http_manual_success",
"locust/test/test_locust_class.py::TestCatchResponse::test_interrupt_taskset_with_catch_response"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-18 19:08:01+00:00
|
mit
| 3,628 |
|
locustio__locust-992
|
diff --git a/locust/main.py b/locust/main.py
index fe3186d8..a42308b4 100644
--- a/locust/main.py
+++ b/locust/main.py
@@ -282,6 +282,16 @@ def parse_options():
help="show program's version number and exit"
)
+ # set the exit code to post on errors
+ parser.add_option(
+ '--exit-code-on-error',
+ action='store',
+ type="int",
+ dest='exit_code_on_error',
+ default=1,
+ help="sets the exit code to post on error"
+ )
+
# Finalize
# Return three-tuple of parser + the output from parse_args (opt obj, args)
opts, args = parser.parse_args()
@@ -554,7 +564,7 @@ def main():
main_greenlet.join()
code = 0
if len(runners.locust_runner.errors):
- code = 1
+ code = options.exit_code_on_error
shutdown(code=code)
except KeyboardInterrupt as e:
shutdown(0)
diff --git a/locust/stats.py b/locust/stats.py
index 4ba20644..7ec6a9b7 100644
--- a/locust/stats.py
+++ b/locust/stats.py
@@ -121,6 +121,7 @@ class RequestStats(object):
"""
self.start_time = time.time()
self.total.reset()
+ self.errors = {}
for r in six.itervalues(self.entries):
r.reset()
@@ -276,7 +277,7 @@ class StatsEntry(object):
@property
def fail_ratio(self):
try:
- return float(self.num_failures) / (self.num_requests + self.num_failures)
+ return float(self.num_failures) / self.num_requests
except ZeroDivisionError:
if self.num_failures > 0:
return 1.0
@@ -734,3 +735,25 @@ def distribution_csv():
rows.append('"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"' % s.name)
return "\n".join(rows)
+
+def failures_csv():
+ """"Return the contents of the 'failures' tab as a CSV."""
+ from . import runners
+
+ rows = [
+ ",".join((
+ '"Method"',
+ '"Name"',
+ '"Error"',
+ '"Occurences"',
+ ))
+ ]
+
+ for s in sort_stats(runners.locust_runner.stats.errors):
+ rows.append('"%s","%s","%s",%i' % (
+ s.method,
+ s.name,
+ s.error,
+ s.occurences,
+ ))
+ return "\n".join(rows)
\ No newline at end of file
diff --git a/locust/templates/index.html b/locust/templates/index.html
index ce4e6b72..9623eec7 100644
--- a/locust/templates/index.html
+++ b/locust/templates/index.html
@@ -148,6 +148,7 @@
<div style="margin-top:20px;">
<a href="./stats/requests/csv">Download request statistics CSV</a><br>
<a href="./stats/distribution/csv">Download response time distribution CSV</a><br>
+ <a href="./stats/failures/csv">Download failures CSV</a><br>
<a href="./exceptions/csv">Download exceptions CSV</a>
</div>
</div>
diff --git a/locust/web.py b/locust/web.py
index aa31599e..97c87f1a 100644
--- a/locust/web.py
+++ b/locust/web.py
@@ -17,7 +17,7 @@ from six.moves import StringIO, xrange
from . import runners
from .runners import MasterLocustRunner
-from .stats import distribution_csv, median_from_dict, requests_csv, sort_stats
+from .stats import distribution_csv, failures_csv, median_from_dict, requests_csv, sort_stats
from .util.cache import memoize
logger = logging.getLogger(__name__)
@@ -69,6 +69,7 @@ def stop():
@app.route("/stats/reset")
def reset_stats():
runners.locust_runner.stats.reset_all()
+ runners.locust_runner.exceptions = {}
return "ok"
@app.route("/stats/requests/csv")
@@ -89,6 +90,15 @@ def distribution_stats_csv():
response.headers["Content-disposition"] = disposition
return response
[email protected]("/stats/failures/csv")
+def failures_stats_csv():
+ response = make_response(failures_csv())
+ file_name = "failures_{0}.csv".format(time())
+ disposition = "attachment;filename={0}".format(file_name)
+ response.headers["Content-type"] = "text/csv"
+ response.headers["Content-disposition"] = disposition
+ return response
+
@app.route('/stats/requests')
@memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True)
def request_stats():
|
locustio/locust
|
8b17c286034178ba31c6d48b93f29202514f5659
|
diff --git a/locust/test/test_stats.py b/locust/test/test_stats.py
index 4b45aeed..fcfee1cc 100644
--- a/locust/test/test_stats.py
+++ b/locust/test/test_stats.py
@@ -252,6 +252,18 @@ class TestStatsEntryResponseTimesCache(unittest.TestCase):
))
+class TestStatsEntry(unittest.TestCase):
+ def setUp(self, *args, **kwargs):
+ super(TestStatsEntry, self).setUp(*args, **kwargs)
+ self.stats = RequestStats()
+
+ def test_fail_ratio_with_failures(self):
+ s = StatsEntry(self.stats, "/", "GET")
+ s.num_requests = 10
+ s.num_failures = 5
+ self.assertAlmostEqual(s.fail_ratio, 0.5)
+
+
class TestRequestStatsWithWebserver(WebserverTestCase):
def test_request_stats_content_length(self):
class MyLocust(HttpLocust):
diff --git a/locust/test/test_web.py b/locust/test/test_web.py
index ed3f07eb..14a02d7c 100644
--- a/locust/test/test_web.py
+++ b/locust/test/test_web.py
@@ -95,12 +95,40 @@ class TestWebUI(LocustTestCase):
# verify that the 95%, 98%, 99% and 100% percentiles are 1200
for value in total_cols[-4:]:
self.assertEqual('1200', value)
+
+ def test_failure_stats_csv(self):
+ stats.global_stats.log_error("GET", "/", Exception("Error1337"))
+ response = requests.get("http://127.0.0.1:%i/stats/failures/csv" % self.web_port)
+ self.assertEqual(200, response.status_code)
def test_request_stats_with_errors(self):
stats.global_stats.log_error("GET", "/", Exception("Error1337"))
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
self.assertIn("Error1337", response.text)
+
+ def test_reset_stats(self):
+ try:
+ raise Exception(u"A cool test exception")
+ except Exception as e:
+ tb = sys.exc_info()[2]
+ runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
+ runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
+
+ stats.global_stats.log_request("GET", "/test", 120, 5612)
+ stats.global_stats.log_error("GET", "/", Exception("Error1337"))
+
+ response = requests.get("http://127.0.0.1:%i/stats/reset" % self.web_port)
+
+ self.assertEqual(200, response.status_code)
+
+ self.assertEqual({}, stats.global_stats.errors)
+ self.assertEqual({}, runners.locust_runner.exceptions)
+
+ self.assertEqual(0, stats.global_stats.get("/", "GET").num_requests)
+ self.assertEqual(0, stats.global_stats.get("/", "GET").num_failures)
+ self.assertEqual(0, stats.global_stats.get("/test", "GET").num_requests)
+ self.assertEqual(0, stats.global_stats.get("/test", "GET").num_failures)
def test_exceptions(self):
try:
|
Locust exits with 1 on timeouts / errors
Depending on how others are deploying this tool from the command line it may be a non-issue, but it's causing problems for us. Returning non-zero, with the same code we use for misconfigurations, when there was, say, one 500 response out of thousands is not subtle enough.
By the time we exit in this case, the entire test has already completed and the requests were sent. Whatever downstream metrics systems exist will have valid data, etc. If we retry based on a non-zero exit code, we run the test twice. Users of the tool should probably be doing something more intelligent downstream than checking for at least one error to determine whether the test was a failure. So why exit with an error code?
Two initial ideas that could improve the behavior (either or both would work):
1) Use a return code that's unique to this kind of "failure but not really", so it can be handled and ignored / logged with the correct criticality
2) Add a new command line parameter that is a threshold for failure (percent using stats.fail_ratio and/or absolute compared against num_requests)
Is this bothering anyone else?
([ref](https://github.com/locustio/locust/blob/v0.8a1/locust/main.py#L443))
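For illustration, a self-contained sketch of idea 1 (the function name and numbers are hypothetical; the patch above realises this as an `--exit-code-on-error` option):
```python
import sys

def compute_exit_code(num_errors, exit_code_on_error=1):
    """Hypothetical helper mirroring the patched shutdown logic: exit 0 on a
    clean run, otherwise a configurable code, so callers can distinguish
    "test ran but saw failures" from genuine misconfiguration."""
    return 0 if num_errors == 0 else exit_code_on_error

if __name__ == "__main__":
    # e.g. treat request errors as non-fatal by choosing code 0
    sys.exit(compute_exit_code(num_errors=3, exit_code_on_error=0))
```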
|
0.0
|
8b17c286034178ba31c6d48b93f29202514f5659
|
[
"locust/test/test_stats.py::TestStatsEntry::test_fail_ratio_with_failures",
"locust/test/test_web.py::TestWebUI::test_failure_stats_csv",
"locust/test/test_web.py::TestWebUI::test_reset_stats"
] |
[
"locust/test/test_stats.py::TestRequestStats::test_aggregation",
"locust/test/test_stats.py::TestRequestStats::test_aggregation_with_rounding",
"locust/test/test_stats.py::TestRequestStats::test_avg",
"locust/test/test_stats.py::TestRequestStats::test_current_rps",
"locust/test/test_stats.py::TestRequestStats::test_error_grouping",
"locust/test/test_stats.py::TestRequestStats::test_error_grouping_errors_with_memory_addresses",
"locust/test/test_stats.py::TestRequestStats::test_median",
"locust/test/test_stats.py::TestRequestStats::test_num_reqs_fails",
"locust/test/test_stats.py::TestRequestStats::test_percentile",
"locust/test/test_stats.py::TestRequestStats::test_percentile_rounded_down",
"locust/test/test_stats.py::TestRequestStats::test_percentile_rounded_up",
"locust/test/test_stats.py::TestRequestStats::test_reset",
"locust/test/test_stats.py::TestRequestStats::test_reset_min_response_time",
"locust/test/test_stats.py::TestRequestStats::test_total_rps",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_diff_response_times_dicts",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_get_current_response_time_percentile",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_latest_total_response_times_pruned",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_response_times_cached",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_response_times_not_cached_if_not_enabled",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_connection_error",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_content_length",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_named_endpoint",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_no_content_length_streaming",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_put",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_query_variables",
"locust/test/test_stats.py::TestInspectLocust::test_get_task_ratio_dict_relative",
"locust/test/test_stats.py::TestInspectLocust::test_get_task_ratio_dict_total",
"locust/test/test_web.py::TestWebUI::test_distribution_stats_csv",
"locust/test/test_web.py::TestWebUI::test_exceptions",
"locust/test/test_web.py::TestWebUI::test_exceptions_csv",
"locust/test/test_web.py::TestWebUI::test_index",
"locust/test/test_web.py::TestWebUI::test_request_stats_csv",
"locust/test/test_web.py::TestWebUI::test_request_stats_with_errors",
"locust/test/test_web.py::TestWebUI::test_stats",
"locust/test/test_web.py::TestWebUI::test_stats_cache",
"locust/test/test_web.py::TestWebUI::test_stats_no_data"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-06 17:32:18+00:00
|
mit
| 3,629 |
|
locustio__locust-994
|
diff --git a/locust/stats.py b/locust/stats.py
index 4ba20644..3feacc05 100644
--- a/locust/stats.py
+++ b/locust/stats.py
@@ -276,7 +276,7 @@ class StatsEntry(object):
@property
def fail_ratio(self):
try:
- return float(self.num_failures) / (self.num_requests + self.num_failures)
+ return float(self.num_failures) / self.num_requests
except ZeroDivisionError:
if self.num_failures > 0:
return 1.0
|
locustio/locust
|
8b17c286034178ba31c6d48b93f29202514f5659
|
diff --git a/locust/test/test_stats.py b/locust/test/test_stats.py
index 4b45aeed..fcfee1cc 100644
--- a/locust/test/test_stats.py
+++ b/locust/test/test_stats.py
@@ -252,6 +252,18 @@ class TestStatsEntryResponseTimesCache(unittest.TestCase):
))
+class TestStatsEntry(unittest.TestCase):
+ def setUp(self, *args, **kwargs):
+ super(TestStatsEntry, self).setUp(*args, **kwargs)
+ self.stats = RequestStats()
+
+ def test_fail_ratio_with_failures(self):
+ s = StatsEntry(self.stats, "/", "GET")
+ s.num_requests = 10
+ s.num_failures = 5
+ self.assertAlmostEqual(s.fail_ratio, 0.5)
+
+
class TestRequestStatsWithWebserver(WebserverTestCase):
def test_request_stats_content_length(self):
class MyLocust(HttpLocust):
|
Fail ratio calculation for individual requests is incorrect
### Description of issue
The fail ratio for each individual request is incorrect, and doesn't match the total fail ratio.
### Expected behavior
The fail ratio is correct.
### Actual behavior
The fail ratio is incorrect. For example, if 10 out of 10 requests fail, I'd expect the ratio to be 100%, but instead it is 50%.
```
Name # reqs # fails Avg Min Max | Median req/s
--------------------------------------------------------------------------------------------------------------------------------------------
GET / 10 10(50.00%) 2014 2009 2021 | 2000 0.75
--------------------------------------------------------------------------------------------------------------------------------------------
Total 10 10(100.00%) 0.75
```
### Environment settings
- OS: Windows/Linux
- Python version: 3.6
- Locust version: 0.11.0
### Steps to reproduce (for bug reports)
Create a trivial locustfile.py:
```python
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
@task
def index(self):
self.client.get("/")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
```
Run it against a host that doesn't have a web server running at all:
`locust --host http://localhost --no-web --clients=10 --hatch-rate=2 --run-time=30`
All of the "GET /" requests fail, but the stats (as shown above) have the failure rate at 50% rather than 100%.
I think the problem is at [stats.py:279](https://github.com/locustio/locust/blob/8b17c286034178ba31c6d48b93f29202514f5659/locust/stats.py#L279):
```python
return float(self.num_failures) / (self.num_requests + self.num_failures)
```
It seems like it ought to be:
```python
return float(self.num_failures) / (self.num_requests)
```
Maybe at some point `self.num_requests` was only the count of successful requests?
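A quick worked example of both formulas for the 10-out-of-10-failures case above:
```python
num_requests = 10  # failed requests are included in num_requests
num_failures = 10

old_ratio = float(num_failures) / (num_requests + num_failures)  # 10 / 20 = 0.5
new_ratio = float(num_failures) / num_requests                   # 10 / 10 = 1.0
print(old_ratio, new_ratio)  # 0.5 1.0
```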
|
0.0
|
8b17c286034178ba31c6d48b93f29202514f5659
|
[
"locust/test/test_stats.py::TestStatsEntry::test_fail_ratio_with_failures"
] |
[
"locust/test/test_stats.py::TestRequestStats::test_aggregation",
"locust/test/test_stats.py::TestRequestStats::test_aggregation_with_rounding",
"locust/test/test_stats.py::TestRequestStats::test_avg",
"locust/test/test_stats.py::TestRequestStats::test_current_rps",
"locust/test/test_stats.py::TestRequestStats::test_error_grouping",
"locust/test/test_stats.py::TestRequestStats::test_error_grouping_errors_with_memory_addresses",
"locust/test/test_stats.py::TestRequestStats::test_median",
"locust/test/test_stats.py::TestRequestStats::test_num_reqs_fails",
"locust/test/test_stats.py::TestRequestStats::test_percentile",
"locust/test/test_stats.py::TestRequestStats::test_percentile_rounded_down",
"locust/test/test_stats.py::TestRequestStats::test_percentile_rounded_up",
"locust/test/test_stats.py::TestRequestStats::test_reset",
"locust/test/test_stats.py::TestRequestStats::test_reset_min_response_time",
"locust/test/test_stats.py::TestRequestStats::test_total_rps",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_diff_response_times_dicts",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_get_current_response_time_percentile",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_latest_total_response_times_pruned",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_response_times_cached",
"locust/test/test_stats.py::TestStatsEntryResponseTimesCache::test_response_times_not_cached_if_not_enabled",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_connection_error",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_content_length",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_named_endpoint",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_no_content_length_streaming",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_put",
"locust/test/test_stats.py::TestRequestStatsWithWebserver::test_request_stats_query_variables",
"locust/test/test_stats.py::TestInspectLocust::test_get_task_ratio_dict_relative",
"locust/test/test_stats.py::TestInspectLocust::test_get_task_ratio_dict_total"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-07 03:23:20+00:00
|
mit
| 3,630 |
|
log2timeline__dftimewolf-533
|
diff --git a/dftimewolf/lib/processors/gcp_logging_timesketch.py b/dftimewolf/lib/processors/gcp_logging_timesketch.py
index 026bf42..937718a 100644
--- a/dftimewolf/lib/processors/gcp_logging_timesketch.py
+++ b/dftimewolf/lib/processors/gcp_logging_timesketch.py
@@ -16,6 +16,8 @@ if TYPE_CHECKING:
class GCPLoggingTimesketch(BaseModule):
"""Transforms Google Cloud Platform logs for Timesketch."""
+ DATA_TYPE = 'gcp:log:json'
+
def __init__(self,
state: "state.DFTimewolfState",
name: Optional[str]=None,
@@ -44,7 +46,8 @@ class GCPLoggingTimesketch(BaseModule):
log_record = json.loads(log_line)
# Metadata about how the record was obtained.
- timesketch_record = {'query': query, 'project_name': project_name}
+ timesketch_record = {'query': query, 'project_name': project_name,
+ 'data_type': self.DATA_TYPE}
# Timestamp related fields.
timestamp = log_record.get('timestamp', None)
|
log2timeline/dftimewolf
|
00474d4601e00094c136bb218f926d2a16433352
|
diff --git a/tests/lib/processors/gcp_logging_timesketch.py b/tests/lib/processors/gcp_logging_timesketch.py
index ac4df0c..a1cd69d 100644
--- a/tests/lib/processors/gcp_logging_timesketch.py
+++ b/tests/lib/processors/gcp_logging_timesketch.py
@@ -73,6 +73,8 @@ class GCPLoggingTimesketchTest(unittest.TestCase):
'test_query',
'project_name':
'test_project',
+ 'data_type':
+ 'gcp:log:json',
'datetime':
'2019-06-06T09:00:41.797000Z',
'timestamp_desc':
@@ -202,6 +204,8 @@ class GCPLoggingTimesketchTest(unittest.TestCase):
'test_query',
'project_name':
'test_project',
+ 'data_type':
+ 'gcp:log:json',
'datetime':
'2019-06-06T09:00:27.066000Z',
'timestamp_desc':
@@ -295,6 +299,8 @@ class GCPLoggingTimesketchTest(unittest.TestCase):
'test_query',
'project_name':
'test_project',
+ 'data_type':
+ 'gcp:log:json',
'datetime':
'2019-06-06T09:29:04.499000Z',
'timestamp_desc':
@@ -425,6 +431,8 @@ class GCPLoggingTimesketchTest(unittest.TestCase):
'test_query',
'project_name':
'test_project',
+ 'data_type':
+ 'gcp:log:json',
'datetime':
'2020-06-16T05:09:57.427874505Z',
'timestamp_desc':
@@ -516,6 +524,7 @@ class GCPLoggingTimesketchTest(unittest.TestCase):
'message': 'line 470: /etc/selinux/config: Permission denied',
'project_name': 'test_project',
'query': 'test_query',
+ 'data_type': 'gcp:log:json',
'resource_label_cluster_name': 'cluster-ca8b',
'resource_label_cluster_uuid': '44444-444444-444-4444-4444',
'resource_label_project_id': 'metastore-playground',
|
GCPLoggingTimesketch set data_type gcp:log:json
Timesketch events created using GCPLoggingTimesketch don't have a data_type set, making it hard to filter specifically on GCP logs in Timesketch.
The new Timesketch GCP analyser relies on a data_type of "gcp:log:json" as set by the GCP logging Plaso parser. GCPLoggingTimesketch should also set this data_type for emitted events.
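For illustration, a condensed sketch of the transformation with the new field (field names come from the patch above; the function boundary is an assumption):
```python
import json

DATA_TYPE = 'gcp:log:json'  # same data_type the Plaso GCP logging parser sets

def to_timesketch_record(log_line, query, project_name):
    """Tag each transformed GCP log record with a data_type so the
    Timesketch GCP analyser can filter on gcp:log:json."""
    log_record = json.loads(log_line)
    return {
        'query': query,
        'project_name': project_name,
        'data_type': DATA_TYPE,
        'datetime': log_record.get('timestamp'),
    }
```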
|
0.0
|
00474d4601e00094c136bb218f926d2a16433352
|
[
"tests/lib/processors/gcp_logging_timesketch.py::GCPLoggingTimesketchTest::testDataProcYarn",
"tests/lib/processors/gcp_logging_timesketch.py::GCPLoggingTimesketchTest::testGCECreateLog",
"tests/lib/processors/gcp_logging_timesketch.py::GCPLoggingTimesketchTest::testGCEFirewallLog",
"tests/lib/processors/gcp_logging_timesketch.py::GCPLoggingTimesketchTest::testGCSCreateLog"
] |
[
"tests/lib/processors/gcp_logging_timesketch.py::GCPLoggingTimesketchTest::testInitialization"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-02-15 00:19:03+00:00
|
apache-2.0
| 3,631 |
|
loganasherjones__yapconf-18
|
diff --git a/docs/usage.rst b/docs/usage.rst
index 32a719f..cc0a303 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -129,6 +129,9 @@ set an environment prefix on the ``YapconfSpec`` item via the ``env_prefix``:
config.foo # returns 'namespaced_value'
+.. note:: When using an ``env_name`` with ``env_prefix`` the ``env_prefix`` will still be applied
+ to the name you provided. If you want to avoid this behavior, set the ``apply_env_prefix`` to ``False``.
+
CLI Support
-----------
Yapconf has some great support for adding your configuration items as command-line arguments by utilizing
@@ -329,6 +332,8 @@ For each item in a specification, you can set any of these keys:
+-------------------+------------------+----------------------------------------------------------------------------------------------------------------+
| format_cli | ``True`` | A flag to determine if we should format the command-line arguments to be kebab-case. |
+-------------------+------------------+----------------------------------------------------------------------------------------------------------------+
+| apply_env_prefix | ``True`` | Apply the env_prefix even if the environment name was set manually. Ignored if ``format_env`` is ``False`` |
++-------------------+------------------+----------------------------------------------------------------------------------------------------------------+
.. _python-box: https://github.com/cdgriffith/Box
diff --git a/yapconf/items.py b/yapconf/items.py
index 37531a0..1bc5edd 100644
--- a/yapconf/items.py
+++ b/yapconf/items.py
@@ -75,6 +75,7 @@ def _generate_item(name, item_dict, env_prefix,
init_args['env_name'] = item_dict.get('env_name', None)
init_args['format_cli'] = item_dict.get('format_cli', True)
init_args['format_env'] = item_dict.get('format_env', True)
+ init_args['apply_env_prefix'] = item_dict.get('apply_env_prefix', True)
init_args['env_prefix'] = env_prefix
if parent_names:
@@ -139,6 +140,9 @@ class YapconfItem(object):
format_env: A flag to determine if environment variables will be all
upper-case SNAKE_CASE.
env_prefix: The env_prefix to apply to the environment name.
+ apply_env_prefix: Apply the env_prefix even if the environment name
+ was set manually. Setting format_env to false will override this
+ behavior.
Raises:
YapconfItemError: If any of the information given during
@@ -151,7 +155,7 @@ class YapconfItem(object):
cli_choices=None, previous_names=None, previous_defaults=None,
children=None, cli_expose=True, separator='.', prefix=None,
bootstrap=False, format_cli=True, format_env=True,
- env_prefix=None):
+ env_prefix=None, apply_env_prefix=True):
self.name = name
self.item_type = item_type
@@ -171,6 +175,7 @@ class YapconfItem(object):
self.format_env = format_env
self.format_cli = format_cli
self.env_prefix = env_prefix or ''
+ self.apply_env_prefix = apply_env_prefix
if self.prefix:
self.fq_name = self.separator.join([self.prefix, self.name])
@@ -330,6 +335,8 @@ class YapconfItem(object):
def _setup_env_name(self):
if self.env_name is not None:
+ if self.apply_env_prefix:
+ self.env_name = self.env_prefix + self.env_name
return
if self.format_env:
@@ -507,12 +514,12 @@ class YapconfBoolItem(YapconfItem):
cli_choices=None, previous_names=None, previous_defaults=None,
children=None, cli_expose=True, separator='.', prefix=None,
bootstrap=False, format_cli=True, format_env=True,
- env_prefix=None):
+ env_prefix=None, apply_env_prefix=True):
super(YapconfBoolItem, self).__init__(
name, item_type, default, env_name, description, required,
cli_short_name, cli_choices, previous_names, previous_defaults,
children, cli_expose, separator, prefix, bootstrap, format_cli,
- format_env, env_prefix)
+ format_env, env_prefix, apply_env_prefix)
def add_argument(self, parser, bootstrap=False):
"""Add boolean item as an argument to the given parser.
@@ -622,13 +629,13 @@ class YapconfListItem(YapconfItem):
cli_choices=None, previous_names=None, previous_defaults=None,
children=None, cli_expose=True, separator='.', prefix=None,
bootstrap=False, format_cli=True, format_env=True,
- env_prefix=None):
+ env_prefix=None, apply_env_prefix=True):
super(YapconfListItem, self).__init__(
name, item_type, default, env_name, description, required,
cli_short_name, cli_choices, previous_names, previous_defaults,
children, cli_expose, separator, prefix, bootstrap, format_cli,
- format_env, env_prefix)
+ format_env, env_prefix, apply_env_prefix)
if len(self.children) != 1:
raise YapconfListItemError("List Items can only have a "
@@ -754,13 +761,13 @@ class YapconfDictItem(YapconfItem):
cli_choices=None, previous_names=None, previous_defaults=None,
children=None, cli_expose=True, separator='.', prefix=None,
bootstrap=False, format_cli=True, format_env=True,
- env_prefix=None):
+ env_prefix=None, apply_env_prefix=True):
super(YapconfDictItem, self).__init__(
name, item_type, default, env_name, description, required,
cli_short_name, cli_choices, previous_names, previous_defaults,
children, cli_expose, separator, prefix, bootstrap, format_cli,
- format_env, env_prefix)
+ format_env, env_prefix, apply_env_prefix)
if len(self.children) < 1:
raise YapconfDictItemError('Dict item {0} must have children'
|
loganasherjones/yapconf
|
1ef758bf1a70916a3f0e4a54b7cb005d426953c3
|
diff --git a/tests/spec_test.py b/tests/spec_test.py
index dddf583..85eb866 100644
--- a/tests/spec_test.py
+++ b/tests/spec_test.py
@@ -347,8 +347,6 @@ def test_load_config_yaml_not_supported(basic_spec):
def test_load_config_nested_from_environment(spec_with_dicts):
os.environ['FOO_BAR_BAZ'] = 'baz_value'
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
- print(spec_with_dicts.get_item('foo').children['bar'].children['baz'].env_name)
config = spec_with_dicts.load_config(
{
'database': {'name': 'dbname', 'host': 'dbhost', 'port': 1234},
@@ -594,3 +592,22 @@ def test_real_world_migrations(real_world_spec, current, expected,
always_update=always_update)
assert new_config == expected
+
+
[email protected]('env_name,apply_prefix,env_prefix,key', [
+ ('BG_HOST', True, 'BG_', 'BG_BG_HOST'),
+ ('BG_HOST', False, 'BG_', 'BG_HOST'),
+ ('HOST', True, 'BG_', 'BG_HOST'),
+])
+def test_env_names_with_prefixes(env_name, apply_prefix, env_prefix, key):
+ spec = YapconfSpec(
+ {
+ 'bg_host': {
+ 'type': 'str',
+ 'env_name': env_name,
+ 'apply_env_prefix': apply_prefix,
+ }
+ }, env_prefix=env_prefix)
+
+ config = spec.load_config(('ENVIRONMENT', {key: 'host_value'}))
+ assert config.bg_host == 'host_value'
|
Environment names with prefix
* Yapconf version: 0.1.1
* Python version: 3.6.4
* Operating System: Centos 7
I was trying to use a config option with the name `bg_host` and a specification with the environment prefix `BG_`.
I expected to need to set the environment variable named `BG_BG_HOST` for yapconf to work correctly. That was successful.
That looks a little weird to me, so I set the `env_name` option in the spec to `HOST`, expecting that then setting the `BG_HOST` environment variable would work, but it didn't.
Looking through the code it looks like if you specify an alternate environment name the prefix doesn't get applied to it. I can see certain situations where that would be preferable, but in my case I'd like the prefix to still be applied to the new name.
I worked around the issue by setting the `env_name` to `BG_HOST` and that worked like I wanted, but that requires knowing at spec creation time what prefix I'm planning on applying.
I kind of expected the prefix to be applied to the new name, so I'm wondering if that should be the default behavior. Perhaps with a little flag like `apply_prefix` that could be set to False if you didn't want that behavior?
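A short usage sketch of the suggested flag, mirroring the tests in the patch above:
```python
from yapconf import YapconfSpec

# with the fix, the env_prefix is applied on top of a manually set env_name,
# so env_name='HOST' with env_prefix='BG_' reads BG_HOST
# (set apply_env_prefix=False to keep the old behaviour)
spec = YapconfSpec(
    {'bg_host': {'type': 'str', 'env_name': 'HOST'}},
    env_prefix='BG_',
)
config = spec.load_config(('ENVIRONMENT', {'BG_HOST': 'host_value'}))
print(config.bg_host)  # -> 'host_value'
```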
|
0.0
|
1ef758bf1a70916a3f0e4a54b7cb005d426953c3
|
[
"tests/spec_test.py::test_env_names_with_prefixes[HOST-True-BG_-BG_HOST]",
"tests/spec_test.py::test_env_names_with_prefixes[BG_HOST-True-BG_-BG_BG_HOST]"
] |
[
"tests/spec_test.py::test_load_specification_from_file[json-{\"foo\":",
"tests/spec_test.py::test_load_bad_specification_from_file",
"tests/spec_test.py::test_update_defaults_bad_key",
"tests/spec_test.py::test_load_config_yaml_not_supported",
"tests/spec_test.py::test_load_config_real_world",
"tests/spec_test.py::test_real_world_migrations[current0-expected0-False-True]",
"tests/spec_test.py::test_load_config_invalid_override[override1]",
"tests/spec_test.py::test_update_defaults",
"tests/spec_test.py::test_invalid_specification[bad_data2]",
"tests/spec_test.py::test_load_list_config",
"tests/spec_test.py::test_load_config_nested_from_environment",
"tests/spec_test.py::test_migrate_config_file_update_previous_default",
"tests/spec_test.py::test_env_names_with_prefixes[BG_HOST-False-BG_-BG_HOST]",
"tests/spec_test.py::test_real_world_migrations[current1-expected1-False-True]",
"tests/spec_test.py::test_real_world_migrations[current2-expected2-False-True]",
"tests/spec_test.py::test_migrate_config_file_create_yaml",
"tests/spec_test.py::test_invalid_specification[bad_data0]",
"tests/spec_test.py::test_spec_bad_file_type",
"tests/spec_test.py::test_migrate_config_file_does_not_exist_create",
"tests/spec_test.py::test_invalid_specification[bad_data1]",
"tests/spec_test.py::test_migrate_config_file_no_changes",
"tests/spec_test.py::test_load_config_multi_part_dictionary",
"tests/spec_test.py::test_load_from_env",
"tests/spec_test.py::test_get_item",
"tests/spec_test.py::test_simple_load_config",
"tests/spec_test.py::test_load_config_invalid_override[override2]",
"tests/spec_test.py::test_migrate_config_file_does_not_exist_do_not_create",
"tests/spec_test.py::test_invalid_specification[bad_data3]",
"tests/spec_test.py::test_migrate_config_file_always_update",
"tests/spec_test.py::test_nested_load_config",
"tests/spec_test.py::test_load_config_fallbacks",
"tests/spec_test.py::test_load_config_invalid_override[override0]",
"tests/spec_test.py::test_real_world_migrations[current3-expected3-False-True]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-08 03:33:26+00:00
|
mit
| 3,632 |
|
logzio__logzio-python-handler-70
|
diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml
new file mode 100644
index 0000000..9b08f61
--- /dev/null
+++ b/.github/workflows/auto-release.yml
@@ -0,0 +1,27 @@
+name: Pypi Release
+on:
+ workflow_run:
+ workflows: ["CI Tests"]
+ branches: [master]
+ types:
+ - completed
+ # Allows you to run this workflow manually from the Actions tab
+jobs:
+ build-and-release:
+ if: ${{ github.event.workflow_run.conclusion == 'success' }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/[email protected]
+ with:
+ python-version: '3.x'
+ - name: "Install dependencies"
+ run: |
+ python3 -m pip install setuptools wheel twine
+ - name: "Build and uploads to PyPI"
+ run: |
+ python3 setup.py sdist bdist_wheel
+ python3 -m twine upload dist/*
+ env:
+ TWINE_USERNAME: __token__
+ TWINE_PASSWORD: ${{ secrets.PYPI_REPO_TOKEN }}
diff --git a/README.md b/README.md
index bad528b..282870b 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,7 @@ Travis CI will build this handler and test against:
- "3.6"
- "3.7"
- "3.8"
+ - "3.9"
We can't ensure compatibility to any other version, as we can't test it automatically.
@@ -76,7 +77,7 @@ format={"additional_field": "value"}
- Debug flag. Set to True, will print debug messages to stdout. (defaults to "False")
- Backup logs flag. Set to False, will disable the local backup of logs in case of failure. (defaults to "True")
- Network timeout, in seconds, int or float, for sending the logs to logz.io. (defaults to 10)
- - Retries number (retry_no), in seconds (defaults to 4).
+ - Retries number (retry_no, defaults to 4).
- Retry timeout (retry_timeout) in seconds (defaults to 2).
Please note, that you have to configure those parameters by this exact order.
@@ -213,15 +214,19 @@ LOGGING = {
Please note that if you are using `python 3.8` it is preferred to use the `logging.config.dictConfig` method, as mentioned in [python's documentation](https://docs.python.org/3/library/logging.config.html#configuration-file-format).
## Release Notes
+- 3.1.1
+ - Bug fixes (issue #68, exception message formatting)
+ - Added CI: Tests and Auto release
+
- 3.1.0
- Bug fixes
- Retry number and timeout is now configurable
-- 3.0.0
- - Deprecated `python2.7` & `python3.4`
- - Changed log levels on `_flush_queue()` method (@hilsenrat)
<details>
<summary markdown="span"> Expand to check old versions </summary>
+- 3.0.0
+ - Deprecated `python2.7` & `python3.4`
+ - Changed log levels on `_flush_queue()` method (@hilsenrat)
- 2.0.15
- Added flusher decorator for serverless platforms(@mcmasty)
diff --git a/logzio/handler.py b/logzio/handler.py
index 9fca847..e0a4b45 100644
--- a/logzio/handler.py
+++ b/logzio/handler.py
@@ -98,13 +98,15 @@ class LogzioHandler(logging.Handler):
if message.exc_info:
return_json['exception'] = self.format_exception(message.exc_info)
- # # We want to ignore default logging formatting on exceptions
- # # As we handle those differently directly into exception field
+ # # We want to ignore default logging formatting on exceptions
+ # # As we handle those differently directly into exception field
formatted_message = self.format(message)
+ # Exception with multiple fields, apply them to log json.
if isinstance(formatted_message, dict):
return_json.update(formatted_message)
- else:
+ # No exception, apply default formatted message
+ elif not message.exc_info:
return_json['message'] = formatted_message
return_json.update(self.extra_fields(message))
diff --git a/logzio/logger.py b/logzio/logger.py
index f120485..529efaa 100644
--- a/logzio/logger.py
+++ b/logzio/logger.py
@@ -7,7 +7,9 @@ def get_logger(debug):
def get_stdout_logger(debug):
- return __get_logger(debug, __name__ + '_stdout', logging.StreamHandler(sys.stdout))
+ stdout_logger = __get_logger(debug, __name__ + '_stdout', logging.StreamHandler(sys.stdout))
+ stdout_logger.propagate = False
+ return stdout_logger
def __get_logger(debug, name, handler=None):
diff --git a/logzio/sender.py b/logzio/sender.py
index ca73b73..5a7d16c 100644
--- a/logzio/sender.py
+++ b/logzio/sender.py
@@ -40,7 +40,6 @@ class LogzioSender:
self.token = token
self.url = '{}/?token={}'.format(url, token)
self.logs_drain_timeout = logs_drain_timeout
- self.logger = get_logger(debug)
self.stdout_logger = get_stdout_logger(debug)
self.backup_logs = backup_logs
self.network_timeout = network_timeout
@@ -57,7 +56,6 @@ class LogzioSender:
self._initialize_sending_thread()
def __del__(self):
- del self.logger
del self.stdout_logger
del self.backup_logs
del self.queue
@@ -93,7 +91,7 @@ class LogzioSender:
try:
self._flush_queue()
except Exception as e:
- self.logger.debug(
+ self.stdout_logger.debug(
'Unexpected exception while draining queue to Logz.io, '
'swallowing. Exception: %s', e)
@@ -165,7 +163,7 @@ class LogzioSender:
self.stdout_logger.error(
'Could not send logs to Logz.io after %s tries, '
'backing up to local file system', self.number_of_retries)
- backup_logs(logs_list, self.logger)
+ backup_logs(logs_list, self.stdout_logger)
del logs_list
diff --git a/setup.py b/setup.py
index fe0e999..0b6d2e1 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
from setuptools import setup, find_packages
setup(
name="logzio-python-handler",
- version='3.1.0',
+ version='3.1.1',
description="Logging handler to send logs to your Logz.io account with bulk SSL",
keywords="logging handler logz.io bulk https",
author="roiravhon",
diff --git a/tox.ini b/tox.ini
index 066a58c..9fd1b6d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
minversion = 1.7.2
-envlist = flake8, py3flake8, py35, py36, py37, py38, pypy, pypy3
+envlist = flake8, py3flake8, py35, py36, py37, py38, py39, pypy, pypy3
skip_missing_interpreters = true
[testenv]
@@ -13,11 +13,14 @@ passenv = CI TRAVIS TRAVIS_*
commands = pytest --cov-report term-missing --cov logzio tests -v
[testenv:flake8]
-basepython = python3.8
+basepython = python3.9
deps = flake8
commands = flake8 logzio
[testenv:py3flake8]
-basepython = python3.8
+basepython = python3.9
deps = flake8
commands = flake8 logzio
+
+[gh-actions]
+python = 3.9: py39
|
logzio/logzio-python-handler
|
ef65fd3b2fd64654f967b57671c1e1a4739b492c
|
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..62d9a53
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,14 @@
+name: CI Tests
+on: [push]
+jobs:
+ run-tests:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v1
+ - uses: actions/[email protected]
+ with:
+ python-version: '3.x'
+ - name: Install tox
+ run: pip install tox
+ - name: Run tox
+ run: python -m tox
\ No newline at end of file
diff --git a/tests/test_logzioHandler.py b/tests/test_logzioHandler.py
index 60ec3ec..e2ae4a1 100644
--- a/tests/test_logzioHandler.py
+++ b/tests/test_logzioHandler.py
@@ -159,7 +159,6 @@ class TestLogzioHandler(TestCase):
formatted_message["exception"] = formatted_message["exception"].replace(os.path.abspath(__file__), "")
formatted_message["exception"] = re.sub(r", line \d+", "", formatted_message["exception"])
- formatted_message["message"] = formatted_message["message"].replace(os.path.abspath(__file__), "")
self.assertDictEqual(
{
@@ -167,10 +166,8 @@ class TestLogzioHandler(TestCase):
'line_number': 10,
'log_level': 'NOTSET',
'logger': 'my-logger',
- 'message': f'exception test:\nTraceback (most recent call last):\n File "", line 142, in test_exc\n raise '
- 'ValueError("oops.")\nValueError: oops.',
- 'exception': 'Traceback (most recent call last):\n\n File "", in test_exc\n raise ValueError('
- '"oops.")\n\nValueError: oops.\n',
+ 'message': 'exception test:',
+ 'exception': 'Traceback (most recent call last):\n\n File "", in test_exc\n raise ValueError("oops.")\n\nValueError: oops.\n',
'path_name': 'handler_test.py',
'type': 'python'
},
|
Endless loop when no connection to Logz.io
I'm sending Python application logs to Logz.io. If there is no connection to the remote server, `logzio-python-handler` sits in an endless loop, retrying until the queue is empty (which it never is, because the logs can never be sent while there is no connection).
Is this desirable behaviour? I would imagine that there should be a limit on the number of retries. My application essentially hangs when closing, as it is waiting for the logs handler to flush.
```
Got exception while sending logs to Logz.io, Try (4/4). Message: HTTPSConnectionPool(host='listener-nl.logz.io', port=8071): Max retries exceeded with url: /?token=<<mytoken>>(Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000214D43E8FD0>: Failed to establish a new connection: [WinError 10061] No connection could be made because the target machine actively refused it'))
2022-01-19 15:45:01,623 - logzio.logger - WARNING - Got exception while sending logs to Logz.io, Try (4/4). Message: HTTPSConnectionPool(host='listener-nl.logz.io', port=8071): Max retries exceeded with url: /?token=<<mytoken>>(Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000214D43E8FD0>: Failed to establish a new connection: [WinError 10061] No connection could be made because the target machine actively refused it'))
Could not send logs to Logz.io after 4 tries, backing up to local file system
2022-01-19 15:45:17,640 - logzio.logger - ERROR - Could not send logs to Logz.io after 4 tries, backing up to local file system
Backing up your logs to logzio-failures-19012022-154517.txt
2022-01-19 15:45:17,642 - logzio.logger - INFO - Backing up your logs to logzio-failures-19012022-154517.txt
Got exception while sending logs to Logz.io, Try (1/4). Message: HTTPSConnectionPool(host='listener-nl.logz.io', port=8071): Max retries exceeded with url: /?token=<<mytoken>>(Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000214D3DC4BE0>: Failed to establish a new connection: [WinError 10061] No connection could be made because the target machine actively refused it'))
2022-01-19 15:45:19,741 - logzio.logger - WARNING - Got exception while sending logs to Logz.io, Try (1/4). Message: HTTPSConnectionPool(host='listener-nl.logz.io', port=8071): Max retries exceeded with url: /?token=<<mytoken>>(Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000214D3DC4BE0>: Failed to establish a new connection: [WinError 10061] No connection could be made because the target machine actively refused it'))
```
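A minimal sketch of the logger change from the patch above; the feedback-loop reading (the handler's own diagnostics propagating up to a logger that the handler itself serves) is an inference, not stated in the patch:
```python
import logging
import sys

def get_stdout_logger(debug):
    logger = logging.getLogger(__name__ + '_stdout')
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    logger.addHandler(logging.StreamHandler(sys.stdout))
    # don't propagate the handler's own diagnostics to ancestor loggers,
    # where a LogzioHandler could capture them and re-feed the send queue
    # (assumed to be the source of the endless retry loop)
    logger.propagate = False
    return logger
```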
|
0.0
|
ef65fd3b2fd64654f967b57671c1e1a4739b492c
|
[
"tests/test_logzioHandler.py::TestLogzioHandler::test_exc"
] |
[
"tests/test_logzioHandler.py::TestLogzioHandler::test_extra_formatting",
"tests/test_logzioHandler.py::TestLogzioHandler::test_format_string_message",
"tests/test_logzioHandler.py::TestLogzioHandler::test_json",
"tests/test_logzioHandler.py::TestLogzioHandler::test_string"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-02 13:49:15+00:00
|
apache-2.0
| 3,633 |
|
lokal-profil__runeberg-10
|
diff --git a/runeberg/page.py b/runeberg/page.py
index a4f435b..604c251 100644
--- a/runeberg/page.py
+++ b/runeberg/page.py
@@ -39,6 +39,13 @@ class Page(object):
"""Represent the Page as a string."""
return self.uid
+ @property
+ def image_file_type(self):
+ """Return the file extension of the image file."""
+ if not self.image:
+ return None
+ return os.path.splitext(self.image)[1]
+
def set_blank(self, default_label=None):
"""
Set the blank proofread status based on page label.
@@ -74,7 +81,7 @@ class Page(object):
UserWarning)
@staticmethod
- def from_path(base_path, uid, label, whole_page_ok):
+ def from_path(base_path, uid, image_type, label, whole_page_ok):
"""
Create a page from the path to the unzipped files and a uid.
@@ -82,11 +89,13 @@ class Page(object):
unzipped files.
@param uid: the identifier of the page, ordinarily the zero-padded
ordinal number of the page.
+ @param image_type: the expected file extension of the scanned image.
@param label: the real printable label of the page, ordinarily the page
number.
@param whole_page_ok: whether the whole page has been proofread
"""
- img_path = os.path.join(base_path, IMG_DIR, '{}.tif'.format(uid))
+ img_path = os.path.join(
+ base_path, IMG_DIR, '{0}{1}'.format(uid, image_type))
if not os.path.isfile(img_path):
raise ValueError(
'{} was provided as the image for "{}" but no such image '
diff --git a/runeberg/work.py b/runeberg/work.py
index 1aad793..9cefb1f 100644
--- a/runeberg/work.py
+++ b/runeberg/work.py
@@ -11,18 +11,21 @@ This most often corresponds to an edition in e.g. Wikidata terminology.
import os
from collections import Counter, OrderedDict
from itertools import takewhile
+from shlex import quote
from shutil import which # used for djvu conversion
from subprocess import DEVNULL, run # used for djvu conversion
from tqdm import tqdm
from runeberg.article import Article
-from runeberg.download import DATA_DIR, SITE, UNZIP_SUBDIR
+from runeberg.download import DATA_DIR, IMG_DIR, SITE, UNZIP_SUBDIR
from runeberg.lst_file import LstFile
from runeberg.page import Page
from runeberg.page_range import PageRange
from runeberg.person import Person
+IMAGE_TYPES = ('.tif', '.jpg')
+
class Work(object):
"""An object representing a runeberg.org work."""
@@ -41,11 +44,12 @@ class Work(object):
self.identifiers = {} # identifiers for the work in external sources
self.people = {} # people involved with the work
self.djvu = None # djvu file of the whole work
+ self.image_type = None # the default file extension of the image files
# @TODO: something recording the date when runeberg was downloaded
@staticmethod
- def from_files(uid, base_path=None, known_people=None):
+ def from_files(uid, base_path=None, known_people=None, img_dir=None):
"""
Create an Work from the downloaded, unzipped files.
@@ -53,10 +57,14 @@ class Work(object):
Overrides the default {cwd}/DATA_DIR/{uid}/UNZIP_SUBDIR
@param known_people: dict of Person objects from which author, editor
and translator roles are matched.
+ @param img_dir: the name given to the image sub-directory. Defaults to
+ IMG_DIR.
"""
work = Work(uid)
base_path = base_path or os.path.join(
os.getcwd(), DATA_DIR, uid, UNZIP_SUBDIR)
+ img_dir = img_dir or IMG_DIR
+ work.determine_image_file_type(base_path, img_dir)
chapters = work.load_pages(base_path)
work.load_articles(base_path, chapters)
work.load_metadata(base_path)
@@ -68,6 +76,32 @@ class Work(object):
"""Return the base url on runeberg.org."""
return '{0}/{1}/'.format(SITE, self.uid)
+ def determine_image_file_type(self, base_path, img_dir):
+ """
+ Determine the file type of the images of the scanned pages.
+
+ Will stop after encountering the first recognised image file type.
+
+ @raises NoImagesError: if no files are encountered
+ @raises UnrecognisedImageTypeError: if no recognised image file types
+ are encountered.
+ @param base_path: The path to the unzipped directory.
+ @param img_dir: the image subdirectory
+ """
+ found_types = set()
+ img_dir_path = os.path.join(base_path, img_dir)
+ with os.scandir(img_dir_path) as it:
+ for entry in it:
+ if not entry.name.startswith('.') and entry.is_file():
+ ext = os.path.splitext(entry)[1]
+ if ext in IMAGE_TYPES:
+ self.image_type = ext
+ return
+ found_types.add(ext)
+ if not found_types:
+ raise NoImagesError(img_dir_path)
+ raise UnrecognisedImageTypeError(found_types)
+
def load_pages(self, base_path):
"""
Populate self.pages.
@@ -86,7 +120,7 @@ class Work(object):
chapters = Counter()
for uid, label in pages.data:
page = Page.from_path(
- base_path, uid, label, uid in ok_pages)
+ base_path, uid, self.image_type, label, uid in ok_pages)
chapters.update(page.get_chapters())
self.pages[uid] = page
return chapters
@@ -282,10 +316,22 @@ class Work(object):
stderr = DEVNULL if silent else None
for i, page in tqdm(enumerate(self.pages.values(), 1),
total=len(self.pages)):
- # cjb2 to convert single tif to djvu
+ in_file = quote(page.image)
+
# @TODO: can try "capture_output=True" if python is bumped to 3.7
- run(['cjb2', '-clean', page.image, tmp_djvu],
- check=True, stderr=stderr)
+ # use page file type in case original image has been replaced
+ if page.image_file_type == '.tif':
+ # cjb2 to convert single tif to djvu
+ run(['cjb2', '-clean', in_file, tmp_djvu],
+ check=True, stderr=stderr)
+ elif page.image_file_type == '.jpg':
+ run(['c44', '-crcbfull', in_file, tmp_djvu],
+ check=True, stderr=stderr)
+ else:
+ raise NotImplementedError(
+ 'At the moment only .tif and .jpg images can be converted '
+ 'to DjVu')
+
if i == 1:
base_path = os.path.dirname(page.image)
book_djvu = os.path.join(base_path, 'book.djvu')
@@ -315,6 +361,33 @@ class Work(object):
return (which('djvm') is not None and which('cjb2') is not None)
+class NoImagesError(Exception):
+ """Error when no images were found for the pages of the work."""
+
+ def __init__(self, img_dir_path):
+ """Initialise a NoImagesError."""
+ msg = (
+ 'Could not detect any images for this work. Are you sure you '
+ 'provided the correct image subdirectory ({}) and that this is '
+ 'not a volumed work?'.format(img_dir_path))
+ super().__init__(msg)
+
+
+class UnrecognisedImageTypeError(Exception):
+ """Error in trying to determine the image file type."""
+
+ def __init__(self, encountered):
+ """Initialise a UnrecognisedImageTypeError."""
+ encountered = sorted(list(encountered)) # make deterministic
+ msg = (
+ 'The encountered images do not seem to be of a recognised image '
+ 'file type.\n'
+ 'Recognised image file types are: {0}\n'
+ 'Encountered file types: {1}'.format(
+ ', '.join(IMAGE_TYPES), ', '.join(encountered)))
+ super().__init__(msg)
+
+
class DisambiguationError(Exception):
"""Error in disambiguating non-unique article titles."""
|
lokal-profil/runeberg
|
c19546bcff1113d64ac8226eb38d1b0e48d096d8
|
diff --git a/tests/test_page.py b/tests/test_page.py
index c0a84e7..94f8c39 100644
--- a/tests/test_page.py
+++ b/tests/test_page.py
@@ -183,3 +183,19 @@ class TestCheckBlank(unittest.TestCase):
self.page.is_proofread = False
with self.assertWarnsRegex(UserWarning, r'is not empty'):
self.page.check_blank()
+
+
+class TestImageFileType(unittest.TestCase):
+
+ """Test the image_file_type() method."""
+
+ def setUp(self):
+ self.page = Page('0001')
+
+ def test_image_file_type_empty(self):
+ # self.page.image = ''
+ self.assertIsNone(self.page.image_file_type)
+
+ def test_image_file_type_tif(self):
+ self.page.image = 'somewhere/foo.tif'
+ self.assertEquals(self.page.image_file_type, '.tif')
diff --git a/tests/test_work.py b/tests/test_work.py
index 599e9d8..93148c0 100644
--- a/tests/test_work.py
+++ b/tests/test_work.py
@@ -7,7 +7,29 @@ from collections import Counter, OrderedDict
from runeberg.article import Article
from runeberg.page import Page
from runeberg.page_range import PageRange
-from runeberg.work import DisambiguationError, ReconciliationError, Work
+from runeberg.work import (
+ DisambiguationError,
+ NoImagesError,
+ ReconciliationError,
+ UnrecognisedImageTypeError,
+ Work
+)
+
+
+class PseudoDirEntry(object):
+
+ """Class to mock DirEntry returned by os.scandir."""
+
+ def __init__(self, name, is_file=True):
+ self.name = name
+ self.path = './somewhere/{}'.format(name)
+ self._is_file = is_file
+
+ def is_file(self):
+ return self._is_file
+
+ def __fspath__(self):
+ return self.path
class TestParseRange(unittest.TestCase):
@@ -343,3 +365,56 @@ class TestReconciliationError(unittest.TestCase):
self.assertEqual(e.no_tag_articles, [
'b: page(s) 002'
])
+
+
+class TestDetermineImageFileType(unittest.TestCase):
+
+ """Unit tests for determine_image_file_type()."""
+
+ def setUp(self):
+ self.work = Work('test')
+ # cannot autospec due to https://bugs.python.org/issue23078
+
+ self.mock_scandir_list = [
+ PseudoDirEntry('.git', False),
+ PseudoDirEntry('.travis.yml'),
+ PseudoDirEntry('tests', False)
+ ]
+
+ self.mock_scandir_val = mock.MagicMock()
+ self.mock_scandir_val.__enter__.return_value = self.mock_scandir_list
+ patcher = mock.patch('runeberg.work.os.scandir',
+ return_value=self.mock_scandir_val)
+ self.mock_scandir = patcher.start()
+ self.addCleanup(patcher.stop)
+
+ def test_determine_image_file_type_empty(self):
+ del self.mock_scandir_list[:] # empty the list without creating new
+ with self.assertRaises(NoImagesError):
+ self.work.determine_image_file_type('base', 'img')
+ self.mock_scandir.assert_called_once_with('base/img')
+
+ def test_determine_image_file_type_no_valid_files(self):
+ with self.assertRaises(NoImagesError):
+ self.work.determine_image_file_type('base', 'img')
+
+ def test_determine_image_file_type_valid_file(self):
+ self.mock_scandir_list.append(PseudoDirEntry('tests.jpg'))
+
+ self.work.determine_image_file_type('base', 'img')
+ self.assertEquals(self.work.image_type, '.jpg')
+
+ def test_determine_image_file_type_no_valid_types(self):
+ self.mock_scandir_list.append(PseudoDirEntry('tests.foo'))
+ self.mock_scandir_list.append(PseudoDirEntry('tests.bar'))
+
+ with self.assertRaisesRegex(UnrecognisedImageTypeError,
+ '\.bar, \.foo'):
+ self.work.determine_image_file_type('base', 'img')
+
+ def test_determine_image_file_type_valid_file_ignore_second(self):
+ self.mock_scandir_list.append(PseudoDirEntry('tests.jpg'))
+ self.mock_scandir_list.append(PseudoDirEntry('tests.tif'))
+
+ self.work.determine_image_file_type('base', 'img')
+ self.assertEquals(self.work.image_type, '.jpg')
|
Works with .jpg images
Of course not all scanned images are .tif. See e.g. [`aoshbok`](runeberg.org/aoshbok/)
When they are not, work parsing crashes, as `page.from_path()` assumes `.tif`.
DjVu conversion also assumes `.tif` (this is expected by `cjb2`).
Determine the file type and store it as a variable on the work, which is then passed on to `page.from_path()` and `work.can_djvu()`.
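A condensed sketch of the detection logic the patch adds (error classes simplified to ValueError):
```python
import os

IMAGE_TYPES = ('.tif', '.jpg')

def determine_image_file_type(img_dir_path):
    """Return the first recognised image extension found among the files
    in img_dir_path, skipping hidden entries and subdirectories."""
    found_types = set()
    for entry in os.scandir(img_dir_path):
        if entry.name.startswith('.') or not entry.is_file():
            continue
        ext = os.path.splitext(entry.name)[1]
        if ext in IMAGE_TYPES:
            return ext
        found_types.add(ext)
    raise ValueError(
        'no recognised image type found; saw: {}'.format(sorted(found_types)))
```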
|
0.0
|
c19546bcff1113d64ac8226eb38d1b0e48d096d8
|
[
"tests/test_page.py::TestStr::test_str_range",
"tests/test_page.py::TestGetChapters::test_get_chapters_empty",
"tests/test_page.py::TestGetChapters::test_get_chapters_missing_attribute",
"tests/test_page.py::TestGetChapters::test_get_chapters_no_chapters",
"tests/test_page.py::TestGetChapters::test_get_chapters_non_unique_chapters",
"tests/test_page.py::TestGetChapters::test_get_chapters_unique_chapters",
"tests/test_page.py::TestRenameChapter::test_rename_chapter_multiple_matches",
"tests/test_page.py::TestRenameChapter::test_rename_chapter_no_chapter",
"tests/test_page.py::TestRenameChapter::test_rename_chapter_single_match",
"tests/test_page.py::TestCheckBlank::test_check_blank_empty_and_not_proofread",
"tests/test_page.py::TestCheckBlank::test_check_blank_empty_and_proofread",
"tests/test_page.py::TestCheckBlank::test_check_blank_not_empty_and_blank",
"tests/test_page.py::TestImageFileType::test_image_file_type_empty",
"tests/test_page.py::TestImageFileType::test_image_file_type_tif",
"tests/test_work.py::TestParseRange::test_parse_range_empty",
"tests/test_work.py::TestParseRange::test_parse_range_multiple_mixed",
"tests/test_work.py::TestParseRange::test_parse_range_single_page",
"tests/test_work.py::TestParseRange::test_parse_range_single_range",
"tests/test_work.py::TestLoadMetadata::test_load_metadata_detect_invalid_identifier",
"tests/test_work.py::TestLoadMetadata::test_load_metadata_detect_invalid_line",
"tests/test_work.py::TestLoadMetadata::test_load_metadata_empty",
"tests/test_work.py::TestLoadMetadata::test_load_metadata_ignore_comments",
"tests/test_work.py::TestLoadMetadata::test_load_metadata_set_and_overwritten_attribute",
"tests/test_work.py::TestParseMarc::test_parse_marc_empty",
"tests/test_work.py::TestParseMarc::test_parse_marc_multiple",
"tests/test_work.py::TestParseMarc::test_parse_marc_single",
"tests/test_work.py::TestSetupDisambiguationCounter::test_setup_disambiguation_counter_duplicates",
"tests/test_work.py::TestSetupDisambiguationCounter::test_setup_disambiguation_counter_empty",
"tests/test_work.py::TestSetupDisambiguationCounter::test_setup_disambiguation_counter_no_duplicates",
"tests/test_work.py::TestLoadArticle::test_load_articles_chapter_without_article",
"tests/test_work.py::TestLoadArticle::test_load_articles_empty",
"tests/test_work.py::TestLoadArticle::test_load_articles_more_dupes_than_expected",
"tests/test_work.py::TestLoadArticle::test_load_articles_no_chapter_no_dupe",
"tests/test_work.py::TestLoadArticle::test_load_articles_no_chapter_no_dupe_toc",
"tests/test_work.py::TestLoadArticle::test_load_articles_no_chapter_with_dupe_raise_error",
"tests/test_work.py::TestLoadArticle::test_load_articles_one_to_one_article_chapter_tags",
"tests/test_work.py::TestLoadArticle::test_load_articles_one_to_one_article_chapter_tags_dupe",
"tests/test_work.py::TestReconciliationError::test_reconciliation_error_articles",
"tests/test_work.py::TestReconciliationError::test_reconciliation_error_empty",
"tests/test_work.py::TestReconciliationError::test_reconciliation_error_negative_chapter_count",
"tests/test_work.py::TestReconciliationError::test_reconciliation_error_non_zero_chapter_count",
"tests/test_work.py::TestDetermineImageFileType::test_determine_image_file_type_empty",
"tests/test_work.py::TestDetermineImageFileType::test_determine_image_file_type_no_valid_files",
"tests/test_work.py::TestDetermineImageFileType::test_determine_image_file_type_no_valid_types",
"tests/test_work.py::TestDetermineImageFileType::test_determine_image_file_type_valid_file",
"tests/test_work.py::TestDetermineImageFileType::test_determine_image_file_type_valid_file_ignore_second"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-09 19:21:19+00:00
|
mit
| 3,634 |