Dataset columns, with types and value ranges:

| column | type | range |
| --- | --- | --- |
| instance_id | string | length 10–57 |
| patch | string | length 261–37.7k |
| repo | string | length 7–53 |
| base_commit | string | length 40–40 |
| hints_text | string | 301 distinct values |
| test_patch | string | length 212–2.22M |
| problem_statement | string | length 23–37.7k |
| version | string | 1 distinct value |
| environment_setup_commit | string | length 40–40 |
| FAIL_TO_PASS | list | length 1–4.94k |
| PASS_TO_PASS | list | length 0–7.82k |
| meta | dict | — |
| created_at | string | length 25–25 |
| license | string | 8 distinct values |
| __index_level_0__ | int64 | 0–6.41k |
instance_id: googlefonts__gftools-890

patch:
diff --git a/Lib/gftools/gfgithub.py b/Lib/gftools/gfgithub.py index 7fddd37..38d36f2 100644 --- a/Lib/gftools/gfgithub.py +++ b/Lib/gftools/gfgithub.py @@ -4,7 +4,7 @@ import requests import typing import urllib import time - +from gftools.utils import github_user_repo GITHUB_GRAPHQL_API = "https://api.github.com/graphql" GITHUB_V3_REST_API = "https://api.github.com" @@ -18,6 +18,11 @@ class GitHubClient: self.repo_owner = repo_owner self.repo_name = repo_name + @classmethod + def from_url(cls, url): + user, repo = github_user_repo(url) + return cls(user, repo) + def _post(self, url, payload: typing.Dict): headers = {"Authorization": f"bearer {self.gh_token}"} response = requests.post(url, json=payload, headers=headers) @@ -87,7 +92,12 @@ class GitHubClient: self.rest_url("pulls", state="open", head=pr_head, base=pr_base_branch) ) - def create_pr(self, title: str, body: str, head: str, base: str, draft: bool = False): + def get_commit(self, ref: str): + return self._get(self.rest_url(f"commits/{ref}")) + + def create_pr( + self, title: str, body: str, head: str, base: str, draft: bool = False + ): return self._post( self.rest_url("pulls"), { @@ -96,7 +106,7 @@ class GitHubClient: "head": head, "base": base, "maintainer_can_modify": True, - "draft": draft + "draft": draft, }, ) diff --git a/Lib/gftools/packager.py b/Lib/gftools/packager.py index 7ba1d53..1cae84a 100644 --- a/Lib/gftools/packager.py +++ b/Lib/gftools/packager.py @@ -173,20 +173,14 @@ def load_metadata(fp: "Path | str"): item.source_file = src item.dest_file = dst metadata.source.files.append(item) - - metadata.source.repository_url = re.sub( - r"\.git$", "", metadata.source.repository_url - ) return metadata def save_metadata(fp: Path, metadata: fonts_pb2.FamilyProto): """Save METADATA.pb file and delete old upstream.yaml file.""" - _, _, _, user, repo = metadata.source.repository_url.split("/") - github = GitHubClient(user, repo) - url = github.rest_url(f"commits/{metadata.source.branch}") - resp = github._get(url) - git_commit = resp["sha"] + github = GitHubClient.from_url(metadata.source.repository_url) + commit = github.get_commit(metadata.source.branch) + git_commit = commit["sha"] metadata.source.commit = git_commit language_comments = fonts.LanguageComments(LoadLanguages()) fonts.WriteProto(metadata, fp, comments=language_comments) @@ -219,8 +213,7 @@ def download_assets( metadata: fonts_pb2.FamilyProto, out: Path, latest_release: bool = False ) -> List[str]: """Download assets listed in the metadata's source field""" - _, _, _, owner, repo = metadata.source.repository_url.split("/") - upstream = GitHubClient(owner, repo) + upstream = GitHubClient.from_url(metadata.source.repository_url) res = [] # Getting files from an archive always takes precedence over a # repo dir diff --git a/Lib/gftools/utils.py b/Lib/gftools/utils.py index b57e23b..1cfa0f4 100644 --- a/Lib/gftools/utils.py +++ b/Lib/gftools/utils.py @@ -643,3 +643,13 @@ def shell_quote(s: Union[str, Path]) -> str: return subprocess.list2cmdline([s]) else: return shlex.quote(s) + + +def github_user_repo(github_url): + pattern = r'https?://github\.com/(?P<user>[^/]+)/(?P<repo>[^/^.]+)' + match = re.search(pattern, github_url) + if not match: + raise ValueError( + f"Cannot extract github user and repo name from url '{github_url}'." + ) + return match.group('user'), match.group('repo')
repo: googlefonts/gftools
base_commit: 420108ee6ad484abd6ace9b650a936b634d456f2

test_patch:
diff --git a/tests/test_utils.py b/tests/test_utils.py index 40b5b25..acbcc49 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -56,3 +56,17 @@ He was referred to H.R. Giger, who headed the H.R. department at the time, then </p> """ assert format_html(input) == output + + [email protected]( + """url,want""", + [ + ("https://github.com/SorkinType/SASchoolHandAustralia", ("SorkinType", "SASchoolHandAustralia")), + ("https://github.com/SorkinType/SASchoolHandAustralia/", ("SorkinType", "SASchoolHandAustralia")), + ("https://github.com/googlefonts/MavenPro//", ("googlefonts", "MavenPro")), + ("https://github.com/googlefonts/MavenPro.git", ("googlefonts", "MavenPro")), + ] +) +def test_github_user_repo(url, want): + from gftools.utils import github_user_repo + assert github_user_repo(url) == want \ No newline at end of file
problem_statement (a hedged sketch follows this record):
packager: better error when repo ends in a slash (Or just deal with it)

This is a very lazy way to parse a URL: ;-)
https://github.com/googlefonts/gftools/blob/d5cc1aa2373e78cbaa77b4389e284faf7b8de9e3/Lib/gftools/packager.py#L222

With a repo URL of `https://github.com/SorkinType/Briem-Hand/` I got a nasty error:

```
  File "/Users/simon/others-repos/gftools/Lib/gftools/packager.py", line 222, in download_assets
    _, _, _, owner, repo = metadata.source.repository_url.split("/")
    ^^^^^^^^^^^^^^^^^^^^
ValueError: too many values to unpack (expected 5)
```
version: 0.0
environment_setup_commit: 420108ee6ad484abd6ace9b650a936b634d456f2

FAIL_TO_PASS: [ "tests/test_utils.py::test_github_user_repo[https://github.com/SorkinType/SASchoolHandAustralia-want0]", "tests/test_utils.py::test_github_user_repo[https://github.com/SorkinType/SASchoolHandAustralia/-want1]", "tests/test_utils.py::test_github_user_repo[https://github.com/googlefonts/MavenPro//-want2]", "tests/test_utils.py::test_github_user_repo[https://github.com/googlefonts/MavenPro.git-want3]" ]

PASS_TO_PASS:
[ "tests/test_utils.py::test_remove_url_prefix[https://www.google.com-google.com]", "tests/test_utils.py::test_remove_url_prefix[https://google.com-google.com]", "tests/test_utils.py::test_remove_url_prefix[http://www.google.com-google.com]", "tests/test_utils.py::test_remove_url_prefix[http://google.com-google.com]", "tests/test_utils.py::test_remove_url_prefix[google.com-google.com]", "tests/test_utils.py::test_remove_url_prefix[-]", "tests/test_utils.py::test_format_html" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-03-28 10:42:22+00:00
apache-2.0
2,635
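The fix above replaces the brittle `repository_url.split("/")` unpacking with a regex. Below is a self-contained sketch of the same helper; the function body is copied from the patch, while the `assert` usage lines are illustrative (one uses the URL from the bug report).

```python
import re


def github_user_repo(github_url):
    # Match the user and repo path segments; the repo segment stops at
    # '/' or '.', so trailing slashes and a '.git' suffix are ignored.
    pattern = r"https?://github\.com/(?P<user>[^/]+)/(?P<repo>[^/^.]+)"
    match = re.search(pattern, github_url)
    if not match:
        raise ValueError(
            f"Cannot extract github user and repo name from url '{github_url}'."
        )
    return match.group("user"), match.group("repo")


if __name__ == "__main__":
    # Mirrors the parametrized cases in the test_patch above.
    assert github_user_repo("https://github.com/SorkinType/Briem-Hand/") == (
        "SorkinType",
        "Briem-Hand",
    )
    assert github_user_repo("https://github.com/googlefonts/MavenPro.git") == (
        "googlefonts",
        "MavenPro",
    )
```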
instance_id: googlefonts__glyphsLib-537

patch:
diff --git a/Lib/glyphsLib/builder/custom_params.py b/Lib/glyphsLib/builder/custom_params.py index 8e3cfa15..2eeb126e 100644 --- a/Lib/glyphsLib/builder/custom_params.py +++ b/Lib/glyphsLib/builder/custom_params.py @@ -27,7 +27,7 @@ from .constants import ( REVERSE_CODEPAGE_RANGES, PUBLIC_PREFIX, ) -from .features import replace_feature +from .features import replace_feature, replace_prefixes """Set Glyphs custom parameters in UFO info or lib, where appropriate. @@ -671,6 +671,34 @@ class ReplaceFeatureParamHandler(AbstractParamHandler): register(ReplaceFeatureParamHandler()) +class ReplacePrefixParamHandler(AbstractParamHandler): + def to_ufo(self, builder, glyphs, ufo): + repl_map = {} + for value in glyphs.get_custom_values("Replace Prefix"): + prefix_name, prefix_code = re.split(r"\s*;\s*", value, 1) + # if multiple 'Replace Prefix' custom params replace the same + # prefix, the last wins + repl_map[prefix_name] = prefix_code + + features_text = ufo._owner.features.text + + if not (repl_map and features_text): + return + + glyph_names = set(ufo._owner.keys()) + + ufo._owner.features.text = replace_prefixes( + repl_map, features_text, glyph_names=glyph_names + ) + + def to_glyphs(self, glyphs, ufo): + # do the same as ReplaceFeatureParamHandler.to_glyphs + pass + + +register(ReplacePrefixParamHandler()) + + class ReencodeGlyphsParamHandler(AbstractParamHandler): """ The "Reencode Glyphs" custom parameter contains a list of 'glyphname=unicodevalue' strings: e.g., ["smiley=E100", "logo=E101"]. diff --git a/Lib/glyphsLib/builder/features.py b/Lib/glyphsLib/builder/features.py index 65cefb02..bc719eb6 100644 --- a/Lib/glyphsLib/builder/features.py +++ b/Lib/glyphsLib/builder/features.py @@ -36,20 +36,37 @@ def autostr(automatic): def to_ufo_features(self): for master_id, source in self._sources.items(): master = self.font.masters[master_id] - _to_ufo_features(self, master, source.font) + ufo = source.font + # Recover the original feature code if it was stored in the user data + original = master.userData[ORIGINAL_FEATURE_CODE_KEY] + if original is not None: + ufo.features.text = original + else: + skip_export_glyphs = self._designspace.lib.get("public.skipExportGlyphs") + ufo.features.text = _to_ufo_features( + self.font, + ufo, + generate_GDEF=self.generate_GDEF, + skip_export_glyphs=skip_export_glyphs, + ) -def _to_ufo_features(self, master, ufo): - """Write an UFO's OpenType feature file.""" - # Recover the original feature code if it was stored in the user data - original = master.userData[ORIGINAL_FEATURE_CODE_KEY] - if original is not None: - ufo.features.text = original - return +def _to_ufo_features(font, ufo=None, generate_GDEF=False, skip_export_glyphs=None): + """Convert GSFont features, including prefixes and classes, to UFO. + Optionally, build a GDEF table definiton, excluding 'skip_export_glyphs'. 
+ + Args: + font: GSFont + ufo: Optional[defcon.Font] + generate_GDEF: bool + skip_export_glyphs: Optional[List[str]] + + Returns: str + """ prefixes = [] - for prefix in self.font.featurePrefixes: + for prefix in font.featurePrefixes: strings = [] if prefix.name != ANONYMOUS_FEATURE_PREFIX_NAME: strings.append("# Prefix: %s\n" % prefix.name) @@ -60,7 +77,7 @@ def _to_ufo_features(self, master, ufo): prefix_str = "\n\n".join(prefixes) class_defs = [] - for class_ in self.font.classes: + for class_ in font.classes: prefix = "@" if not class_.name.startswith("@") else "" name = prefix + class_.name class_defs.append( @@ -69,7 +86,7 @@ def _to_ufo_features(self, master, ufo): class_str = "\n\n".join(class_defs) feature_defs = [] - for feature in self.font.features: + for feature in font.features: code = feature.code lines = ["feature %s {" % feature.name] if feature.notes: @@ -90,21 +107,20 @@ def _to_ufo_features(self, master, ufo): # results, we would need anchor propagation or user intervention. Glyphs.app # only generates it on generating binaries. gdef_str = None - if self.generate_GDEF: + if generate_GDEF: + assert ufo is not None if re.search(r"^\s*table\s+GDEF\s+{", prefix_str, flags=re.MULTILINE): raise ValueError( "The features already contain a `table GDEF {...}` statement. " "Either delete it or set generate_GDEF to False." ) - gdef_str = _build_gdef( - ufo, self._designspace.lib.get("public.skipExportGlyphs") - ) + gdef_str = _build_gdef(ufo, skip_export_glyphs) # make sure feature text is a unicode string, for defcon full_text = ( "\n\n".join(filter(None, [class_str, prefix_str, fea_str, gdef_str])) + "\n" ) - ufo.features.text = full_text if full_text.strip() else "" + return full_text if full_text.strip() else "" def _build_gdef(ufo, skipExportGlyphs=None): @@ -200,6 +216,32 @@ def replace_feature(tag, repl, features): ) +def replace_prefixes(repl_map, features_text, glyph_names=None): + """Replace all '# Prefix: NAME' sections in features. + + Args: + repl_map: Dict[str, str]: dictionary keyed by prefix name containing + feature code snippets to be replaced. + features_text: str: feature text to be parsed. + glyph_names: Optional[Sequence[str]]: list of valid glyph names, used + by feaLib Parser to distinguish glyph name tokens containing '-' from + glyph ranges such as 'a-z'. + + Returns: + str: new feature text with replaced prefix paragraphs. + """ + from glyphsLib.classes import GSFont + + temp_font = GSFont() + _to_glyphs_features(temp_font, features_text, glyph_names=glyph_names) + + for prefix in temp_font.featurePrefixes: + if prefix.name in repl_map: + prefix.code = repl_map[prefix.name] + + return _to_ufo_features(temp_font) + + def to_glyphs_features(self): if not self.designspace.sources: # Needs at least one UFO @@ -228,9 +270,26 @@ def to_glyphs_features(self): ufo = self.designspace.sources[0].font if ufo.features.text is None: return - document = FeaDocument(ufo.features.text, ufo.keys()) - processor = FeatureFileProcessor(document, self.glyphs_module) - processor.to_glyphs(self.font) + _to_glyphs_features( + self.font, + ufo.features.text, + glyph_names=ufo.keys(), + glyphs_module=self.glyphs_module, + ) + + +def _to_glyphs_features(font, features_text, glyph_names=None, glyphs_module=None): + """Import features text in GSFont, split into prefixes, features and classes. 
+ + Args: + font: GSFont + feature_text: str + glyph_names: Optional[Sequence[str]] + glyphs_module: Optional[Any] + """ + document = FeaDocument(features_text, glyph_names) + processor = FeatureFileProcessor(document, glyphs_module) + processor.to_glyphs(font) def _features_are_different_across_ufos(self): @@ -274,9 +333,12 @@ def _to_glyphs_features_basic(self): class FeaDocument(object): """Parse the string of a fea code into statements.""" - def __init__(self, text, glyph_set): + def __init__(self, text, glyph_set=None): feature_file = StringIO(text) - parser_ = parser.Parser(feature_file, glyph_set, followIncludes=False) + glyph_names = glyph_set if glyph_set is not None else () + parser_ = parser.Parser( + feature_file, glyphNames=glyph_names, followIncludes=False + ) self._doc = parser_.parse() self.statements = self._doc.statements self._lines = text.splitlines(True) # keepends=True @@ -396,8 +458,10 @@ class PeekableIterator(object): class FeatureFileProcessor(object): """Put fea statements into the correct fields of a GSFont.""" - def __init__(self, doc, glyphs_module): + def __init__(self, doc, glyphs_module=None): self.doc = doc + if glyphs_module is None: + from glyphsLib import classes as glyphs_module self.glyphs_module = glyphs_module self.statements = PeekableIterator(doc.statements) self._font = None
repo: googlefonts/glyphsLib
base_commit: 120166bb32e5d2af247aab7844f34003979cc8ee

test_patch:
diff --git a/tests/builder/custom_params_test.py b/tests/builder/custom_params_test.py index 42b3c036..cf229329 100644 --- a/tests/builder/custom_params_test.py +++ b/tests/builder/custom_params_test.py @@ -292,6 +292,74 @@ class SetCustomParamsTest(unittest.TestCase): self.assertEqual(self.ufo.features.text, original) + def test_replace_prefix(self): + self.ufo.features.text = dedent( + """\ + # Prefix: AAA + include(../aaa.fea); + + # Prefix: FOO + # foo + + # Prefix: ZZZ + include(../zzz.fea); + + # Prefix: BAR + # bar + + feature liga { + sub f i by f_i; + } liga; + + table GDEF { + GlyphClassDef + [f i], # Base + [f_i], # Liga + , # Mark + ; + } GDEF; + """ + ) + + self.master.customParameters.append( + GSCustomParameter("Replace Prefix", "FOO; include(../foo.fea);") + ) + self.master.customParameters.append( + GSCustomParameter("Replace Prefix", "BAR; include(../bar.fea);") + ) + self.set_custom_params() + + self.assertEqual( + self.ufo.features.text, + dedent( + """\ + # Prefix: AAA + include(../aaa.fea); + + # Prefix: FOO + include(../foo.fea); + + # Prefix: ZZZ + include(../zzz.fea); + + # Prefix: BAR + include(../bar.fea); + + table GDEF { + GlyphClassDef + [f i], # Base + [f_i], # Liga + , # Mark + ; + } GDEF; + + feature liga { + sub f i by f_i; + } liga; + """ + ), + ) + def test_useProductionNames(self): for value in (True, False): self.master.customParameters["Don't use Production Names"] = value
problem_statement (a hedged sketch follows this record):
Support Replace Prefix custom parameter

This is mentioned somewhat in https://github.com/googlefonts/glyphsLib/issues/506 and https://github.com/googlefonts/glyphsLib/issues/253 and in some fontmake issues, but to fully support including external feature files the way Glyphs does we need to read this parameter. We have external files defined with `Replace Prefix` for each instance.
version: 0.0
environment_setup_commit: 120166bb32e5d2af247aab7844f34003979cc8ee

FAIL_TO_PASS: [ "tests/builder/custom_params_test.py::SetCustomParamsTest::test_replace_prefix" ]

PASS_TO_PASS:
[ "tests/builder/custom_params_test.py::SetCustomParamsTest::test_default_fstype", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_empty_fstype", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_gasp_table", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_normalizes_curved_quotes_in_names", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_parse_glyphs_filter", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_replace_feature", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_codePageRanges", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_codePageRanges_empty", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_defaults", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_disable_last_change", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_disables_nice_names", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_fsSelection_flags", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_fsSelection_flags_all", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_fsSelection_flags_empty", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_fsSelection_flags_none", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_fstype", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_set_openTypeOS2CodePageRanges", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_ufo2ft_filter_roundtrip", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_underlinePosition", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_underlineThickness", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_useProductionNames", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_version_string", "tests/builder/custom_params_test.py::SetCustomParamsTest::test_xHeight" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-07-24 18:03:18+00:00
apache-2.0
2,636
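The new `ReplacePrefixParamHandler` above splits each `Replace Prefix` custom parameter value on its first semicolon into a prefix name and replacement code, with later values winning for the same prefix. A minimal sketch of just that parsing step; the parameter values here are made up for illustration.

```python
import re

# Hypothetical custom parameter values, in the "<prefix name>; <code>"
# format the handler expects.
values = [
    "FOO; include(../foo.fea);",
    "BAR; include(../bar.fea);",
    "FOO; include(../foo2.fea);",  # a later value wins for the same prefix
]

repl_map = {}
for value in values:
    # Split on the first semicolon only; the code itself may contain ';'.
    prefix_name, prefix_code = re.split(r"\s*;\s*", value, maxsplit=1)
    repl_map[prefix_name] = prefix_code

assert repl_map == {
    "FOO": "include(../foo2.fea);",
    "BAR": "include(../bar.fea);",
}
```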
instance_id: googlefonts__glyphsLib-798

patch:
diff --git a/Lib/glyphsLib/__init__.py b/Lib/glyphsLib/__init__.py index 367c3235..a4aea3d1 100644 --- a/Lib/glyphsLib/__init__.py +++ b/Lib/glyphsLib/__init__.py @@ -53,6 +53,7 @@ def load_to_ufos( family_name=None, propagate_anchors=None, ufo_module=None, + expand_includes=False, minimal=False, glyph_data=None, ): @@ -70,6 +71,7 @@ def load_to_ufos( family_name=family_name, propagate_anchors=propagate_anchors, ufo_module=ufo_module, + expand_includes=expand_includes, minimal=minimal, glyph_data=glyph_data, ) @@ -88,6 +90,7 @@ def build_masters( generate_GDEF=True, store_editor_state=True, write_skipexportglyphs=False, + expand_includes=False, ufo_module=None, minimal=False, glyph_data=None, @@ -126,6 +129,7 @@ def build_masters( generate_GDEF=generate_GDEF, store_editor_state=store_editor_state, write_skipexportglyphs=write_skipexportglyphs, + expand_includes=expand_includes, ufo_module=ufo_module, minimal=minimal, glyph_data=glyph_data, diff --git a/Lib/glyphsLib/builder/__init__.py b/Lib/glyphsLib/builder/__init__.py index 624a4baf..c52ccc53 100644 --- a/Lib/glyphsLib/builder/__init__.py +++ b/Lib/glyphsLib/builder/__init__.py @@ -31,6 +31,7 @@ def to_ufos( generate_GDEF=True, store_editor_state=True, write_skipexportglyphs=False, + expand_includes=False, minimal=False, glyph_data=None, ): @@ -47,6 +48,9 @@ def to_ufos( If generate_GDEF is True, write a `table GDEF {...}` statement in the UFO's features.fea, containing GlyphClassDef and LigatureCaretByPos. + If expand_includes is True, resolve include statements in the GSFont features + and inline them in the UFO features.fea. + If minimal is True, it is assumed that the UFOs will only be used in font production, and unnecessary steps (e.g. converting background layers) will be skipped. @@ -60,6 +64,7 @@ def to_ufos( generate_GDEF=generate_GDEF, store_editor_state=store_editor_state, write_skipexportglyphs=write_skipexportglyphs, + expand_includes=expand_includes, minimal=minimal, glyph_data=glyph_data, ) @@ -81,6 +86,7 @@ def to_designspace( generate_GDEF=True, store_editor_state=True, write_skipexportglyphs=False, + expand_includes=False, minimal=False, glyph_data=None, ): @@ -117,6 +123,7 @@ def to_designspace( generate_GDEF=generate_GDEF, store_editor_state=store_editor_state, write_skipexportglyphs=write_skipexportglyphs, + expand_includes=expand_includes, minimal=minimal, glyph_data=glyph_data, ) @@ -128,6 +135,7 @@ def to_glyphs( glyphs_module=classes, ufo_module=None, minimize_ufo_diffs=False, + expand_includes=False, ): """ Take a list of UFOs or a single DesignspaceDocument with attached UFOs @@ -146,6 +154,7 @@ def to_glyphs( glyphs_module=glyphs_module, ufo_module=ufo_module, minimize_ufo_diffs=minimize_ufo_diffs, + expand_includes=expand_includes, ) else: builder = GlyphsBuilder( @@ -153,5 +162,6 @@ def to_glyphs( glyphs_module=glyphs_module, ufo_module=ufo_module, minimize_ufo_diffs=minimize_ufo_diffs, + expand_includes=expand_includes, ) return builder.font diff --git a/Lib/glyphsLib/builder/builders.py b/Lib/glyphsLib/builder/builders.py index f3bd845f..3beea470 100644 --- a/Lib/glyphsLib/builder/builders.py +++ b/Lib/glyphsLib/builder/builders.py @@ -47,6 +47,7 @@ class UFOBuilder(LoggerMixin): generate_GDEF=True, store_editor_state=True, write_skipexportglyphs=False, + expand_includes=False, minimal=False, glyph_data=None, ): @@ -79,6 +80,10 @@ class UFOBuilder(LoggerMixin): into the UFOs' and Designspace's lib instead of the glyph level lib key "com.schriftgestaltung.Glyphs.Export". 
+ expand_includes -- If True, expand include statements in the GSFont features + and inline them in the UFO features.fea. + minimal -- If True, it is assumed that the UFOs will only be used in font + production, and unnecessary steps will be skipped. glyph_data -- A list of GlyphData. """ self.font = font @@ -96,6 +101,7 @@ class UFOBuilder(LoggerMixin): self.store_editor_state = store_editor_state self.bracket_layers = [] self.write_skipexportglyphs = write_skipexportglyphs + self.expand_includes = expand_includes self.minimal = minimal if propagate_anchors is None: @@ -422,6 +428,7 @@ class GlyphsBuilder(LoggerMixin): glyphs_module=classes, ufo_module=None, minimize_ufo_diffs=False, + expand_includes=False, ): """Create a builder that goes from UFOs + designspace to Glyphs. @@ -450,9 +457,12 @@ class GlyphsBuilder(LoggerMixin): minimize_ufo_diffs -- set to True to store extra info in .glyphs files in order to get smaller diffs between UFOs when going UFOs->glyphs->UFOs + expand_includes -- If True, expand include statements in the UFOs' features.fea + and inline them in the GSFont features. """ self.glyphs_module = glyphs_module self.minimize_ufo_diffs = minimize_ufo_diffs + self.expand_includes = expand_includes if designspace is not None: if ufos: diff --git a/Lib/glyphsLib/builder/features.py b/Lib/glyphsLib/builder/features.py index d6f642b6..2c821910 100644 --- a/Lib/glyphsLib/builder/features.py +++ b/Lib/glyphsLib/builder/features.py @@ -14,6 +14,7 @@ from __future__ import annotations +import os import re from textwrap import dedent from io import StringIO @@ -50,7 +51,11 @@ def to_ufo_master_features(self, ufo, master): ufo.features.text = original else: ufo.features.text = _to_ufo_features( - self.font, ufo, generate_GDEF=self.generate_GDEF, master=master + self.font, + ufo, + generate_GDEF=self.generate_GDEF, + master=master, + expand_includes=self.expand_includes, ) @@ -71,6 +76,7 @@ def _to_ufo_features( ufo: Font | None = None, generate_GDEF: bool = False, master: GSFontMaster | None = None, + expand_includes: bool = False, ) -> str: """Convert GSFont features, including prefixes and classes, to UFO. @@ -158,7 +164,23 @@ def _to_ufo_features( regenerate_opentype_categories(font, ufo) full_text = "\n\n".join(filter(None, [class_str, prefix_str, fea_str])) + "\n" - return full_text if full_text.strip() else "" + full_text = full_text if full_text.strip() else "" + + if not full_text or not expand_includes: + return full_text + + # use feaLib Parser to resolve include statements relative to the GSFont + # fontpath, and inline them in the output features text. 
+ feature_file = StringIO(full_text) + include_dir = os.path.dirname(font.filepath) if font.filepath else None + fea_parser = parser.Parser( + feature_file, + glyphNames={glyph.name for glyph in font.glyphs}, + includeDir=include_dir, + followIncludes=expand_includes, + ) + doc = fea_parser.parse() + return doc.asFea() def _build_public_opentype_categories(ufo: Font) -> dict[str, str]: @@ -320,11 +342,16 @@ def to_glyphs_features(self): ufo = self.designspace.sources[0].font if ufo.features.text is None: return + include_dir = None + if self.expand_includes and ufo.path: + include_dir = os.path.dirname(os.path.normpath(ufo.path)) _to_glyphs_features( self.font, ufo.features.text, glyph_names=ufo.keys(), glyphs_module=self.glyphs_module, + include_dir=include_dir, + expand_includes=self.expand_includes, ) # Store GDEF category data GSFont-wide to capture bracket glyphs that we @@ -334,7 +361,14 @@ def to_glyphs_features(self): self.font.userData[ORIGINAL_CATEGORY_KEY] = opentype_categories -def _to_glyphs_features(font, features_text, glyph_names=None, glyphs_module=None): +def _to_glyphs_features( + font, + features_text, + glyph_names=None, + glyphs_module=None, + include_dir=None, + expand_includes=False, +): """Import features text in GSFont, split into prefixes, features and classes. Args: @@ -342,8 +376,15 @@ def _to_glyphs_features(font, features_text, glyph_names=None, glyphs_module=Non feature_text: str glyph_names: Optional[Sequence[str]] glyphs_module: Optional[Any] + include_dir: Optional[str] + expand_includes: bool """ - document = FeaDocument(features_text, glyph_names) + document = FeaDocument( + features_text, + glyph_names, + include_dir=include_dir, + expand_includes=expand_includes, + ) processor = FeatureFileProcessor(document, glyphs_module) processor.to_glyphs(font) @@ -389,12 +430,20 @@ def _to_glyphs_features_basic(self): class FeaDocument: """Parse the string of a fea code into statements.""" - def __init__(self, text, glyph_set=None): + def __init__(self, text, glyph_set=None, include_dir=None, expand_includes=False): feature_file = StringIO(text) glyph_names = glyph_set if glyph_set is not None else () parser_ = parser.Parser( - feature_file, glyphNames=glyph_names, followIncludes=False + feature_file, + glyphNames=glyph_names, + includeDir=include_dir, + followIncludes=expand_includes, ) + if expand_includes: + # if we expand includes, we need to reparse the whole file with the + # new content to get the updated locations + text = parser_.parse().asFea() + parser_ = parser.Parser(StringIO(text), glyphNames=glyph_names) self._doc = parser_.parse() self.statements = self._doc.statements self._lines = text.splitlines(True) # keepends=True diff --git a/Lib/glyphsLib/cli.py b/Lib/glyphsLib/cli.py index f5b0ec76..a24fe671 100644 --- a/Lib/glyphsLib/cli.py +++ b/Lib/glyphsLib/cli.py @@ -151,6 +151,14 @@ def main(args=None): "key." ), ) + group.add_argument( + "--expand-includes", + action="store_true", + help=( + "Expand include statements in the .glyphs features and inline them in " + "the exported UFO features.fea." + ), + ) group = parser_glyphs2ufo.add_argument_group("Glyph data") group.add_argument( "--glyph-data", @@ -211,6 +219,14 @@ def main(args=None): action="store_false", help="Enable automatic alignment of components in glyphs.", ) + group.add_argument( + "--expand-includes", + action="store_true", + help=( + "Expand include statements in the UFO features.fea and inline them in " + "the exported .glyphs features." 
+ ), + ) options = parser.parse_args(args) @@ -246,6 +262,7 @@ def glyphs2ufo(options): generate_GDEF=options.generate_GDEF, store_editor_state=not options.no_store_editor_state, write_skipexportglyphs=options.write_public_skip_export_glyphs, + expand_includes=options.expand_includes, ufo_module=__import__(options.ufo_module), minimal=options.minimal, glyph_data=options.glyph_data or None, @@ -298,6 +315,7 @@ def ufo2glyphs(options): object_to_read, ufo_module=ufo_module, minimize_ufo_diffs=options.no_preserve_glyphsapp_metadata, + expand_includes=options.expand_includes, ) # Make the Glyphs file more suitable for roundtrip:
repo: googlefonts/glyphsLib
base_commit: 74aefb387a5a84659902b53fa805b74ff3d8ed0c

test_patch:
diff --git a/tests/builder/features_test.py b/tests/builder/features_test.py index 15b7e367..6030e369 100644 --- a/tests/builder/features_test.py +++ b/tests/builder/features_test.py @@ -343,6 +343,50 @@ def test_include_no_semicolon(tmpdir, ufo_module): assert rtufo.features.text == ufo.features.text +def test_to_glyphs_expand_includes(tmp_path, ufo_module): + ufo = ufo_module.Font() + ufo.features.text = dedent( + """\ + include(family.fea); + """ + ) + ufo.save(str(tmp_path / "font.ufo")) + + included_path = tmp_path / "family.fea" + included_path.write_text("# hello from family.fea") + assert included_path.exists() + + font = to_glyphs([ufo], minimize_ufo_diffs=True, expand_includes=True) + + assert len(font.featurePrefixes) == 1 + assert font.featurePrefixes[0].code.strip() == "# hello from family.fea" + + +def test_to_ufos_expand_includes(tmp_path, ufo_module): + font = classes.GSFont() + font.masters.append(classes.GSFontMaster()) + + feature_prefix = classes.GSFeaturePrefix() + feature_prefix.name = "include" + feature_prefix.code = dedent( + """\ + include(family.fea); + """ + ) + font.featurePrefixes.append(feature_prefix) + + font.filepath = str(tmp_path / "font.glyphs") + font.save() + + included_path = tmp_path / "family.fea" + included_path.write_text("# hello from family.fea") + assert included_path.exists() + + (ufo,) = to_ufos(font, ufo_module=ufo_module, expand_includes=True) + + assert ufo.features.text == ("# Prefix: include\n# hello from family.fea") + + def test_standalone_lookup(tmpdir, ufo_module): ufo = ufo_module.Font() # FIXME: (jany) does not preserve whitespace before and after
problem_statement (a hedged sketch follows this record):
enhance support for external feature files (adapt file path)

Trying to build this font: https://github.com/rsms/inter-gf-tight

The features are contained in separate feature files, and I get this error when building:

```
Traceback (most recent call last):
  File "/Users/rosalie/Google/env/bin/gftools-builder.py", line 83, in <module>
    builder.build()
  File "/Users/rosalie/Google/env/lib/python3.9/site-packages/gftools/builder/__init__.py", line 194, in build
    self.build_variable()
  File "/Users/rosalie/Google/env/lib/python3.9/site-packages/gftools/builder/__init__.py", line 298, in build_variable
    output_files = self.run_fontmake(source, args)
  File "/Users/rosalie/Google/env/lib/python3.9/site-packages/gftools/builder/__init__.py", line 338, in run_fontmake
    FontProject().run_from_glyphs(source, **args)
  File "/Users/rosalie/Google/env/lib/python3.9/site-packages/fontmake/font_project.py", line 741, in run_from_glyphs
    self.run_from_designspace(designspace_path, **kwargs)
  File "/Users/rosalie/Google/env/lib/python3.9/site-packages/fontmake/font_project.py", line 1017, in run_from_designspace
    raise FontmakeError(
fontmake.errors.FontmakeError: In 'InterTight.glyphs' -> 'master_ufo/InterTight.designspace': Generating fonts from Designspace failed: /Users/rosalie/Google/forks/inter-gf-tight/sources/master_ufo/InterTight-Thin.ufo/features.fea:85:8: The following feature file should be included but cannot be found: ccmp.fea
```

GlyphsApp can build the font, and it finds the features of course. I figured that the problem was that the master_ufo is another directory, and therefore the path of the feature should be adapted; instead of having `include(features/ccmp.fea);`, it should then be `include(../features/ccmp.fea);`. Doing it in the Glyphs file makes it work but breaks the export with Glyphs for other users. IMO glyphsLib or glyphs2ufo should adapt the feature path accordingly.
version: 0.0
environment_setup_commit: 74aefb387a5a84659902b53fa805b74ff3d8ed0c

FAIL_TO_PASS: [ "tests/builder/features_test.py::test_to_glyphs_expand_includes[ufoLib2]", "tests/builder/features_test.py::test_to_ufos_expand_includes[ufoLib2]" ]

PASS_TO_PASS:
[ "tests/builder/features_test.py::test_blank[ufoLib2]", "tests/builder/features_test.py::test_comment[ufoLib2]", "tests/builder/features_test.py::test_languagesystems[ufoLib2]", "tests/builder/features_test.py::test_classes[ufoLib2]", "tests/builder/features_test.py::test_class_synonym[ufoLib2]", "tests/builder/features_test.py::test_feature_names[ufoLib2]", "tests/builder/features_test.py::test_feature_names_notes[ufoLib2]", "tests/builder/features_test.py::test_feature_names_full[ufoLib2]", "tests/builder/features_test.py::test_feature_names_multi[ufoLib2]", "tests/builder/features_test.py::test_include[ufoLib2]", "tests/builder/features_test.py::test_include_no_semicolon[ufoLib2]", "tests/builder/features_test.py::test_standalone_lookup[ufoLib2]", "tests/builder/features_test.py::test_feature[ufoLib2]", "tests/builder/features_test.py::test_different_features_in_different_UFOS[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_disabled_feature[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_automatic_feature[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_feature_prefix_with_only_a_comment[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_existing_GDEF[ufoLib2]", "tests/builder/features_test.py::test_groups_remain_at_top[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_empty_feature[ufoLib2]", "tests/builder/features_test.py::test_comments_in_classes[ufoLib2]", "tests/builder/features_test.py::test_mark_class_used_as_glyph_class[ufoLib2]", "tests/builder/features_test.py::test_build_GDEF_incomplete_glyphOrder" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-22 16:13:10+00:00
apache-2.0
2,637
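The heart of this patch is delegating include resolution to feaLib: parse the feature text with `followIncludes=True` and an `includeDir` anchored at the source file's directory, then serialize the expanded document back with `asFea()`. A condensed sketch of that round trip, assuming fontTools is installed; the helper name `expand_includes` is mine, not the library's.

```python
import os
from io import StringIO

from fontTools.feaLib.parser import Parser


def expand_includes(features_text, source_path, glyph_names=()):
    # Resolve include(...) statements relative to the directory of the
    # source file and inline their contents in the returned feature text.
    include_dir = os.path.dirname(os.path.normpath(source_path))
    doc = Parser(
        StringIO(features_text),
        glyphNames=glyph_names,
        includeDir=include_dir,
        followIncludes=True,
    ).parse()
    return doc.asFea()
```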
instance_id: googlefonts__glyphsLib-849

patch:
diff --git a/Lib/glyphsLib/builder/constants.py b/Lib/glyphsLib/builder/constants.py index 815c7e74..de3f4340 100644 --- a/Lib/glyphsLib/builder/constants.py +++ b/Lib/glyphsLib/builder/constants.py @@ -42,6 +42,9 @@ ANONYMOUS_FEATURE_PREFIX_NAME = "<anonymous>" ORIGINAL_FEATURE_CODE_KEY = GLYPHLIB_PREFIX + "originalFeatureCode" ORIGINAL_CATEGORY_KEY = GLYPHLIB_PREFIX + "originalOpenTypeCategory" +INSERT_FEATURE_MARKER_RE = r"\s*# Automatic Code.*" +INSERT_FEATURE_MARKER_COMMENT = "# Automatic Code\n" + APP_VERSION_LIB_KEY = GLYPHS_PREFIX + "appVersion" KEYBOARD_INCREMENT_KEY = GLYPHS_PREFIX + "keyboardIncrement" MASTER_ORDER_LIB_KEY = GLYPHS_PREFIX + "fontMasterOrder" diff --git a/Lib/glyphsLib/builder/features.py b/Lib/glyphsLib/builder/features.py index 2c821910..511e6d68 100644 --- a/Lib/glyphsLib/builder/features.py +++ b/Lib/glyphsLib/builder/features.py @@ -30,6 +30,8 @@ from .constants import ( ORIGINAL_CATEGORY_KEY, LANGUAGE_MAPPING, REVERSE_LANGUAGE_MAPPING, + INSERT_FEATURE_MARKER_RE, + INSERT_FEATURE_MARKER_COMMENT, ) from .tokens import TokenExpander, PassThruExpander @@ -71,6 +73,11 @@ def _to_glyphs_language(langID): return REVERSE_LANGUAGE_MAPPING[langID] +def _is_manual_kern_feature(feature): + """Return true if the feature is a manually written 'kern' features.""" + return feature.name == "kern" and not feature.automatic + + def _to_ufo_features( font: GSFont, ufo: Font | None = None, @@ -155,6 +162,15 @@ def _to_ufo_features( lines.extend("#" + line for line in code.splitlines()) else: lines.append(code) + + # Manual kern features in glyphs also have the automatic code added after them + # We make sure it gets added with an "Automatic Code..." marker if it doesn't + # already have one. + if _is_manual_kern_feature(feature) and not re.search( + INSERT_FEATURE_MARKER_RE, code + ): + lines.append(INSERT_FEATURE_MARKER_COMMENT) + lines.append("} %s;" % feature.name) feature_defs.append("\n".join(lines)) fea_str = "\n\n".join(feature_defs) diff --git a/Lib/glyphsLib/builder/user_data.py b/Lib/glyphsLib/builder/user_data.py index edcf3445..ece632f3 100644 --- a/Lib/glyphsLib/builder/user_data.py +++ b/Lib/glyphsLib/builder/user_data.py @@ -31,27 +31,12 @@ from .constants import ( ) -def _has_manual_kern_feature(font): - """Return true if the GSFont contains a manually written 'kern' feature.""" - return any(f for f in font.features if f.name == "kern" and not f.automatic) - - def to_designspace_family_user_data(self): if self.use_designspace: for key, value in dict(self.font.userData).items(): if _user_data_has_no_special_meaning(key): self.designspace.lib[key] = value - # only write our custom ufo2ft featureWriters settings if the font - # does have a manually written 'kern' feature; and if the lib key wasn't - # already set in font.userData (in which case we assume the user knows - # what she's doing). - if ( - _has_manual_kern_feature(self.font) - and UFO2FT_FEATURE_WRITERS_KEY not in self.designspace.lib - ): - self.designspace.lib[UFO2FT_FEATURE_WRITERS_KEY] = DEFAULT_FEATURE_WRITERS - def to_ufo_family_user_data(self, ufo): """Set family-wide user data as Glyphs does."""
repo: googlefonts/glyphsLib
base_commit: 24b4d340e4c82948ba121dcfe563c1450a8e69c9

test_patch:
diff --git a/tests/builder/features_test.py b/tests/builder/features_test.py index 6030e369..d08f1bf8 100644 --- a/tests/builder/features_test.py +++ b/tests/builder/features_test.py @@ -17,9 +17,10 @@ import os from textwrap import dedent -from glyphsLib import to_glyphs, to_ufos, classes +from glyphsLib import to_glyphs, to_ufos, classes, to_designspace from glyphsLib.builder.features import _build_public_opentype_categories +from fontTools.designspaceLib import DesignSpaceDocument import pytest @@ -741,3 +742,33 @@ def test_mark_class_used_as_glyph_class(tmpdir, ufo_module): # hence the following assertion would fail... # https://github.com/googlefonts/glyphsLib/issues/694#issuecomment-1117204523 # assert rtufo.features.text == ufo.features.text + + +def test_automatic_added_to_manual_kern(tmpdir, ufo_module): + """Test that when a Glyphs file has a manual kern feature, + automatic markers are added so that the source kerning also + gets applied. + """ + font = classes.GSFont() + font.masters.append(classes.GSFontMaster()) + + (ufo,) = to_ufos(font) + + assert "# Automatic Code" not in ufo.features.text + + kern = classes.GSFeature(name="kern", code="pos a b 100;") + font.features.append(kern) + (ufo,) = to_ufos(font) + + assert "# Automatic Code" in ufo.features.text + + designspace = to_designspace(font, ufo_module=ufo_module) + path = str(tmpdir / "test.designspace") + designspace.write(path) + for source in designspace.sources: + source.font.save(str(tmpdir / source.filename)) + + designspace2 = DesignSpaceDocument.fromfile(path) + font2 = to_glyphs(designspace2, ufo_module=ufo_module) + + assert len([f for f in font2.features if f.name == "kern"]) == 1 diff --git a/tests/builder/lib_and_user_data_test.py b/tests/builder/lib_and_user_data_test.py index 8eeaa8c3..37c1121d 100644 --- a/tests/builder/lib_and_user_data_test.py +++ b/tests/builder/lib_and_user_data_test.py @@ -51,34 +51,6 @@ def test_designspace_lib_equivalent_to_font_user_data(tmpdir): assert designspace.lib["designspaceLibKey1"] == "designspaceLibValue1" -def test_default_featureWriters_in_designspace_lib(tmpdir, ufo_module): - """Test that the glyphsLib custom featureWriters settings (with mode="append") - are exported to the designspace lib whenever a GSFont contains a manual 'kern' - feature. And that they are not imported back to GSFont.userData if they are - the same as the default value. - """ - font = classes.GSFont() - font.masters.append(classes.GSFontMaster()) - kern = classes.GSFeature(name="kern", code="pos a b 100;") - font.features.append(kern) - - designspace = to_designspace(font, ufo_module=ufo_module) - path = str(tmpdir / "test.designspace") - designspace.write(path) - for source in designspace.sources: - source.font.save(str(tmpdir / source.filename)) - - designspace2 = DesignSpaceDocument.fromfile(path) - - assert UFO2FT_FEATURE_WRITERS_KEY in designspace2.lib - assert designspace2.lib[UFO2FT_FEATURE_WRITERS_KEY] == DEFAULT_FEATURE_WRITERS - - font2 = to_glyphs(designspace2, ufo_module=ufo_module) - - assert not len(font2.userData) - assert len([f for f in font2.features if f.name == "kern"]) == 1 - - def test_custom_featureWriters_in_designpace_lib(tmpdir, ufo_module): """Test that we can roundtrip custom user-defined ufo2ft featureWriters settings that are stored in the designspace lib or GSFont.userData.
problem_statement (a hedged sketch follows this record):
Default feature writers heuristic is baffling

If the Glyphs file does not have a manual `kern` feature, glyphsLib does nothing special, and ufo2ft uses a default list of feature writers, which is

```
KernFeatureWriter,
MarkFeatureWriter,
GdefFeatureWriter,
CursFeatureWriter,
```

But if the Glyphs file has a manual `kern` feature, glyphsLib writes a `com.github.googlei18n.ufo2ft.featureWriters` lib entry in the designspace file:
https://github.com/googlefonts/glyphsLib/blob/609b1765096b016e7382b1155c4034888a0de878/Lib/glyphsLib/builder/user_data.py#L47-L55

glyphsLib's version of `DEFAULT_FEATURE_WRITERS` contains two writers: `KernFeatureWriter` and `MarkFeatureWriter`. So an Arabic font which has a manual kern feature (even if that feature just says `# Automatic code start`) will lose its cursive positioning feature. That was surprising!

I could fix this by adding more feature writers to glyphsLib's default list, but I don't think that's the right fix. I don't understand why we are making our own "default" list, which we now have to keep in sync with ufo2ft, when *not doing anything at all* would cause ufo2ft to do the right thing.
version: 0.0
environment_setup_commit: 24b4d340e4c82948ba121dcfe563c1450a8e69c9

FAIL_TO_PASS: [ "tests/builder/features_test.py::test_automatic_added_to_manual_kern[ufoLib2]" ]

PASS_TO_PASS:
[ "tests/builder/features_test.py::test_blank[ufoLib2]", "tests/builder/features_test.py::test_comment[ufoLib2]", "tests/builder/features_test.py::test_languagesystems[ufoLib2]", "tests/builder/features_test.py::test_classes[ufoLib2]", "tests/builder/features_test.py::test_class_synonym[ufoLib2]", "tests/builder/features_test.py::test_feature_names[ufoLib2]", "tests/builder/features_test.py::test_feature_names_notes[ufoLib2]", "tests/builder/features_test.py::test_feature_names_full[ufoLib2]", "tests/builder/features_test.py::test_feature_names_multi[ufoLib2]", "tests/builder/features_test.py::test_include[ufoLib2]", "tests/builder/features_test.py::test_include_no_semicolon[ufoLib2]", "tests/builder/features_test.py::test_to_glyphs_expand_includes[ufoLib2]", "tests/builder/features_test.py::test_to_ufos_expand_includes[ufoLib2]", "tests/builder/features_test.py::test_standalone_lookup[ufoLib2]", "tests/builder/features_test.py::test_feature[ufoLib2]", "tests/builder/features_test.py::test_different_features_in_different_UFOS[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_disabled_feature[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_automatic_feature[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_feature_prefix_with_only_a_comment[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_existing_GDEF[ufoLib2]", "tests/builder/features_test.py::test_groups_remain_at_top[ufoLib2]", "tests/builder/features_test.py::test_roundtrip_empty_feature[ufoLib2]", "tests/builder/features_test.py::test_comments_in_classes[ufoLib2]", "tests/builder/features_test.py::test_mark_class_used_as_glyph_class[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_custom_featureWriters_in_designpace_lib[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_ufo_lib_equivalent_to_font_master_user_data[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_ufo_data_into_font_master_user_data[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_layer_lib_into_master_user_data[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_layer_lib_in_font_user_data[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_glif_lib_equivalent_to_layer_user_data[ufoLib2]", "tests/builder/lib_and_user_data_test.py::test_lib_data_types[ufoLib2]", "tests/builder/features_test.py::test_build_GDEF_incomplete_glyphOrder", "tests/builder/lib_and_user_data_test.py::test_designspace_lib_equivalent_to_font_user_data", "tests/builder/lib_and_user_data_test.py::test_font_user_data_to_ufo_lib", "tests/builder/lib_and_user_data_test.py::test_DisplayStrings_ufo_lib", "tests/builder/lib_and_user_data_test.py::test_glyph_user_data_into_ufo_lib", "tests/builder/lib_and_user_data_test.py::test_node_user_data_into_glif_lib" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-01-19 15:26:02+00:00
apache-2.0
2,638
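The patch drops the designspace-lib heuristic and instead ensures that a manually written `kern` feature carries Glyphs' `# Automatic Code` insertion marker, so generated kerning is still inserted after the manual rules. A condensed sketch of that logic; the constants come from the patch, while the wrapper function is illustrative.

```python
import re

INSERT_FEATURE_MARKER_RE = r"\s*# Automatic Code.*"
INSERT_FEATURE_MARKER_COMMENT = "# Automatic Code\n"


def append_marker_if_missing(feature_name, code, automatic=False):
    # Manual kern features get an "# Automatic Code" marker appended so
    # that generated kerning is still inserted after the manual rules.
    lines = ["feature %s {" % feature_name, code]
    is_manual_kern = feature_name == "kern" and not automatic
    if is_manual_kern and not re.search(INSERT_FEATURE_MARKER_RE, code):
        lines.append(INSERT_FEATURE_MARKER_COMMENT)
    lines.append("} %s;" % feature_name)
    return "\n".join(lines)


print(append_marker_if_missing("kern", "pos a b 100;"))
# feature kern {
# pos a b 100;
# # Automatic Code
#
# } kern;
```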
instance_id: googlefonts__nanoemoji-280

patch:
diff --git a/src/nanoemoji/features.py b/src/nanoemoji/features.py index 741ed2c..454df73 100644 --- a/src/nanoemoji/features.py +++ b/src/nanoemoji/features.py @@ -20,14 +20,17 @@ from nanoemoji.glyph import glyph_name -def generate_fea(rgi_sequences): - # Generate rlig feature with ligature lookup for multi-codepoint RGIs +DEFAULT_GSUB_FEATURE_TAG = "ccmp" + + +def generate_fea(rgi_sequences, feature_tag=DEFAULT_GSUB_FEATURE_TAG): + # Generate feature with ligature lookup for multi-codepoint RGIs rules = [] rules.append("languagesystem DFLT dflt;") rules.append("languagesystem latn dflt;") rules.append("") - rules.append("feature rlig {") + rules.append(f"feature {feature_tag} {{") for rgi in sorted(rgi_sequences): if len(rgi) == 1: continue @@ -35,6 +38,6 @@ def generate_fea(rgi_sequences): target = glyph_name(rgi) rules.append(" sub %s by %s;" % (" ".join(glyphs), target)) - rules.append("} rlig;") + rules.append(f"}} {feature_tag};") rules.append("") return "\n".join(rules)
repo: googlefonts/nanoemoji
base_commit: 67081b8abe14771b757a95791cf6b1d03e9ecf52

test_patch:
diff --git a/tests/features_test.py b/tests/features_test.py new file mode 100644 index 0000000..fba756e --- /dev/null +++ b/tests/features_test.py @@ -0,0 +1,19 @@ +from textwrap import dedent +from nanoemoji.features import generate_fea, DEFAULT_GSUB_FEATURE_TAG +import pytest + + [email protected]("feature_tag", (DEFAULT_GSUB_FEATURE_TAG, "rlig")) +def test_generate_fea(feature_tag): + rgi_sequences = [(0x1F64C,), (0x1F64C, 0x1F3FB), (0x1F64C, 0x1F3FC)] + assert generate_fea(rgi_sequences, feature_tag=feature_tag) == dedent( + f"""\ + languagesystem DFLT dflt; + languagesystem latn dflt; + + feature {feature_tag} {{ + sub g_1f64c g_1f3fb by g_1f64c_1f3fb; + sub g_1f64c g_1f3fc by g_1f64c_1f3fc; + }} {feature_tag}; + """ + )
problem_statement (a hedged sketch follows this record):
Ligature changed from rlig feature to ccmp

Different platforms and browsers differ in their ligature support ([Test](http://unifraktur.sourceforge.net/testcases/enable_opentype_features/)). Let us discuss which issues need attention for emoji ligatures.

noto-emoji and noto-emoji-svg both use ccmp by default:
https://github.com/googlefonts/noto-emoji/blob/41ae6686ace1453b432ac907a165428f2e1ad54e/NotoColorEmoji.tmpl.ttx.tmpl#L311
https://github.com/adobe-fonts/noto-emoji-svg/blob/ed5c78c8e3d46fdf8dbf7532875aaabfdcdc5c3a/GSUB.fea#L4
version: 0.0
environment_setup_commit: 67081b8abe14771b757a95791cf6b1d03e9ecf52

FAIL_TO_PASS: [ "tests/features_test.py::test_generate_fea[ccmp]", "tests/features_test.py::test_generate_fea[rlig]" ]

PASS_TO_PASS: []
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2021-04-19 14:52:41+00:00
apache-2.0
2,639
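For reference, this is how the patched `generate_fea` is called, mirroring the new test above: multi-codepoint RGI sequences become ligature substitutions under `ccmp` by default, with `rlig` still available via the new parameter. The output comments are abridged; this assumes nanoemoji is installed.

```python
from nanoemoji.features import generate_fea

# U+1F64C (raising hands) plus two skin-tone variants; single-codepoint
# sequences produce no substitution rule.
rgi_sequences = [(0x1F64C,), (0x1F64C, 0x1F3FB), (0x1F64C, 0x1F3FC)]

print(generate_fea(rgi_sequences))
# feature ccmp { sub g_1f64c g_1f3fb by g_1f64c_1f3fb; ... } ccmp;

print(generate_fea(rgi_sequences, feature_tag="rlig"))
# feature rlig { ... } rlig;  (opt back into the old behavior)
```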
instance_id: googlefonts__picosvg-128

patch:
diff --git a/src/picosvg/svg.py b/src/picosvg/svg.py index ffd6e95..52a5bb1 100644 --- a/src/picosvg/svg.py +++ b/src/picosvg/svg.py @@ -19,11 +19,19 @@ import itertools from lxml import etree # pytype: disable=import-error import re from typing import List, Optional, Tuple -from picosvg.svg_meta import ntos, svgns, xlinkns, parse_css_declarations +from picosvg.svg_meta import ( + number_or_percentage, + ntos, + strip_ns, + svgns, + xlinkns, + parse_css_declarations, +) from picosvg.svg_types import * +from picosvg.svg_transform import Affine2D import numbers -_ELEMENT_CLASSES = { +_SHAPE_CLASSES = { "circle": SVGCircle, "ellipse": SVGEllipse, "line": SVGLine, @@ -32,8 +40,17 @@ _ELEMENT_CLASSES = { "polyline": SVGPolyline, "rect": SVGRect, } -_CLASS_ELEMENTS = {v: f"{{{svgns()}}}{k}" for k, v in _ELEMENT_CLASSES.items()} -_ELEMENT_CLASSES.update({f"{{{svgns()}}}{k}": v for k, v in _ELEMENT_CLASSES.items()}) +_CLASS_ELEMENTS = {v: f"{{{svgns()}}}{k}" for k, v in _SHAPE_CLASSES.items()} +_SHAPE_CLASSES.update({f"{{{svgns()}}}{k}": v for k, v in _SHAPE_CLASSES.items()}) + +_GRADIENT_CLASSES = { + "linearGradient": SVGLinearGradient, + "radialGradient": SVGRadialGradient, +} +_GRADIENT_COORDS = { + "linearGradient": (("x1", "y1"), ("x2", "y2")), + "radialGradient": (("cx", "cy"), ("fx", "fy")), +} _XLINK_TEMP = "xlink_" @@ -46,6 +63,10 @@ _MAX_PCT_ERROR = 0.1 _DEFAULT_DEFAULT_TOLERENCE = 0.1 +# Rounding for rewritten gradient matrices +_GRADIENT_TRANSFORM_NDIGITS = 6 + + def _xlink_href_attr_name() -> str: return f"{{{xlinkns()}}}href" @@ -104,9 +125,10 @@ def _field_name(attr_name): def from_element(el): - if el.tag not in _ELEMENT_CLASSES: + if el.tag not in _SHAPE_CLASSES: raise ValueError(f"Bad tag <{el.tag}>") - data_type = _ELEMENT_CLASSES[el.tag] + data_type = _SHAPE_CLASSES[el.tag] + parse_fn = getattr(data_type, "from_element", None) args = { f.name: f.type(el.attrib[_attr_name(f.name)]) for f in dataclasses.fields(data_type) @@ -148,8 +170,9 @@ class SVG: if self.elements: return self.elements elements = [] + view_box = self.view_box() for el in self.svg_root.iter("*"): - if el.tag not in _ELEMENT_CLASSES: + if el.tag not in _SHAPE_CLASSES: continue elements.append((el, (from_element(el),))) self.elements = elements @@ -679,17 +702,89 @@ class SVG: return self + def _select_gradients(self): + return self.xpath(" | ".join(f"//svg:{tag}" for tag in _GRADIENT_CLASSES)) + + def _collect_gradients(self, inplace=False): + if not inplace: + svg = SVG(copy.deepcopy(self.svg_root)) + svg._collect_gradients(inplace=False) + return svg + + # Collect gradients; remove other defs + defs = etree.Element(f"{{{svgns()}}}defs", nsmap=self.svg_root.nsmap) + for gradient in self._select_gradients(): + gradient.getparent().remove(gradient) + defs.append(gradient) + + for def_el in [e for e in self.xpath("//svg:defs")]: + def_el.getparent().remove(def_el) + + self.svg_root.insert(0, defs) + + def _apply_gradient_translation(self, inplace=False): + if not inplace: + svg = SVG(copy.deepcopy(self.svg_root)) + svg._apply_gradient_translation(inplace=True) + return svg + + for el in self._select_gradients(): + gradient = _GRADIENT_CLASSES[strip_ns(el.tag)].from_element( + el, self.view_box() + ) + affine = gradient.gradientTransform + a, b, c, d, dx, dy = affine + if (dx, dy) == (0, 0): + continue + affine_prime = affine._replace(e=0, f=0) + + for x_attr, y_attr in _GRADIENT_COORDS[strip_ns(el.tag)]: + # if at default just ignore + if x_attr not in el.attrib and y_attr not in el.attrib: + continue + x = 
getattr(gradient, x_attr) + y = getattr(gradient, y_attr) + + # We need x`, y` such that matrix a b c d 0 0 yields same + # result as x,y with a b c d e f + # That is: + # 1) ax` + cy` + 0 = ax + cy + e + # 2) bx` + dy` + 0 = bx + dy + f + # ^ rhs is a known scalar; we'll call r1, r2 + # multiply 1) by b/a so when subtracted from 2) we eliminate x` + # 1) bx` + (b/a)cy` = (b/a) * r1 + # 2) - 1) bx` - bx` + dy` - (b/a)cy` = r2 - (b/a) * r1 + # y` = (r2 - (b/a) * r1) / (d - (b/a)c) + r1, r2 = affine.map_point((x, y)) + assert r1 == a * x + c * y + dx + assert r2 == b * x + d * y + dy + y_prime = (r2 - r1 * b / a) / (d - b * c / a) + + # Sub y` into 1) + # 1) x` = (r1 - cy`) / a + x_prime = (r1 - c * y_prime) / a + + # sanity check: a`(x`, y`) should be a(x, y) + p = affine.map_point((x, y)) + p_prime = affine_prime.map_point((x_prime, y_prime)) + assert p.almost_equals(p_prime) + + el.attrib[x_attr] = ntos(round(x_prime, _GRADIENT_TRANSFORM_NDIGITS)) + el.attrib[y_attr] = ntos(round(y_prime, _GRADIENT_TRANSFORM_NDIGITS)) + + if affine_prime != Affine2D.identity(): + el.attrib["gradientTransform"] = ( + "matrix(" + " ".join(ntos(v) for v in affine_prime) + ")" + ) + else: + del el.attrib["gradientTransform"] + def checkpicosvg(self): """Check for nano violations, return xpaths to bad elements. If result sequence empty then this is a valid picosvg. """ - def _strip_ns(tagname): - if "}" in tagname: - return tagname[tagname.index("}") + 1 :] - return tagname - self._update_etree() errors = [] @@ -705,7 +800,7 @@ class SVG: frontier = [(0, self.svg_root, "")] while frontier: el_idx, el, parent_path = frontier.pop(0) - el_tag = _strip_ns(el.tag) + el_tag = strip_ns(el.tag) el_path = f"{parent_path}/{el_tag}[{el_idx}]" if not any((re.match(pat, el_path) for pat in path_whitelist)): @@ -742,16 +837,8 @@ class SVG: self.absolute(inplace=True) self.round_floats(ndigits, inplace=True) - # Collect gradients; remove other defs - defs = etree.Element(f"{{{svgns()}}}defs", nsmap=self.svg_root.nsmap) - for gradient in self.xpath("//svg:linearGradient | //svg:radialGradient"): - gradient.getparent().remove(gradient) - defs.append(gradient) - - for def_el in [e for e in self.xpath("//svg:defs")]: - def_el.getparent().remove(def_el) - - self.svg_root.insert(0, defs) + self._apply_gradient_translation(inplace=True) + self._collect_gradients(inplace=True) nano_violations = self.checkpicosvg() if nano_violations: diff --git a/src/picosvg/svg_meta.py b/src/picosvg/svg_meta.py index 253d333..fea4c73 100644 --- a/src/picosvg/svg_meta.py +++ b/src/picosvg/svg_meta.py @@ -36,6 +36,12 @@ def xlinkns(): return "http://www.w3.org/1999/xlink" +def strip_ns(tagname): + if "}" in tagname: + return tagname[tagname.index("}") + 1 :] + return tagname + + # https://www.w3.org/TR/SVG11/paths.html#PathData _CMD_ARGS = { "m": 2, @@ -100,6 +106,10 @@ def ntos(n: float) -> str: return str(int(n)) if isinstance(n, float) and n.is_integer() else str(n) +def number_or_percentage(s: str, scale=1) -> float: + return float(s[:-1]) / 100 * scale if s.endswith("%") else float(s) + + def path_segment(cmd, *args): # put commas between coords, spaces otherwise, author readability pref args_per_cmd = check_cmd(cmd, args) diff --git a/src/picosvg/svg_types.py b/src/picosvg/svg_types.py index b8ebd5e..3565354 100644 --- a/src/picosvg/svg_types.py +++ b/src/picosvg/svg_types.py @@ -15,7 +15,16 @@ import copy import dataclasses from picosvg.geometric_types import Point, Rect -from picosvg import svg_meta +from picosvg.svg_meta import ( + 
check_cmd, + cmd_coords, + number_or_percentage, + parse_css_declarations, + path_segment, + strip_ns, + SVGCommand, + SVGCommandSeq, +) from picosvg import svg_pathops from picosvg.arc_to_cubic import arc_to_cubic from picosvg.svg_path_iter import parse_svg_path @@ -45,7 +54,7 @@ def _explicit_lines_callback(subpath_start, curr_pos, cmd, args, *_): def _relative_to_absolute(curr_pos, cmd, args): - x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(cmd) + x_coord_idxs, y_coord_idxs = cmd_coords(cmd) if cmd.islower(): cmd = cmd.upper() args = list(args) # we'd like to mutate 'em @@ -59,7 +68,7 @@ def _relative_to_absolute(curr_pos, cmd, args): def _next_pos(curr_pos, cmd, cmd_args): # update current position - x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(cmd) + x_coord_idxs, y_coord_idxs = cmd_coords(cmd) new_x, new_y = curr_pos if cmd.isupper(): if x_coord_idxs: @@ -79,7 +88,7 @@ def _move_endpoint(curr_pos, cmd, cmd_args, new_endpoint): # we need to be able to alter both axes ((cmd, cmd_args),) = _explicit_lines_callback(None, curr_pos, cmd, cmd_args) - x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(cmd) + x_coord_idxs, y_coord_idxs = cmd_coords(cmd) cmd_args = list(cmd_args) # we'd like to mutate new_x, new_y = new_endpoint if cmd.islower(): @@ -179,7 +188,7 @@ class SVGShape: def as_path(self) -> "SVGPath": raise NotImplementedError("You should implement as_path") - def as_cmd_seq(self) -> svg_meta.SVGCommandSeq: + def as_cmd_seq(self) -> SVGCommandSeq: return ( self.as_path() .explicit_lines() # hHvV => lL @@ -193,7 +202,7 @@ class SVGShape: # only meaningful for path, which overrides return self - def stroke_commands(self, tolerance) -> Generator[svg_meta.SVGCommand, None, None]: + def stroke_commands(self, tolerance) -> Generator[SVGCommand, None, None]: return svg_pathops.stroke( self.as_cmd_seq(), self.stroke_linecap, @@ -217,7 +226,7 @@ class SVGShape: f.name.replace("_", "-"): f.type for f in dataclasses.fields(self) } raw_attrs = {} - unparsed_style = svg_meta.parse_css_declarations( + unparsed_style = parse_css_declarations( target.style, raw_attrs, property_names=attr_types.keys() ) for attr_name, attr_value in raw_attrs.items(): @@ -245,7 +254,7 @@ class SVGShape: # https://www.w3.org/TR/SVG11/paths.html#PathElement @dataclasses.dataclass -class SVGPath(SVGShape, svg_meta.SVGCommandSeq): +class SVGPath(SVGShape, SVGCommandSeq): d: str = "" def __init__(self, **kwargs): @@ -258,7 +267,7 @@ class SVGPath(SVGShape, svg_meta.SVGCommandSeq): self.d += path_snippet def _add_cmd(self, cmd, *args): - self._add(svg_meta.path_segment(cmd, *args)) + self._add(path_segment(cmd, *args)) def M(self, *args): self._add_cmd("M", *args) @@ -267,7 +276,7 @@ class SVGPath(SVGShape, svg_meta.SVGCommandSeq): self._add_cmd("m", *args) def _arc(self, c, rx, ry, x, y, large_arc): - self._add(svg_meta.path_segment(c, rx, ry, 0, large_arc, 1, x, y)) + self._add(path_segment(c, rx, ry, 0, large_arc, 1, x, y)) def A(self, rx, ry, x, y, large_arc=0): self._arc("A", rx, ry, x, y, large_arc) @@ -332,7 +341,7 @@ class SVGPath(SVGShape, svg_meta.SVGCommandSeq): # iteration gives us exploded commands for idx, (cmd, args) in enumerate(self): - svg_meta.check_cmd(cmd, args) + check_cmd(cmd, args) if idx == 0 and cmd == "m": cmd = "M" @@ -366,7 +375,7 @@ class SVGPath(SVGShape, svg_meta.SVGCommandSeq): # Shift the absolute parts and call it a day. 
if cmd.islower(): return ((cmd, args),) - x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(cmd) + x_coord_idxs, y_coord_idxs = cmd_coords(cmd) args = list(args) # we'd like to mutate 'em for x_coord_idx in x_coord_idxs: args[x_coord_idx] += dx @@ -485,13 +494,11 @@ class SVGPath(SVGShape, svg_meta.SVGCommandSeq): return target @classmethod - def from_commands( - cls, svg_cmds: Generator[svg_meta.SVGCommand, None, None] - ) -> "SVGPath": + def from_commands(cls, svg_cmds: Generator[SVGCommand, None, None]) -> "SVGPath": return cls().update_path(svg_cmds, inplace=True) def update_path( - self, svg_cmds: Generator[svg_meta.SVGCommand, None, None], inplace=False + self, svg_cmds: Generator[SVGCommand, None, None], inplace=False ) -> "SVGPath": target = self if not inplace: @@ -637,6 +644,90 @@ class SVGRect(SVGShape): return path +def _get_gradient_units_relative_scale(grad_el, view_box): + gradient_units = grad_el.attrib.get("gradientUnits", "objectBoundingBox") + if gradient_units == "userSpaceOnUse": + # For gradientUnits="userSpaceOnUse", percentages represent values relative to + # the current viewport. + return view_box + elif gradient_units == "objectBoundingBox": + # For gradientUnits="objectBoundingBox", percentages represent values relative + # to the object bounding box. The latter defines an abstract coordinate system + # with origin at (0,0) and a nominal width and height = 1. + return Rect(0, 0, 1, 1) + else: + raise ValueError( + f'{strip_ns(grad_el.tag)} gradientUnits="{gradient_units}" not supported' + ) + + +def _parse_common_gradient_parts(gradient, el, view_box): + self = gradient + self.gradientUnits = _get_gradient_units_relative_scale(el, view_box) + if "gradientTransform" in el.attrib: + self.gradientTransform = Affine2D.fromstring(el.attrib["gradientTransform"]) + if "spreadMethod" in el.attrib: + self.spreadMethod = el.attrib["spreadMethod"] + return self.gradientUnits.w, self.gradientUnits.h + + +# https://developer.mozilla.org/en-US/docs/Web/SVG/Element/linearGradient +# Should be parsed with from_element [email protected] +class SVGLinearGradient: + x1: float = 0.0 + x2: float = 0.0 + y1: float = 0.0 + y2: float = 0.0 + gradientUnits: Rect = Rect(0, 0, 1, 1) + gradientTransform: Affine2D = Affine2D.identity() + spreadMethod: str = "pad" + + @staticmethod + def from_element(el, view_box) -> "SVGLinearGradient": + self = SVGLinearGradient() + width, height = _parse_common_gradient_parts(self, el, view_box) + + self.x1 = number_or_percentage(el.attrib.get("x1", "0%"), width) + self.y1 = number_or_percentage(el.attrib.get("y1", "0%"), height) + self.x2 = number_or_percentage(el.attrib.get("x2", "100%"), width) + self.y2 = number_or_percentage(el.attrib.get("y2", "0%"), height) + return self + + +# https://developer.mozilla.org/en-US/docs/Web/SVG/Element/radialGradient +# Should be parsed with from_element [email protected] +class SVGRadialGradient: + cx: float = 0.0 + cy: float = 0.0 + r: float = 0.0 + fr: float = 0.0 + fx: float = 0.0 + fy: float = 0.0 + gradientUnits: Rect = Rect(0, 0, 1, 1) + gradientTransform: Affine2D = Affine2D.identity() + spreadMethod: str = "pad" + + @staticmethod + def from_element(el, view_box) -> "SVGRadialGradient": + self = SVGRadialGradient() + width, height = _parse_common_gradient_parts(self, el, view_box) + + self.cx = number_or_percentage(el.attrib.get("cx", "50%"), width) + self.cy = number_or_percentage(el.attrib.get("cy", "50%"), height) + self.r = number_or_percentage(el.attrib.get("r", "50%"), width) + + raw_fx = 
el.attrib.get("fx") + self.fx = number_or_percentage(raw_fx, width) if raw_fx is not None else self.cx + raw_fy = el.attrib.get("fy") + self.fy = ( + number_or_percentage(raw_fy, height) if raw_fy is not None else self.cy + ) + self.fr = number_or_percentage(el.attrib.get("fr", "0%"), width) + return self + + def union(shapes: Iterable[SVGShape]) -> SVGPath: return SVGPath.from_commands( svg_pathops.union(
googlefonts/picosvg
9e1b496622698ef04a260c47658ab2059a085d23
diff --git a/tests/svg_test.py b/tests/svg_test.py index 965d0ca..2b781b5 100644 --- a/tests/svg_test.py +++ b/tests/svg_test.py @@ -17,7 +17,7 @@ from lxml import etree import os import pytest from picosvg.svg import SVG -from picosvg import svg_meta +from picosvg.svg_meta import strip_ns, parse_css_declarations from svg_test_helpers import * @@ -380,7 +380,7 @@ def test_parse_css_declarations( style, property_names, expected_output, expected_unparsed ): output = {} - unparsed = svg_meta.parse_css_declarations(style, output, property_names) + unparsed = parse_css_declarations(style, output, property_names) assert output == expected_output assert unparsed == expected_unparsed @@ -388,7 +388,7 @@ def test_parse_css_declarations( @pytest.mark.parametrize("style", ["foo;bar;", "foo:bar:baz;"]) def test_parse_css_declarations_invalid(style): with pytest.raises(ValueError, match="Invalid CSS declaration syntax"): - svg_meta.parse_css_declarations(style, {}) + parse_css_declarations(style, {}) @pytest.mark.parametrize( @@ -403,3 +403,47 @@ def test_apply_style_attributes(actual, expected_result): expected_result, lambda svg: svg.shapes() and svg.apply_style_attributes(), ) + + [email protected]( + "gradient_string, expected_result", + [ + # No transform, no change + ( + '<linearGradient id="c" x1="63.85" x2="63.85" y1="4245" y2="4137.3" gradientUnits="userSpaceOnUse"/>', + '<linearGradient id="c" x1="63.85" x2="63.85" y1="4245" y2="4137.3" gradientUnits="userSpaceOnUse"/>', + ), + # Real example from emoji_u1f392.svg w/ dx changed from 0 to 1 + # scale, translate + ( + '<linearGradient id="c" x1="63.85" x2="63.85" y1="4245" y2="4137.3" gradientTransform="translate(1 -4122)" gradientUnits="userSpaceOnUse"/>', + '<linearGradient id="c" x1="64.85" x2="64.85" y1="123" y2="15.3" gradientUnits="userSpaceOnUse"/>', + ), + # Real example from emoji_u1f392.svg w/sx changed from 1 to 0.5 + # scale, translate + ( + '<radialGradient id="b" cx="63.523" cy="12368" r="53.477" gradientTransform="matrix(.5 0 0 .2631 0 -3150)" gradientUnits="userSpaceOnUse"/>', + '<radialGradient id="b" cx="63.523" cy="395.366021" r="53.477" gradientTransform="matrix(0.5 0 0 0.2631 0 0)" gradientUnits="userSpaceOnUse"/>', + ), + # Real example from emoji_u1f44d.svg + # Using all 6 parts + ( + '<radialGradient id="d" cx="2459.4" cy="-319.18" r="20.331" gradientTransform="matrix(-1.3883 .0794 -.0374 -.6794 3505.4 -353.39)" gradientUnits="userSpaceOnUse"/>', + '<radialGradient id="d" cx="-71.60264" cy="-94.82264" r="20.331" gradientTransform="matrix(-1.3883 0.0794 -0.0374 -0.6794 0 0)" gradientUnits="userSpaceOnUse"/>', + ), + ], +) +def test_apply_gradient_translation(gradient_string, expected_result): + svg_string = ( + '<svg version="1.1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128">' + + gradient_string + + "</svg>" + ) + svg = SVG.fromstring(svg_string)._apply_gradient_translation() + el = svg.xpath_one("//svg:linearGradient | //svg:radialGradient") + + for node in svg.svg_root.getiterator(): + node.tag = etree.QName(node).localname + etree.cleanup_namespaces(svg.svg_root) + + assert etree.tostring(el).decode("utf-8") == expected_result
Apply gradient translation
Noto has things like:

```xml
    <linearGradient id="c" x1="63.85" x2="63.85" y1="4245" y2="4137.3" gradientTransform="translate(0 -4122)" gradientUnits="userSpaceOnUse">
    <radialGradient id="b" cx="63.523" cy="12368" r="53.477" gradientTransform="matrix(1 0 0 .2631 0 -3150)" gradientUnits="userSpaceOnUse">
```

At minimum, just apply the translate transform on a gradient. Perhaps the whole transform should be applied; that needs more thought.

Credit to @anthrotype for spotting this pattern.
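A minimal sketch of the translate-only case (an illustration with a made-up helper name, not picosvg's implementation): for `gradientTransform="translate(e f)"` the transform can be baked out by shifting the gradient coordinates directly.

```python
# Hypothetical helper, not part of picosvg: bakes gradientTransform="translate(e f)"
# into a linearGradient's endpoints so the transform attribute can be dropped.
def bake_translate(x1, y1, x2, y2, e, f):
    return (x1 + e, y1 + f, x2 + e, y2 + f)

# The first gradient above: translate(0 -4122) applied to (63.85, 4245)..(63.85, 4137.3)
print(bake_translate(63.85, 4245, 63.85, 4137.3, 0, -4122))
# -> approximately (63.85, 123.0, 63.85, 15.3)
```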
0.0
9e1b496622698ef04a260c47658ab2059a085d23
[ "tests/svg_test.py::test_common_attrib[<path", "tests/svg_test.py::test_common_attrib[<rect", "tests/svg_test.py::test_common_attrib[<polyline", "tests/svg_test.py::test_common_attrib[<line", "tests/svg_test.py::test_shapes_to_paths[<path", "tests/svg_test.py::test_shapes_to_paths[<line", "tests/svg_test.py::test_shapes_to_paths[<rect", "tests/svg_test.py::test_shapes_to_paths[<polygon", "tests/svg_test.py::test_shapes_to_paths[<polyline", "tests/svg_test.py::test_shapes_to_paths[<circle", "tests/svg_test.py::test_shapes_to_paths[<ellipse", "tests/svg_test.py::test_iter[<line", "tests/svg_test.py::test_iter[<path", "tests/svg_test.py::test_apply_clip_path[clip-rect.svg-clip-rect-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-ellipse.svg-clip-ellipse-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-curves.svg-clip-curves-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-multirect.svg-clip-multirect-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-groups.svg-clip-groups-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-use.svg-clip-use-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-rule-evenodd.svg-clip-rule-evenodd-clipped.svg]", "tests/svg_test.py::test_resolve_use[use-ellipse.svg-use-ellipse-resolved.svg]", "tests/svg_test.py::test_ungroup[ungroup-before.svg-ungroup-after.svg]", "tests/svg_test.py::test_ungroup[ungroup-multiple-children-before.svg-ungroup-multiple-children-after.svg]", "tests/svg_test.py::test_ungroup[twemoji-lesotho-flag-before.svg-twemoji-lesotho-flag-after-ungroup.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-simplepath-before.svg-stroke-simplepath-after.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-path-before.svg-stroke-path-after.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-capjoinmiterlimit-before.svg-stroke-capjoinmiterlimit-after.svg]", "tests/svg_test.py::test_transform[rotated-rect.svg-rotated-rect-after.svg]", "tests/svg_test.py::test_transform[translate-rect.svg-translate-rect-after.svg]", "tests/svg_test.py::test_topicosvg[ungroup-before.svg-ungroup-nano.svg]", "tests/svg_test.py::test_topicosvg[ungroup-multiple-children-before.svg-ungroup-multiple-children-nano.svg]", "tests/svg_test.py::test_topicosvg[group-stroke-before.svg-group-stroke-nano.svg]", "tests/svg_test.py::test_topicosvg[arcs-before.svg-arcs-nano.svg]", "tests/svg_test.py::test_topicosvg[invisible-before.svg-invisible-nano.svg]", "tests/svg_test.py::test_topicosvg[transform-before.svg-transform-nano.svg]", "tests/svg_test.py::test_topicosvg[group-data-name-before.svg-group-data-name-after.svg]", "tests/svg_test.py::test_topicosvg[matrix-before.svg-matrix-nano.svg]", "tests/svg_test.py::test_topicosvg[degenerate-before.svg-degenerate-nano.svg]", "tests/svg_test.py::test_topicosvg[fill-rule-evenodd-before.svg-fill-rule-evenodd-nano.svg]", "tests/svg_test.py::test_topicosvg[twemoji-lesotho-flag-before.svg-twemoji-lesotho-flag-nano.svg]", "tests/svg_test.py::test_topicosvg[inline-css-style-before.svg-inline-css-style-nano.svg]", "tests/svg_test.py::test_topicosvg[clipped-strokes-before.svg-clipped-strokes-nano.svg]", "tests/svg_test.py::test_remove_unpainted_shapes[invisible-before.svg-invisible-after.svg]", "tests/svg_test.py::test_checkpicosvg[good-defs-0.svg-expected_violations0]", "tests/svg_test.py::test_checkpicosvg[bad-defs-0.svg-expected_violations1]", "tests/svg_test.py::test_checkpicosvg[bad-defs-1.svg-expected_violations2]", "tests/svg_test.py::test_viewbox[<svg", 
"tests/svg_test.py::test_remove_attributes[<svg", "tests/svg_test.py::test_tolerance[<svg", "tests/svg_test.py::test_parse_css_declarations[fill:none-None-expected_output0-]", "tests/svg_test.py::test_parse_css_declarations[fill:", "tests/svg_test.py::test_parse_css_declarations[", "tests/svg_test.py::test_parse_css_declarations[enable-background:new", "tests/svg_test.py::test_parse_css_declarations_invalid[foo;bar;]", "tests/svg_test.py::test_parse_css_declarations_invalid[foo:bar:baz;]", "tests/svg_test.py::test_apply_style_attributes[inline-css-style-before.svg-inline-css-style-after.svg]", "tests/svg_test.py::test_apply_gradient_translation[<linearGradient", "tests/svg_test.py::test_apply_gradient_translation[<radialGradient" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-11-09 05:15:48+00:00
apache-2.0
2,640
googlefonts__picosvg-131
diff --git a/src/picosvg/svg.py b/src/picosvg/svg.py index b82838a..d8bd22f 100644 --- a/src/picosvg/svg.py +++ b/src/picosvg/svg.py @@ -733,8 +733,9 @@ class SVG: el, self.view_box() ) affine = gradient.gradientTransform - a, b, c, d, dx, dy = affine - if (dx, dy) == (0, 0): + a, b, c, d, e, f = affine + # no translate? nop! + if (e, f) == (0, 0): continue affine_prime = affine._replace(e=0, f=0) @@ -756,13 +757,24 @@ class SVG: # 2) - 1) bx` - bx` + dy` - (b/a)cy` = r2 - (b/a) * r1 # y` = (r2 - (b/a) * r1) / (d - (b/a)c) r1, r2 = affine.map_point((x, y)) - assert r1 == a * x + c * y + dx - assert r2 == b * x + d * y + dy - y_prime = (r2 - r1 * b / a) / (d - b * c / a) - - # Sub y` into 1) - # 1) x` = (r1 - cy`) / a - x_prime = (r1 - c * y_prime) / a + assert r1 == a * x + c * y + e + assert r2 == b * x + d * y + f + + if a != 0: + y_prime = (r2 - r1 * b / a) / (d - b * c / a) + + # Sub y` into 1) + # 1) x` = (r1 - cy`) / a + x_prime = (r1 - c * y_prime) / a + else: + # if a == 0 then above gives div / 0. Take a simpler path. + # 1) 0x` + cy` + 0 = 0x + cy + e + # y` = y + e/c + y_prime = y + e / c + # Sub y` into 2) + # 2) bx` + dy` + 0 = bx + dy + f + # x` = x + dy/b + f/b - dy`/b + x_prime = x + (d * y / b) + (f / b) - (d * y_prime / b) # sanity check: a`(x`, y`) should be a(x, y) # all our float brutality damages points; low tolerance sanity checks!
googlefonts/picosvg
253261829a32da86933b335fe613843b870973c7
diff --git a/tests/svg_test.py b/tests/svg_test.py index 81294a0..e89ed6a 100644 --- a/tests/svg_test.py +++ b/tests/svg_test.py @@ -436,6 +436,12 @@ def test_apply_style_attributes(actual, expected_result): '<radialGradient id="mbbox" cx="0.75" cy="0.75" r="0.40" gradientTransform="matrix(1 1 -0.7873 -0.001717 0.5 0)" gradientUnits="objectBoundingBox"/>', '<radialGradient id="mbbox" cx="0.748907" cy="0.11353" r="0.40" gradientTransform="matrix(1 1 -0.7873 -0.001717 0 0)" gradientUnits="objectBoundingBox"/>', ), + # Real example from emoji_u26BE + # https://github.com/googlefonts/picosvg/issues/129 + ( + '<radialGradient id="f" cx="-779.79" cy="3150" r="58.471" gradientTransform="matrix(0 1 -1 0 3082.5 1129.5)" gradientUnits="userSpaceOnUse"/>', + '<radialGradient id="f" cx="349.71" cy="67.5" r="58.471" gradientTransform="matrix(0 1 -1 0 0 0)" gradientUnits="userSpaceOnUse"/>', + ), ], ) def test_apply_gradient_translation(gradient_string, expected_result):
ZeroDivisionError when applying gradient translation the infamous baseball emoji (U+26BE) produces a ZeroDivisionError when passed through the latest picosvg (following #128) ``` $ picosvg ../color-fonts/font-srcs/noto-emoji/svg/emoji_u26be.svg Traceback (most recent call last): File "/Users/clupo/Github/nanoemoji/.venv/bin/picosvg", line 33, in <module> sys.exit(load_entry_point('picosvg', 'console_scripts', 'picosvg')()) File "/Users/clupo/Github/picosvg/src/picosvg/picosvg.py", line 39, in main svg = SVG.parse(input_file).topicosvg() File "/Users/clupo/Github/picosvg/src/picosvg/svg.py", line 822, in topicosvg svg.topicosvg(inplace=True) File "/Users/clupo/Github/picosvg/src/picosvg/svg.py", line 841, in topicosvg self._apply_gradient_translation(inplace=True) File "/Users/clupo/Github/picosvg/src/picosvg/svg.py", line 761, in _apply_gradient_translation y_prime = (r2 - r1 * b / a) / (d - b * c / a) ZeroDivisionError: float division by zero ``` Investigating
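The eventual fix takes a simpler special-case path when `a == 0`. As an illustration (ours, not the picosvg code), the same unknowns can also be solved via the 2x2 determinant, which only requires `a*d - b*c != 0`:

```python
# Illustration only: find (x', y') such that the transform with translation
# removed maps (x', y') where the full transform maps (x, y). Solving
# a*x' + c*y' = r1 and b*x' + d*y' = r2 by matrix inverse avoids dividing
# by a, so a == 0 (e.g. the 90-degree rotation matrix(0 1 -1 0 ...)) is fine.
def untranslated_point(a, b, c, d, e, f, x, y):
    r1 = a * x + c * y + e
    r2 = b * x + d * y + f
    det = a * d - b * c
    return ((d * r1 - c * r2) / det, (a * r2 - b * r1) / det)

# The baseball gradient: matrix(0 1 -1 0 3082.5 1129.5) with cx=-779.79, cy=3150
print(untranslated_point(0, 1, -1, 0, 3082.5, 1129.5, -779.79, 3150))
# -> approximately (349.71, 67.5), matching the expected cx/cy in the test patch above
```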
0.0
253261829a32da86933b335fe613843b870973c7
[ "tests/svg_test.py::test_apply_gradient_translation[<radialGradient" ]
[ "tests/svg_test.py::test_common_attrib[<path", "tests/svg_test.py::test_common_attrib[<rect", "tests/svg_test.py::test_common_attrib[<polyline", "tests/svg_test.py::test_common_attrib[<line", "tests/svg_test.py::test_shapes_to_paths[<path", "tests/svg_test.py::test_shapes_to_paths[<line", "tests/svg_test.py::test_shapes_to_paths[<rect", "tests/svg_test.py::test_shapes_to_paths[<polygon", "tests/svg_test.py::test_shapes_to_paths[<polyline", "tests/svg_test.py::test_shapes_to_paths[<circle", "tests/svg_test.py::test_shapes_to_paths[<ellipse", "tests/svg_test.py::test_iter[<line", "tests/svg_test.py::test_iter[<path", "tests/svg_test.py::test_apply_clip_path[clip-rect.svg-clip-rect-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-ellipse.svg-clip-ellipse-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-curves.svg-clip-curves-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-multirect.svg-clip-multirect-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-groups.svg-clip-groups-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-use.svg-clip-use-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-rule-evenodd.svg-clip-rule-evenodd-clipped.svg]", "tests/svg_test.py::test_resolve_use[use-ellipse.svg-use-ellipse-resolved.svg]", "tests/svg_test.py::test_ungroup[ungroup-before.svg-ungroup-after.svg]", "tests/svg_test.py::test_ungroup[ungroup-multiple-children-before.svg-ungroup-multiple-children-after.svg]", "tests/svg_test.py::test_ungroup[twemoji-lesotho-flag-before.svg-twemoji-lesotho-flag-after-ungroup.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-simplepath-before.svg-stroke-simplepath-after.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-path-before.svg-stroke-path-after.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-capjoinmiterlimit-before.svg-stroke-capjoinmiterlimit-after.svg]", "tests/svg_test.py::test_transform[rotated-rect.svg-rotated-rect-after.svg]", "tests/svg_test.py::test_transform[translate-rect.svg-translate-rect-after.svg]", "tests/svg_test.py::test_topicosvg[ungroup-before.svg-ungroup-nano.svg]", "tests/svg_test.py::test_topicosvg[ungroup-multiple-children-before.svg-ungroup-multiple-children-nano.svg]", "tests/svg_test.py::test_topicosvg[group-stroke-before.svg-group-stroke-nano.svg]", "tests/svg_test.py::test_topicosvg[arcs-before.svg-arcs-nano.svg]", "tests/svg_test.py::test_topicosvg[invisible-before.svg-invisible-nano.svg]", "tests/svg_test.py::test_topicosvg[transform-before.svg-transform-nano.svg]", "tests/svg_test.py::test_topicosvg[group-data-name-before.svg-group-data-name-after.svg]", "tests/svg_test.py::test_topicosvg[matrix-before.svg-matrix-nano.svg]", "tests/svg_test.py::test_topicosvg[degenerate-before.svg-degenerate-nano.svg]", "tests/svg_test.py::test_topicosvg[fill-rule-evenodd-before.svg-fill-rule-evenodd-nano.svg]", "tests/svg_test.py::test_topicosvg[twemoji-lesotho-flag-before.svg-twemoji-lesotho-flag-nano.svg]", "tests/svg_test.py::test_topicosvg[inline-css-style-before.svg-inline-css-style-nano.svg]", "tests/svg_test.py::test_topicosvg[clipped-strokes-before.svg-clipped-strokes-nano.svg]", "tests/svg_test.py::test_remove_unpainted_shapes[invisible-before.svg-invisible-after.svg]", "tests/svg_test.py::test_checkpicosvg[good-defs-0.svg-expected_violations0]", "tests/svg_test.py::test_checkpicosvg[bad-defs-0.svg-expected_violations1]", "tests/svg_test.py::test_checkpicosvg[bad-defs-1.svg-expected_violations2]", "tests/svg_test.py::test_viewbox[<svg", 
"tests/svg_test.py::test_remove_attributes[<svg", "tests/svg_test.py::test_tolerance[<svg", "tests/svg_test.py::test_parse_css_declarations[fill:none-None-expected_output0-]", "tests/svg_test.py::test_parse_css_declarations[fill:", "tests/svg_test.py::test_parse_css_declarations[", "tests/svg_test.py::test_parse_css_declarations[enable-background:new", "tests/svg_test.py::test_parse_css_declarations_invalid[foo;bar;]", "tests/svg_test.py::test_parse_css_declarations_invalid[foo:bar:baz;]", "tests/svg_test.py::test_apply_style_attributes[inline-css-style-before.svg-inline-css-style-after.svg]", "tests/svg_test.py::test_apply_gradient_translation[<linearGradient" ]
{ "failed_lite_validators": [ "has_media" ], "has_test_patch": true, "is_lite": false }
2020-11-11 04:03:08+00:00
apache-2.0
2,641
googlefonts__picosvg-240
diff --git a/src/picosvg/svg_transform.py b/src/picosvg/svg_transform.py index 557b775..5e519bf 100644 --- a/src/picosvg/svg_transform.py +++ b/src/picosvg/svg_transform.py @@ -32,7 +32,7 @@ from picosvg.geometric_types import ( ) from picosvg.svg_meta import ntos -DECOMPOSITION_ALMOST_EQUAL_TOLERANCE = 1e-6 +DECOMPOSITION_ALMOST_EQUAL_TOLERANCE = 1e-4 _SVG_ARG_FIXUPS = collections.defaultdict( lambda: lambda _: None,
googlefonts/picosvg
afedcf865c881f603a1b5f5470aa7561a60ccfb9
diff --git a/tests/svg_transform_test.py b/tests/svg_transform_test.py index 2f18abe..d20dc60 100644 --- a/tests/svg_transform_test.py +++ b/tests/svg_transform_test.py @@ -353,6 +353,21 @@ class TestAffine2D: Affine2D(1, 0, 0, 1, 50, -100), Affine2D(0, -1.0, 1.0, 0, 0, 0), ), + # https://github.com/googlefonts/picosvg/issues/239 + ( + Affine2D( + a=-4.382842e-08, + b=1.0027, + c=-4.2262, + d=-1.847346e-07, + e=1133.4812, + f=889.4797, + ), + Affine2D(a=1, b=0, c=0, d=1, e=887.0845192597527, f=-268.2034070511364), + Affine2D( + a=-4.382842e-08, b=1.0027, c=-4.2262, d=-1.847346e-07, e=0, f=0 + ), + ), ], ) def test_decompose_translation(
Failed to extract translation

```
.local/lib/python3.7/site-packages/picosvg/svg_transform.py", line 339, in decompose_translation
    ), f"Failed to extract translation from {self}, parts compose back to {test_compose}"
AssertionError: Failed to extract translation from Affine2D(a=-4.382842e-08, b=1.0027, c=-4.2262, d=-1.847346e-07, e=1133.4812, f=889.4797), parts compose back to Affine2D(a=-4.382842e-08, b=1.0027, c=-4.2262, d=-1.847346e-07, e=1133.4812, f=889.4796970082031)
```
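The assertion text shows the scale of the mismatch: the recomposed `f` differs from the original by roughly 3e-6, which fails the old 1e-6 tolerance but passes the new 1e-4 one. A small sketch (assuming, as the constant's name suggests, that the almost-equal check compares components with an absolute tolerance):

```python
import math

orig_f, recomposed_f = 889.4797, 889.4796970082031
print(abs(orig_f - recomposed_f))                        # ~2.99e-06
print(math.isclose(orig_f, recomposed_f, abs_tol=1e-6))  # False: the old tolerance trips the assertion
print(math.isclose(orig_f, recomposed_f, abs_tol=1e-4))  # True: the relaxed tolerance passes
```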
0.0
afedcf865c881f603a1b5f5470aa7561a60ccfb9
[ "tests/svg_transform_test.py::TestAffine2D::test_decompose_translation[affine4-expected_translate4-expected_remaining4]" ]
[ "tests/svg_transform_test.py::test_parse_svg_transform[translate(-5)-expected_result0]", "tests/svg_transform_test.py::test_parse_svg_transform[translate(3.5,", "tests/svg_transform_test.py::test_parse_svg_transform[scale(2)-expected_result2]", "tests/svg_transform_test.py::test_parse_svg_transform[scale(-2", "tests/svg_transform_test.py::test_parse_svg_transform[rotate(45.0)-expected_result4]", "tests/svg_transform_test.py::test_parse_svg_transform[rotate(90.0,", "tests/svg_transform_test.py::test_parse_svg_transform[skewx(22.5)-expected_result6]", "tests/svg_transform_test.py::test_parse_svg_transform[skewY(22.5)-expected_result7]", "tests/svg_transform_test.py::test_parse_svg_transform[matrix(2,", "tests/svg_transform_test.py::test_parse_svg_transform[translate(50", "tests/svg_transform_test.py::test_parse_svg_transform[rotate(150)translate(0,6)rotate(66)-expected_result10]", "tests/svg_transform_test.py::test_parse_svg_transform[rotate", "tests/svg_transform_test.py::test_parse_svg_transform[matrix(", "tests/svg_transform_test.py::TestAffine2D::test_map_point", "tests/svg_transform_test.py::TestAffine2D::test_map_vector", "tests/svg_transform_test.py::TestAffine2D::test_determinant", "tests/svg_transform_test.py::TestAffine2D::test_is_degenerate", "tests/svg_transform_test.py::TestAffine2D::test_scale_0_is_degenerate", "tests/svg_transform_test.py::TestAffine2D::test_inverse", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src0-dest0-none-expected0]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src1-dest1-none-expected1]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src2-dest2-none-expected2]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src3-dest3-none-expected3]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src4-dest4-none-expected4]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src5-dest5-none-expected5]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src6-dest6-xMinYMin-expected6]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src7-dest7-xMidYMin-expected7]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src8-dest8-xMaxYMin-expected8]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src9-dest9-xMinYMid-expected9]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src10-dest10-xMidYMid-expected10]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src11-dest11-xMaxYMid-expected11]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src12-dest12-xMinYMax-expected12]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src13-dest13-xMidYMax-expected13]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src14-dest14-xMaxYMax-expected14]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src15-dest15-xMinYMin-expected15]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src16-dest16-xMidYMin-expected16]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src17-dest17-xMaxYMin-expected17]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src18-dest18-xMinYMid-expected18]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src19-dest19-xMidYMid-expected19]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src20-dest20-xMaxYMid-expected20]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src21-dest21-xMinYMax-expected21]", 
"tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src22-dest22-xMidYMax-expected22]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src23-dest23-xMaxYMax-expected23]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src24-dest24-xMinYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src25-dest25-xMidYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src26-dest26-xMaxYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src27-dest27-xMinYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src28-dest28-xMidYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src29-dest29-xMaxYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src30-dest30-xMinYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src31-dest31-xMidYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src32-dest32-xMaxYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src33-dest33-xMinYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src34-dest34-xMidYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src35-dest35-xMaxYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src36-dest36-xMinYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src37-dest37-xMidYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src38-dest38-xMaxYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src39-dest39-xMinYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src40-dest40-xMidYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src41-dest41-xMaxYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src42-dest42-xMinYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src43-dest43-xMidYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src44-dest44-xMaxYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src45-dest45-xMinYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src46-dest46-xMidYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src47-dest47-xMaxYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src48-dest48-xMinYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src49-dest49-xMidYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src50-dest50-xMaxYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src51-dest51-xMinYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src52-dest52-xMidYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src53-dest53-xMaxYMin", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src54-dest54-xMinYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src55-dest55-xMidYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src56-dest56-xMaxYMid", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src57-dest57-xMinYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src58-dest58-xMidYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src59-dest59-xMaxYMax", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src60-dest60-xMinYMin-expected60]", "tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src61-dest61-xMinYMid-expected61]", 
"tests/svg_transform_test.py::TestAffine2D::test_rect_to_rect[src62-dest62-xMinYMax-expected62]", "tests/svg_transform_test.py::TestAffine2D::test_rotate_origin", "tests/svg_transform_test.py::TestAffine2D::test_product", "tests/svg_transform_test.py::TestAffine2D::test_product_ordering", "tests/svg_transform_test.py::TestAffine2D::test_gettranslate", "tests/svg_transform_test.py::TestAffine2D::test_getscale", "tests/svg_transform_test.py::TestAffine2D::test_almost_equals", "tests/svg_transform_test.py::TestAffine2D::test_decompose_scale[affine0-expected_scale0-expected_remaining0]", "tests/svg_transform_test.py::TestAffine2D::test_decompose_scale[affine1-expected_scale1-expected_remaining1]", "tests/svg_transform_test.py::TestAffine2D::test_decompose_translation[affine0-expected_translate0-expected_remaining0]", "tests/svg_transform_test.py::TestAffine2D::test_decompose_translation[affine1-expected_translate1-expected_remaining1]", "tests/svg_transform_test.py::TestAffine2D::test_decompose_translation[affine2-expected_translate2-expected_remaining2]", "tests/svg_transform_test.py::TestAffine2D::test_decompose_translation[affine3-expected_translate3-expected_remaining3]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2021-08-06 17:24:57+00:00
apache-2.0
2,642
googlefonts__picosvg-35
diff --git a/.travis.yml b/.travis.yml index 0d89dc9..f174887 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,8 @@ install: - pip install -e . script: - pytest -vv + # detect issues where tests don't work except as a group + - (for f in tests/*_test.py; do pytest -vv $f || exit; done) deploy: provider: pypi user: "__token__" diff --git a/src/picosvg/svg.py b/src/picosvg/svg.py index 3a7c994..2593476 100644 --- a/src/picosvg/svg.py +++ b/src/picosvg/svg.py @@ -277,7 +277,9 @@ class SVG: # union all the shapes under the clipPath # Fails if there are any non-shapes under clipPath - clip_path = svg_pathops.union(*[from_element(e) for e in clip_path_el]) + clip_path = SVGPath.from_commands( + svg_pathops.union(*[from_element(e).as_cmd_seq() for e in clip_path_el]) + ) return clip_path def _combine_clip_paths(self, clip_paths) -> SVGPath: @@ -286,7 +288,9 @@ class SVG: raise ValueError("Cannot combine no clip_paths") if len(clip_paths) == 1: return clip_paths[0] - return svg_pathops.intersection(*clip_paths) + return SVGPath.from_commands( + svg_pathops.intersection(*[c.as_cmd_seq() for c in clip_paths]) + ) def _new_id(self, tag, template): for i in range(100): @@ -448,7 +452,16 @@ class SVG: return (shape,) # make a new path that is the stroke - stroke = svg_pathops.stroke(shape, self.tolerance) + stroke = SVGPath.from_commands( + svg_pathops.stroke( + shape.as_cmd_seq(), + shape.stroke_linecap, + shape.stroke_linejoin, + shape.stroke_width, + shape.stroke_miterlimit, + self.tolerance, + ) + ) # convert some stroke attrs (e.g. stroke => fill) for field in dataclasses.fields(shape): @@ -515,7 +528,9 @@ class SVG: el, (target,) = self.elements[el_idx] target = target.as_path().absolute(inplace=True) - target.d = svg_pathops.intersection(target, clip_path).d + target.d = SVGPath.from_commands( + svg_pathops.intersection(target.as_cmd_seq(), clip_path.as_cmd_seq()) + ).d target.clip_path = "" self._set_element(el_idx, el, (target,)) diff --git a/src/picosvg/svg_meta.py b/src/picosvg/svg_meta.py index 05d063c..82d8bfb 100644 --- a/src/picosvg/svg_meta.py +++ b/src/picosvg/svg_meta.py @@ -13,6 +13,12 @@ # limitations under the License. 
import re +from typing import Generator, Iterable, Tuple + + +SVGCommand = Tuple[str, Tuple[float, ...]] +SVGCommandSeq = Iterable[SVGCommand] +SVGCommandGen = Generator[SVGCommand, None, None] def svgns(): diff --git a/src/picosvg/svg_pathops.py b/src/picosvg/svg_pathops.py index 8ed767d..25baac3 100644 --- a/src/picosvg/svg_pathops.py +++ b/src/picosvg/svg_pathops.py @@ -15,8 +15,9 @@ """SVGPath <=> skia-pathops constructs to enable ops on paths.""" import functools import pathops +from typing import Sequence, Tuple +from picosvg.svg_meta import SVGCommand, SVGCommandGen, SVGCommandSeq from picosvg.svg_transform import Affine2D -from picosvg.svg_types import SVGPath, SVGShape # Absolutes coords assumed @@ -44,15 +45,21 @@ _SVG_TO_SKIA_LINE_JOIN = { } -def _simple_skia_to_svg(svg_cmd, svg_path, points): +def _simple_skia_to_svg(svg_cmd, points) -> SVGCommandGen: # pathops.Path gives us sequences of points, flatten 'em - cmd_args = tuple(c for pt in points for c in pt) - svg_path._add_cmd(svg_cmd, *cmd_args) + yield (svg_cmd, tuple(c for pt in points for c in pt)) -def _qcurveto_to_svg(svg_path, points): +def _qcurveto_to_svg(points) -> SVGCommandGen: for (control_pt, end_pt) in pathops.decompose_quadratic_segment(points): - svg_path._add_cmd("Q", *control_pt, *end_pt) + yield ("Q", control_pt + end_pt) + + +def _end_path(points) -> SVGCommandGen: + if points: + raise ValueError("endPath should have no points") + return # pytype: disable=bad-return-type + yield _SKIA_CMD_TO_SVG_CMD = { @@ -65,77 +72,73 @@ _SKIA_CMD_TO_SVG_CMD = { # more interesting conversions "qCurveTo": _qcurveto_to_svg, # nop - "endPath": lambda *_: None, + "endPath": _end_path, } -def skia_path(shape: SVGShape): - path = ( - shape.as_path() - .explicit_lines() # hHvV => lL - .expand_shorthand(inplace=True) - .absolute(inplace=True) - .arcs_to_cubics(inplace=True) - ) - +def skia_path(svg_cmds: SVGCommandSeq): sk_path = pathops.Path() - for cmd, args in path: + for cmd, args in svg_cmds: if cmd not in _SVG_CMD_TO_SKIA_FN: raise ValueError(f'No mapping to Skia for "{cmd} {args}"') _SVG_CMD_TO_SKIA_FN[cmd](sk_path, *args) - return sk_path -def svg_path(skia_path: pathops.Path) -> SVGPath: - svg_path = SVGPath() +def svg_commands(skia_path: pathops.Path) -> SVGCommandGen: for cmd, points in skia_path.segments: if cmd not in _SKIA_CMD_TO_SVG_CMD: raise ValueError(f'No mapping to svg for "{cmd} {points}"') - _SKIA_CMD_TO_SVG_CMD[cmd](svg_path, points) - return svg_path - + for svg_cmd, svg_args in _SKIA_CMD_TO_SVG_CMD[cmd](points): + yield (svg_cmd, svg_args) -def _do_pathop(op, svg_shapes) -> SVGShape: - if not svg_shapes: - return SVGPath() - sk_path = skia_path(svg_shapes[0]) - for svg_shape in svg_shapes[1:]: - sk_path2 = skia_path(svg_shape) +def _do_pathop(op: str, svg_cmd_seqs: Sequence[SVGCommandSeq]) -> SVGCommandGen: + if not svg_cmd_seqs: + return # pytype: disable=bad-return-type + sk_path = skia_path(svg_cmd_seqs[0]) + for svg_cmds in svg_cmd_seqs[1:]: + sk_path2 = skia_path(svg_cmds) sk_path = pathops.op(sk_path, sk_path2, op) - return svg_path(sk_path) + return svg_commands(sk_path) -def union(*svg_shapes) -> SVGShape: - return _do_pathop(pathops.PathOp.UNION, svg_shapes) +def union(*svg_cmd_seqs: SVGCommandSeq) -> SVGCommandGen: + return _do_pathop(pathops.PathOp.UNION, svg_cmd_seqs) -def intersection(*svg_shapes) -> SVGShape: - return _do_pathop(pathops.PathOp.INTERSECTION, svg_shapes) +def intersection(*svg_cmd_seqs) -> SVGCommandGen: + return _do_pathop(pathops.PathOp.INTERSECTION, svg_cmd_seqs) -def 
transform(svg_shape: SVGShape, affine: Affine2D) -> SVGShape: - sk_path = skia_path(svg_shape).transform(*affine) - return svg_path(sk_path) +def transform(svg_cmds: SVGCommandSeq, affine: Affine2D) -> SVGCommandGen: + sk_path = skia_path(svg_cmds).transform(*affine) + return svg_commands(sk_path) -def stroke(shape: SVGShape, tolerance: float) -> SVGShape: +def stroke( + svg_cmds: SVGCommandSeq, + svg_linecap: str, + svg_linejoin: str, + stroke_width: float, + stroke_miterlimit: float, + tolerance: float, +) -> SVGCommandGen: """Create a path that is shape with it's stroke applied.""" - cap = _SVG_TO_SKIA_LINE_CAP.get(shape.stroke_linecap, None) + cap = _SVG_TO_SKIA_LINE_CAP.get(svg_linecap, None) if cap is None: - raise ValueError(f"Unsupported cap {shape.stroke_linecap}") - join = _SVG_TO_SKIA_LINE_JOIN.get(shape.stroke_linejoin, None) + raise ValueError(f"Unsupported cap {svg_linecap}") + join = _SVG_TO_SKIA_LINE_JOIN.get(svg_linejoin, None) if join is None: - raise ValueError(f"Unsupported join {shape.stroke_linejoin}") - sk_path = skia_path(shape) - sk_path.stroke(shape.stroke_width, cap, join, shape.stroke_miterlimit) + raise ValueError(f"Unsupported join {svg_linejoin}") + sk_path = skia_path(svg_cmds) + sk_path.stroke(stroke_width, cap, join, stroke_miterlimit) # nuke any conics that snuck in (e.g. with stroke-linecap="round") sk_path.convertConicsToQuads(tolerance) - return svg_path(sk_path) + return svg_commands(sk_path) -def bounding_box(shape: SVGShape): - return skia_path(shape).bounds +def bounding_box(svg_cmds: SVGCommandSeq): + return skia_path(svg_cmds).bounds diff --git a/src/picosvg/svg_reuse.py b/src/picosvg/svg_reuse.py index 6f5ac68..bc434cd 100644 --- a/src/picosvg/svg_reuse.py +++ b/src/picosvg/svg_reuse.py @@ -22,7 +22,7 @@ from picosvg.svg_transform import Affine2D def _first_move(shape: SVGShape) -> Tuple[float, float]: cmd, args = next(iter(shape.as_path())) - if cmd != "M": + if cmd.upper() != "M": raise ValueError(f"Path for {shape} should start with a move") return args diff --git a/src/picosvg/svg_types.py b/src/picosvg/svg_types.py index 0a91f22..779ec6d 100644 --- a/src/picosvg/svg_types.py +++ b/src/picosvg/svg_types.py @@ -20,6 +20,7 @@ from picosvg import svg_pathops from picosvg.arc_to_cubic import arc_to_cubic from picosvg.svg_path_iter import parse_svg_path from picosvg.svg_transform import Affine2D +from typing import Generator # Subset of https://www.w3.org/TR/SVG11/painting.html @@ -69,7 +70,7 @@ class SVGShape: self.opacity = opacity self.transform = transform - def visible(self): + def visible(self) -> bool: def _visible(fill, opacity): return fill != "none" and opacity != 0 # we're ignoring fill-opacity @@ -79,15 +80,25 @@ class SVGShape: ) def bounding_box(self) -> Rect: - x1, y1, x2, y2 = svg_pathops.bounding_box(self) + x1, y1, x2, y2 = svg_pathops.bounding_box(self.as_cmd_seq()) return Rect(x1, y1, x2 - x1, y2 - y1) - def apply_transform(self, transform: Affine2D): - return svg_pathops.transform(self, transform) + def apply_transform(self, transform: Affine2D) -> "SVGPath": + cmds = svg_pathops.transform(self.as_cmd_seq(), transform) + return SVGPath.from_commands(cmds) def as_path(self) -> "SVGPath": raise NotImplementedError("You should implement as_path") + def as_cmd_seq(self) -> svg_meta.SVGCommandSeq: + return ( + self.as_path() + .explicit_lines() # hHvV => lL + .expand_shorthand(inplace=True) + .absolute(inplace=True) + .arcs_to_cubics(inplace=True) + ) + def absolute(self, inplace=False) -> "SVGShape": """Returns equivalent path 
with only absolute commands.""" # only meaningful for path, which overrides @@ -95,9 +106,8 @@ class SVGShape: # https://www.w3.org/TR/SVG11/paths.html#PathElement -# Iterable, returning each command in the path. @dataclasses.dataclass -class SVGPath(SVGShape): +class SVGPath(SVGShape, svg_meta.SVGCommandSeq): d: str = "" def __init__(self, **kwargs): @@ -348,6 +358,15 @@ class SVGPath(SVGShape): target.walk(arc_to_cubic_callback) return target + @classmethod + def from_commands( + cls, svg_cmds: Generator[svg_meta.SVGCommand, None, None] + ) -> "SVGPath": + svg_path = cls() + for cmd, args in svg_cmds: + svg_path._add_cmd(cmd, *args) + return svg_path + # https://www.w3.org/TR/SVG11/shapes.html#CircleElement @dataclasses.dataclass
googlefonts/picosvg
1e947bcfe600dd5fee91f90fad261a46f14825eb
diff --git a/tests/svg_pathops_test.py b/tests/svg_pathops_test.py index b894fc9..e2dbf9f 100644 --- a/tests/svg_pathops_test.py +++ b/tests/svg_pathops_test.py @@ -58,12 +58,12 @@ def _round(pt, digits): ], ) def test_skia_path_roundtrip(shape, expected_segments, expected_path): - skia_path = svg_pathops.skia_path(shape) + skia_path = svg_pathops.skia_path(shape.as_cmd_seq()) rounded_segments = list(skia_path.segments) for idx, (cmd, points) in enumerate(rounded_segments): rounded_segments[idx] = (cmd, tuple(_round(pt, 3) for pt in points)) assert tuple(rounded_segments) == expected_segments - assert svg_pathops.svg_path(skia_path).d == expected_path + assert SVGPath.from_commands(svg_pathops.svg_commands(skia_path)).d == expected_path @pytest.mark.parametrize( @@ -80,7 +80,10 @@ def test_skia_path_roundtrip(shape, expected_segments, expected_path): ], ) def test_pathops_union(shapes, expected_result): - assert svg_pathops.union(*shapes).d == expected_result + assert ( + SVGPath.from_commands(svg_pathops.union(*[s.as_cmd_seq() for s in shapes])).d + == expected_result + ) @pytest.mark.parametrize( @@ -97,4 +100,9 @@ def test_pathops_union(shapes, expected_result): ], ) def test_pathops_intersection(shapes, expected_result): - assert svg_pathops.intersection(*shapes).d == expected_result + assert ( + SVGPath.from_commands( + svg_pathops.intersection(*[s.as_cmd_seq() for s in shapes]) + ).d + == expected_result + )
circular dependency
We [me I think] have managed to set up a dependency cycle that causes importing modules to be order-dependent. This also causes pytest to work for some files or sets of files and fail for others. This slipped by because the complete run-all-tests pytest invocation succeeds.
**sad**
```python
Python 3.7.7 (default, Mar 10 2020, 15:43:03)
[Clang 11.0.0 (clang-1100.0.33.17)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from picosvg import svg_types
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "src/picosvg/svg_types.py", line 19, in <module>
    from picosvg import svg_pathops
  File "src/picosvg/svg_pathops.py", line 19, in <module>
    from picosvg.svg_types import SVGPath, SVGShape
ImportError: cannot import name 'SVGPath' from 'picosvg.svg_types' (src/picosvg/svg_types.py)
```
**glad**
```python
Python 3.7.7 (default, Mar 10 2020, 15:43:03)
[Clang 11.0.0 (clang-1100.0.33.17)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from picosvg import svg_pathops
>>> from picosvg import svg_types
>>>
```
Fix the cycle so this type of nonsense doesn't occur.
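The patch breaks the cycle with the standard move: hoist the shared pieces into a leaf module (`svg_meta`) that imports nothing project-internal, so `svg_pathops` can drop its import of `svg_types`. In outline:

```python
# svg_meta.py -- leaf module, no picosvg-internal imports
from typing import Generator, Iterable, Tuple

SVGCommand = Tuple[str, Tuple[float, ...]]
SVGCommandSeq = Iterable[SVGCommand]
SVGCommandGen = Generator[SVGCommand, None, None]

# svg_pathops.py now takes and returns these command tuples and imports
# only svg_meta; svg_types.py may keep importing svg_pathops. With the
# edge svg_pathops -> svg_types removed, import order no longer matters.
```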
0.0
1e947bcfe600dd5fee91f90fad261a46f14825eb
[ "tests/svg_pathops_test.py::test_skia_path_roundtrip[shape0-expected_segments0-M1,1", "tests/svg_pathops_test.py::test_skia_path_roundtrip[shape1-expected_segments1-M4,4", "tests/svg_pathops_test.py::test_skia_path_roundtrip[shape2-expected_segments2-M1,5", "tests/svg_pathops_test.py::test_pathops_union[shapes0-M4,4", "tests/svg_pathops_test.py::test_pathops_intersection[shapes0-M6,6" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-05-27 20:52:46+00:00
apache-2.0
2,643
googlefonts__ufo2ft-509
diff --git a/Lib/ufo2ft/constants.py b/Lib/ufo2ft/constants.py index 662ed16..0028eac 100644 --- a/Lib/ufo2ft/constants.py +++ b/Lib/ufo2ft/constants.py @@ -21,5 +21,6 @@ COLOR_PALETTES_KEY = UFO2FT_PREFIX + "colorPalettes" COLOR_LAYER_MAPPING_KEY = UFO2FT_PREFIX + "colorLayerMapping" OPENTYPE_CATEGORIES_KEY = "public.openTypeCategories" +OPENTYPE_META_KEY = "public.openTypeMeta" UNICODE_VARIATION_SEQUENCES_KEY = "public.unicodeVariationSequences" diff --git a/Lib/ufo2ft/outlineCompiler.py b/Lib/ufo2ft/outlineCompiler.py index c3c2d1b..f3aff92 100644 --- a/Lib/ufo2ft/outlineCompiler.py +++ b/Lib/ufo2ft/outlineCompiler.py @@ -27,6 +27,7 @@ from fontTools.ttLib.tables.O_S_2f_2 import Panose from ufo2ft.constants import ( COLOR_LAYERS_KEY, COLOR_PALETTES_KEY, + OPENTYPE_META_KEY, UNICODE_VARIATION_SEQUENCES_KEY, ) from ufo2ft.errors import InvalidFontData @@ -86,6 +87,7 @@ class BaseOutlineCompiler: "vhea", "COLR", "CPAL", + "meta", ] ) @@ -135,6 +137,7 @@ class BaseOutlineCompiler: self.colorLayers = ( COLOR_LAYERS_KEY in self.ufo.lib and COLOR_PALETTES_KEY in self.ufo.lib ) + self.meta = OPENTYPE_META_KEY in self.ufo.lib # write the glyph order self.otf.setGlyphOrder(self.glyphOrder) @@ -154,6 +157,8 @@ class BaseOutlineCompiler: if self.colorLayers: self.setupTable_COLR() self.setupTable_CPAL() + if self.meta: + self.setupTable_meta() self.setupOtherTables() self.importTTX() @@ -978,6 +983,45 @@ class BaseOutlineCompiler: except ColorLibError as e: raise InvalidFontData("Failed to build CPAL table") from e + def setupTable_meta(self): + """ + Make the meta table. + + ***This should not be called externally.** Sublcasses + may override or supplement this method to handle the + table creation in a different way if desired. + """ + if "meta" not in self.tables: + return + + font = self.ufo + self.otf["meta"] = meta = newTable("meta") + ufo_meta = font.lib.get(OPENTYPE_META_KEY) + for key, value in ufo_meta.items(): + if key in ["dlng", "slng"]: + if not isinstance(value, list) or not all( + isinstance(string, str) for string in value + ): + raise TypeError( + f"public.openTypeMeta '{key}' value should " + "be a list of strings" + ) + meta.data[key] = ",".join(value) + elif key in ["appl", "bild"]: + if not isinstance(value, bytes): + raise TypeError( + f"public.openTypeMeta '{key}' value should be bytes." + ) + meta.data[key] = value + elif isinstance(value, bytes): + meta.data[key] = value + elif isinstance(value, str): + meta.data[key] = value.encode("utf-8") + else: + raise TypeError( + f"public.openTypeMeta '{key}' value should be bytes or a string." + ) + def setupOtherTables(self): """ Make the other tables. The default implementation does nothing.
googlefonts/ufo2ft
52408187e78296372c6983119fb9e310c0080b38
diff --git a/tests/outlineCompiler_test.py b/tests/outlineCompiler_test.py index 03ee1d9..cc9abc4 100644 --- a/tests/outlineCompiler_test.py +++ b/tests/outlineCompiler_test.py @@ -205,6 +205,29 @@ class OutlineTTFCompilerTest: assert endPts == [4] assert list(flags) == [0, 0, 0, 0, 1] + def test_setupTable_meta(self, testufo): + testufo.lib["public.openTypeMeta"] = { + "appl": b"BEEF", + "bild": b"AAAA", + "dlng": ["en-Latn", "nl-Latn"], + "slng": ["Latn"], + "PRIB": b"Some private bytes", + "PRIA": "Some private ascii string", + "PRIU": "Some private unicode string…", + } + + compiler = OutlineTTFCompiler(testufo) + ttFont = compiler.compile() + meta = ttFont["meta"] + + assert meta.data["appl"] == b"BEEF" + assert meta.data["bild"] == b"AAAA" + assert meta.data["dlng"] == "en-Latn,nl-Latn" + assert meta.data["slng"] == "Latn" + assert meta.data["PRIB"] == b"Some private bytes" + assert meta.data["PRIA"] == b"Some private ascii string" + assert meta.data["PRIU"] == "Some private unicode string…".encode("utf-8") + class OutlineOTFCompilerTest: def test_setupTable_CFF_all_blues_defined(self, testufo):
Add support for dlng and slng Now that UFO3 spec includes `public.openTypeMeta` in [lib.plist](http://unifiedfontobject.org/versions/ufo3/lib.plist/), could we get support for `dlng` and `slng` [metadata tags](https://docs.microsoft.com/en-us/typography/opentype/spec/meta#metadata-tags)?
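For reference, a minimal sketch of the table the patch ends up building, using fontTools directly (the same `newTable`/`meta.data` API the patch calls): `dlng` and `slng` are stored as comma-separated ScriptLangTag strings.

```python
from fontTools.ttLib import newTable

meta = newTable("meta")
meta.data["dlng"] = "en-Latn,nl-Latn"  # languages/scripts the design targets
meta.data["slng"] = "Latn"             # languages/scripts the font supports
# then attach to a compiled font object: otf["meta"] = meta
```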
0.0
52408187e78296372c6983119fb9e310c0080b38
[ "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_meta[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_meta[ufoLib2]" ]
[ "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_with_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_without_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_empty_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_makeGlyphsBoundingBoxes[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics_None[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_importTTX[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_no_contour_glyphs[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_os2_no_widths[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_missing_component[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_contour_starts_with_offcurve_point[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_all_blues_defined[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_blues_defined[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_some_blues_defined[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_all[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_none[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_some[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_optimize[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_optimize[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes_floats[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_importTTX[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_no_contour_glyphs[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_and_nominal_widths[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_but_no_nominal_widths[defcon]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_original_glyph_order[defcon]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_tweaked_glyph_order[defcon]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_strange_glyph_order[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[defcon-useProductionNames]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[defcon-Don't", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_production_names[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_postprocess_production_names_no_notdef[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[defcon-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[defcon-True]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[defcon-None]", 
"tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[defcon-True]", "tests/outlineCompiler_test.py::NamesTest::test_warn_name_exceeds_max_length[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_duplicate_glyph_names[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_too_long_production_name[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_raw[defcon]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_BMP[defcon]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_nonBMP_with_UVS[defcon]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes0-expected0]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes1-expected1]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes2-expected2]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes3-expected3]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes4-expected4]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes5-expected5]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes6-expected6]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes7-expected7]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes8-expected8]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes9-expected9]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes10-expected10]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes11-expected11]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes12-expected12]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes13-expected13]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes14-expected14]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes15-expected15]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes16-expected16]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes17-expected17]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes18-expected18]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes19-expected19]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes20-expected20]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes21-expected21]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes22-expected22]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes23-expected23]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes24-expected24]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes25-expected25]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes26-expected26]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes27-expected27]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes28-expected28]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes29-expected29]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes30-expected30]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes31-expected31]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes32-expected32]", 
"tests/outlineCompiler_test.py::test_custom_layer_compilation[defcon]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable[defcon]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[defcon-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[defcon-inplace]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[defcon-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[defcon-inplace]", "tests/outlineCompiler_test.py::test_compilation_from_ds_missing_source_font[defcon]", "tests/outlineCompiler_test.py::test_compile_empty_ufo[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_with_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_without_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_empty_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_makeGlyphsBoundingBoxes[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics_None[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_importTTX[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_no_contour_glyphs[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_os2_no_widths[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_missing_component[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_contour_starts_with_offcurve_point[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_all_blues_defined[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_blues_defined[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_some_blues_defined[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_all[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_none[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_some[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_optimize[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_optimize[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes_floats[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_importTTX[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_no_contour_glyphs[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_and_nominal_widths[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_but_no_nominal_widths[ufoLib2]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_original_glyph_order[ufoLib2]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_tweaked_glyph_order[ufoLib2]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_strange_glyph_order[ufoLib2]", 
"tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[ufoLib2-useProductionNames]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[ufoLib2-Don't", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_production_names[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_postprocess_production_names_no_notdef[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[ufoLib2-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[ufoLib2-True]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[ufoLib2-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[ufoLib2-True]", "tests/outlineCompiler_test.py::NamesTest::test_warn_name_exceeds_max_length[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_duplicate_glyph_names[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_too_long_production_name[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_raw[ufoLib2]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_BMP[ufoLib2]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_nonBMP_with_UVS[ufoLib2]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes0-expected0]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes1-expected1]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes2-expected2]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes3-expected3]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes4-expected4]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes5-expected5]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes6-expected6]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes7-expected7]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes8-expected8]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes9-expected9]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes10-expected10]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes11-expected11]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes12-expected12]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes13-expected13]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes14-expected14]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes15-expected15]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes16-expected16]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes17-expected17]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes18-expected18]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes19-expected19]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes20-expected20]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes21-expected21]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes22-expected22]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes23-expected23]", 
"tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes24-expected24]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes25-expected25]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes26-expected26]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes27-expected27]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes28-expected28]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes29-expected29]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes30-expected30]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes31-expected31]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes32-expected32]", "tests/outlineCompiler_test.py::test_custom_layer_compilation[ufoLib2]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable[ufoLib2]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[ufoLib2-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[ufoLib2-inplace]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[ufoLib2-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[ufoLib2-inplace]", "tests/outlineCompiler_test.py::test_compilation_from_ds_missing_source_font[ufoLib2]", "tests/outlineCompiler_test.py::test_compile_empty_ufo[ufoLib2]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-06-25 17:23:10+00:00
mit
2,644
googlefonts__ufo2ft-609
diff --git a/Lib/ufo2ft/filters/decomposeTransformedComponents.py b/Lib/ufo2ft/filters/decomposeTransformedComponents.py index ed03e12..0c39769 100644 --- a/Lib/ufo2ft/filters/decomposeTransformedComponents.py +++ b/Lib/ufo2ft/filters/decomposeTransformedComponents.py @@ -8,20 +8,13 @@ class DecomposeTransformedComponentsFilter(BaseFilter): def filter(self, glyph): if not glyph.components: return False - transformedComponents = [] + needs_decomposition = False for component in glyph.components: if component.transformation[:4] != Identity[:4]: - transformedComponents.append(component) - if not transformedComponents: + needs_decomposition = True + break + if not needs_decomposition: return False - specificComponents = [c.baseGlyph for c in transformedComponents] - ufo2ft.util.deepCopyContours( - self.context.glyphSet, - glyph, - glyph, - Transform(), - specificComponents=specificComponents, - ) - for component in transformedComponents: - glyph.removeComponent(component) + ufo2ft.util.deepCopyContours(self.context.glyphSet, glyph, glyph, Transform()) + glyph.clearComponents() return True diff --git a/Lib/ufo2ft/preProcessor.py b/Lib/ufo2ft/preProcessor.py index c0d476e..6906a8d 100644 --- a/Lib/ufo2ft/preProcessor.py +++ b/Lib/ufo2ft/preProcessor.py @@ -7,6 +7,9 @@ from ufo2ft.constants import ( ) from ufo2ft.filters import isValidFilter, loadFilters from ufo2ft.filters.decomposeComponents import DecomposeComponentsFilter +from ufo2ft.filters.decomposeTransformedComponents import ( + DecomposeTransformedComponentsFilter, +) from ufo2ft.fontInfoData import getAttrWithFallback from ufo2ft.util import _GlyphSet @@ -303,10 +306,20 @@ class TTFInterpolatablePreProcessor: def process(self): from cu2qu.ufo import fonts_to_quadratic + needs_decomposition = set() + # first apply all custom pre-filters for funcs, ufo, glyphSet in zip(self.preFilters, self.ufos, self.glyphSets): for func in funcs: - func(ufo, glyphSet) + if isinstance(func, DecomposeTransformedComponentsFilter): + needs_decomposition |= func(ufo, glyphSet) + else: + func(ufo, glyphSet) + # If we decomposed a glyph in some masters, we must ensure it is decomposed in + # all masters. (https://github.com/googlefonts/ufo2ft/issues/507) + decompose = DecomposeComponentsFilter(include=needs_decomposition) + for ufo, glyphSet in zip(self.ufos, self.glyphSets): + decompose(ufo, glyphSet) # then apply all default filters for funcs, ufo, glyphSet in zip(self.defaultFilters, self.ufos, self.glyphSets):
googlefonts/ufo2ft
23c2c0e92b4f11eb5b91fbd43d4d9eb817a1438a
diff --git a/tests/filters/decomposeTransformedComponents_test.py b/tests/filters/decomposeTransformedComponents_test.py index 9489a6a..0cbf260 100644 --- a/tests/filters/decomposeTransformedComponents_test.py +++ b/tests/filters/decomposeTransformedComponents_test.py @@ -1,6 +1,7 @@ from ufo2ft.filters.decomposeTransformedComponents import ( DecomposeTransformedComponentsFilter, ) +from ufo2ft.preProcessor import TTFInterpolatablePreProcessor class DecomposeTransformedComponentsFilterTest: @@ -56,3 +57,39 @@ class DecomposeTransformedComponentsFilterTest: # nine.of has no outline and one component, it was not decomposed assert len(ufo["nine.of"]) == 0 assert len(ufo["nine.of"].components) == 1 + + def test_decompose_compatibly(self, FontClass): + ufo1 = FontClass() + c = ufo1.newGlyph("comp") + c.width = 300 + pen = c.getPen() + pen.moveTo((0, 0)) + pen.lineTo((300, 0)) + pen.lineTo((150, 300)) + pen.closePath() + + b = ufo1.newGlyph("base") + b.width = 300 + pen = b.getPen() + pen.addComponent("comp", (0.5, 0, 0, 0.5, 0, 0)) + + ufo2 = FontClass() + c = ufo2.newGlyph("comp") + c.width = 600 + pen = c.getPen() + pen.moveTo((0, 0)) + pen.lineTo((600, 0)) + pen.lineTo((300, 600)) + pen.closePath() + + b = ufo2.newGlyph("base") + b.width = 600 + pen = b.getPen() + pen.addComponent("comp", (1, 0, 0, 1, 0, 0)) + + # Because ufo1.base needs decomposing, so should ufo2.base + glyphsets = TTFInterpolatablePreProcessor( + [ufo1, ufo2], filters=[DecomposeTransformedComponentsFilter(pre=True)] + ).process() + assert len(glyphsets[0]["base"]) == 1 + assert len(glyphsets[1]["base"]) == 1
decomposeTransformedComponents does not account for variable fonts

I have been getting errors in my build project reporting that some glyphs in the font have a different number of segments. In reviewing the glyphs, I realized that in the lighter weights there are components that have been transformed, but in the heaviest weight they are not. So the decomposeTransformedComponents filter decomposes them in the lighter weights but not in the heaviest one. When compileVariableTTF is being run, it would make sense for decomposeTransformedComponents to check the components across all UFOs and decompose all of them if any one is transformed, in order to maintain compatibility.
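A minimal sketch of the cross-master pass that the patch above adds to TTFInterpolatablePreProcessor: run the transformed-components filter over every master first, collect the names of all glyphs that any master had to decompose, then decompose those same glyphs in every master. The helper name decompose_compatibly and the parallel ufos/glyphSets arguments are illustrative, not ufo2ft API; the filter classes and the include= option are taken from the patch itself.

from ufo2ft.filters.decomposeComponents import DecomposeComponentsFilter
from ufo2ft.filters.decomposeTransformedComponents import (
    DecomposeTransformedComponentsFilter,
)

def decompose_compatibly(ufos, glyphSets):
    # Pass 1: in each master, decompose glyphs whose components carry a
    # non-identity 2x2 transform; calling a filter returns the set of
    # glyph names it modified.
    needs_decomposition = set()
    transformed = DecomposeTransformedComponentsFilter()
    for ufo, glyphSet in zip(ufos, glyphSets):
        needs_decomposition |= transformed(ufo, glyphSet)
    # Pass 2: decompose the same glyphs in *all* masters, so every master
    # ends up with plain contours there and stays interpolatable.
    decompose = DecomposeComponentsFilter(include=needs_decomposition)
    for ufo, glyphSet in zip(ufos, glyphSets):
        decompose(ufo, glyphSet)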
0.0
23c2c0e92b4f11eb5b91fbd43d4d9eb817a1438a
[ "tests/filters/decomposeTransformedComponents_test.py::DecomposeTransformedComponentsFilterTest::test_decompose_compatibly[defcon]", "tests/filters/decomposeTransformedComponents_test.py::DecomposeTransformedComponentsFilterTest::test_decompose_compatibly[ufoLib2]" ]
[ "tests/filters/decomposeTransformedComponents_test.py::DecomposeTransformedComponentsFilterTest::test_transformed_components[defcon]", "tests/filters/decomposeTransformedComponents_test.py::DecomposeTransformedComponentsFilterTest::test_transformed_components[ufoLib2]" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-04-27 10:37:20+00:00
mit
2,645
googlefonts__ufo2ft-764
diff --git a/Lib/ufo2ft/constants.py b/Lib/ufo2ft/constants.py index 242800a..eec1a9d 100644 --- a/Lib/ufo2ft/constants.py +++ b/Lib/ufo2ft/constants.py @@ -38,6 +38,7 @@ COLR_CLIP_BOXES_KEY = UFO2FT_PREFIX + "colrClipBoxes" OBJECT_LIBS_KEY = "public.objectLibs" OPENTYPE_CATEGORIES_KEY = "public.openTypeCategories" OPENTYPE_META_KEY = "public.openTypeMeta" +OPENTYPE_POST_UNDERLINE_POSITION_KEY = "public.openTypePostUnderlinePosition" TRUETYPE_INSTRUCTIONS_KEY = "public.truetype.instructions" TRUETYPE_METRICS_KEY = "public.truetype.useMyMetrics" TRUETYPE_OVERLAP_KEY = "public.truetype.overlap" diff --git a/Lib/ufo2ft/outlineCompiler.py b/Lib/ufo2ft/outlineCompiler.py index 72b8328..14b7bfe 100644 --- a/Lib/ufo2ft/outlineCompiler.py +++ b/Lib/ufo2ft/outlineCompiler.py @@ -31,6 +31,7 @@ from ufo2ft.constants import ( COLOR_PALETTES_KEY, COLR_CLIP_BOXES_KEY, OPENTYPE_META_KEY, + OPENTYPE_POST_UNDERLINE_POSITION_KEY, UNICODE_VARIATION_SEQUENCES_KEY, ) from ufo2ft.errors import InvalidFontData @@ -928,9 +929,12 @@ class BaseOutlineCompiler: italicAngle = float(getAttrWithFallback(font.info, "italicAngle")) post.italicAngle = italicAngle # underline - underlinePosition = getAttrWithFallback( - font.info, "postscriptUnderlinePosition" - ) + if OPENTYPE_POST_UNDERLINE_POSITION_KEY in font.lib: + underlinePosition = font.lib[OPENTYPE_POST_UNDERLINE_POSITION_KEY] + else: + underlinePosition = getAttrWithFallback( + font.info, "postscriptUnderlinePosition" + ) post.underlinePosition = otRound(underlinePosition) underlineThickness = getAttrWithFallback( font.info, "postscriptUnderlineThickness" @@ -1326,7 +1330,16 @@ class OutlineOTFCompiler(BaseOutlineCompiler): # populate various numbers topDict.isFixedPitch = int(getAttrWithFallback(info, "postscriptIsFixedPitch")) topDict.ItalicAngle = float(getAttrWithFallback(info, "italicAngle")) - underlinePosition = getAttrWithFallback(info, "postscriptUnderlinePosition") + if ( + OPENTYPE_POST_UNDERLINE_POSITION_KEY in self.ufo.lib + and info.postscriptUnderlinePosition is None + ): + underlinePosition = ( + self.ufo.lib[OPENTYPE_POST_UNDERLINE_POSITION_KEY] + - getAttrWithFallback(info, "postscriptUnderlineThickness") / 2 + ) + else: + underlinePosition = getAttrWithFallback(info, "postscriptUnderlinePosition") topDict.UnderlinePosition = otRound(underlinePosition) underlineThickness = getAttrWithFallback(info, "postscriptUnderlineThickness") topDict.UnderlineThickness = otRound(underlineThickness)
googlefonts/ufo2ft
7807ef54076238fbe916905bca64a5830566c865
diff --git a/tests/fontInfoData_test.py b/tests/fontInfoData_test.py index 5e8bdd2..d594e77 100644 --- a/tests/fontInfoData_test.py +++ b/tests/fontInfoData_test.py @@ -185,6 +185,12 @@ class GetAttrWithFallbackTest: assert getAttrWithFallback(info, "xHeight") == 1024 assert getAttrWithFallback(info, "descender") == -410 + def test_underline_position(self, info): + assert getAttrWithFallback(info, "postscriptUnderlinePosition") == -75 + + info.postscriptUnderlinePosition = -485 + assert getAttrWithFallback(info, "postscriptUnderlinePosition") == -485 + class PostscriptBlueScaleFallbackTest: def test_without_blue_zones(self, info): diff --git a/tests/outlineCompiler_test.py b/tests/outlineCompiler_test.py index 2bbf025..86a02f8 100644 --- a/tests/outlineCompiler_test.py +++ b/tests/outlineCompiler_test.py @@ -17,6 +17,7 @@ from ufo2ft import ( ) from ufo2ft.constants import ( GLYPHS_DONT_USE_PRODUCTION_NAMES, + OPENTYPE_POST_UNDERLINE_POSITION_KEY, SPARSE_OTF_MASTER_TABLES, SPARSE_TTF_MASTER_TABLES, USE_PRODUCTION_NAMES, @@ -262,6 +263,19 @@ class OutlineTTFCompilerTest: actual = compiler.otf["name"].getName(1, 3, 1, 1033).string assert actual == "Custom Name for Windows" + def test_post_underline_without_public_key(self, testufo): + compiler = OutlineTTFCompiler(testufo) + compiler.compile() + actual = compiler.otf["post"].underlinePosition + assert actual == -200 + + def test_post_underline_with_public_key(self, testufo): + testufo.lib[OPENTYPE_POST_UNDERLINE_POSITION_KEY] = -485 + compiler = OutlineTTFCompiler(testufo) + compiler.compile() + actual = compiler.otf["post"].underlinePosition + assert actual == -485 + class OutlineOTFCompilerTest: def test_setupTable_CFF_all_blues_defined(self, testufo): @@ -596,6 +610,81 @@ class OutlineOTFCompilerTest: assert private.defaultWidthX == 500 assert private.nominalWidthX == 0 + def test_underline_without_public_key(self, testufo): + # Test with no lib key + compiler = OutlineOTFCompiler(testufo) + compiler.compile() + + post = compiler.otf["post"].underlinePosition + + cff = compiler.otf["CFF "].cff + cff_underline = cff[list(cff.keys())[0]].UnderlinePosition + + assert post == -200 + assert cff_underline == -200 + + def test_underline_with_public_key(self, testufo): + # Test with a lib key and postscriptUnderlinePosition + testufo.lib[OPENTYPE_POST_UNDERLINE_POSITION_KEY] = -485 + testufo.info.postscriptUnderlinePosition = -42 + compiler = OutlineOTFCompiler(testufo) + compiler.compile() + + post = compiler.otf["post"].underlinePosition + + cff = compiler.otf["CFF "].cff + cff_underline = cff[list(cff.keys())[0]].UnderlinePosition + + assert post == -485 + assert cff_underline == -42 + + def test_underline_with_public_key_and_no_psPosition(self, testufo): + # Test with a lib key and no postscriptUnderlinePosition + testufo.lib[OPENTYPE_POST_UNDERLINE_POSITION_KEY] = -485 + testufo.info.postscriptUnderlinePosition = None + testufo.info.postscriptUnderlineThickness = 100 + compiler = OutlineOTFCompiler(testufo) + compiler.compile() + + post = compiler.otf["post"].underlinePosition + + cff = compiler.otf["CFF "].cff + cff_underline = cff[list(cff.keys())[0]].UnderlinePosition + + assert post == -485 + assert cff_underline == -535 + + def test_underline_with_no_public_key_and_no_psPosition(self, testufo): + compiler = OutlineOTFCompiler(testufo) + compiler.compile() + + post = compiler.otf["post"].underlinePosition + + cff = compiler.otf["CFF "].cff + cff_underline = cff[list(cff.keys())[0]].UnderlinePosition + + # Note: This is actually incorrect 
according to the post/cff + # spec, but it is how UFO3 has things defined, and is expected + # current behavior. + assert post == -200 + assert cff_underline == -200 + + def test_underline_ps_rounding(self, testufo): + # Test rounding + testufo.lib[OPENTYPE_POST_UNDERLINE_POSITION_KEY] = -485 + testufo.info.postscriptUnderlinePosition = None + testufo.info.postscriptUnderlineThickness = 43 + compiler = OutlineOTFCompiler(testufo) + compiler.compile() + + post = compiler.otf["post"].underlinePosition + + cff = compiler.otf["CFF "].cff + cff_underline = cff[list(cff.keys())[0]].UnderlinePosition + + assert post == -485 + assert cff_underline == -506 + class GlyphOrderTest: def test_compile_original_glyph_order(self, testufo):
post table underlinePosition

The `postscriptUnderlinePosition` attribute wasn't well specified in the UFO: it conflated the values for `CFF/Type1` and `post/CFF2`, which are actually different: the `post/CFF2` value is the top of the underline, while the `CFF/Type1` value is its middle. The UFO spec has since been clarified to state that `postscriptUnderlinePosition` is the `CFF/Type1` value (https://github.com/unified-font-object/ufo-spec/issues/217). The calculation described above is what the AFDKO has been doing; ufo2ft just needs to be updated to follow suit. I will PR a change (and tests) soon.
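The fix boils down to one conversion: the post table (and the new public.openTypePostUnderlinePosition lib key) records the top of the underline stroke, while CFF/Type1 wants its middle, i.e. the post value minus half the underline thickness, rounded with otRound. A minimal sketch with an illustrative helper name; the expected values match the test patch above (post -485 with thickness 100 gives -535, with thickness 43 gives -506).

from fontTools.misc.fixedTools import otRound

def cff_underline_position(post_position, thickness):
    # CFF/Type1 UnderlinePosition is the middle of the stroke, derived
    # from the post-table position (top of stroke) and its thickness.
    return otRound(post_position - thickness / 2)

assert cff_underline_position(-485, 100) == -535
assert cff_underline_position(-485, 43) == -506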
0.0
7807ef54076238fbe916905bca64a5830566c865
[ "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[defcon-infoDict0-expected0]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[defcon-infoDict1-expected1]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[defcon-infoDict2-expected2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[defcon-infoDict3-expected3]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[defcon-infoDict4-expected4]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[defcon-infoDict5-expected5]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_redundant_metadata[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_unecessary_metadata[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_vertical_metrics[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_caret_slope[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_head_created[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_empty_info[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_empty_info_2048[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_underline_position[defcon]", "tests/fontInfoData_test.py::PostscriptBlueScaleFallbackTest::test_without_blue_zones[defcon]", "tests/fontInfoData_test.py::PostscriptBlueScaleFallbackTest::test_with_blue_zones[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_with_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_without_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_empty_gasp[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_makeGlyphsBoundingBoxes[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_getMaxComponentDepths[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics_False[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics_None[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_importTTX[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_no_contour_glyphs[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_os2_no_widths[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_missing_component[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_contour_starts_with_offcurve_point[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_meta[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_name[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_post_underline_without_public_key[defcon]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_post_underline_with_public_key[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_all_blues_defined[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_blues_defined[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_some_blues_defined[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_all[defcon]", 
"tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_none[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_some[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_optimize[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_optimize[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes_floats[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_importTTX[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_no_contour_glyphs[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_and_nominal_widths[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_but_no_nominal_widths[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_without_public_key[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_with_public_key[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_with_public_key_and_no_psPosition[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_with_no_public_key_and_no_psPosition[defcon]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_ps_rounding[defcon]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_original_glyph_order[defcon]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_tweaked_glyph_order[defcon]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_strange_glyph_order[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[defcon-useProductionNames]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[defcon-Don't", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_production_names[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_postprocess_production_names_no_notdef[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[defcon-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[defcon-True]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[defcon-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[defcon-True]", "tests/outlineCompiler_test.py::NamesTest::test_warn_name_exceeds_max_length[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_duplicate_glyph_names[defcon]", "tests/outlineCompiler_test.py::NamesTest::test_too_long_production_name[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_raw[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_otf[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_interpolatable_ttf[defcon]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-1-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-1-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-1-False-compileTTF]", 
"tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-1-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-32-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-32-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-32-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-32-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-100-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-100-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-100-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-True-100-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-False-None-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-False-None-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-False-None-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[defcon-False-None-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_strip_color_codepoints[defcon-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_strip_color_codepoints[defcon-compileOTF]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_BMP[defcon]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_nonBMP_with_UVS[defcon]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes0-expected0]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes1-expected1]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes2-expected2]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes3-expected3]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes4-expected4]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes5-expected5]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes6-expected6]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes7-expected7]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes8-expected8]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes9-expected9]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes10-expected10]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes11-expected11]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes12-expected12]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes13-expected13]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes14-expected14]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes15-expected15]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes16-expected16]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes17-expected17]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes18-expected18]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes19-expected19]", 
"tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes20-expected20]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes21-expected21]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes22-expected22]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes23-expected23]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes24-expected24]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes25-expected25]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes26-expected26]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes27-expected27]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes28-expected28]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes29-expected29]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes30-expected30]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes31-expected31]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[defcon-unicodes32-expected32]", "tests/outlineCompiler_test.py::test_custom_layer_compilation[defcon]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable[defcon]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[defcon-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[defcon-inplace]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[defcon-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[defcon-inplace]", "tests/outlineCompiler_test.py::test_compilation_from_ds_missing_source_font[defcon]", "tests/outlineCompiler_test.py::test_compile_empty_ufo[defcon]", "tests/outlineCompiler_test.py::test_pass_on_conversion_error[defcon]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[ufoLib2-infoDict0-expected0]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[ufoLib2-infoDict1-expected1]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[ufoLib2-infoDict2-expected2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[ufoLib2-infoDict3-expected3]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[ufoLib2-infoDict4-expected4]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_family_and_style_names[ufoLib2-infoDict5-expected5]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_redundant_metadata[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_unecessary_metadata[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_vertical_metrics[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_caret_slope[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_head_created[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_empty_info[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_empty_info_2048[ufoLib2]", "tests/fontInfoData_test.py::GetAttrWithFallbackTest::test_underline_position[ufoLib2]", "tests/fontInfoData_test.py::PostscriptBlueScaleFallbackTest::test_without_blue_zones[ufoLib2]", "tests/fontInfoData_test.py::PostscriptBlueScaleFallbackTest::test_with_blue_zones[ufoLib2]", 
"tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_with_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_without_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_compile_empty_gasp[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_makeGlyphsBoundingBoxes[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_getMaxComponentDepths[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics_False[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_autoUseMyMetrics_None[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_importTTX[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_no_contour_glyphs[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_os2_no_widths[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_missing_component[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_contour_starts_with_offcurve_point[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_meta[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_setupTable_name[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_post_underline_without_public_key[ufoLib2]", "tests/outlineCompiler_test.py::OutlineTTFCompilerTest::test_post_underline_with_public_key[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_all_blues_defined[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_blues_defined[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_some_blues_defined[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_all[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_none[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_round_some[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_optimize[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_setupTable_CFF_no_optimize[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_makeGlyphsBoundingBoxes_floats[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_importTTX[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_no_contour_glyphs[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_and_nominal_widths[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_optimized_default_but_no_nominal_widths[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_without_public_key[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_with_public_key[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_with_public_key_and_no_psPosition[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_with_no_public_key_and_no_psPosition[ufoLib2]", "tests/outlineCompiler_test.py::OutlineOTFCompilerTest::test_underline_ps_rounding[ufoLib2]", 
"tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_original_glyph_order[ufoLib2]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_tweaked_glyph_order[ufoLib2]", "tests/outlineCompiler_test.py::GlyphOrderTest::test_compile_strange_glyph_order[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[ufoLib2-useProductionNames]", "tests/outlineCompiler_test.py::NamesTest::test_compile_without_production_names[ufoLib2-Don't", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_production_names[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_postprocess_production_names_no_notdef[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[ufoLib2-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names[ufoLib2-True]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[ufoLib2-None]", "tests/outlineCompiler_test.py::NamesTest::test_compile_with_custom_postscript_names_notdef_preserved[ufoLib2-True]", "tests/outlineCompiler_test.py::NamesTest::test_warn_name_exceeds_max_length[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_duplicate_glyph_names[ufoLib2]", "tests/outlineCompiler_test.py::NamesTest::test_too_long_production_name[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_raw[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_otf[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colr_cpal_interpolatable_ttf[ufoLib2]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-1-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-1-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-1-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-1-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-32-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-32-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-32-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-32-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-100-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-100-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-100-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-True-100-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-False-None-True-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-False-None-True-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-False-None-False-compileTTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_colrv1_computeClipBoxes[ufoLib2-False-None-False-compileOTF]", "tests/outlineCompiler_test.py::ColrCpalTest::test_strip_color_codepoints[ufoLib2-compileTTF]", 
"tests/outlineCompiler_test.py::ColrCpalTest::test_strip_color_codepoints[ufoLib2-compileOTF]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_BMP[ufoLib2]", "tests/outlineCompiler_test.py::CmapTest::test_cmap_nonBMP_with_UVS[ufoLib2]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes0-expected0]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes1-expected1]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes2-expected2]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes3-expected3]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes4-expected4]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes5-expected5]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes6-expected6]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes7-expected7]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes8-expected8]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes9-expected9]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes10-expected10]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes11-expected11]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes12-expected12]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes13-expected13]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes14-expected14]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes15-expected15]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes16-expected16]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes17-expected17]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes18-expected18]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes19-expected19]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes20-expected20]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes21-expected21]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes22-expected22]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes23-expected23]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes24-expected24]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes25-expected25]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes26-expected26]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes27-expected27]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes28-expected28]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes29-expected29]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes30-expected30]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes31-expected31]", "tests/outlineCompiler_test.py::test_calcCodePageRanges[ufoLib2-unicodes32-expected32]", "tests/outlineCompiler_test.py::test_custom_layer_compilation[ufoLib2]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable[ufoLib2]", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[ufoLib2-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_from_ds[ufoLib2-inplace]", 
"tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[ufoLib2-not", "tests/outlineCompiler_test.py::test_custom_layer_compilation_interpolatable_otf_from_ds[ufoLib2-inplace]", "tests/outlineCompiler_test.py::test_compilation_from_ds_missing_source_font[ufoLib2]", "tests/outlineCompiler_test.py::test_compile_empty_ufo[ufoLib2]", "tests/outlineCompiler_test.py::test_pass_on_conversion_error[ufoLib2]", "tests/fontInfoData_test.py::NormalizeStringForPostscriptTest::test_no_change", "tests/fontInfoData_test.py::DateStringToTimeValueTest::test_roundtrip_random_timestamp" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-08 03:00:34+00:00
mit
2,646
googlefonts__ufo2ft-811
diff --git a/Lib/ufo2ft/featureWriters/kernFeatureWriter.py b/Lib/ufo2ft/featureWriters/kernFeatureWriter.py index 0713bb8..524c4dd 100644 --- a/Lib/ufo2ft/featureWriters/kernFeatureWriter.py +++ b/Lib/ufo2ft/featureWriters/kernFeatureWriter.py @@ -60,6 +60,12 @@ def unicodeBidiType(uv): return None +def script_direction(script: str) -> str: + if script == COMMON_SCRIPT: + return "Auto" + return script_horizontal_direction(script, "LTR") + + @dataclass(frozen=True, order=False) class KerningPair: __slots__ = ("side1", "side2", "value") @@ -157,24 +163,26 @@ class KernFeatureWriter(BaseFeatureWriter): second lookup without the ignore marks flag. * Go through all kerning pairs and split them up by script, to put them in different lookups. This reduces the size of each lookup compared to - splitting by direction, as previously done. + splitting by direction, as previously done. If there are kerning pairs + with different scripts on each side, these scripts are all kept together + to allow for cross-script kerning (in implementations that apply it). + Scripts with different direction are always split. * Partition the first and second side of a pair by script and emit only those with the same script (e.g. `a` and `b` are both "Latn", `period` and `period` are both "Default", but `a` and `a-cy` would mix "Latn" - and "Cyrl" and are dropped) or those that kern an explicit against a - "common" or "inherited" script, e.g. `a` and `period`. + and "Cyrl" and are dropped), or those with kerning across them, or + those that kern an explicit against a "common" or "inherited" script + (e.g. `a` and `period`). * Glyphs can have multiple scripts assigned to them (legitimately, e.g. U+0951 DEVANAGARI STRESS SIGN UDATTA, or for random reasons like having both `sub h by h.sc` and `sub Etaprosgegrammeni by h.sc;`). Only scripts that were determined earlier to be supported by the font will be considered. Usually, we will emit pairs where both sides have - the same script and no splitting is necessary. The only mixed script - pairs we emit are common or inherited (Zyyy or Zinh) against explicit - (e.g. Latn) scripts. A glyph can be part of both for weird reasons, so - we always treat any glyph with a common or inherited script as a - purely common (not inherited) glyph for bucketing purposes. This - avoids creating overlapping groups with the multi-script glyph in a - lookup. + the same script and no splitting is necessary. A glyph can be part of + both for weird reasons, so we always treat any glyph with a common or + inherited script as a purely common (not inherited) glyph for + bucketing purposes. This avoids creating overlapping groups with the + multi-script glyph in a lookup. * Some glyphs may have a script of Zyyy or Zinh but have a disjoint set of explicit scripts as their script extension. By looking only at the script extension, we treat many of them as being part of an explicit @@ -291,7 +299,9 @@ class KernFeatureWriter(BaseFeatureWriter): lookupGroups = [] for _, lookupGroup in sorted(lookups.items()): - lookupGroups.extend(lookupGroup.values()) + lookupGroups.extend( + lkp for lkp in lookupGroup.values() if lkp not in lookupGroups + ) # NOTE: We don't write classDefs because we literalise all classes. 
self._insert( @@ -700,18 +710,11 @@ class KernFeatureWriter(BaseFeatureWriter): assert not side2Classes.keys() & newSide2Classes.keys() side2Classes.update(newSide2Classes) - for script, pairs in kerningPerScript.items(): - scriptLookups = lookups.setdefault(script, {}) - - key = f"kern_{script}{suffix}" - lookup = scriptLookups.get(key) - if not lookup: - # For neatness: - lookup = self._makeKerningLookup( - key.replace(COMMON_SCRIPT, COMMON_CLASS_NAME), - ignoreMarks=ignoreMarks, - ) - scriptLookups[key] = lookup + for scripts, pairs in kerningPerScript.items(): + lookupName = f"kern_{'_'.join(scripts)}{suffix}".replace( + COMMON_SCRIPT, COMMON_CLASS_NAME + ) + lookup = self._makeKerningLookup(lookupName, ignoreMarks=ignoreMarks) for pair in pairs: bidiTypes = { direction @@ -726,13 +729,17 @@ class KernFeatureWriter(BaseFeatureWriter): pair.value, ) continue - scriptIsRtl = script_horizontal_direction(script, "LTR") == "RTL" + directions = {script_direction(script) for script in scripts} + assert len(directions) == 1 + scriptIsRtl = directions == {"RTL"} # Numbers are always shaped LTR even in RTL scripts: pairIsRtl = scriptIsRtl and "L" not in bidiTypes rule = self._makePairPosRule( pair, side1Classes, side2Classes, pairIsRtl ) lookup.statements.append(rule) + for script in scripts: + lookups.setdefault(script, {})[lookupName] = lookup # Clean out empty lookups. for script, scriptLookups in list(lookups.items()): @@ -771,7 +778,9 @@ class KernFeatureWriter(BaseFeatureWriter): isKernBlock = feature.name == "kern" dfltLookups: list[ast.LookupBlock] = [] if isKernBlock and COMMON_SCRIPT in lookups: - dfltLookups.extend(lookups[COMMON_SCRIPT].values()) + dfltLookups.extend( + lkp for lkp in lookups[COMMON_SCRIPT].values() if lkp not in dfltLookups + ) # InDesign bugfix: register kerning lookups for all LTR scripts under DFLT # so that the basic composer, without a language selected, will still kern. @@ -780,12 +789,14 @@ class KernFeatureWriter(BaseFeatureWriter): lookupsLTR: list[ast.LookupBlock] = [] lookupsRTL: list[ast.LookupBlock] = [] for script, scriptLookups in sorted(lookups.items()): - if script != COMMON_SCRIPT and script not in DIST_ENABLED_SCRIPTS: - if script_horizontal_direction(script, "LTR") == "LTR": + if script not in DIST_ENABLED_SCRIPTS: + if script_direction(script) == "LTR": lookupsLTR.extend(scriptLookups.values()) - elif script_horizontal_direction(script, "LTR") == "RTL": + elif script_direction(script) == "RTL": lookupsRTL.extend(scriptLookups.values()) - dfltLookups.extend(lookupsLTR or lookupsRTL) + dfltLookups.extend( + lkp for lkp in (lookupsLTR or lookupsRTL) if lkp not in dfltLookups + ) if dfltLookups: languages = feaLanguagesByScript.get("DFLT", ["dflt"]) @@ -814,27 +825,38 @@ class KernFeatureWriter(BaseFeatureWriter): feature.statements.append(ast.Comment("")) # We have something for this script. First add the default # lookups, then the script-specific ones - lookupsForThisScript = [] + lookupsForThisScript = {} for dfltScript in DFLT_SCRIPTS: if dfltScript in lookups: - lookupsForThisScript.extend(lookups[dfltScript].values()) - lookupsForThisScript.extend(lookups[script].values()) + lookupsForThisScript.update(lookups[dfltScript]) + lookupsForThisScript.update(lookups[script]) # Register the lookups for all languages defined in the feature # file for the script, otherwise kerning is not applied if any # language is set at all. 
languages = feaLanguagesByScript.get(tag, ["dflt"]) - ast.addLookupReferences(feature, lookupsForThisScript, tag, languages) + ast.addLookupReferences( + feature, lookupsForThisScript.values(), tag, languages + ) def splitKerning(pairs, glyphScripts): # Split kerning into per-script buckets, so we can post-process them before - # continuing. + # continuing. Scripts that have cross-script kerning pairs will be put in + # the same bucket. kerningPerScript = {} for pair in pairs: - for script, splitPair in partitionByScript(pair, glyphScripts): - kerningPerScript.setdefault(script, []).append(splitPair) + for scripts, splitPair in partitionByScript(pair, glyphScripts): + scripts = tuple(sorted(scripts)) + kerningPerScript.setdefault(scripts, []).append(splitPair) - for pairs in kerningPerScript.values(): + kerningPerScript = mergeScripts(kerningPerScript) + + for scripts, pairs in kerningPerScript.items(): + if len(scripts) > 1: + LOGGER.info( + "Merging kerning lookups from the following scripts: %s", + ", ".join(scripts), + ) pairs.sort() return kerningPerScript @@ -847,8 +869,9 @@ def partitionByScript( """Split a potentially mixed-script pair into pairs that make sense based on the dominant script, and yield each combination with its dominant script.""" - side1Scripts: dict[str, set[str]] = {} - side2Scripts: dict[str, set[str]] = {} + side1Directions: dict[str, set[str]] = {} + side2Directions: dict[str, set[str]] = {} + resolvedScripts: dict[str, set[str]] = {} for glyph in pair.firstGlyphs: scripts = glyphScripts.get(glyph, DFLT_SCRIPTS) # If a glyph is both common or inherited *and* another script, treat it @@ -859,58 +882,102 @@ def partitionByScript( # script-specific one. if scripts & DFLT_SCRIPTS: scripts = COMMON_SCRIPTS_SET - for script in scripts: - side1Scripts.setdefault(script, set()).add(glyph) + resolvedScripts[glyph] = scripts + for direction in (script_direction(script) for script in sorted(scripts)): + side1Directions.setdefault(direction, set()).add(glyph) for glyph in pair.secondGlyphs: scripts = glyphScripts.get(glyph, DFLT_SCRIPTS) if scripts & DFLT_SCRIPTS: scripts = COMMON_SCRIPTS_SET - for script in scripts: - side2Scripts.setdefault(script, set()).add(glyph) + resolvedScripts[glyph] = scripts + for direction in (script_direction(script) for script in sorted(scripts)): + side2Directions.setdefault(direction, set()).add(glyph) - for firstScript, secondScript in itertools.product(side1Scripts, side2Scripts): - # Preserve the type (glyph or class) of each side. - localGlyphs: set[str] = set() + for side1Direction, side2Direction in itertools.product( + side1Directions, side2Directions + ): localSide1: str | tuple[str, ...] localSide2: str | tuple[str, ...] 
+ side1Scripts: set[str] = set() + side2Scripts: set[str] = set() if pair.firstIsClass: - localSide1 = tuple(sorted(side1Scripts[firstScript])) - localGlyphs.update(localSide1) + localSide1 = tuple(sorted(side1Directions[side1Direction])) + for glyph in localSide1: + side1Scripts |= resolvedScripts[glyph] else: - assert len(side1Scripts[firstScript]) == 1 - (localSide1,) = side1Scripts[firstScript] - localGlyphs.add(localSide1) + assert len(side1Directions[side1Direction]) == 1 + (localSide1,) = side1Directions[side1Direction] + side1Scripts |= resolvedScripts[localSide1] if pair.secondIsClass: - localSide2 = tuple(sorted(side2Scripts[secondScript])) - localGlyphs.update(localSide2) - else: - assert len(side2Scripts[secondScript]) == 1 - (localSide2,) = side2Scripts[secondScript] - localGlyphs.add(localSide2) - - if firstScript == secondScript or secondScript == COMMON_SCRIPT: - localScript = firstScript - elif firstScript == COMMON_SCRIPT: - localScript = secondScript - # Two different explicit scripts: + localSide2 = tuple(sorted(side2Directions[side2Direction])) + for glyph in localSide2: + side2Scripts |= resolvedScripts[glyph] else: + assert len(side2Directions[side2Direction]) == 1 + (localSide2,) = side2Directions[side2Direction] + side2Scripts |= resolvedScripts[localSide2] + + # Skip pairs with mixed direction. + if side1Direction != side2Direction and not any( + side == "Auto" for side in (side1Direction, side2Direction) + ): LOGGER.info( - "Skipping kerning pair <%s %s %s> with mixed script (%s, %s)", + "Skipping kerning pair <%s %s %s> with mixed direction (%s, %s)", pair.side1, pair.side2, pair.value, - firstScript, - secondScript, + side1Direction, + side2Direction, ) continue - yield localScript, KerningPair( + scripts = side1Scripts | side2Scripts + # If only one side has Common, drop it + if not all(side & COMMON_SCRIPTS_SET for side in (side1Scripts, side2Scripts)): + scripts -= COMMON_SCRIPTS_SET + + yield scripts, KerningPair( localSide1, localSide2, pair.value, ) +def mergeScripts(kerningPerScript): + """Merge buckets that have common scripts. If we have [A, B], [B, C], and + [D] buckets, we want to merge the first two into [A, B, C] and leave [D] so + that all kerning pairs of the three scripts are in the same lookup.""" + sets = [set(scripts) for scripts in kerningPerScript if scripts] + merged = True + while merged: + merged = False + result = [] + while sets: + common, rest = sets[0], sets[1:] + sets = [] + for scripts in rest: + if scripts.isdisjoint(common): + sets.append(scripts) + else: + merged = True + common |= scripts + result.append(common) + sets = result + + # Now that we have merged all common-script buckets, we need to re-assign + # the kerning pairs to the new buckets. + result = {tuple(sorted(scripts)): [] for scripts in sets} + for scripts, pairs in kerningPerScript.items(): + for scripts2 in sets: + if scripts2 & set(scripts): + result[tuple(sorted(scripts2))].extend(pairs) + break + else: + # Shouldn't happen, but just in case. + raise AssertionError + return result + + def makeAllGlyphClassDefinitions(kerningPerScript, context, feaFile=None): # Note: Refer to the context for existing classDefs and mappings of glyph # class tuples to feaLib AST to avoid overwriting existing class names, @@ -931,9 +998,10 @@ def makeAllGlyphClassDefinitions(kerningPerScript, context, feaFile=None): # Generate common class names first so that common classes are correctly # named in other lookups. 
- if COMMON_SCRIPT in kerningPerScript: - common_pairs = kerningPerScript[COMMON_SCRIPT] - for pair in common_pairs: + for scripts, pairs in kerningPerScript.items(): + if set(scripts) != COMMON_SCRIPTS_SET: + continue + for pair in pairs: if ( pair.firstIsClass and pair.side1 not in existingSide1Classes @@ -964,9 +1032,10 @@ def makeAllGlyphClassDefinitions(kerningPerScript, context, feaFile=None): ) sortedKerningPerScript = sorted(kerningPerScript.items()) - for script, pairs in sortedKerningPerScript: - if script == COMMON_SCRIPT: + for scripts, pairs in sortedKerningPerScript: + if set(scripts) == COMMON_SCRIPTS_SET: continue + script = "_".join(scripts).replace(COMMON_SCRIPT, COMMON_CLASS_NAME) for pair in pairs: if ( pair.firstIsClass
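The mergeScripts step in the patch above is a plain union of overlapping script buckets and can be read in isolation. Below is a self-contained sketch (illustrative function name, no ufo2ft imports) that reproduces the docstring's example: [A, B] and [B, C] share B and collapse into [A, B, C], while [D] stays on its own, so cross-script kerning pairs land in a single lookup.

def merge_script_buckets(buckets):
    # Repeatedly union any two buckets that share a script until all
    # remaining buckets are pairwise disjoint.
    sets = [set(b) for b in buckets]
    merged = True
    while merged:
        merged = False
        result = []
        while sets:
            common, rest = sets[0], sets[1:]
            sets = []
            for scripts in rest:
                if scripts.isdisjoint(common):
                    sets.append(scripts)
                else:
                    merged = True
                    common |= scripts
            result.append(common)
        sets = result
    return sorted(tuple(sorted(s)) for s in sets)

assert merge_script_buckets([{"A", "B"}, {"B", "C"}, {"D"}]) == [
    ("A", "B", "C"),
    ("D",),
]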
googlefonts/ufo2ft
bb263dddb5c8b7913cc9c210756bd9a05ac76ec1
diff --git a/tests/featureWriters/kernFeatureWriter_test.py b/tests/featureWriters/kernFeatureWriter_test.py index 9653c3b..a4caa61 100644 --- a/tests/featureWriters/kernFeatureWriter_test.py +++ b/tests/featureWriters/kernFeatureWriter_test.py @@ -595,29 +595,23 @@ class KernFeatureWriterTest(FeatureWriterTest): assert dedent(str(generated)) == dedent( """\ - lookup kern_Arab { - lookupflag IgnoreMarks; - pos four-ar seven-ar -30; - } kern_Arab; - - lookup kern_Thaa { + lookup kern_Arab_Thaa { lookupflag IgnoreMarks; pos four-ar seven-ar -30; - } kern_Thaa; + } kern_Arab_Thaa; feature kern { script DFLT; language dflt; - lookup kern_Arab; - lookup kern_Thaa; + lookup kern_Arab_Thaa; script arab; language dflt; - lookup kern_Arab; + lookup kern_Arab_Thaa; script thaa; language dflt; - lookup kern_Thaa; + lookup kern_Arab_Thaa; } kern; """ ) @@ -1496,74 +1490,55 @@ def test_kern_split_and_drop(FontClass, caplog): assert dedent(str(newFeatures)) == dedent( """\ - @kern1.Grek.bar = [period]; - @kern1.Grek.foo = [alpha]; - @kern1.Latn.foo = [a]; - @kern1.Orya.foo = [a-orya]; - @kern2.Grek.bar = [period]; - @kern2.Grek.foo = [alpha]; - @kern2.Latn.foo = [a]; - @kern2.Orya.foo = [a-orya]; - - lookup kern_Grek { - lookupflag IgnoreMarks; - pos @kern1.Grek.foo @kern2.Grek.bar 20; - pos @kern1.Grek.bar @kern2.Grek.foo 20; - } kern_Grek; - - lookup kern_Latn { + @kern1.Cyrl_Grek_Latn_Orya.bar = [a-cy]; + @kern1.Cyrl_Grek_Latn_Orya.bar_1 = [period]; + @kern1.Cyrl_Grek_Latn_Orya.foo = [a a-orya alpha]; + @kern2.Cyrl_Grek_Latn_Orya.bar = [a-cy]; + @kern2.Cyrl_Grek_Latn_Orya.bar_1 = [period]; + @kern2.Cyrl_Grek_Latn_Orya.foo = [a a-orya alpha]; + + lookup kern_Cyrl_Grek_Latn_Orya { lookupflag IgnoreMarks; - pos @kern1.Latn.foo @kern2.Grek.bar 20; - pos @kern1.Grek.bar @kern2.Latn.foo 20; - } kern_Latn; - - lookup kern_Orya { - lookupflag IgnoreMarks; - pos @kern1.Orya.foo @kern2.Grek.bar 20; - pos @kern1.Grek.bar @kern2.Orya.foo 20; - } kern_Orya; + pos @kern1.Cyrl_Grek_Latn_Orya.foo @kern2.Cyrl_Grek_Latn_Orya.bar 20; + pos @kern1.Cyrl_Grek_Latn_Orya.foo @kern2.Cyrl_Grek_Latn_Orya.bar_1 20; + pos @kern1.Cyrl_Grek_Latn_Orya.bar @kern2.Cyrl_Grek_Latn_Orya.foo 20; + pos @kern1.Cyrl_Grek_Latn_Orya.bar_1 @kern2.Cyrl_Grek_Latn_Orya.foo 20; + } kern_Cyrl_Grek_Latn_Orya; feature kern { script DFLT; language dflt; - lookup kern_Grek; - lookup kern_Latn; + lookup kern_Cyrl_Grek_Latn_Orya; + + script cyrl; + language dflt; + lookup kern_Cyrl_Grek_Latn_Orya; script grek; language dflt; - lookup kern_Grek; + lookup kern_Cyrl_Grek_Latn_Orya; script latn; language dflt; - lookup kern_Latn; + lookup kern_Cyrl_Grek_Latn_Orya; } kern; feature dist { script ory2; language dflt; - lookup kern_Orya; + lookup kern_Cyrl_Grek_Latn_Orya; script orya; language dflt; - lookup kern_Orya; + lookup kern_Cyrl_Grek_Latn_Orya; } dist; """ ) - msgs = sorted(msg[-30:] for msg in caplog.messages) - assert msgs == [ - "with mixed script (Arab, Grek)", - "with mixed script (Arab, Latn)", - "with mixed script (Arab, Orya)", - "with mixed script (Cyrl, Grek)", - "with mixed script (Cyrl, Latn)", - "with mixed script (Cyrl, Orya)", - "with mixed script (Grek, Arab)", - "with mixed script (Grek, Cyrl)", - "with mixed script (Latn, Arab)", - "with mixed script (Latn, Cyrl)", - "with mixed script (Orya, Arab)", - "with mixed script (Orya, Cyrl)", + assert caplog.messages == [ + "Skipping kerning pair <('a', 'a-orya', 'alpha') ('a-cy', 'alef-ar', 'period') 20> with mixed direction (LTR, RTL)", + "Skipping kerning pair <('a-cy', 'alef-ar', 
'period') ('a', 'a-orya', 'alpha') 20> with mixed direction (RTL, LTR)", + "Merging kerning lookups from the following scripts: Cyrl, Grek, Latn, Orya", ] @@ -1598,7 +1573,7 @@ def test_kern_split_and_drop_mixed(caplog, FontClass): """ ) assert ( - "Skipping kerning pair <('V', 'W') ('W', 'gba-nko') -20> with mixed script (Latn, Nkoo)" + "Skipping kerning pair <('V', 'W') ('W', 'gba-nko') -20> with mixed direction (LTR, RTL)" in caplog.text ) @@ -1681,33 +1656,26 @@ def test_kern_multi_script(FontClass): assert dedent(str(newFeatures)) == dedent( """\ - @kern1.Arab.foo = [lam-ar]; - @kern1.Nkoo.foo = [gba-nko]; - @kern2.Arab.foo = [comma-ar]; - - lookup kern_Arab { - lookupflag IgnoreMarks; - pos @kern1.Arab.foo @kern2.Arab.foo <-20 0 -20 0>; - } kern_Arab; + @kern1.Arab_Nkoo.foo = [gba-nko lam-ar]; + @kern2.Arab_Nkoo.foo = [comma-ar]; - lookup kern_Nkoo { + lookup kern_Arab_Nkoo { lookupflag IgnoreMarks; - pos @kern1.Nkoo.foo @kern2.Arab.foo <-20 0 -20 0>; - } kern_Nkoo; + pos @kern1.Arab_Nkoo.foo @kern2.Arab_Nkoo.foo <-20 0 -20 0>; + } kern_Arab_Nkoo; feature kern { script DFLT; language dflt; - lookup kern_Arab; - lookup kern_Nkoo; + lookup kern_Arab_Nkoo; script arab; language dflt; - lookup kern_Arab; + lookup kern_Arab_Nkoo; script nko; language dflt; - lookup kern_Nkoo; + lookup kern_Arab_Nkoo; } kern; """ ) @@ -1837,13 +1805,14 @@ def test_kern_zyyy_zinh(FontClass): pos uni1DC0 uni1DC0 6; } kern_Grek; - lookup kern_Hani { + lookup kern_Hani_Hrkt { lookupflag IgnoreMarks; pos uni1D360 uni1D360 37; pos uni1D370 uni1D370 38; pos uni1F250 uni1F250 39; pos uni3010 uni3010 8; pos uni3030 uni3030 9; + pos uni30A0 uni30A0 10; pos uni3190 uni3190 11; pos uni31C0 uni31C0 12; pos uni31D0 uni31D0 13; @@ -1861,15 +1830,8 @@ def test_kern_zyyy_zinh(FontClass): pos uni33E0 uni33E0 25; pos uni33F0 uni33F0 26; pos uniA700 uniA700 27; - } kern_Hani; - - lookup kern_Hrkt { - lookupflag IgnoreMarks; - pos uni3010 uni3010 8; - pos uni3030 uni3030 9; - pos uni30A0 uni30A0 10; pos uniFF70 uniFF70 29; - } kern_Hrkt; + } kern_Hani_Hrkt; lookup kern_Default { lookupflag IgnoreMarks; @@ -1889,8 +1851,7 @@ def test_kern_zyyy_zinh(FontClass): language dflt; lookup kern_Default; lookup kern_Grek; - lookup kern_Hani; - lookup kern_Hrkt; + lookup kern_Hani_Hrkt; script grek; language dflt; @@ -1900,12 +1861,12 @@ def test_kern_zyyy_zinh(FontClass): script hani; language dflt; lookup kern_Default; - lookup kern_Hani; + lookup kern_Hani_Hrkt; script kana; language dflt; lookup kern_Default; - lookup kern_Hrkt; + lookup kern_Hani_Hrkt; } kern; feature dist {
Cross-script kerning Since https://github.com/googlefonts/ufo2ft/pull/679, kerning pairs are split per script, under the assumption that OpenType layout is not applied across scripts. As it turns out, more than one implementation applies at least kerning across scripts (I have tested only Latin, Greek, Cyrillic, and Coptic; handling for other scripts might be different): * CoreText applies kerning across the 4 tested scripts. * InDesign’s Adobe Paragraph/Single-line Composer (see below) * I’m told DirectWrite shapes Latin, Greek, and Cyrillic together, though I couldn’t manage to find a Windows application that shows this behavior. More than one orthography also uses a mix of scripts, so the issue here has practical and not only theoretical implications: * Sliammon language: [uses Greek chi and theta mixed with Latin](https://www.firstvoices.com/tlaamin). * Heiltsuk alphabet: [uses Greek lambda and barred lambda mixed with Latin](https://www.omniglot.com/writing/heiltsuk.htm) Since the motivation for https://github.com/googlefonts/ufo2ft/pull/679 was to optimize lookup sizes, and optimizations shouldn’t break functionality, I suggest we either: * Revert this change and accept the sub-optimal lookup sizes * Make the per-script behavior opt-in * Use a heuristic, e.g. if there are kerning pairs involving multiple scripts, keep these (or the kerning of the involved scripts in general) in one lookup and register it for all the involved scripts (sketched below). Here are three fonts that include a couple of Latin, Greek, Cyrillic, and Coptic glyphs, with class kerning between them (all first glyphs in one class, and all second glyphs in another). The first font has the kerning lookup under the DFLT script as well as script-specific tags, the second does not have it under DFLT, and the third has it under DFLT only: [CrossScriptKerningFonts.zip](https://github.com/googlefonts/ufo2ft/files/13762669/CrossScriptKerningFonts.zip) If kerning is applied, the text should appear solid with no space between the triangular shapes. Here are the testing results for the string “AVАVΑVⲀVAУАУΑУⲀУAΥАΥΑΥⲀΥAⲨАⲨΑⲨⲀⲨ”: * Pages: works the same for all three fonts ![Pages](https://github.com/googlefonts/ufo2ft/assets/93914/1b11fdd5-3f04-442a-a5ee-8a3cdcc94be8) * InDesign (Adobe Paragraph Composer): seems to require the lookup under the DFLT script ![InDesign](https://github.com/googlefonts/ufo2ft/assets/93914/debf4de4-c076-4101-a85f-3bebf561cd65) * Firefox (and basically every other application): ![Firefox](https://github.com/googlefonts/ufo2ft/assets/93914/83faee32-a5dd-4a22-8dde-8e431be67639)
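A minimal, self-contained sketch of the merging heuristic implemented by the patch above (its `mergeScripts` helper): buckets whose script sets overlap are repeatedly unioned until all buckets are disjoint, and the kerning pairs are then re-assigned to the merged buckets. The sample bucket contents below are illustrative, not taken from any real font.

```python
def merge_script_buckets(kerning_per_script):
    """Union buckets with overlapping script sets, e.g. {A, B} and {B, C}
    merge into {A, B, C}, while a disjoint {D} bucket is left alone."""
    sets = [set(scripts) for scripts in kerning_per_script]
    merged = True
    while merged:
        merged = False
        result = []
        while sets:
            common, rest = sets[0], sets[1:]
            sets = []
            for scripts in rest:
                if scripts.isdisjoint(common):
                    sets.append(scripts)  # keep for the next sweep
                else:
                    merged = True
                    common |= scripts  # absorb the overlapping bucket
            result.append(common)
        sets = result

    # Re-assign the kerning pairs to the merged buckets.
    out = {tuple(sorted(s)): [] for s in sets}
    for scripts, pairs in kerning_per_script.items():
        for merged_scripts in sets:
            if merged_scripts & set(scripts):
                out[tuple(sorted(merged_scripts))].extend(pairs)
                break
    return out


buckets = {
    ("Grek", "Latn"): [("alpha", "V", -40)],  # cross-script pair
    ("Cyrl", "Grek"): [("a-cy", "V", -40)],
    ("Arab",): [("lam-ar", "alef-ar", -20)],  # unrelated, stays separate
}
print(merge_script_buckets(buckets))
# {('Cyrl', 'Grek', 'Latn'): [('alpha', 'V', -40), ('a-cy', 'V', -40)],
#  ('Arab',): [('lam-ar', 'alef-ar', -20)]}
```

With merged buckets, a single `kern_Cyrl_Grek_Latn` lookup can be registered for all three script tags, which is exactly what the updated test expectations above check for.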
0.0
bb263dddb5c8b7913cc9c210756bd9a05ac76ec1
[ "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_arabic_numerals[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_and_drop[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_and_drop_mixed[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_multi_script[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_zyyy_zinh[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_arabic_numerals[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_and_drop[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_and_drop_mixed[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_multi_script[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_zyyy_zinh[ufoLib2]" ]
[ "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_cleanup_missing_glyphs[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_ignoreMarks[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_mark_to_base_kern[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_mark_to_base_only[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_mode[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_before[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_comment_wrong_case_or_missing[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_before_extended[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_after[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_middle[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_skip_zero_class_kerns[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_uniqueness[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_LTR_and_RTL[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_LTR_and_RTL_with_marks[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_RTL_with_marks[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_independent_of_languagesystem[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_dist_LTR[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_dist_RTL[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_dist_LTR_and_RTL[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_ambiguous_direction_pair[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_RTL_and_DFLT_numbers[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_quantize[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_skip_spacing_marks[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_multi_glyph_class[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_and_mix_common[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_keep_common[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_mixed_bidis[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_hira_kana_hrkt[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_defining_classdefs[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_mark_base_kerning[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_hyphenated_duplicates[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::test_dflt_language[defcon]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_cleanup_missing_glyphs[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_ignoreMarks[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_mark_to_base_kern[ufoLib2]", 
"tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_mark_to_base_only[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_mode[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_before[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_comment_wrong_case_or_missing[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_before_extended[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_after[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_insert_comment_middle[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_skip_zero_class_kerns[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_uniqueness[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_LTR_and_RTL[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_LTR_and_RTL_with_marks[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_RTL_with_marks[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_independent_of_languagesystem[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_dist_LTR[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_dist_RTL[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_dist_LTR_and_RTL[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_ambiguous_direction_pair[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_kern_RTL_and_DFLT_numbers[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_quantize[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::KernFeatureWriterTest::test_skip_spacing_marks[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_multi_glyph_class[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_split_and_mix_common[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_keep_common[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_mixed_bidis[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_kern_hira_kana_hrkt[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_defining_classdefs[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_mark_base_kerning[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_hyphenated_duplicates[ufoLib2]", "tests/featureWriters/kernFeatureWriter_test.py::test_dflt_language[ufoLib2]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-01-16 21:35:35+00:00
mit
2,647
googlemaps__google-maps-services-python-139
diff --git a/googlemaps/convert.py b/googlemaps/convert.py index 1c2264e..6206cfa 100644 --- a/googlemaps/convert.py +++ b/googlemaps/convert.py @@ -220,9 +220,17 @@ def components(arg): :rtype: basestring """ + + # Components may have multiple values per type, here we + # expand them into individual key/value items, eg: + # {"country": ["US", "AU"], "foo": 1} -> "country:AU", "country:US", "foo:1" + def expand(arg): + for k, v in arg.items(): + for item in as_list(v): + yield "%s:%s" % (k, item) + if isinstance(arg, dict): - arg = sorted(["%s:%s" % (k, arg[k]) for k in arg]) - return "|".join(arg) + return "|".join(sorted(expand(arg))) raise TypeError( "Expected a dict for components, "
googlemaps/google-maps-services-python
8013de5d7c1b4867dcafb4449b97c1cebab33127
diff --git a/test/test_convert.py b/test/test_convert.py index 851eda1..090a95f 100644 --- a/test/test_convert.py +++ b/test/test_convert.py @@ -91,6 +91,9 @@ class ConvertTest(unittest.TestCase): c = {"country": "US", "foo": 1} self.assertEqual("country:US|foo:1", convert.components(c)) + c = {"country": ["US", "AU"], "foo": 1} + self.assertEqual("country:AU|country:US|foo:1", convert.components(c)) + with self.assertRaises(TypeError): convert.components("test")
Allow users to append several values for the same component filter type It would be nice to allow several values for the same component type. Use case: you may want to filter sublocalities via the locality component type ("locality matches against both locality and sublocality types") and, in addition, restrict the result set by another locality. Maybe we should extend the components conversion so that dict values can be lists (e.g. a defaultdict of lists)?
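The merged patch above handles exactly this: dict values may now be lists, and each value expands into its own `key:value` item before sorting and joining. A standalone re-implementation for illustration (the real code lives in `googlemaps/convert.py`, whose `as_list` helper also handles other input types):

```python
def as_list(arg):
    # Simplified stand-in for googlemaps.convert.as_list.
    if isinstance(arg, (list, tuple)):
        return arg
    return [arg]


def components(arg):
    # Expand multi-valued component filters into individual key:value items.
    def expand(arg):
        for k, v in arg.items():
            for item in as_list(v):
                yield "%s:%s" % (k, item)

    if isinstance(arg, dict):
        return "|".join(sorted(expand(arg)))
    raise TypeError("Expected a dict for components")


print(components({"country": ["US", "AU"], "foo": 1}))
# country:AU|country:US|foo:1
```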
0.0
8013de5d7c1b4867dcafb4449b97c1cebab33127
[ "test/test_convert.py::ConvertTest::test_components" ]
[ "test/test_convert.py::ConvertTest::test_as_list", "test/test_convert.py::ConvertTest::test_bounds", "test/test_convert.py::ConvertTest::test_join_list", "test/test_convert.py::ConvertTest::test_latlng", "test/test_convert.py::ConvertTest::test_location_list", "test/test_convert.py::ConvertTest::test_polyline_decode", "test/test_convert.py::ConvertTest::test_polyline_round_trip", "test/test_convert.py::ConvertTest::test_time" ]
{ "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2016-07-13 00:34:27+00:00
apache-2.0
2,648
gorakhargosh__watchdog-515
diff --git a/src/watchdog/observers/inotify_c.py b/src/watchdog/observers/inotify_c.py index c9c1b5c..1eae327 100644 --- a/src/watchdog/observers/inotify_c.py +++ b/src/watchdog/observers/inotify_c.py @@ -332,6 +332,13 @@ class Inotify(object): del self._wd_for_path[move_src_path] self._wd_for_path[inotify_event.src_path] = moved_wd self._path_for_wd[moved_wd] = inotify_event.src_path + if self.is_recursive: + for _path, _wd in self._wd_for_path.copy().items(): + if _path.startswith(move_src_path + os.path.sep.encode()): + moved_wd = self._wd_for_path.pop(_path) + _move_to_path = _path.replace(move_src_path, inotify_event.src_path) + self._wd_for_path[_move_to_path] = moved_wd + self._path_for_wd[moved_wd] = _move_to_path src_path = os.path.join(wd_path, name) inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
gorakhargosh/watchdog
ecaa92756fe78d4771d23550c0935b0190769035
diff --git a/tests/test_emitter.py b/tests/test_emitter.py index a594e70..8b66266 100644 --- a/tests/test_emitter.py +++ b/tests/test_emitter.py @@ -32,6 +32,7 @@ from watchdog.events import ( DirDeletedEvent, DirModifiedEvent, DirCreatedEvent, + DirMovedEvent ) from watchdog.observers.api import ObservedWatch @@ -322,3 +323,186 @@ def test_recursive_off(): with pytest.raises(Empty): event_queue.get(timeout=5) + + [email protected](platform.is_windows(), + reason="Windows create another set of events for this test") +def test_renaming_top_level_directory(): + start_watching() + + mkdir(p('a')) + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirCreatedEvent) + assert event.src_path == p('a') + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirModifiedEvent) + assert event.src_path == p() + + mkdir(p('a', 'b')) + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirCreatedEvent) + assert event.src_path == p('a', 'b') + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirModifiedEvent) + assert event.src_path == p('a') + + mv(p('a'), p('a2')) + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('a') + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirModifiedEvent) + assert event.src_path == p() + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirModifiedEvent) + assert event.src_path == p() + + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirMovedEvent) + assert event.src_path == p('a', 'b') + + open(p('a2', 'b', 'c'), 'a').close() + + # DirModifiedEvent may emitted, but sometimes after waiting time is out. + events = [] + while True: + events.append(event_queue.get(timeout=5)[0]) + if event_queue.empty(): + break + + assert all([isinstance(e, (FileCreatedEvent, FileMovedEvent, DirModifiedEvent)) for e in events]) + + for event in events: + if isinstance(event, FileCreatedEvent): + assert event.src_path == p('a2', 'b', 'c') + elif isinstance(event, FileMovedEvent): + assert event.dest_path == p('a2', 'b', 'c') + assert event.src_path == p('a', 'b', 'c') + elif isinstance(event, DirModifiedEvent): + assert event.src_path == p('a2', 'b') + + [email protected](platform.is_linux(), + reason="Linux create another set of events for this test") +def test_renaming_top_level_directory_on_windows(): + start_watching() + + mkdir(p('a')) + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirCreatedEvent) + assert event.src_path == p('a') + + mkdir(p('a', 'b')) + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirCreatedEvent) + assert event.src_path == p('a', 'b') + + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirCreatedEvent) + assert event.src_path == p('a', 'b') + + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirModifiedEvent) + assert event.src_path == p('a') + + mv(p('a'), p('a2')) + event = event_queue.get(timeout=5)[0] + assert isinstance(event, DirMovedEvent) + assert event.src_path == p('a', 'b') + + open(p('a2', 'b', 'c'), 'a').close() + + events = [] + while True: + events.append(event_queue.get(timeout=5)[0]) + if event_queue.empty(): + break + + assert all([isinstance(e, (FileCreatedEvent, FileMovedEvent, DirMovedEvent, DirModifiedEvent)) for e in events]) + + for event in events: + if isinstance(event, FileCreatedEvent): + assert event.src_path == p('a2', 'b', 'c') + elif isinstance(event, FileMovedEvent): + assert event.dest_path == p('a2', 'b', 'c') + assert event.src_path == 
p('a', 'b', 'c') + elif isinstance(event, DirMovedEvent): + assert event.dest_path == p('a2') + assert event.src_path == p('a') + elif isinstance(event, DirModifiedEvent): + assert event.src_path == p('a2', 'b') + + [email protected](platform.is_windows(), + reason="Windows create another set of events for this test") +def test_move_nested_subdirectories(): + mkdir(p('dir1/dir2/dir3'), parents=True) + touch(p('dir1/dir2/dir3', 'a')) + start_watching(p('')) + mv(p('dir1/dir2'), p('dir2')) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir1', 'dir2') + assert isinstance(event, DirMovedEvent) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir1') + assert isinstance(event, DirModifiedEvent) + + event = event_queue.get(timeout=5)[0] + assert p(event.src_path, '') == p('') + assert isinstance(event, DirModifiedEvent) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir1/dir2/dir3') + assert isinstance(event, DirMovedEvent) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir1/dir2/dir3', 'a') + assert isinstance(event, FileMovedEvent) + + touch(p('dir2/dir3', 'a')) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir2/dir3', 'a') + assert isinstance(event, FileModifiedEvent) + + [email protected](platform.is_linux(), + reason="Linux create another set of events for this test") +def test_move_nested_subdirectories_on_windows(): + mkdir(p('dir1/dir2/dir3'), parents=True) + touch(p('dir1/dir2/dir3', 'a')) + start_watching(p('')) + mv(p('dir1/dir2'), p('dir2')) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir1', 'dir2') + assert isinstance(event, FileDeletedEvent) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir2') + assert isinstance(event, DirCreatedEvent) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir2', 'dir3') + assert isinstance(event, DirCreatedEvent) + + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('dir2', 'dir3', 'a') + assert isinstance(event, FileCreatedEvent) + + touch(p('dir2/dir3', 'a')) + + events = [] + while True: + events.append(event_queue.get(timeout=5)[0]) + if event_queue.empty(): + break + + assert all([isinstance(e, (FileModifiedEvent, DirModifiedEvent)) for e in events]) + + for event in events: + if isinstance(event, FileModifiedEvent): + assert event.src_path == p('dir2', 'dir3', 'a') + elif isinstance(event, DirModifiedEvent): + assert event.src_path in [p('dir2'), p('dir2', 'dir3')]
Wrong source path after renaming a top-level folder Steps to reproduce on Ubuntu 18.04: start monitoring a folder in recursive mode, e.g. "/". Create folder A **"/A"**. Create folder B inside A **"/A/B"**. Rename A -> A2 **"/A2/B"**. Create a file inside B named example.txt **/A2/B/example.txt**. Bug: the src_path of the event fired when creating example.txt is **/A/B/example.txt**, but it should be **/A2/B/example.txt**.
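In essence, the fix above re-keys every watch-descriptor mapping under the old prefix when a watched directory is moved; without that, events inside the renamed tree keep resolving to the stale path reported in this issue. A standalone sketch of the remapping (the real inotify code stores paths as bytes; plain strings are used here for readability):

```python
import os

wd_for_path = {"/A": 1, "/A/B": 2}   # path -> inotify watch descriptor
path_for_wd = {1: "/A", 2: "/A/B"}   # reverse mapping


def on_moved(src, dest):
    # Re-key the moved directory itself and everything beneath it.
    for path, wd in list(wd_for_path.items()):
        if path == src or path.startswith(src + os.path.sep):
            new_path = path.replace(src, dest, 1)
            del wd_for_path[path]
            wd_for_path[new_path] = wd
            path_for_wd[wd] = new_path


on_moved("/A", "/A2")
print(wd_for_path)  # {'/A2': 1, '/A2/B': 2}
```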
0.0
ecaa92756fe78d4771d23550c0935b0190769035
[ "tests/test_emitter.py::test_renaming_top_level_directory", "tests/test_emitter.py::test_move_nested_subdirectories" ]
[ "tests/test_emitter.py::test_create", "tests/test_emitter.py::test_delete", "tests/test_emitter.py::test_modify", "tests/test_emitter.py::test_move", "tests/test_emitter.py::test_move_to", "tests/test_emitter.py::test_move_to_full", "tests/test_emitter.py::test_move_from", "tests/test_emitter.py::test_move_from_full", "tests/test_emitter.py::test_separate_consecutive_moves", "tests/test_emitter.py::test_delete_self", "tests/test_emitter.py::test_fast_subdirectory_creation_deletion", "tests/test_emitter.py::test_passing_unicode_should_give_unicode", "tests/test_emitter.py::test_passing_bytes_should_give_bytes", "tests/test_emitter.py::test_recursive_on", "tests/test_emitter.py::test_recursive_off" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-01-25 10:14:44+00:00
apache-2.0
2,649
gorakhargosh__watchdog-519
diff --git a/src/watchdog/observers/inotify_c.py b/src/watchdog/observers/inotify_c.py index 19b2c01..d6db4f4 100644 --- a/src/watchdog/observers/inotify_c.py +++ b/src/watchdog/observers/inotify_c.py @@ -26,6 +26,7 @@ from functools import reduce from ctypes import c_int, c_char_p, c_uint32 from watchdog.utils import has_attribute from watchdog.utils import UnsupportedLibc +from watchdog.utils.unicode_paths import decode def _load_libc(): @@ -453,16 +454,16 @@ class InotifyEvent(object): :param cookie: Event cookie :param name: - Event name. + Base name of the event source path. :param src_path: - Event source path + Full event source path. """ def __init__(self, wd, mask, cookie, name, src_path): self._wd = wd self._mask = mask self._cookie = cookie - self._name = name.decode() + self._name = name self._src_path = src_path @property @@ -573,4 +574,4 @@ class InotifyEvent(object): mask_string = self._get_mask_string(self.mask) s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>' return s % (type(self).__name__, self.src_path, self.wd, mask_string, - self.cookie, self.name) + self.cookie, decode(self.name)) diff --git a/src/watchdog/utils/dirsnapshot.py b/src/watchdog/utils/dirsnapshot.py index 320406c..52c4898 100644 --- a/src/watchdog/utils/dirsnapshot.py +++ b/src/watchdog/utils/dirsnapshot.py @@ -102,11 +102,11 @@ class DirectorySnapshotDiff(object): modified = set() for path in ref.paths & snapshot.paths: if ref.inode(path) == snapshot.inode(path): - if ref.mtime(path) != snapshot.mtime(path): + if ref.mtime(path) != snapshot.mtime(path) or ref.size(path) != snapshot.size(path): modified.add(path) for (old_path, new_path) in moved: - if ref.mtime(old_path) != snapshot.mtime(new_path): + if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size(old_path) != snapshot.size(new_path): modified.add(old_path) self._dirs_created = [path for path in created if snapshot.isdir(path)] @@ -267,6 +267,9 @@ class DirectorySnapshot(object): def mtime(self, path): return self._stat_info[path].st_mtime + + def size(self, path): + return self._stat_info[path].st_size def stat_info(self, path): """ diff --git a/tox.ini b/tox.ini index ce7b1c9..a4c5809 100644 --- a/tox.ini +++ b/tox.ini @@ -10,4 +10,4 @@ deps = py{34}: pytest-cov==2.6.0 extras = watchmedo commands = - python -m pytest {posargs} + python -bb -m pytest {posargs}
gorakhargosh/watchdog
11cb8d5ac57f162beb5c0eea13dcabc4dfdb9d63
diff --git a/tests/conftest.py b/tests/conftest.py index ae4eb2e..6fc9f6f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -37,3 +37,21 @@ def no_thread_leaks(): main = threading.main_thread() assert not [th for th in threading._dangling if th is not main and th.is_alive()] + + [email protected](autouse=True) +def no_warnings(recwarn): + """Fail on warning.""" + + yield + + warnings = [] + for warning in recwarn: # pragma: no cover + message = str(warning.message) + if ( + "Not importing directory" in message + or "Using or importing the ABCs" in message + ): + continue + warnings.append("{w.filename}:{w.lineno} {w.message}".format(w=warning)) + assert not warnings diff --git a/tests/shell.py b/tests/shell.py index 3989158..376fdf1 100644 --- a/tests/shell.py +++ b/tests/shell.py @@ -29,6 +29,7 @@ import os.path import tempfile import shutil import errno +import time # def tree(path='.', show_files=False): @@ -108,3 +109,14 @@ def mkdtemp(): def ls(path='.'): return os.listdir(path) + + +def msize(path): + """Modify the file size without updating the modified time.""" + with open(path, 'w') as w: + w.write('') + os.utime(path, (0, 0)) + time.sleep(0.4) + with open(path, 'w') as w: + w.write('0') + os.utime(path, (0, 0)) diff --git a/tests/test_inotify_c.py b/tests/test_inotify_c.py index 8bd24b1..912195a 100644 --- a/tests/test_inotify_c.py +++ b/tests/test_inotify_c.py @@ -154,3 +154,17 @@ def test_raise_error(monkeypatch): func() assert exc.value.errno == -1 assert "Unknown error -1" in str(exc.value) + + +def test_non_ascii_path(): + """ + Inotify can construct an event for a path containing non-ASCII. + """ + path = p(u"\N{SNOWMAN}") + with watching(p('')): + os.mkdir(path) + event, _ = event_queue.get(timeout=5) + assert isinstance(event.src_path, type(u"")) + assert event.src_path == path + # Just make sure it doesn't raise an exception. + assert repr(event) diff --git a/tests/test_observers_polling.py b/tests/test_observers_polling.py index 42bff6e..f94041a 100644 --- a/tests/test_observers_polling.py +++ b/tests/test_observers_polling.py @@ -41,7 +41,8 @@ from .shell import ( mkdtemp, touch, rm, - mv + mv, + msize ) @@ -98,6 +99,12 @@ def test___init__(event_queue, emitter): sleep(SLEEP_TIME) rm(p('afile')) + sleep(SLEEP_TIME) + msize(p('bfile')) + + sleep(SLEEP_TIME) + rm(p('bfile')) + sleep(SLEEP_TIME) emitter.stop() @@ -131,6 +138,13 @@ def test___init__(event_queue, emitter): DirModifiedEvent(p()), FileDeletedEvent(p('afile')), + + DirModifiedEvent(p()), + FileCreatedEvent(p('bfile')), + FileModifiedEvent(p('bfile')), + + DirModifiedEvent(p()), + FileDeletedEvent(p('bfile')), } expected.add(FileMovedEvent(p('fromfile'), p('project', 'tofile')))
Events from non-ASCII paths raise UnicodeDecodeError ``` from os import mkdir from watchdog.observers.inotify import InotifyObserver from watchdog.events import FileSystemEventHandler class H(FileSystemEventHandler): def on_any_event(self, event): print(event) def main(): d = u"demo" handler = H() mkdir(d) obs = InotifyObserver() obs.schedule(handler, d, recursive=True) obs.start() mkdir(u"/".join((d, u"\N{SNOWMAN}"))) obs.join() main() ``` results in this exception: ``` Exception in thread Thread-3: Traceback (most recent call last): File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner self.run() File "watchdog/src/watchdog/observers/inotify_buffer.py", line 87, in run inotify_events = self._inotify.read_events() File "watchdog/src/watchdog/observers/inotify_c.py", line 324, in read_events inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) File "watchdog/src/watchdog/observers/inotify_c.py", line 478, in __init__ self._name = name.decode() UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128) ``` Apparently introduced in 0c5e3531748d231e34ef6ecf8581ac20fd956b25
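The fix above stops decoding the raw inotify name eagerly (which fails under Python 2's default ASCII codec for a path like the snowman above) and keeps it as bytes, decoding only when the event is rendered, via watchdog's `unicode_paths.decode` helper. A rough sketch of the before/after behaviour, with `errors="replace"` standing in for that helper:

```python
name = u"\N{SNOWMAN}".encode("utf-8")  # raw bytes as read from the kernel

# Before: InotifyEvent.__init__ called name.decode() immediately, which
# under Python 2's ASCII default codec raised the UnicodeDecodeError
# quoted in the traceback above.

# After: the bytes are stored as-is; decoding happens lazily, and only
# for display purposes such as __repr__.
display_name = name.decode("utf-8", errors="replace")
print(display_name)  # the snowman glyph
```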
0.0
11cb8d5ac57f162beb5c0eea13dcabc4dfdb9d63
[ "tests/test_observers_polling.py::test___init__" ]
[ "tests/test_inotify_c.py::test_late_double_deletion", "tests/test_inotify_c.py::test_raise_error", "tests/test_inotify_c.py::test_non_ascii_path", "tests/test_observers_polling.py::test_delete_watched_dir" ]
{ "failed_lite_validators": [ "has_git_commit_hash", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-02-01 18:24:53+00:00
apache-2.0
2,650
gorakhargosh__watchdog-528
diff --git a/changelog.rst b/changelog.rst index a738c1c..b71bd0e 100644 --- a/changelog.rst +++ b/changelog.rst @@ -18,6 +18,7 @@ API changes - Fixed a race condition crash when a directory is swapped for a file. - Fixed the way we re-raise ``OSError``. - Fixed the path separator used in watchmedo. +- Remove emitters which failed to start. - We now generate sub created events only if ``recursive=True``. - Security fix in watchmedo: use ``yaml.safe_load()`` instead of ``yaml.load()`` - Use ``scandir`` to save memory. diff --git a/src/watchdog/observers/api.py b/src/watchdog/observers/api.py index 01fc8a1..98d63a9 100644 --- a/src/watchdog/observers/api.py +++ b/src/watchdog/observers/api.py @@ -248,8 +248,12 @@ class BaseObserver(EventDispatcher): return self._emitters def start(self): - for emitter in self._emitters: - emitter.start() + for emitter in self._emitters.copy(): + try: + emitter.start() + except Exception: + self._remove_emitter(emitter) + raise super(BaseObserver, self).start() def schedule(self, event_handler, path, recursive=False):
gorakhargosh/watchdog
6ba82ac78ebe82861c693aabbb685e7187edce59
diff --git a/tests/test_emitter.py b/tests/test_emitter.py index 3e414cc..792267d 100644 --- a/tests/test_emitter.py +++ b/tests/test_emitter.py @@ -41,7 +41,7 @@ if platform.is_linux(): InotifyFullEmitter, ) elif platform.is_darwin(): - pytestmark = pytest.mark.skip("FIXME: It is a matter of bad comparisons between bytes and str.") + pytestmark = pytest.mark.skip("FIXME: issue #546.") from watchdog.observers.fsevents2 import FSEventsEmitter as Emitter elif platform.is_windows(): from watchdog.observers.read_directory_changes import ( diff --git a/tests/test_observer.py b/tests/test_observer.py index 26229cc..f40570d 100644 --- a/tests/test_observer.py +++ b/tests/test_observer.py @@ -115,3 +115,30 @@ def test_2_observers_on_the_same_path(observer, observer2): observer2.schedule(None, '') assert len(observer2.emitters) == 1 + + +def test_start_failure_should_not_prevent_further_try(monkeypatch, observer): + observer.schedule(None, '') + emitters = observer.emitters + assert len(emitters) == 1 + + # Make the emitter to fail on start() + + def mocked_start(): + raise OSError() + + emitter = next(iter(emitters)) + monkeypatch.setattr(emitter, "start", mocked_start) + with pytest.raises(OSError): + observer.start() + # The emitter should be removed from the list + assert len(observer.emitters) == 0 + + # Restoring the original behavior should work like there never be emitters + monkeypatch.undo() + observer.start() + assert len(observer.emitters) == 0 + + # Re-schduling the watch should work + observer.schedule(None, '') + assert len(observer.emitters) == 1 diff --git a/tests/test_observers_polling.py b/tests/test_observers_polling.py index f94041a..109163b 100644 --- a/tests/test_observers_polling.py +++ b/tests/test_observers_polling.py @@ -65,12 +65,14 @@ def event_queue(): @pytest.fixture def emitter(event_queue): watch = ObservedWatch(temp_dir, True) - yield Emitter(event_queue, watch, timeout=0.2) + em = Emitter(event_queue, watch, timeout=0.2) + em.start() + yield em + em.stop() def test___init__(event_queue, emitter): SLEEP_TIME = 0.4 - emitter.start() sleep(SLEEP_TIME) mkdir(p('project')) @@ -165,7 +167,6 @@ def test___init__(event_queue, emitter): def test_delete_watched_dir(event_queue, emitter): SLEEP_TIME = 0.4 - emitter.start() rm(p(''), recursive=True) sleep(SLEEP_TIME) diff --git a/tests/test_observers_winapi.py b/tests/test_observers_winapi.py index 52367e3..4be47c5 100644 --- a/tests/test_observers_winapi.py +++ b/tests/test_observers_winapi.py @@ -59,14 +59,14 @@ def event_queue(): @pytest.fixture def emitter(event_queue): watch = ObservedWatch(temp_dir, True) - yield WindowsApiEmitter(event_queue, watch, timeout=0.2) + em = WindowsApiEmitter(event_queue, watch, timeout=0.2) + yield em + em.stop() -def test___init__(): +def test___init__(event_queue, emitter): SLEEP_TIME = 2 - emitter.start() - sleep(SLEEP_TIME) mkdir(p('fromdir')) @@ -81,10 +81,10 @@ def test___init__(): # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. - expected = {[ + expected = { DirCreatedEvent(p('fromdir')), DirMovedEvent(p('fromdir'), p('todir')), - ]} + } got = set()
Remove emitters which failed to start This is a corner case: when an emitter is added, it is automatically started... but if it fails to start, it remains added. Now, if someone adds an emitter again for the same watch, the emitter is not retried and remains stopped. I am not sure whether watchdog should handle this case, or whether users should manually unsubscribe an emitter on failure. Thanks!
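The fix is small: iterate over a copy of the emitter set, drop any emitter whose `start()` raises, and re-raise so the caller still sees the failure; a later `schedule()` for the same watch then creates a fresh emitter. A condensed sketch of the pattern, with a dummy failing emitter:

```python
class ObserverSketch:
    # Minimal stand-in for BaseObserver, illustrating only the fix.
    def __init__(self):
        self._emitters = set()

    def start(self):
        for emitter in self._emitters.copy():
            try:
                emitter.start()
            except Exception:
                self._emitters.discard(emitter)  # do not keep a dead emitter
                raise


class FailingEmitter:
    def start(self):
        raise OSError("inotify watch limit reached")


obs = ObserverSketch()
obs._emitters.add(FailingEmitter())
try:
    obs.start()
except OSError:
    pass
assert not obs._emitters  # the failed emitter was removed
```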
0.0
6ba82ac78ebe82861c693aabbb685e7187edce59
[ "tests/test_observer.py::test_start_failure_should_not_prevent_further_try" ]
[ "tests/test_emitter.py::test_create", "tests/test_emitter.py::test_delete", "tests/test_emitter.py::test_modify", "tests/test_emitter.py::test_move", "tests/test_emitter.py::test_move_to", "tests/test_emitter.py::test_move_to_full", "tests/test_emitter.py::test_move_from", "tests/test_emitter.py::test_move_from_full", "tests/test_emitter.py::test_separate_consecutive_moves", "tests/test_emitter.py::test_fast_subdirectory_creation_deletion", "tests/test_emitter.py::test_passing_unicode_should_give_unicode", "tests/test_emitter.py::test_passing_bytes_should_give_bytes", "tests/test_emitter.py::test_recursive_on", "tests/test_emitter.py::test_recursive_off", "tests/test_observer.py::test_schedule_should_start_emitter_if_running", "tests/test_observer.py::test_schedule_should_not_start_emitter_if_not_running", "tests/test_observer.py::test_start_should_start_emitter", "tests/test_observer.py::test_stop_should_stop_emitter", "tests/test_observer.py::test_unschedule_self", "tests/test_observer.py::test_schedule_after_unschedule_all", "tests/test_observer.py::test_2_observers_on_the_same_path", "tests/test_observers_polling.py::test___init__", "tests/test_observers_polling.py::test_delete_watched_dir" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-02-17 08:14:10+00:00
apache-2.0
2,651
gorakhargosh__watchdog-530
diff --git a/src/watchdog/observers/inotify_c.py b/src/watchdog/observers/inotify_c.py index ba99bd5..19b2c01 100644 --- a/src/watchdog/observers/inotify_c.py +++ b/src/watchdog/observers/inotify_c.py @@ -376,7 +376,7 @@ class Inotify(object): Event bit mask. """ if not os.path.isdir(path): - raise OSError('Path is not a directory') + raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path) self._add_watch(path, mask) if recursive: for root, dirnames, _ in os.walk(path): @@ -410,11 +410,11 @@ class Inotify(object): """ err = ctypes.get_errno() if err == errno.ENOSPC: - raise OSError("inotify watch limit reached") + raise OSError(errno.ENOSPC, "inotify watch limit reached") elif err == errno.EMFILE: - raise OSError("inotify instance limit reached") + raise OSError(errno.EMFILE, "inotify instance limit reached") else: - raise OSError(os.strerror(err)) + raise OSError(err, os.strerror(err)) @staticmethod def _parse_event_buffer(event_buffer):
gorakhargosh/watchdog
9acf1855b94c73394ba453d39f35862dcdd92159
diff --git a/tests/shell.py b/tests/shell.py index ff4635d..3989158 100644 --- a/tests/shell.py +++ b/tests/shell.py @@ -24,6 +24,7 @@ from __future__ import with_statement +import os import os.path import tempfile import shutil @@ -71,7 +72,7 @@ def rm(path, recursive=False): # else: # os.rmdir(path) else: - raise OSError("rm: %s: is a directory." % path) + raise OSError(errno.EISDIR, os.strerror(errno.EISDIR), path) else: os.remove(path) diff --git a/tests/test_inotify_c.py b/tests/test_inotify_c.py index 0b5a887..1e8092c 100644 --- a/tests/test_inotify_c.py +++ b/tests/test_inotify_c.py @@ -1,17 +1,26 @@ from __future__ import unicode_literals -import os + import pytest + +from watchdog.utils import platform + +if not platform.is_linux(): + pytest.skip("GNU/Linux only.", allow_module_level=True) + +import ctypes +import errno +import os import logging import contextlib -from tests import Queue from functools import partial -from .shell import rm, mkdtemp -from watchdog.utils import platform + from watchdog.events import DirCreatedEvent, DirDeletedEvent, DirModifiedEvent from watchdog.observers.api import ObservedWatch +from watchdog.observers.inotify import InotifyFullEmitter, InotifyEmitter +from watchdog.observers.inotify_c import Inotify -if platform.is_linux(): - from watchdog.observers.inotify import InotifyFullEmitter, InotifyEmitter +from . import Queue +from .shell import mkdtemp, rm logging.basicConfig(level=logging.DEBUG) @@ -39,11 +48,12 @@ def watching(path=None, use_full_emitter=False): def teardown_function(function): rm(p(''), recursive=True) - assert not emitter.is_alive() + try: + assert not emitter.is_alive() + except NameError: + pass [email protected](not platform.is_linux(), - reason="Testing with inotify messages (Linux only)") def test_late_double_deletion(monkeypatch): inotify_fd = type(str("FD"), (object,), {})() # Empty object inotify_fd.last = 0 @@ -69,6 +79,7 @@ def test_late_double_deletion(monkeypatch): ) os_read_bkp = os.read + def fakeread(fd, length): if fd is inotify_fd: result, fd.buf = fd.buf[:length], fd.buf[length:] @@ -76,6 +87,7 @@ def test_late_double_deletion(monkeypatch): return os_read_bkp(fd, length) os_close_bkp = os.close + def fakeclose(fd): if fd is not inotify_fd: os_close_bkp(fd) @@ -112,6 +124,34 @@ def test_late_double_deletion(monkeypatch): assert isinstance(event, DirModifiedEvent) assert event.src_path == p('').rstrip(os.path.sep) - assert inotify_fd.last == 3 # Number of directories - assert inotify_fd.buf == b"" # Didn't miss any event - assert inotify_fd.wds == [2, 3] # Only 1 is removed explicitly + assert inotify_fd.last == 3 # Number of directories + assert inotify_fd.buf == b"" # Didn't miss any event + assert inotify_fd.wds == [2, 3] # Only 1 is removed explicitly + + +def test_raise_error(monkeypatch): + func = Inotify._raise_error + + monkeypatch.setattr(ctypes, "get_errno", lambda: errno.ENOSPC) + with pytest.raises(OSError) as exc: + func() + assert exc.value.errno == errno.ENOSPC + assert "inotify watch limit reached" in str(exc.value) + + monkeypatch.setattr(ctypes, "get_errno", lambda: errno.EMFILE) + with pytest.raises(OSError) as exc: + func() + assert exc.value.errno == errno.EMFILE + assert "inotify instance limit reached" in str(exc.value) + + monkeypatch.setattr(ctypes, "get_errno", lambda: errno.ENOENT) + with pytest.raises(OSError) as exc: + func() + assert exc.value.errno == errno.ENOENT + assert "No such file or directory" in str(exc.value) + + monkeypatch.setattr(ctypes, "get_errno", lambda: -1) + with 
pytest.raises(OSError) as exc: + func() + assert exc.value.errno == -1 + assert "Unknown error -1" in str(exc.value)
Constructor for OSError used incorrectly (inotify_c.py, line 406) The OS is Windows 10 with "Windows Subsystem for Linux" enabled, running from inside the "Bash" shell, although it still appears to be cmd.exe. > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/api.py", line 255, in start > emitter.start() > File "/usr/local/lib/python2.7/dist-packages/watchdog/utils/__init__.py", line 111, in start > self.on_thread_start() > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/inotify.py", line 121, in on_thread_start > self._inotify = InotifyBuffer(path, self.watch.is_recursive) > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/inotify_buffer.py", line 35, in __init__ > self._inotify = Inotify(path, recursive) > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/inotify_c.py", line 187, in __init__ > self._add_dir_watch(path, recursive, event_mask) > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/inotify_c.py", line 364, in _add_dir_watch > self._add_watch(path, mask) > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/inotify_c.py", line 385, in _add_watch > Inotify._raise_error() > File "/usr/local/lib/python2.7/dist-packages/watchdog/observers/inotify_c.py", line 406, in _raise_error > raise OSError(os.strerror(err)) > OSError: Invalid argument
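The underlying Python detail, for reference: `OSError(message)` leaves `errno` unset, while the multi-argument form populates `errno`, `strerror` and `filename` (and, on Python 3, maps to the matching `OSError` subclass), which is what the patch above switches to:

```python
import errno
import os

old = OSError("Path is not a directory")
print(old.errno, old.strerror)  # None None -- callers cannot inspect it

new = OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), "/some/path")
print(new.errno, new.strerror, new.filename)
# e.g. 20 'Not a directory' '/some/path' (the errno value is platform-specific)
print(isinstance(new, NotADirectoryError))  # True on Python 3
```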
0.0
9acf1855b94c73394ba453d39f35862dcdd92159
[ "tests/test_inotify_c.py::test_raise_error" ]
[ "tests/test_inotify_c.py::test_late_double_deletion" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-02-17 10:14:38+00:00
apache-2.0
2,652
gorakhargosh__watchdog-602
diff --git a/changelog.rst b/changelog.rst index e212e70..4cb688a 100644 --- a/changelog.rst +++ b/changelog.rst @@ -13,6 +13,8 @@ Breaking Changes - Dropped support for Python 2.6, 3.2 and 3.3 - Emitters that failed to start are now removed +- [snapshot] Removed the deprecated ``walker_callback`` argument, + use ``stat`` instead - [watchmedo] The utility is no more installed by default but via the extra ``watchdog[watchmedo]`` diff --git a/src/watchdog/observers/kqueue.py b/src/watchdog/observers/kqueue.py index ad54911..0e4e5bb 100644 --- a/src/watchdog/observers/kqueue.py +++ b/src/watchdog/observers/kqueue.py @@ -435,12 +435,14 @@ class KqueueEmitter(EventEmitter): # A collection of KeventDescriptor. self._descriptors = KeventDescriptorSet() - def walker_callback(path, stat_info, self=self): + def custom_stat(path, self=self): + stat_info = stat(path) self._register_kevent(path, stat.S_ISDIR(stat_info.st_mode)) + return stat_info self._snapshot = DirectorySnapshot(watch.path, - watch.is_recursive, - walker_callback) + recursive=watch.is_recursive, + stat=custom_stat) def _register_kevent(self, path, is_directory): """ diff --git a/src/watchdog/utils/dirsnapshot.py b/src/watchdog/utils/dirsnapshot.py index 40ff95b..4083b3f 100644 --- a/src/watchdog/utils/dirsnapshot.py +++ b/src/watchdog/utils/dirsnapshot.py @@ -226,31 +226,27 @@ class DirectorySnapshot(object): snapshot; ``False`` otherwise. :type recursive: ``bool`` - :param walker_callback: - .. deprecated:: 0.7.2 :param stat: Use custom stat function that returns a stat structure for path. Currently only st_dev, st_ino, st_mode and st_mtime are needed. - A function with the signature ``walker_callback(path, stat_info)`` - which will be called for every entry in the directory tree. + A function taking a ``path`` as argument which will be called + for every entry in the directory tree. :param listdir: Use custom listdir function. For details see ``os.scandir`` if available, else ``os.listdir``. """ def __init__(self, path, recursive=True, - walker_callback=(lambda p, s: None), stat=default_stat, listdir=scandir): self.recursive = recursive - self.walker_callback = walker_callback self.stat = stat self.listdir = listdir self._stat_info = {} self._inode_to_path = {} - st = stat(path) + st = self.stat(path) self._stat_info[path] = st self._inode_to_path[(st.st_ino, st.st_dev)] = path @@ -258,7 +254,6 @@ class DirectorySnapshot(object): i = (st.st_ino, st.st_dev) self._inode_to_path[i] = p self._stat_info[p] = st - walker_callback(p, st) def walk(self, root): try:
gorakhargosh/watchdog
587d11fa54945ed3b2efddd17d6cf657ede3afb0
diff --git a/tests/test_snapshot_diff.py b/tests/test_snapshot_diff.py index 9cae27d..477f041 100644 --- a/tests/test_snapshot_diff.py +++ b/tests/test_snapshot_diff.py @@ -16,12 +16,15 @@ import errno import os +import pickle import time -from .shell import mkdir, touch, mv, rm + from watchdog.utils.dirsnapshot import DirectorySnapshot from watchdog.utils.dirsnapshot import DirectorySnapshotDiff from watchdog.utils import platform +from .shell import mkdir, touch, mv, rm + def wait(): """ @@ -35,6 +38,13 @@ def wait(): time.sleep(0.5) +def test_pickle(p): + """It should be possible to pickle a snapshot.""" + mkdir(p('dir1')) + snasphot = DirectorySnapshot(p('dir1')) + pickle.dumps(snasphot) + + def test_move_to(p): mkdir(p('dir1')) mkdir(p('dir2'))
Can't pickle DirectorySnapshots First of all, I'm not sure if this is an issue, an expected behaviour, or just a scenario not aimed to be solved. ## Expected Behavior I expect to be able to pickle DirectorySnapshot objects, as I can do in the latest release, 0.9.0. ## Current Behavior I can't pickle a DirectorySnapshot because I'm getting a PicklingError. It doesn't happen on the 0.9.0 version. ## Possible Solution One possible workaround is using the `dill` library, and it would work, but I find it strange not to make it work with `pickle` as it did before, because this breaks existing code. The following works: ```python import dill from src.watchdog.utils.dirsnapshot import DirectorySnapshot dill.dumps(DirectorySnapshot("/Users/alex/tmp")) ``` ## Steps to Reproduce 1. Clone the repository with the latest `master` commit. Currently `7a55f317946953d86a5acea64d9b31931ca25307`. 2. Try to pickle a DirectorySnapshot. Notice the import from the `src` directory, not the PyPI version. ```python import pickle from src.watchdog.utils.dirsnapshot import DirectorySnapshot pickle.dumps(DirectorySnapshot("/Users/alex/tmp")) ``` 3. You will see the following exception: ```python _pickle.PicklingError: Can't pickle <function DirectorySnapshot.<lambda> at 0x10dfb20d0>: attribute lookup DirectorySnapshot.<lambda> on src.watchdog.utils.dirsnapshot failed ``` ## Context (Environment) I'm facing this problem while using both Python 3.7 and Python 3.8 on macOS Mojave 10.14.6. In my scenario I need to pickle a DirectorySnapshot object to later compare it with the current snapshot and see if there was any change while my program was down. ## Possible Implementation I just propose to make it work with `pickle`. Maybe the problematic lambda is not required and we can just avoid serializing it using `__getstate__`. Otherwise there is no solution with `pickle` and we have to switch to the `dill` library.
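The root cause in miniature: `pickle` serializes functions by reference, and a lambda has no importable name, so an instance that stores one (here, the old `walker_callback` default) cannot be pickled. The merged fix simply removes the deprecated callback so the snapshot no longer holds a function at all. An illustrative reproduction:

```python
import pickle


class Snapshot:
    # Mimics the old DirectorySnapshot signature; the stored lambda is
    # what made pickling fail.
    def __init__(self, walker_callback=(lambda p, s: None)):
        self.walker_callback = walker_callback


try:
    pickle.dumps(Snapshot())
except (pickle.PicklingError, AttributeError) as exc:
    # The exact exception type can vary across Python versions.
    print("cannot pickle:", exc)
```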
0.0
587d11fa54945ed3b2efddd17d6cf657ede3afb0
[ "tests/test_snapshot_diff.py::test_pickle" ]
[ "tests/test_snapshot_diff.py::test_move_to", "tests/test_snapshot_diff.py::test_move_from", "tests/test_snapshot_diff.py::test_move_internal", "tests/test_snapshot_diff.py::test_move_replace", "tests/test_snapshot_diff.py::test_dir_modify_on_create", "tests/test_snapshot_diff.py::test_dir_modify_on_move", "tests/test_snapshot_diff.py::test_detect_modify_for_moved_files", "tests/test_snapshot_diff.py::test_replace_dir_with_file", "tests/test_snapshot_diff.py::test_permission_error", "tests/test_snapshot_diff.py::test_ignore_device" ]
{ "failed_lite_validators": [ "has_git_commit_hash", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-12-11 16:36:10+00:00
apache-2.0
2,653
gorakhargosh__watchdog-747
diff --git a/changelog.rst b/changelog.rst index 167f5cb..52943e5 100644 --- a/changelog.rst +++ b/changelog.rst @@ -9,7 +9,8 @@ Changelog 202x-xx-xx • `full history <https://github.com/gorakhargosh/watchdog/compare/v1.0.2...master>`__ - Avoid deprecated ``PyEval_InitThreads`` on Python 3.7+ (`#746 <https://github.com/gorakhargosh/watchdog/pull/746>`_) -- Thanks to our beloved contributors: @bstaletic +- [inotify] Add support for ``IN_CLOSE_WRITE`` events. A ``FileCloseEvent`` event will be fired. Note that ``IN_CLOSE_NOWRITE`` events are not handled to prevent much noise. (`#184 <https://github.com/gorakhargosh/watchdog/pull/184>`_, `#245 <https://github.com/gorakhargosh/watchdog/pull/245>`_, `#280 <https://github.com/gorakhargosh/watchdog/pull/280>`_, `#313 <https://github.com/gorakhargosh/watchdog/pull/313>`_, `#690 <https://github.com/gorakhargosh/watchdog/pull/690>`_) +- Thanks to our beloved contributors: @bstaletic, @lukassup, @ysard 1.0.2 diff --git a/src/watchdog/events.py b/src/watchdog/events.py index 73e82e8..8b9bca8 100755 --- a/src/watchdog/events.py +++ b/src/watchdog/events.py @@ -52,6 +52,10 @@ Event Classes :members: :show-inheritance: +.. autoclass:: FileClosedEvent + :members: + :show-inheritance: + .. autoclass:: DirCreatedEvent :members: :show-inheritance: @@ -95,6 +99,7 @@ EVENT_TYPE_MOVED = 'moved' EVENT_TYPE_DELETED = 'deleted' EVENT_TYPE_CREATED = 'created' EVENT_TYPE_MODIFIED = 'modified' +EVENT_TYPE_CLOSED = 'closed' class FileSystemEvent: @@ -212,6 +217,12 @@ class FileMovedEvent(FileSystemMovedEvent): """File system event representing file movement on the file system.""" +class FileClosedEvent(FileSystemEvent): + """File system event representing file close on the file system.""" + + event_type = EVENT_TYPE_CLOSED + + # Directory events. @@ -263,6 +274,7 @@ class FileSystemEventHandler: EVENT_TYPE_DELETED: self.on_deleted, EVENT_TYPE_MODIFIED: self.on_modified, EVENT_TYPE_MOVED: self.on_moved, + EVENT_TYPE_CLOSED: self.on_closed, }[event.event_type](event) def on_any_event(self, event): @@ -310,6 +322,15 @@ class FileSystemEventHandler: :class:`DirModifiedEvent` or :class:`FileModifiedEvent` """ + def on_closed(self, event): + """Called when a file opened for writing is closed. + + :param event: + Event representing file closing. 
+ :type event: + :class:`FileClosedEvent` + """ + class PatternMatchingEventHandler(FileSystemEventHandler): """ diff --git a/src/watchdog/observers/inotify.py b/src/watchdog/observers/inotify.py index 5cb8c5d..7c5d9c6 100644 --- a/src/watchdog/observers/inotify.py +++ b/src/watchdog/observers/inotify.py @@ -86,6 +86,7 @@ from watchdog.events import ( FileModifiedEvent, FileMovedEvent, FileCreatedEvent, + FileClosedEvent, generate_sub_moved_events, generate_sub_created_events, ) @@ -170,6 +171,13 @@ class InotifyEmitter(EventEmitter): cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) + elif event.is_close_write and not event.is_directory: + cls = FileClosedEvent + self.queue_event(cls(src_path)) + self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) + # elif event.is_close_nowrite and not event.is_directory: + # cls = FileClosedEvent + # self.queue_event(cls(src_path)) elif event.is_delete_self and src_path == self.watch.path: self.queue_event(DirDeletedEvent(src_path)) self.stop() diff --git a/src/watchdog/observers/inotify_c.py b/src/watchdog/observers/inotify_c.py index 61452dd..8925147 100644 --- a/src/watchdog/observers/inotify_c.py +++ b/src/watchdog/observers/inotify_c.py @@ -139,6 +139,7 @@ WATCHDOG_ALL_EVENTS = reduce( InotifyConstants.IN_DELETE, InotifyConstants.IN_DELETE_SELF, InotifyConstants.IN_DONT_FOLLOW, + InotifyConstants.IN_CLOSE_WRITE, ])
gorakhargosh/watchdog
847a059ae717424baea81a949c7726fc3520bb8a
diff --git a/tests/test_emitter.py b/tests/test_emitter.py index 6df98ae..4fd0506 100644 --- a/tests/test_emitter.py +++ b/tests/test_emitter.py @@ -31,7 +31,8 @@ from watchdog.events import ( DirDeletedEvent, DirModifiedEvent, DirCreatedEvent, - DirMovedEvent + DirMovedEvent, + FileClosedEvent, ) from watchdog.observers.api import ObservedWatch @@ -107,6 +108,33 @@ def test_create(): assert os.path.normpath(event.src_path) == os.path.normpath(p('')) assert isinstance(event, DirModifiedEvent) + if platform.is_linux(): + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('a') + assert isinstance(event, FileClosedEvent) + + [email protected](not platform.is_linux(), reason="FileCloseEvent only supported in GNU/Linux") [email protected](max_runs=5, min_passes=1, rerun_filter=rerun_filter) +def test_close(): + f_d = open(p('a'), 'a') + start_watching() + f_d.close() + + # After file creation/open in append mode + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('a') + assert isinstance(event, FileClosedEvent) + + event = event_queue.get(timeout=5)[0] + assert os.path.normpath(event.src_path) == os.path.normpath(p('')) + assert isinstance(event, DirModifiedEvent) + + # After read-only, only IN_CLOSE_NOWRITE is emitted but not catched for now #747 + open(p('a'), 'r').close() + + assert event_queue.empty() + @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( @@ -153,6 +181,11 @@ def test_modify(): assert event.src_path == p('a') assert isinstance(event, FileModifiedEvent) + if platform.is_linux(): + event = event_queue.get(timeout=5)[0] + assert event.src_path == p('a') + assert isinstance(event, FileClosedEvent) + @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move(): @@ -423,7 +456,7 @@ def test_renaming_top_level_directory(): if event_queue.empty(): break - assert all([isinstance(e, (FileCreatedEvent, FileMovedEvent, DirModifiedEvent)) for e in events]) + assert all([isinstance(e, (FileCreatedEvent, FileMovedEvent, DirModifiedEvent, FileClosedEvent)) for e in events]) for event in events: if isinstance(event, FileCreatedEvent): diff --git a/tests/test_events.py b/tests/test_events.py index f41a3f8..476f6ef 100644 --- a/tests/test_events.py +++ b/tests/test_events.py @@ -19,6 +19,7 @@ from watchdog.events import ( FileDeletedEvent, FileModifiedEvent, FileCreatedEvent, + FileClosedEvent, DirDeletedEvent, DirModifiedEvent, DirCreatedEvent, @@ -29,6 +30,7 @@ from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MOVED, + EVENT_TYPE_CLOSED, ) path_1 = '/path/xyz' @@ -82,6 +84,14 @@ def test_file_moved_event(): assert not event.is_synthetic +def test_file_closed_event(): + event = FileClosedEvent(path_1) + assert path_1 == event.src_path + assert EVENT_TYPE_CLOSED == event.event_type + assert not event.is_directory + assert not event.is_synthetic + + def test_dir_deleted_event(): event = DirDeletedEvent(path_1) assert path_1 == event.src_path @@ -111,6 +121,7 @@ def test_file_system_event_handler_dispatch(): file_del_event = FileDeletedEvent('/path/blah.txt') dir_cre_event = DirCreatedEvent('/path/blah.py') file_cre_event = FileCreatedEvent('/path/blah.txt') + file_cls_event = FileClosedEvent('/path/blah.txt') dir_mod_event = DirModifiedEvent('/path/blah.py') file_mod_event = FileModifiedEvent('/path/blah.txt') dir_mov_event = DirMovedEvent('/path/blah.py', '/path/blah') @@ -125,6 +136,7 @@ def test_file_system_event_handler_dispatch(): file_del_event, 
file_cre_event, file_mov_event, + file_cls_event, ] class TestableEventHandler(FileSystemEventHandler): @@ -144,6 +156,9 @@ def test_file_system_event_handler_dispatch(): def on_created(self, event): assert event.event_type == EVENT_TYPE_CREATED + def on_closed(self, event): + assert event.event_type == EVENT_TYPE_CLOSED + handler = TestableEventHandler() for event in all_events:
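A minimal usage sketch of the `on_closed` hook these tests exercise, assuming standard watchdog observer wiring; the watched path `"."` and the 5-second runtime are illustrative, and the event only fires on Linux/inotify (`IN_CLOSE_WRITE`).

```python
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class ClosedHandler(FileSystemEventHandler):
    def on_closed(self, event):
        # Fired when a file opened for writing is closed (Linux-only;
        # IN_CLOSE_NOWRITE is deliberately not forwarded, see #747).
        print(f"closed: {event.src_path}")


observer = Observer()
observer.schedule(ClosedHandler(), ".", recursive=False)
observer.start()
try:
    time.sleep(5)  # illustrative: watch the current directory briefly
finally:
    observer.stop()
    observer.join()
```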
Modified event triggered twice If I copy and paste a file on OSX Lion, it triggers the created event once and the modified event twice. Should it trigger the modified event just once? ` INFO:root:Created file: /Users/ouyangjichao/BoxSync/rename/testwatchdog.py INFO:root:Modified file: /Users/ouyangjichao/BoxSync/rename/testwatchdog.py INFO:root:Modified file: /Users/ouyangjichao/BoxSync/rename/testwatchdog.py `
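A hedged workaround sketch for the duplicate-modified behaviour reported above: coalesce repeats of the same event on the same path within a short window. The 0.1 s window and the handler shape are illustrative assumptions, not watchdog behaviour.

```python
import time

from watchdog.events import FileSystemEventHandler


class DebouncedHandler(FileSystemEventHandler):
    """Drop repeats of the same event on the same path within a window."""

    def __init__(self, window=0.1):
        self._window = window  # seconds; illustrative default
        self._last = {}  # (event_type, src_path) -> last monotonic timestamp

    def on_modified(self, event):
        key = (event.event_type, event.src_path)
        now = time.monotonic()
        if now - self._last.get(key, 0.0) < self._window:
            return  # second "modified" within the window, ignore it
        self._last[key] = now
        print(f"modified: {event.src_path}")
```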
0.0
847a059ae717424baea81a949c7726fc3520bb8a
[ "tests/test_emitter.py::test_create", "tests/test_emitter.py::test_close", "tests/test_emitter.py::test_create_wrong_encoding", "tests/test_emitter.py::test_delete", "tests/test_emitter.py::test_modify", "tests/test_emitter.py::test_move", "tests/test_emitter.py::test_move_to", "tests/test_emitter.py::test_move_to_full", "tests/test_emitter.py::test_move_from", "tests/test_emitter.py::test_move_from_full", "tests/test_emitter.py::test_separate_consecutive_moves", "tests/test_emitter.py::test_delete_self", "tests/test_emitter.py::test_fast_subdirectory_creation_deletion", "tests/test_emitter.py::test_passing_unicode_should_give_unicode", "tests/test_emitter.py::test_passing_bytes_should_give_bytes", "tests/test_emitter.py::test_recursive_on", "tests/test_emitter.py::test_recursive_off", "tests/test_emitter.py::test_renaming_top_level_directory", "tests/test_emitter.py::test_move_nested_subdirectories", "tests/test_events.py::test_file_deleted_event", "tests/test_events.py::test_file_delete_event_is_directory", "tests/test_events.py::test_file_modified_event", "tests/test_events.py::test_file_modified_event_is_directory", "tests/test_events.py::test_file_created_event", "tests/test_events.py::test_file_moved_event", "tests/test_events.py::test_file_closed_event", "tests/test_events.py::test_dir_deleted_event", "tests/test_events.py::test_dir_modified_event", "tests/test_events.py::test_dir_created_event", "tests/test_events.py::test_file_system_event_handler_dispatch" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-01-13 01:22:27+00:00
apache-2.0
2,654
gorakhargosh__watchdog-760
diff --git a/changelog.rst b/changelog.rst index 61e2b48..b47c5d8 100644 --- a/changelog.rst +++ b/changelog.rst @@ -10,6 +10,7 @@ Changelog - Avoid deprecated ``PyEval_InitThreads`` on Python 3.7+ (`#746 <https://github.com/gorakhargosh/watchdog/pull/746>`_) - [inotify] Add support for ``IN_CLOSE_WRITE`` events. A ``FileCloseEvent`` event will be fired. Note that ``IN_CLOSE_NOWRITE`` events are not handled to prevent much noise. (`#184 <https://github.com/gorakhargosh/watchdog/pull/184>`_, `#245 <https://github.com/gorakhargosh/watchdog/pull/245>`_, `#280 <https://github.com/gorakhargosh/watchdog/pull/280>`_, `#313 <https://github.com/gorakhargosh/watchdog/pull/313>`_, `#690 <https://github.com/gorakhargosh/watchdog/pull/690>`_) +- [inotify] Allow to stop the emitter multiple times (`#760 <https://github.com/gorakhargosh/watchdog/pull/760>`_) - [mac] Support coalesced filesystem events (`#734 <https://github.com/gorakhargosh/watchdog/pull/734>`_) - [mac] Drop support for OSX 10.12 and earlier (`#750 <https://github.com/gorakhargosh/watchdog/pull/750>`_) - [mac] Fix an issue when renaming an item changes only the casing (`#750 <https://github.com/gorakhargosh/watchdog/pull/750>`_) diff --git a/src/watchdog/observers/inotify.py b/src/watchdog/observers/inotify.py index 7c5d9c6..ff01938 100644 --- a/src/watchdog/observers/inotify.py +++ b/src/watchdog/observers/inotify.py @@ -120,6 +120,7 @@ class InotifyEmitter(EventEmitter): def on_thread_stop(self): if self._inotify: self._inotify.close() + self._inotify = None def queue_events(self, timeout, full_events=False): # If "full_events" is true, then the method will report unmatched move events as separate events @@ -179,7 +180,8 @@ class InotifyEmitter(EventEmitter): # cls = FileClosedEvent # self.queue_event(cls(src_path)) elif event.is_delete_self and src_path == self.watch.path: - self.queue_event(DirDeletedEvent(src_path)) + cls = DirDeletedEvent if event.is_directory else FileDeletedEvent + self.queue_event(cls(src_path)) self.stop() def _decode_path(self, path): diff --git a/src/watchdog/observers/inotify_c.py b/src/watchdog/observers/inotify_c.py index 9beff2b..c07960c 100644 --- a/src/watchdog/observers/inotify_c.py +++ b/src/watchdog/observers/inotify_c.py @@ -275,7 +275,12 @@ class Inotify: if self._path in self._wd_for_path: wd = self._wd_for_path[self._path] inotify_rm_watch(self._inotify_fd, wd) - os.close(self._inotify_fd) + + try: + os.close(self._inotify_fd) + except OSError: + # descriptor may be invalid because file was deleted + pass def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE): """
gorakhargosh/watchdog
72a254502960cd347555e593ef0694f31bc29d7e
diff --git a/tests/test_emitter.py b/tests/test_emitter.py index a1140b9..62b9500 100644 --- a/tests/test_emitter.py +++ b/tests/test_emitter.py @@ -70,11 +70,7 @@ def setup_teardown(tmpdir): yield - try: - emitter.stop() - except OSError: - # watch was already stopped, e.g., in `test_delete_self` - pass + emitter.stop() emitter.join(5) assert not emitter.is_alive() diff --git a/tests/test_inotify_c.py b/tests/test_inotify_c.py index a18f140..81f6586 100644 --- a/tests/test_inotify_c.py +++ b/tests/test_inotify_c.py @@ -40,11 +40,7 @@ def watching(path=None, use_full_emitter=False): emitter = Emitter(event_queue, ObservedWatch(path, recursive=True)) emitter.start() yield - try: - emitter.stop() - except OSError: - # watch was already stopped, e.g., because root was deleted - pass + emitter.stop() emitter.join(5)
Linux Bad File Descriptor when watched directory is removed Hello I tested watchdog on Linux (Ubuntu) and Windows (Win10). ### **Versions** **Linux::** ``` Python 3.6.9 watchdog-1.0.2 ``` **Windows** ``` Python 3.7.9 watchdog-1.0.2 ``` ### **Test scenario is as below:** 1. Create directory X 2. Watch directory X and handle events using FileSystemEventHandler 3. Remove directory X 4. Unschedule observer (using observer.unschedule_all or observer.unschedule) 5. Stop observer and join ### **Issue:** **Linux:** 1. When observer.unschedule_all() is called, OSError(9, "Bad File Descriptor") is raised. 2. When observer.unschedule(path) is called, KeyError is raised **Windows**: Everything works fine. ### **Example source code** ``` import os import time from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler # Linux path WATCHED_DIR = "/home/eplaszcz/PycharmProjects/project_dir" # Windows path # WATCHED_DIR = "C:\\Users\\eplaszcz\\Documents\\watchdog_test\\project_dir" def run_watchdog(): observer = Observer() watched_dir = WATCHED_DIR event_handler = FileSystemEventHandler() if not os.path.isdir(watched_dir): os.mkdir(WATCHED_DIR) observer.start() print("Schedule target watcher for %s" % watched_dir) observer.schedule(event_handler, path=watched_dir, recursive=True) while os.path.isdir(watched_dir): time.sleep(2) os.rmdir(WATCHED_DIR) print("Active watchers: {}".format(observer._watches)) print("Directory removed, unschedule") observer.unschedule_all() print("Observer unscheduled") print("Active watchers: {}".format(observer._watches)) print("Directory removed, stop") observer.stop() print("Directory removed, join") observer.join() if __name__ == '__main__': run_watchdog() ``` ### **Linux output when using observer.unschedule_all** ``` python3 main.py Schedule target watcher for C:\Users\eplaszcz\Documents\watchdog_test\project_dir Active watchers: {<ObservedWatch: path=C:\Users\eplaszcz\Documents\watchdog_test\project_dir, is_recursive=True>} Directory removed, unschedule Traceback (most recent call last): File "main.py", line 46, in <module> run_watchdog() File "main.py", line 33, in run_watchdog observer.unschedule_all() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/api.py", line 357, in unschedule_all self._clear_emitters() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/api.py", line 231, in _clear_emitters emitter.stop() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/utils/__init__.py", line 81, in stop self.on_thread_stop() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/inotify.py", line 121, in on_thread_stop self._inotify.close() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/inotify_buffer.py", line 50, in close self.stop() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/utils/__init__.py", line 81, in stop self.on_thread_stop() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/inotify_buffer.py", line 46, in on_thread_stop self._inotify.close() File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/inotify_c.py", line 277, in close os.close(self._inotify_fd) OSError: 
[Errno 9] Bad file descriptor ``` ### **Linux output when using observer.unschedule(watched_dir)** ``` python3 main.py Schedule target watcher for /home/eplaszcz/PycharmProjects/project_dir Active watchers: {<ObservedWatch: path=/home/eplaszcz/PycharmProjects/project_dir, is_recursive=True>} Directory removed, unschedule Traceback (most recent call last): File "main.py", line 46, in <module> run_watchdog() File "main.py", line 33, in run_watchdog observer.unschedule(WATCHED_DIR) File "/home/eplaszcz/PycharmProjects/watchdogFileDescriptor/venv/lib/python3.6/site-packages/watchdog/observers/api.py", line 347, in unschedule emitter = self._emitter_for_watch[watch] KeyError: '/home/eplaszcz/PycharmProjects/project_dir' ``` ### **Windows output** ``` py main.py Schedule target watcher for C:\Users\eplaszcz\Documents\watchdog_test\project_dir Active watchers: {<ObservedWatch: path=C:\Users\eplaszcz\Documents\watchdog_test\project_dir, is_recursive=True>} Directory removed, unschedule Observer unscheduled Active watchers: set() Directory removed, stop Directory removed, join ```
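A minimal sketch of the defensive pattern the patch above applies: make teardown idempotent and tolerate a descriptor the kernel already invalidated because the watched root was deleted. The class shape is illustrative; only the `os.close` guard and the reset-to-`None` step mirror the diff.

```python
import os


class InotifyHandleSketch:
    def __init__(self, fd):
        self._fd = fd

    def close(self):
        # Safe to call repeatedly: take and clear the descriptor first,
        # then swallow the OSError raised when the watch died with its root.
        fd, self._fd = self._fd, None
        if fd is None:
            return
        try:
            os.close(fd)
        except OSError:
            pass  # descriptor may already be invalid (deleted watch root)
```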
0.0
72a254502960cd347555e593ef0694f31bc29d7e
[ "tests/test_emitter.py::test_delete_self", "tests/test_inotify_c.py::test_watch_file" ]
[ "tests/test_emitter.py::test_create", "tests/test_emitter.py::test_close", "tests/test_emitter.py::test_create_wrong_encoding", "tests/test_emitter.py::test_delete", "tests/test_emitter.py::test_modify", "tests/test_emitter.py::test_move", "tests/test_emitter.py::test_case_change", "tests/test_emitter.py::test_move_to", "tests/test_emitter.py::test_move_to_full", "tests/test_emitter.py::test_move_from", "tests/test_emitter.py::test_move_from_full", "tests/test_emitter.py::test_separate_consecutive_moves", "tests/test_emitter.py::test_fast_subdirectory_creation_deletion", "tests/test_emitter.py::test_passing_unicode_should_give_unicode", "tests/test_emitter.py::test_passing_bytes_should_give_bytes", "tests/test_emitter.py::test_recursive_on", "tests/test_emitter.py::test_recursive_off", "tests/test_emitter.py::test_renaming_top_level_directory", "tests/test_emitter.py::test_move_nested_subdirectories", "tests/test_emitter.py::test_file_lifecyle", "tests/test_inotify_c.py::test_late_double_deletion", "tests/test_inotify_c.py::test_raise_error", "tests/test_inotify_c.py::test_non_ascii_path" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-02-06 17:02:07+00:00
apache-2.0
2,655
gouline__dbt-metabase-132
diff --git a/dbtmetabase/metabase.py b/dbtmetabase/metabase.py index 9c7f3b8..0dade1f 100644 --- a/dbtmetabase/metabase.py +++ b/dbtmetabase/metabase.py @@ -19,7 +19,7 @@ from typing import ( from dbtmetabase.models import exceptions from .logger.logging import logger -from .models.metabase import MetabaseModel, MetabaseColumn, ModelType +from .models.metabase import MetabaseModel, MetabaseColumn, ModelType, NullValue class MetabaseClient: @@ -384,7 +384,7 @@ class MetabaseClient: column_visibility = column.visibility_type or "normal" # Preserve this relationship by default - if api_field["fk_target_field_id"] is not None and fk_target_field_id is None: + if api_field["fk_target_field_id"] and not fk_target_field_id: fk_target_field_id = api_field["fk_target_field_id"] body_field = {} @@ -394,14 +394,15 @@ class MetabaseClient: body_field["description"] = column_description if api_field.get("visibility_type") != column_visibility: body_field["visibility_type"] = column_visibility - if ( - column.semantic_type - and api_field.get(semantic_type_key) != column.semantic_type - ): - body_field[semantic_type_key] = column.semantic_type if api_field.get("fk_target_field_id") != fk_target_field_id: body_field["fk_target_field_id"] = fk_target_field_id + # Allow explicit null type to override detected one + if api_field.get(semantic_type_key) != column.semantic_type and ( + column.semantic_type or column.semantic_type is NullValue + ): + body_field[semantic_type_key] = column.semantic_type or None + if body_field: # Update with new values self.api( diff --git a/dbtmetabase/models/metabase.py b/dbtmetabase/models/metabase.py index a93a1b8..d05d424 100644 --- a/dbtmetabase/models/metabase.py +++ b/dbtmetabase/models/metabase.py @@ -60,3 +60,13 @@ class MetabaseModel: return None columns: Sequence[MetabaseColumn] = field(default_factory=list) + + +class _NullValue(str): + """Explicitly null field value.""" + + def __eq__(self, other: object) -> bool: + return other is None + + +NullValue = _NullValue() diff --git a/dbtmetabase/parsers/dbt.py b/dbtmetabase/parsers/dbt.py index c86c064..42cd902 100644 --- a/dbtmetabase/parsers/dbt.py +++ b/dbtmetabase/parsers/dbt.py @@ -1,8 +1,8 @@ from abc import ABCMeta, abstractmethod from os.path import expanduser -from typing import Optional, MutableMapping, Iterable, Tuple, List +from typing import Optional, Mapping, MutableMapping, Iterable, Tuple, List -from ..models.metabase import MetabaseModel +from ..models.metabase import METABASE_META_FIELDS, MetabaseModel, NullValue class DbtReader(metaclass=ABCMeta): @@ -44,3 +44,22 @@ class DbtReader(metaclass=ABCMeta): docs_url: Optional[str] = None, ) -> Tuple[List[MetabaseModel], MutableMapping]: pass + + @staticmethod + def read_meta_fields(obj: Mapping) -> Mapping: + """Reads meta fields from a schem object. + + Args: + obj (Mapping): Schema object. + + Returns: + Mapping: Field values. 
+ """ + + vals = {} + meta = obj.get("meta", []) + for field in METABASE_META_FIELDS: + if f"metabase.{field}" in meta: + value = meta[f"metabase.{field}"] + vals[field] = value if value is not None else NullValue + return vals diff --git a/dbtmetabase/parsers/dbt_folder.py b/dbtmetabase/parsers/dbt_folder.py index 9fdde6b..a58acea 100644 --- a/dbtmetabase/parsers/dbt_folder.py +++ b/dbtmetabase/parsers/dbt_folder.py @@ -3,8 +3,7 @@ import yaml from pathlib import Path from typing import List, Iterable, Mapping, MutableMapping, Optional, Tuple -from ..models.metabase import METABASE_META_FIELDS, ModelType -from ..models.metabase import MetabaseModel, MetabaseColumn +from ..models.metabase import MetabaseModel, MetabaseColumn, ModelType from ..logger.logging import logger from .dbt import DbtReader @@ -227,11 +226,8 @@ class DbtFolderReader(DbtReader): metabase_column.fk_target_field, ) - if "meta" in column: - meta = column.get("meta", []) - for field in METABASE_META_FIELDS: - if f"metabase.{field}" in meta: - setattr(metabase_column, field, meta[f"metabase.{field}"]) + for field, value in DbtReader.read_meta_fields(column).items(): + setattr(metabase_column, field, value) return metabase_column @@ -246,7 +242,6 @@ class DbtFolderReader(DbtReader): str -- Name of the reference. """ - # matches = re.findall(r"ref\(['\"]([\w\_\-\ ]+)['\"]\)", text) # We are catching the rightmost argument of either source or ref which is ultimately the table name matches = re.findall(r"['\"]([\w\_\-\ ]+)['\"][ ]*\)$", text.strip()) if matches: diff --git a/dbtmetabase/parsers/dbt_manifest.py b/dbtmetabase/parsers/dbt_manifest.py index f043c74..7136dc6 100644 --- a/dbtmetabase/parsers/dbt_manifest.py +++ b/dbtmetabase/parsers/dbt_manifest.py @@ -1,8 +1,7 @@ import json from typing import List, Tuple, Mapping, Optional, MutableMapping -from ..models.metabase import METABASE_META_FIELDS, ModelType -from ..models.metabase import MetabaseModel, MetabaseColumn +from ..models.metabase import MetabaseModel, MetabaseColumn, ModelType from ..logger.logging import logger from .dbt import DbtReader @@ -297,7 +296,7 @@ class DbtManifestReader(DbtReader): unique_id=unique_id, source=source, dbt_name=dbt_name, - **DbtManifestReader._read_meta_fields(model), + **DbtReader.read_meta_fields(model), ) @staticmethod @@ -320,7 +319,7 @@ class DbtManifestReader(DbtReader): metabase_column = MetabaseColumn( name=column_name, description=column_description, - **DbtManifestReader._read_meta_fields(column), + **DbtReader.read_meta_fields(column), ) if relationship: @@ -335,21 +334,3 @@ class DbtManifestReader(DbtReader): ) return metabase_column - - @staticmethod - def _read_meta_fields(obj: Mapping) -> Mapping: - """Reads meta fields from a schem object. - - Args: - obj (Mapping): Schema object. - - Returns: - Mapping: Field values. - """ - - meta = obj.get("meta", []) - return { - k: meta[f"metabase.{k}"] - for k in METABASE_META_FIELDS - if f"metabase.{k}" in meta - }
gouline/dbt-metabase
f4425b95048a1bb04926602b30fbf1263c8c2fab
diff --git a/tests/fixtures/sample_project/models/schema.yml b/tests/fixtures/sample_project/models/schema.yml index 84a1398..95ebf6d 100644 --- a/tests/fixtures/sample_project/models/schema.yml +++ b/tests/fixtures/sample_project/models/schema.yml @@ -29,6 +29,7 @@ models: description: Count of the number of orders a customer has placed meta: metabase.display_name: order_count + metabase.semantic_type: null - name: total_order_amount description: Total value (AUD) of a customer's orders diff --git a/tests/test_dbt_parsers.py b/tests/test_dbt_parsers.py index 32c92a8..79362bf 100644 --- a/tests/test_dbt_parsers.py +++ b/tests/test_dbt_parsers.py @@ -2,7 +2,7 @@ import logging import unittest from dbtmetabase.models.interface import DbtInterface -from dbtmetabase.models.metabase import ModelType +from dbtmetabase.models.metabase import ModelType, NullValue from dbtmetabase.parsers.dbt_folder import ( MetabaseModel, MetabaseColumn, @@ -440,7 +440,7 @@ class TestDbtManifestReader(unittest.TestCase): name="NUMBER_OF_ORDERS", description="Count of the number of orders a customer has placed", meta_fields={}, - semantic_type=None, + semantic_type=NullValue, visibility_type=None, fk_target_table=None, fk_target_field=None,
Clearing an automatic metabase.semantic_type In 646a914009bdfcfbc7c4c36a377a79d7435c73da, you fixed it so that the existing semantic type is preserved if we haven't specified a metabase.semantic_type. This is good, but now, how do we explicitly clear a semantic type set by the automagic rules that Metabase applies on schema sync? Setting `metabase.semantic_type: null` seems to have no effect. Can we look for the presence of this key, or maybe use a special value?
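The patch in this record answers that question with a sentinel; here is a self-contained sketch of the explicit-null pattern it introduces. `_NullValue` follows the diff, while `read_meta` and the `__hash__` line are illustrative additions.

```python
class _NullValue(str):
    """Explicitly null field value: 'set to null' as opposed to 'unset'."""

    def __eq__(self, other):
        return other is None

    __hash__ = str.__hash__  # restore hashability after overriding __eq__


NullValue = _NullValue()


def read_meta(meta, field):
    # Key present with a YAML null -> explicit clear; key absent -> no-op.
    if field in meta:
        value = meta[field]
        return value if value is not None else NullValue
    return None


# The sentinel compares equal to None but is distinguishable via `is NullValue`.
assert read_meta({"metabase.semantic_type": None}, "metabase.semantic_type") == None  # noqa: E711
assert read_meta({}, "metabase.semantic_type") is None
```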
0.0
f4425b95048a1bb04926602b30fbf1263c8c2fab
[ "tests/test_dbt_parsers.py::TestDbtFolderReader::test_read_models", "tests/test_dbt_parsers.py::TestDbtManifestReader::test_read_models" ]
[]
{ "failed_lite_validators": [ "has_git_commit_hash", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-08-09 11:20:32+00:00
mit
2,656
gouline__dbt-metabase-158
diff --git a/dbtmetabase/parsers/dbt_manifest.py b/dbtmetabase/parsers/dbt_manifest.py index 0a1ca00..3ea7dd6 100644 --- a/dbtmetabase/parsers/dbt_manifest.py +++ b/dbtmetabase/parsers/dbt_manifest.py @@ -202,6 +202,16 @@ class DbtManifestReader(DbtReader): ) continue + # Skip the incoming relationship tests, in which the fk_target_table is the model currently being read. + # Otherwise, the primary key of the current model would be (incorrectly) determined to be a foreign key. + is_incoming_relationship_test = depends_on_nodes[1] != unique_id + if len(depends_on_nodes) == 2 and is_incoming_relationship_test: + logger().debug( + "Skip this incoming relationship test, concerning nodes %s.", + depends_on_nodes, + ) + continue + # Remove the current model from the list. Note, remove() only removes the first occurrence. This ensures # the logic also works for self referencing models. if len(depends_on_nodes) == 2 and unique_id in depends_on_nodes:
gouline/dbt-metabase
1b8af327f46bf34c3f79d6c1890e6ce0d226c46c
diff --git a/tests/test_dbt_parsers.py b/tests/test_dbt_parsers.py index 79362bf..2d01a04 100644 --- a/tests/test_dbt_parsers.py +++ b/tests/test_dbt_parsers.py @@ -395,10 +395,10 @@ class TestDbtManifestReader(unittest.TestCase): name="CUSTOMER_ID", description="This is a unique identifier for a customer", meta_fields={}, - semantic_type="type/FK", + semantic_type=None, # This is a PK field, should not be detected as FK visibility_type=None, - fk_target_table="PUBLIC.ORDERS", - fk_target_field="CUSTOMER_ID", + fk_target_table=None, + fk_target_field=None, ), MetabaseColumn( name="FIRST_NAME",
Relationship tests read incorrectly to determine fk_target_table The tool appears to read all relationship tests, both outgoing (the fk_target_table is a model **not** currently being read) and incoming (the fk_target_table **is** the model currently being read). If table A is the table currently being read by the tool, and it has a relationship test for a field referring to table B, then that field in table A should have a foreign key (type/FK) pointing towards table B. Currently, however, it also looks the other way around to find foreign key fields in table B, pointing towards table A, but uses these to mark them as foreign key fields in table A (if the field names match between both tables). To fix this, our team implemented a patch involving this workaround: ``` # Skip the incoming relationship tests, in which the fk_target_table is the model currently being read. # Otherwise, the primary key of the current model would be (incorrectly) determined to be a foreign key. is_incoming_relationship_test = depends_on_nodes[1] != unique_id if len(depends_on_nodes) == 2 and is_incoming_relationship_test: logger().debug( "Skip this incoming relationship test, concerning nodes %s.", depends_on_nodes ) continue ``` We added this patch to ~line 200 in env/lib/python3.9/site-packages/dbtmetabase/parsers/dbt_manifest.py (before the note about removing a model).
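A minimal, testable restatement of the directionality check above; the `depends_on_nodes` ordering (the model declaring the test comes second) is assumed from the manifest layout the patch relies on.

```python
def is_incoming_relationship_test(depends_on_nodes, current_unique_id):
    """True when a two-node relationship test is declared on some *other*
    model and merely points at the model currently being read."""
    return len(depends_on_nodes) == 2 and depends_on_nodes[1] != current_unique_id


# Outgoing: test declared on A (being read) referencing B -> keep, A gets the FK.
assert not is_incoming_relationship_test(["model.b", "model.a"], "model.a")
# Incoming: test declared on B referencing A (being read) -> skip.
assert is_incoming_relationship_test(["model.a", "model.b"], "model.a")
```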
0.0
1b8af327f46bf34c3f79d6c1890e6ce0d226c46c
[ "tests/test_dbt_parsers.py::TestDbtManifestReader::test_read_models" ]
[ "tests/test_dbt_parsers.py::TestDbtFolderReader::test_read_models" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-02-16 14:33:35+00:00
mit
2,657
gouline__dbt-metabase-224
diff --git a/dbtmetabase/_exposures.py b/dbtmetabase/_exposures.py index c42c05a..65d33b8 100644 --- a/dbtmetabase/_exposures.py +++ b/dbtmetabase/_exposures.py @@ -196,7 +196,7 @@ class ExposuresMixin(metaclass=ABCMeta): depends += self.__extract_card_exposures( ctx, card=self.metabase.get_card(uid=query_source.split("__")[-1]), - )["models"] + )["depends"] elif query_source in ctx.table_names: # Normal question source_table = ctx.table_names.get(query_source) @@ -212,7 +212,7 @@ class ExposuresMixin(metaclass=ABCMeta): depends += self.__extract_card_exposures( ctx, card=self.metabase.get_card(uid=join_source.split("__")[-1]), - )["models"] + )["depends"] continue # Joined model parsed
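A minimal sketch of the traversal this one-word fix repairs: nested questions appear as `card__<id>` sources, and the recursion must merge the `depends` key of each nested result (the bug read the `models` key instead, so nested dependencies never reached `depends`). `get_card` and the return shape are simplified assumptions based on the diff, which also handles join sources.

```python
def extract_depends(get_card, card):
    """Collect table dependencies of a Metabase card, following nesting."""
    depends = []
    query = card.get("dataset_query", {}).get("query", {})
    source = query.get("source-table")
    if isinstance(source, str) and source.startswith("card__"):
        # Nested question: recurse into the referenced card and merge its
        # "depends" key -- reading "models" here was the bug fixed above.
        nested = get_card(source.split("__")[-1])
        depends += extract_depends(get_card, nested)["depends"]
    elif source is not None:
        depends.append(source)  # direct table id
    return {"depends": depends}
```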
gouline/dbt-metabase
2f8c88dedc76fc1001aeb95bfdc1914c45ca5999
diff --git a/tests/fixtures/api/card.json b/tests/fixtures/api/card.json index d98330e..d7f4645 100644 --- a/tests/fixtures/api/card.json +++ b/tests/fixtures/api/card.json @@ -2493,5 +2493,971 @@ "favorite": false, "created_at": "2021-07-21T08:01:37.434243Z", "public_uuid": null + }, + { + "description": null, + "archived": false, + "collection_position": null, + "table_id": 10, + "result_metadata": [ + { + "description": "This is a unique identifier for an order", + "semantic_type": null, + "coercion_strategy": null, + "name": "order_id", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 47, + null + ], + "effective_type": "type/Integer", + "id": 47, + "visibility_type": "normal", + "display_name": "Order ID", + "fingerprint": { + "global": { + "distinct-count": 99, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 1.0, + "q1": 25.25, + "q3": 74.75, + "max": 99.0, + "sd": 28.719704534890823, + "avg": 50.0 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Foreign key to the customers table", + "semantic_type": "type/FK", + "coercion_strategy": null, + "name": "customer_id", + "settings": null, + "fk_target_field_id": 87, + "field_ref": [ + "field", + 84, + null + ], + "effective_type": "type/Integer", + "id": 84, + "visibility_type": "normal", + "display_name": "Customer ID", + "fingerprint": { + "global": { + "distinct-count": 62, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 1.0, + "q1": 25.875, + "q3": 69.625, + "max": 99.0, + "sd": 27.781341350472964, + "avg": 48.25252525252525 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Date (UTC) that the order was placed", + "semantic_type": null, + "coercion_strategy": null, + "unit": "default", + "name": "order_date", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 82, + { + "temporal-unit": "default" + } + ], + "effective_type": "type/Date", + "id": 82, + "visibility_type": "normal", + "display_name": "Order Date", + "fingerprint": { + "global": { + "distinct-count": 69, + "nil%": 0.0 + }, + "type": { + "type/DateTime": { + "earliest": "2018-01-01", + "latest": "2018-04-09" + } + } + }, + "base_type": "type/Date" + }, + { + "description": null, + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "status", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 78, + null + ], + "effective_type": "type/Text", + "id": 78, + "visibility_type": "normal", + "display_name": "Status", + "fingerprint": { + "global": { + "distinct-count": 5, + "nil%": 0.0 + }, + "type": { + "type/Text": { + "percent-json": 0.0, + "percent-url": 0.0, + "percent-email": 0.0, + "percent-state": 0.0, + "average-length": 8.404040404040405 + } + } + }, + "base_type": "type/Text" + }, + { + "description": "Amount of the order (AUD) paid for by credit card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "credit_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 76, + null + ], + "effective_type": "type/BigInteger", + "id": 76, + "visibility_type": "normal", + "display_name": "Credit Card Amount", + "fingerprint": { + "global": { + "distinct-count": 25, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 18.797054997187544, + "max": 30.0, + "sd": 10.959088854927673, + "avg": 8.797979797979798 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by 
coupon", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "coupon_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 83, + null + ], + "effective_type": "type/BigInteger", + "id": 83, + "visibility_type": "normal", + "display_name": "Coupon Amount", + "fingerprint": { + "global": { + "distinct-count": 12, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 0.4747603274810728, + "max": 26.0, + "sd": 5.955012405351229, + "avg": 1.8686868686868687 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by bank transfer", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "bank_transfer_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 81, + null + ], + "effective_type": "type/BigInteger", + "id": 81, + "visibility_type": "normal", + "display_name": "Bank Transfer Amount", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 4.75, + "max": 26.0, + "sd": 7.420825132023675, + "avg": 4.151515151515151 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by gift card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "gift_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 77, + null + ], + "effective_type": "type/BigInteger", + "id": 77, + "visibility_type": "normal", + "display_name": "Gift Card Amount", + "fingerprint": { + "global": { + "distinct-count": 11, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 1.3692088763283736, + "max": 30.0, + "sd": 6.392362351566517, + "avg": 2.0707070707070705 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Total amount (AUD) of the order", + "semantic_type": null, + "coercion_strategy": null, + "name": "amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 80, + null + ], + "effective_type": "type/BigInteger", + "id": 80, + "visibility_type": "normal", + "display_name": "Amount", + "fingerprint": { + "global": { + "distinct-count": 32, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 8.202945002812456, + "q3": 24.26138721247417, + "max": 58.0, + "sd": 10.736062525374601, + "avg": 16.88888888888889 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": null, + "semantic_type": null, + "coercion_strategy": null, + "name": "customer_id_2", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 93, + { + "join-alias": "Stg Customers - Customer" + } + ], + "effective_type": "type/Integer", + "id": 93, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Customer ID", + "fingerprint": { + "global": { + "distinct-count": 100, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 1.0, + "q1": 25.5, + "q3": 75.5, + "max": 100.0, + "sd": 29.008358252146028, + "avg": 50.5 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "first_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 94, + { + "join-alias": "Stg Customers - Customer" + } + ], + "effective_type": "type/Text", + "id": 94, + "visibility_type": "normal", + "display_name": "Stg 
Customers - Customer → First Name", + "fingerprint": { + "global": { + "distinct-count": 47, + "nil%": 0.0 + }, + "type": { + "type/Text": { + "percent-json": 0.0, + "percent-url": 0.0, + "percent-email": 0.0, + "percent-state": 0.02, + "average-length": 5.86 + } + } + }, + "base_type": "type/Text" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "last_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 92, + { + "join-alias": "Stg Customers - Customer" + } + ], + "effective_type": "type/Text", + "id": 92, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Last Name", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0.0 + }, + "type": { + "type/Text": { + "percent-json": 0.0, + "percent-url": 0.0, + "percent-email": 0.0, + "percent-state": 0.0, + "average-length": 2.0 + } + } + }, + "base_type": "type/Text" + } + ], + "creator": { + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_login": "2024-01-26T23:35:13.524402", + "is_qbnewb": false, + "is_superuser": true, + "id": 1, + "last_name": null, + "date_joined": "2024-01-26T23:29:30.885378", + "common_name": "dbtmetabase" + }, + "database_id": 2, + "enable_embedding": false, + "collection_id": null, + "query_type": "query", + "name": "Orders Customers", + "creator_id": 1, + "updated_at": "2024-01-26T23:36:46.84084Z", + "made_public_by_id": null, + "embedding_params": null, + "cache_ttl": null, + "dataset_query": { + "database": 2, + "type": "query", + "query": { + "source-table": 10, + "joins": [ + { + "fields": "all", + "strategy": "left-join", + "alias": "Stg Customers - Customer", + "condition": [ + "=", + [ + "field", + 84, + { + "base-type": "type/Integer" + } + ], + [ + "field", + 93, + { + "base-type": "type/Integer", + "join-alias": "Stg Customers - Customer" + } + ] + ], + "source-table": 12 + } + ] + } + }, + "id": 23, + "parameter_mappings": [], + "display": "table", + "entity_id": "aR8nxcwbbZSX3_DSLdOBm", + "collection_preview": true, + "last-edit-info": { + "id": 1, + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_name": null, + "timestamp": "2024-01-26T23:34:25.467752Z" + }, + "visualization_settings": { + "table.pivot_column": "status", + "table.cell_column": "order_id" + }, + "collection": null, + "metabase_version": "v0.48.0 (f985e19)", + "parameters": [], + "dataset": false, + "created_at": "2024-01-26T23:34:25.436685", + "public_uuid": null + }, + { + "description": null, + "archived": false, + "collection_position": null, + "table_id": 10, + "result_metadata": [ + { + "description": "This is a unique identifier for an order", + "semantic_type": null, + "coercion_strategy": null, + "name": "order_id", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 47, + null + ], + "effective_type": "type/Integer", + "id": 47, + "visibility_type": "normal", + "display_name": "Order ID", + "fingerprint": { + "global": { + "distinct-count": 99, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 1, + "q1": 25.25, + "q3": 74.75, + "max": 99, + "sd": 28.719704534890823, + "avg": 50 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Foreign key to the customers table", + "semantic_type": "type/FK", + "coercion_strategy": null, + "name": "customer_id", + "settings": null, + "fk_target_field_id": 87, + "field_ref": [ + "field", + 84, + null + ], + "effective_type": "type/Integer", + "id": 84, + "visibility_type": 
"normal", + "display_name": "Customer ID", + "fingerprint": { + "global": { + "distinct-count": 62, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 1, + "q1": 25.875, + "q3": 69.625, + "max": 99, + "sd": 27.781341350472964, + "avg": 48.25252525252525 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Date (UTC) that the order was placed", + "semantic_type": null, + "coercion_strategy": null, + "unit": "default", + "name": "order_date", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 82, + { + "temporal-unit": "default" + } + ], + "effective_type": "type/Date", + "id": 82, + "visibility_type": "normal", + "display_name": "Order Date", + "fingerprint": { + "global": { + "distinct-count": 69, + "nil%": 0 + }, + "type": { + "type/DateTime": { + "earliest": "2018-01-01", + "latest": "2018-04-09" + } + } + }, + "base_type": "type/Date" + }, + { + "description": null, + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "status", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 78, + null + ], + "effective_type": "type/Text", + "id": 78, + "visibility_type": "normal", + "display_name": "Status", + "fingerprint": { + "global": { + "distinct-count": 5, + "nil%": 0 + }, + "type": { + "type/Text": { + "percent-json": 0, + "percent-url": 0, + "percent-email": 0, + "percent-state": 0, + "average-length": 8.404040404040405 + } + } + }, + "base_type": "type/Text" + }, + { + "description": "Amount of the order (AUD) paid for by credit card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "credit_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 76, + null + ], + "effective_type": "type/BigInteger", + "id": 76, + "visibility_type": "normal", + "display_name": "Credit Card Amount", + "fingerprint": { + "global": { + "distinct-count": 25, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 18.797054997187544, + "max": 30, + "sd": 10.959088854927673, + "avg": 8.797979797979798 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by coupon", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "coupon_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 83, + null + ], + "effective_type": "type/BigInteger", + "id": 83, + "visibility_type": "normal", + "display_name": "Coupon Amount", + "fingerprint": { + "global": { + "distinct-count": 12, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 0.4747603274810728, + "max": 26, + "sd": 5.955012405351229, + "avg": 1.8686868686868687 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by bank transfer", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "bank_transfer_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 81, + null + ], + "effective_type": "type/BigInteger", + "id": 81, + "visibility_type": "normal", + "display_name": "Bank Transfer Amount", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 4.75, + "max": 26, + "sd": 7.420825132023675, + "avg": 4.151515151515151 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by gift card", + "semantic_type": 
"type/Category", + "coercion_strategy": null, + "name": "gift_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 77, + null + ], + "effective_type": "type/BigInteger", + "id": 77, + "visibility_type": "normal", + "display_name": "Gift Card Amount", + "fingerprint": { + "global": { + "distinct-count": 11, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 1.3692088763283736, + "max": 30, + "sd": 6.392362351566517, + "avg": 2.0707070707070705 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Total amount (AUD) of the order", + "semantic_type": null, + "coercion_strategy": null, + "name": "amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 80, + null + ], + "effective_type": "type/BigInteger", + "id": 80, + "visibility_type": "normal", + "display_name": "Amount", + "fingerprint": { + "global": { + "distinct-count": 32, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 8.202945002812456, + "q3": 24.26138721247417, + "max": 58, + "sd": 10.736062525374601, + "avg": 16.88888888888889 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": null, + "semantic_type": null, + "coercion_strategy": null, + "name": "customer_id_2", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 93, + null + ], + "effective_type": "type/Integer", + "id": 93, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Customer ID", + "fingerprint": { + "global": { + "distinct-count": 100, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 1, + "q1": 25.5, + "q3": 75.5, + "max": 100, + "sd": 29.008358252146028, + "avg": 50.5 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "first_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 94, + null + ], + "effective_type": "type/Text", + "id": 94, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → First Name", + "fingerprint": { + "global": { + "distinct-count": 47, + "nil%": 0 + }, + "type": { + "type/Text": { + "percent-json": 0, + "percent-url": 0, + "percent-email": 0, + "percent-state": 0.02, + "average-length": 5.86 + } + } + }, + "base_type": "type/Text" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "last_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 92, + null + ], + "effective_type": "type/Text", + "id": 92, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Last Name", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0 + }, + "type": { + "type/Text": { + "percent-json": 0, + "percent-url": 0, + "percent-email": 0, + "percent-state": 0, + "average-length": 2 + } + } + }, + "base_type": "type/Text" + } + ], + "creator": { + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_login": "2024-01-26T23:35:13.524402", + "is_qbnewb": false, + "is_superuser": true, + "id": 1, + "last_name": null, + "date_joined": "2024-01-26T23:29:30.885378", + "common_name": "dbtmetabase" + }, + "database_id": 2, + "enable_embedding": false, + "collection_id": null, + "query_type": "query", + "name": "Orders Customers Filtered", + "creator_id": 1, + "updated_at": "2024-01-26T23:35:08.864176Z", + "made_public_by_id": null, + "embedding_params": null, + "cache_ttl": 
null, + "dataset_query": { + "database": 2, + "type": "query", + "query": { + "source-table": "card__3", + "filter": [ + "not-null", + [ + "field", + 47, + { + "base-type": "type/Integer" + } + ] + ] + } + }, + "id": 24, + "parameter_mappings": [], + "display": "table", + "entity_id": "OLbf7Q2yHWOn6CGvptTpi", + "collection_preview": true, + "last-edit-info": { + "id": 1, + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_name": null, + "timestamp": "2024-01-26T23:35:08.900746Z" + }, + "visualization_settings": { + "table.pivot_column": "status", + "table.cell_column": "order_id" + }, + "collection": null, + "metabase_version": "v0.48.0 (f985e19)", + "parameters": [], + "dataset": false, + "created_at": "2024-01-26T23:35:08.864176", + "public_uuid": null } -] \ No newline at end of file +] diff --git a/tests/fixtures/api/card/23.json b/tests/fixtures/api/card/23.json new file mode 100644 index 0000000..d40778e --- /dev/null +++ b/tests/fixtures/api/card/23.json @@ -0,0 +1,507 @@ +{ + "description": null, + "archived": false, + "collection_position": null, + "table_id": 10, + "result_metadata": [ + { + "description": "This is a unique identifier for an order", + "semantic_type": null, + "coercion_strategy": null, + "name": "order_id", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 47, + null + ], + "effective_type": "type/Integer", + "id": 47, + "visibility_type": "normal", + "display_name": "Order ID", + "fingerprint": { + "global": { + "distinct-count": 99, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 1.0, + "q1": 25.25, + "q3": 74.75, + "max": 99.0, + "sd": 28.719704534890823, + "avg": 50.0 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Foreign key to the customers table", + "semantic_type": "type/FK", + "coercion_strategy": null, + "name": "customer_id", + "settings": null, + "fk_target_field_id": 87, + "field_ref": [ + "field", + 84, + null + ], + "effective_type": "type/Integer", + "id": 84, + "visibility_type": "normal", + "display_name": "Customer ID", + "fingerprint": { + "global": { + "distinct-count": 62, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 1.0, + "q1": 25.875, + "q3": 69.625, + "max": 99.0, + "sd": 27.781341350472964, + "avg": 48.25252525252525 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Date (UTC) that the order was placed", + "semantic_type": null, + "coercion_strategy": null, + "unit": "default", + "name": "order_date", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 82, + { + "temporal-unit": "default" + } + ], + "effective_type": "type/Date", + "id": 82, + "visibility_type": "normal", + "display_name": "Order Date", + "fingerprint": { + "global": { + "distinct-count": 69, + "nil%": 0.0 + }, + "type": { + "type/DateTime": { + "earliest": "2018-01-01", + "latest": "2018-04-09" + } + } + }, + "base_type": "type/Date" + }, + { + "description": null, + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "status", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 78, + null + ], + "effective_type": "type/Text", + "id": 78, + "visibility_type": "normal", + "display_name": "Status", + "fingerprint": { + "global": { + "distinct-count": 5, + "nil%": 0.0 + }, + "type": { + "type/Text": { + "percent-json": 0.0, + "percent-url": 0.0, + "percent-email": 0.0, + "percent-state": 0.0, + "average-length": 8.404040404040405 + } + } + }, + "base_type": "type/Text" + }, + { + 
"description": "Amount of the order (AUD) paid for by credit card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "credit_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 76, + null + ], + "effective_type": "type/BigInteger", + "id": 76, + "visibility_type": "normal", + "display_name": "Credit Card Amount", + "fingerprint": { + "global": { + "distinct-count": 25, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 18.797054997187544, + "max": 30.0, + "sd": 10.959088854927673, + "avg": 8.797979797979798 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by coupon", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "coupon_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 83, + null + ], + "effective_type": "type/BigInteger", + "id": 83, + "visibility_type": "normal", + "display_name": "Coupon Amount", + "fingerprint": { + "global": { + "distinct-count": 12, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 0.4747603274810728, + "max": 26.0, + "sd": 5.955012405351229, + "avg": 1.8686868686868687 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by bank transfer", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "bank_transfer_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 81, + null + ], + "effective_type": "type/BigInteger", + "id": 81, + "visibility_type": "normal", + "display_name": "Bank Transfer Amount", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 4.75, + "max": 26.0, + "sd": 7.420825132023675, + "avg": 4.151515151515151 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by gift card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "gift_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 77, + null + ], + "effective_type": "type/BigInteger", + "id": 77, + "visibility_type": "normal", + "display_name": "Gift Card Amount", + "fingerprint": { + "global": { + "distinct-count": 11, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 0.0, + "q3": 1.3692088763283736, + "max": 30.0, + "sd": 6.392362351566517, + "avg": 2.0707070707070705 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Total amount (AUD) of the order", + "semantic_type": null, + "coercion_strategy": null, + "name": "amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 80, + null + ], + "effective_type": "type/BigInteger", + "id": 80, + "visibility_type": "normal", + "display_name": "Amount", + "fingerprint": { + "global": { + "distinct-count": 32, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 0.0, + "q1": 8.202945002812456, + "q3": 24.26138721247417, + "max": 58.0, + "sd": 10.736062525374601, + "avg": 16.88888888888889 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": null, + "semantic_type": null, + "coercion_strategy": null, + "name": "customer_id_2", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 93, + { + "join-alias": "Stg Customers - Customer" + } + ], + "effective_type": 
"type/Integer", + "id": 93, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Customer ID", + "fingerprint": { + "global": { + "distinct-count": 100, + "nil%": 0.0 + }, + "type": { + "type/Number": { + "min": 1.0, + "q1": 25.5, + "q3": 75.5, + "max": 100.0, + "sd": 29.008358252146028, + "avg": 50.5 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "first_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 94, + { + "join-alias": "Stg Customers - Customer" + } + ], + "effective_type": "type/Text", + "id": 94, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → First Name", + "fingerprint": { + "global": { + "distinct-count": 47, + "nil%": 0.0 + }, + "type": { + "type/Text": { + "percent-json": 0.0, + "percent-url": 0.0, + "percent-email": 0.0, + "percent-state": 0.02, + "average-length": 5.86 + } + } + }, + "base_type": "type/Text" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "last_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 92, + { + "join-alias": "Stg Customers - Customer" + } + ], + "effective_type": "type/Text", + "id": 92, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Last Name", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0.0 + }, + "type": { + "type/Text": { + "percent-json": 0.0, + "percent-url": 0.0, + "percent-email": 0.0, + "percent-state": 0.0, + "average-length": 2.0 + } + } + }, + "base_type": "type/Text" + } + ], + "creator": { + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_login": "2024-01-26T23:35:13.524402", + "is_qbnewb": false, + "is_superuser": true, + "id": 1, + "last_name": null, + "date_joined": "2024-01-26T23:29:30.885378", + "common_name": "dbtmetabase" + }, + "can_write": true, + "database_id": 2, + "enable_embedding": false, + "collection_id": null, + "query_type": "query", + "name": "Orders Customers", + "last_query_start": "2024-01-26T23:36:46.40105Z", + "dashboard_count": 0, + "average_query_time": 412.0, + "creator_id": 1, + "moderation_reviews": [], + "updated_at": "2024-01-26T23:36:46.84084Z", + "made_public_by_id": null, + "embedding_params": null, + "cache_ttl": null, + "dataset_query": { + "database": 2, + "type": "query", + "query": { + "source-table": 10, + "joins": [ + { + "fields": "all", + "strategy": "left-join", + "alias": "Stg Customers - Customer", + "condition": [ + "=", + [ + "field", + 84, + { + "base-type": "type/Integer" + } + ], + [ + "field", + 93, + { + "base-type": "type/Integer", + "join-alias": "Stg Customers - Customer" + } + ] + ], + "source-table": 12 + } + ] + } + }, + "id": 23, + "parameter_mappings": [], + "display": "table", + "entity_id": "aR8nxcwbbZSX3_DSLdOBm", + "collection_preview": true, + "last-edit-info": { + "id": 1, + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_name": null, + "timestamp": "2024-01-26T23:34:25.467752Z" + }, + "visualization_settings": { + "table.pivot_column": "status", + "table.cell_column": "order_id" + }, + "collection": { + "metabase.models.collection.root/is-root?": true, + "authority_level": null, + "name": "Our analytics", + "is_personal": false, + "id": "root", + "can_write": true + }, + "metabase_version": "v0.48.0 (f985e19)", + "parameters": [], + "dataset": false, + "created_at": "2024-01-26T23:34:25.436685", + 
"parameter_usage_count": 0, + "public_uuid": null +} diff --git a/tests/fixtures/api/card/24.json b/tests/fixtures/api/card/24.json new file mode 100644 index 0000000..73a4aed --- /dev/null +++ b/tests/fixtures/api/card/24.json @@ -0,0 +1,485 @@ +{ + "description": null, + "archived": false, + "collection_position": null, + "table_id": 10, + "result_metadata": [ + { + "description": "This is a unique identifier for an order", + "semantic_type": null, + "coercion_strategy": null, + "name": "order_id", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 47, + null + ], + "effective_type": "type/Integer", + "id": 47, + "visibility_type": "normal", + "display_name": "Order ID", + "fingerprint": { + "global": { + "distinct-count": 99, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 1, + "q1": 25.25, + "q3": 74.75, + "max": 99, + "sd": 28.719704534890823, + "avg": 50 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Foreign key to the customers table", + "semantic_type": "type/FK", + "coercion_strategy": null, + "name": "customer_id", + "settings": null, + "fk_target_field_id": 87, + "field_ref": [ + "field", + 84, + null + ], + "effective_type": "type/Integer", + "id": 84, + "visibility_type": "normal", + "display_name": "Customer ID", + "fingerprint": { + "global": { + "distinct-count": 62, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 1, + "q1": 25.875, + "q3": 69.625, + "max": 99, + "sd": 27.781341350472964, + "avg": 48.25252525252525 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": "Date (UTC) that the order was placed", + "semantic_type": null, + "coercion_strategy": null, + "unit": "default", + "name": "order_date", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 82, + { + "temporal-unit": "default" + } + ], + "effective_type": "type/Date", + "id": 82, + "visibility_type": "normal", + "display_name": "Order Date", + "fingerprint": { + "global": { + "distinct-count": 69, + "nil%": 0 + }, + "type": { + "type/DateTime": { + "earliest": "2018-01-01", + "latest": "2018-04-09" + } + } + }, + "base_type": "type/Date" + }, + { + "description": null, + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "status", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 78, + null + ], + "effective_type": "type/Text", + "id": 78, + "visibility_type": "normal", + "display_name": "Status", + "fingerprint": { + "global": { + "distinct-count": 5, + "nil%": 0 + }, + "type": { + "type/Text": { + "percent-json": 0, + "percent-url": 0, + "percent-email": 0, + "percent-state": 0, + "average-length": 8.404040404040405 + } + } + }, + "base_type": "type/Text" + }, + { + "description": "Amount of the order (AUD) paid for by credit card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "credit_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 76, + null + ], + "effective_type": "type/BigInteger", + "id": 76, + "visibility_type": "normal", + "display_name": "Credit Card Amount", + "fingerprint": { + "global": { + "distinct-count": 25, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 18.797054997187544, + "max": 30, + "sd": 10.959088854927673, + "avg": 8.797979797979798 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by coupon", + "semantic_type": "type/Category", + "coercion_strategy": null, + 
"name": "coupon_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 83, + null + ], + "effective_type": "type/BigInteger", + "id": 83, + "visibility_type": "normal", + "display_name": "Coupon Amount", + "fingerprint": { + "global": { + "distinct-count": 12, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 0.4747603274810728, + "max": 26, + "sd": 5.955012405351229, + "avg": 1.8686868686868687 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by bank transfer", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "bank_transfer_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 81, + null + ], + "effective_type": "type/BigInteger", + "id": 81, + "visibility_type": "normal", + "display_name": "Bank Transfer Amount", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 4.75, + "max": 26, + "sd": 7.420825132023675, + "avg": 4.151515151515151 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Amount of the order (AUD) paid for by gift card", + "semantic_type": "type/Category", + "coercion_strategy": null, + "name": "gift_card_amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 77, + null + ], + "effective_type": "type/BigInteger", + "id": 77, + "visibility_type": "normal", + "display_name": "Gift Card Amount", + "fingerprint": { + "global": { + "distinct-count": 11, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 0, + "q3": 1.3692088763283736, + "max": 30, + "sd": 6.392362351566517, + "avg": 2.0707070707070705 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": "Total amount (AUD) of the order", + "semantic_type": null, + "coercion_strategy": null, + "name": "amount", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 80, + null + ], + "effective_type": "type/BigInteger", + "id": 80, + "visibility_type": "normal", + "display_name": "Amount", + "fingerprint": { + "global": { + "distinct-count": 32, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 0, + "q1": 8.202945002812456, + "q3": 24.26138721247417, + "max": 58, + "sd": 10.736062525374601, + "avg": 16.88888888888889 + } + } + }, + "base_type": "type/BigInteger" + }, + { + "description": null, + "semantic_type": null, + "coercion_strategy": null, + "name": "customer_id_2", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 93, + null + ], + "effective_type": "type/Integer", + "id": 93, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Customer ID", + "fingerprint": { + "global": { + "distinct-count": 100, + "nil%": 0 + }, + "type": { + "type/Number": { + "min": 1, + "q1": 25.5, + "q3": 75.5, + "max": 100, + "sd": 29.008358252146028, + "avg": 50.5 + } + } + }, + "base_type": "type/Integer" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "first_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 94, + null + ], + "effective_type": "type/Text", + "id": 94, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → First Name", + "fingerprint": { + "global": { + "distinct-count": 47, + "nil%": 0 + }, + "type": { + "type/Text": { + "percent-json": 0, + "percent-url": 0, + "percent-email": 0, 
+ "percent-state": 0.02, + "average-length": 5.86 + } + } + }, + "base_type": "type/Text" + }, + { + "description": null, + "semantic_type": "type/Name", + "coercion_strategy": null, + "name": "last_name", + "settings": null, + "fk_target_field_id": null, + "field_ref": [ + "field", + 92, + null + ], + "effective_type": "type/Text", + "id": 92, + "visibility_type": "normal", + "display_name": "Stg Customers - Customer → Last Name", + "fingerprint": { + "global": { + "distinct-count": 19, + "nil%": 0 + }, + "type": { + "type/Text": { + "percent-json": 0, + "percent-url": 0, + "percent-email": 0, + "percent-state": 0, + "average-length": 2 + } + } + }, + "base_type": "type/Text" + } + ], + "creator": { + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_login": "2024-01-26T23:35:13.524402", + "is_qbnewb": true, + "is_superuser": true, + "id": 1, + "last_name": null, + "date_joined": "2024-01-26T23:29:30.885378", + "common_name": "dbtmetabase" + }, + "can_write": true, + "database_id": 2, + "enable_embedding": false, + "collection_id": null, + "query_type": "query", + "name": "Orders Customers Filtered", + "last_query_start": null, + "dashboard_count": 0, + "average_query_time": null, + "creator_id": 1, + "moderation_reviews": [], + "updated_at": "2024-01-26T23:35:08.864176Z", + "made_public_by_id": null, + "embedding_params": null, + "cache_ttl": null, + "dataset_query": { + "database": 2, + "type": "query", + "query": { + "source-table": "card__3", + "filter": [ + "not-null", + [ + "field", + 47, + { + "base-type": "type/Integer" + } + ] + ] + } + }, + "id": 24, + "parameter_mappings": [], + "display": "table", + "entity_id": "OLbf7Q2yHWOn6CGvptTpi", + "collection_preview": true, + "last-edit-info": { + "id": 1, + "email": "[email protected]", + "first_name": "dbtmetabase", + "last_name": null, + "timestamp": "2024-01-26T23:35:08.900746Z" + }, + "visualization_settings": { + "table.pivot_column": "status", + "table.cell_column": "order_id" + }, + "collection": { + "metabase.models.collection.root/is-root?": true, + "authority_level": null, + "name": "Our analytics", + "is_personal": false, + "id": "root", + "can_write": true + }, + "metabase_version": "v0.48.0 (f985e19)", + "parameters": [], + "dataset": false, + "created_at": "2024-01-26T23:35:08.864176", + "parameter_usage_count": 0, + "public_uuid": null +} diff --git a/tests/fixtures/api/collection/root/items.json b/tests/fixtures/api/collection/root/items.json index 6a54de1..642e4c7 100644 --- a/tests/fixtures/api/collection/root/items.json +++ b/tests/fixtures/api/collection/root/items.json @@ -44,5 +44,21 @@ }, "favorite": false, "model": "card" + }, + { + "description": null, + "collection_position": null, + "name": "Orders Customers Filtered", + "id": 24, + "display": "table", + "last-edit-info": { + "id": 1, + "last_name": "", + "first_name": "dbtmetabase", + "email": "[email protected]", + "timestamp": "2024-01-21T08:01:37.449936Z" + }, + "favorite": false, + "model": "card" } -] \ No newline at end of file +] diff --git a/tests/fixtures/exposure/collection/our_analytics.yml b/tests/fixtures/exposure/collection/our_analytics.yml index 8e0161f..c43489c 100644 --- a/tests/fixtures/exposure/collection/our_analytics.yml +++ b/tests/fixtures/exposure/collection/our_analytics.yml @@ -47,3 +47,26 @@ exposures: email: [email protected] depends_on: - ref('orders') + - name: orders_customers_filtered + label: Orders Customers Filtered + description: '### Visualization: Table + + + No description provided in Metabase + + 
+ #### Metadata + + + Metabase ID: __24__ + + + Created On: __2024-01-26T23:35:08.864176__' + type: analysis + url: http://localhost:3000/card/24 + maturity: medium + owner: + name: dbtmetabase + email: [email protected] + depends_on: + - ref('customers') diff --git a/tests/fixtures/exposure/default/exposures.yml b/tests/fixtures/exposure/default/exposures.yml index b81194a..ac77e7e 100644 --- a/tests/fixtures/exposure/default/exposures.yml +++ b/tests/fixtures/exposure/default/exposures.yml @@ -254,6 +254,29 @@ exposures: email: [email protected] depends_on: - ref('customers') +- name: orders_customers_filtered + label: Orders Customers Filtered + description: '### Visualization: Table + + + No description provided in Metabase + + + #### Metadata + + + Metabase ID: __24__ + + + Created On: __2024-01-26T23:35:08.864176__' + type: analysis + url: http://localhost:3000/card/24 + maturity: medium + owner: + name: dbtmetabase + email: [email protected] + depends_on: + - ref('customers') - name: timestamp_by_day_of_the_week label: Timestamp by day of the week description: '### Visualization: Bar
Exposures command fails with `KeyError: 'models'` in `dbtmetabase/_exposures.py`, function `__extract_card_exposures()` (dbt-metabase 1.0.1). When I run `dbt-metabase exposures` I get the following error:

```bash
Traceback (most recent call last):
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/bin/dbt-metabase", line 8, in <module>
    sys.exit(cli())
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
    return self.main(*args, **kwargs)
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/click/core.py", line 1078, in main
    rv = self.invoke(ctx)
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/click/core.py", line 1688, in invoke
    return _process_result(sub_ctx.command.invoke(sub_ctx))
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/click/core.py", line 783, in invoke
    return __callback(*args, **kwargs)
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/dbtmetabase/__main__.py", line 178, in wrapper
    return func(
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/dbtmetabase/__main__.py", line 361, in exposures
    core.extract_exposures(
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/dbtmetabase/_exposures.py", line 104, in extract_exposures
    result = self.__extract_card_exposures(ctx, card=entity)
  File "/pypoetry/virtualenvs/dbt-yDNzCqHh-py3.10/lib/python3.10/site-packages/dbtmetabase/_exposures.py", line 196, in __extract_card_exposures
    depends += self.__extract_card_exposures(
KeyError: 'models'
```

Versions:
- dbt-metabase: 1.0.1
- dbt-core: 1.3.2
- dbt-snowflake: 1.3.0
- Python: 3.10.6

I went into the code and added some print statements for debugging; the failing call is in `__extract_card_exposures()` in `dbtmetabase/_exposures.py`, lines 196-199:

```python
if str(query_source).startswith("card__"):
    # Handle questions based on other questions
    depends += self.__extract_card_exposures(
        ctx,
        card=self.metabase.get_card(uid=query_source.split("__")[-1]),
    )["models"]
```

This is the object that was missing the `models` key (values replaced for the example):

```json
{
    "depends": [
        "TABLE_1",
        "TABLE_2",
        "TABLE_3",
        "TABLE_4",
        "TABLE_5"
    ],
    "native_query": "with example_table as ( ... select * from final"
}
```
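For context, the crash reproduces with any nested result dict that lacks the key, as happens for native-query cards that return only `depends`/`native_query`. A minimal defensive sketch (an illustration only, not the project's actual fix; `nested_models` is a hypothetical helper) would fall back to an empty list:

```python
from typing import Any, List, Mapping

def nested_models(result: Mapping[str, Any]) -> List[str]:
    """Fetch a nested card's models, tolerating native-query results."""
    # A native-query card yields only {"depends": ..., "native_query": ...},
    # so result["models"] raises KeyError; .get() with a default does not.
    return result.get("models", [])

assert nested_models({"depends": ["TABLE_1"], "native_query": "..."}) == []
assert nested_models({"models": ["orders"]}) == ["orders"]
```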
0.0
2f8c88dedc76fc1001aeb95bfdc1914c45ca5999
[ "tests/test_exposures.py::TestExposures::test_exposures", "tests/test_exposures.py::TestExposures::test_exposures_collection_grouping", "tests/test_exposures.py::TestExposures::test_exposures_grouping_type" ]
[ "tests/test_format.py::TestFormat::test_dump_yaml", "tests/test_format.py::TestFormat::test_filter", "tests/test_format.py::TestFormat::test_null_value", "tests/test_format.py::TestFormat::test_safe_description", "tests/test_format.py::TestFormat::test_safe_name", "tests/test_manifest.py::TestManifest::test_v11", "tests/test_manifest.py::TestManifest::test_v11_disabled", "tests/test_manifest.py::TestManifest::test_v2", "tests/test_metabase.py::TestMetabase::test_metabase_find_database", "tests/test_metabase.py::TestMetabase::test_metabase_get_collection_items", "tests/test_metabase.py::TestMetabase::test_metabase_get_collections", "tests/test_models.py::TestModels::test_build_lookups", "tests/test_models.py::TestModels::test_export" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2024-01-27 00:07:42+00:00
mit
2,658
gouline__dbt-metabase-233
diff --git a/Makefile b/Makefile index ee21a27..068fca0 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,7 @@ sandbox-models: --metabase-username $$MB_USER \ --metabase-password $$MB_PASSWORD \ --metabase-database $$POSTGRES_DB \ - --include-schemas "public",other \ + --include-schemas "pub*",other \ --http-header x-dummy-key dummy-value \ --order-fields \ --verbose ) diff --git a/README.md b/README.md index 673aa3c..cb6e9a3 100644 --- a/README.md +++ b/README.md @@ -211,7 +211,7 @@ dbt-metabase exposures \ --metabase-username [email protected] \ --metabase-password Password123 \ --output-path models/ \ - --exclude-collections temporary + --exclude-collections "temp*" ``` Once the execution completes, check your output path for exposures files containing descriptions, creator details and links for Metabase questions and dashboards: @@ -295,7 +295,7 @@ c.export_models( # Extracting exposures c.extract_exposures( output_path=".", - collection_filter=Filter(exclude=["temporary"]), + collection_filter=Filter(exclude=["temp*"]), ) ``` diff --git a/dbtmetabase/format.py b/dbtmetabase/format.py index 7ef2b29..862a715 100644 --- a/dbtmetabase/format.py +++ b/dbtmetabase/format.py @@ -1,5 +1,6 @@ from __future__ import annotations +import fnmatch import logging import re from logging.handlers import RotatingFileHandler @@ -29,9 +30,18 @@ class Filter: def match(self, item: str) -> bool: item = self._norm_item(item) - included = not self.include or item in self.include - excluded = self.exclude and item in self.exclude - return included and not excluded + + for exclude in self.exclude: + if fnmatch.fnmatch(item, exclude): + return False + + if self.include: + for include in self.include: + if fnmatch.fnmatch(item, include): + return True + return False + + return True @staticmethod def _norm_arg(arg: Optional[Sequence[str]]) -> Sequence[str]:
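A standalone sketch of the matching semantics the patch above introduces: exclude patterns take precedence over includes, and an empty include list allows everything not excluded. It uses the same stdlib `fnmatch` module as the patch; the real `Filter` additionally case-normalizes patterns and items, which is omitted here.

```python
import fnmatch
from typing import Sequence

def match(item: str, include: Sequence[str] = (), exclude: Sequence[str] = ()) -> bool:
    # Any matching exclude pattern rejects the item outright.
    for pattern in exclude:
        if fnmatch.fnmatch(item, pattern):
            return False
    # A non-empty include list requires at least one match...
    if include:
        return any(fnmatch.fnmatch(item, pattern) for pattern in include)
    # ...while an empty one allows everything not excluded.
    return True

assert match("stg_orders", include=["stg_*"])
assert not match("stg_orders", include=["*orders"], exclude=["stg_*"])
assert match("_orders", include=["*orders"], exclude=["stg_*"])
```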
gouline/dbt-metabase
d9f50dc0bdcf26fe52cf88de4075664cf310bdad
diff --git a/tests/test_format.py b/tests/test_format.py index b24d172..9b48af6 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -27,6 +27,15 @@ class TestFormat(unittest.TestCase): self.assertTrue(Filter(include="alpha").match("Alpha")) self.assertFalse(Filter(exclude="alpha").match("Alpha")) + def test_filter_wildcard(self): + self.assertTrue(Filter(include="stg_*").match("stg_orders")) + self.assertTrue(Filter(include="STG_*").match("stg_ORDERS")) + self.assertFalse(Filter(include="stg_*").match("orders")) + self.assertTrue(Filter(include="order?").match("orders")) + self.assertFalse(Filter(include="order?").match("ordersz")) + self.assertTrue(Filter(include="*orders", exclude="stg_*").match("_orders")) + self.assertFalse(Filter(include="*orders", exclude="stg_*").match("stg_orders")) + def test_null_value(self): self.assertIsNotNone(NullValue) self.assertFalse(NullValue)
Add wildcard syntax to table filters

Certain models, such as staging tables, might not be part of Metabase's table definitions. Rather than having to spell out every include or exclude, `dbtmetabase` should let you use a wildcard to exclude them, e.g.:

```python
c.export_models(
    metabase_database="my_database",
    schema_filter=Filter(include=["PUBLIC", "SNAPSHOTS"], exclude=["STG_*"]),
    skip_sources=True,
)
```
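Assuming the wildcard support is added to the existing `Filter` class (defined in `dbtmetabase/format.py`; the import path below is an assumption), the requested semantics would look like:

```python
from dbtmetabase.format import Filter  # assumed import path

schema_filter = Filter(include=["PUBLIC", "SNAPSHOTS"], exclude=["STG_*"])

assert schema_filter.match("PUBLIC")          # explicitly included
assert schema_filter.match("SNAPSHOTS")
assert not schema_filter.match("STG_ORDERS")  # rejected by the wildcard
assert not schema_filter.match("OTHER")       # not in the include list
```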
0.0
d9f50dc0bdcf26fe52cf88de4075664cf310bdad
[ "tests/test_format.py::TestFormat::test_filter_wildcard" ]
[ "tests/test_format.py::TestFormat::test_filter", "tests/test_format.py::TestFormat::test_null_value", "tests/test_format.py::TestFormat::test_safe_description", "tests/test_format.py::TestFormat::test_safe_name" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-02-20 00:46:10+00:00
mit
2,659
gouline__dbt-metabase-234
diff --git a/dbtmetabase/manifest.py b/dbtmetabase/manifest.py index 14a0b94..014ef03 100644 --- a/dbtmetabase/manifest.py +++ b/dbtmetabase/manifest.py @@ -3,6 +3,7 @@ from __future__ import annotations import dataclasses as dc import json import logging +import re from enum import Enum from pathlib import Path from typing import ( @@ -42,6 +43,9 @@ _MODEL_META_FIELDS = _COMMON_META_FIELDS + [ # Default model schema (only schema in BigQuery) DEFAULT_SCHEMA = "PUBLIC" +# Foreign key constraint: "schema.model (column)" / "model (column)" +_CONSTRAINT_FK_PARSER = re.compile(r"(?P<model>.+)\s+\((?P<column>.+)\)") + class Manifest: """dbt manifest reader.""" @@ -142,12 +146,11 @@ class Manifest: ), ) - self._set_column_fk( + self._set_column_relationship( manifest_column=manifest_column, column=column, - table=relationship["fk_target_table"] if relationship else None, - field=relationship["fk_target_field"] if relationship else None, schema=schema, + relationship=relationship, ) return column @@ -250,43 +253,62 @@ class Manifest: return relationships - def _set_column_fk( + def _set_column_relationship( self, manifest_column: Mapping, column: Column, - table: Optional[str], - field: Optional[str], - schema: Optional[str], + schema: str, + relationship: Optional[Mapping], ): - """Sets foreign key target on a column. + """Sets primary key and foreign key target on a column from constraints, meta fields or provided test relationship.""" + + fk_target_table = "" + fk_target_field = "" + + # Precedence 1: Relationship test + if relationship: + fk_target_table = relationship["fk_target_table"] + fk_target_field = relationship["fk_target_field"] + + # Precedence 2: Constraints + for constraint in manifest_column.get("constraints", []): + if constraint["type"] == "primary_key": + if not column.semantic_type: + column.semantic_type = "type/PK" + + elif constraint["type"] == "foreign_key": + constraint_expr = constraint.get("expression", "") + constraint_fk = _CONSTRAINT_FK_PARSER.search(constraint_expr) + if constraint_fk: + fk_target_table = constraint_fk.group("model") + fk_target_field = constraint_fk.group("column") + else: + _logger.warning( + "Unparsable '%s' foreign key constraint: %s", + column.name, + constraint_expr, + ) - Args: - manifest_column (Mapping): Schema column definition. - column (Column): Metabase column definition. - table (str): Foreign key target table. - field (str): Foreign key target field. - schema (str): Current schema name. 
- """ - # Meta fields take precedence + # Precedence 3: Meta fields meta = manifest_column.get("meta", {}) - table = meta.get(f"{_META_NS}.fk_target_table", table) - field = meta.get(f"{_META_NS}.fk_target_field", field) + fk_target_table = meta.get(f"{_META_NS}.fk_target_table", fk_target_table) + fk_target_field = meta.get(f"{_META_NS}.fk_target_field", fk_target_field) - if not table or not field: - if table or field: + if not fk_target_table or not fk_target_field: + if fk_target_table or fk_target_table: _logger.warning( "Foreign key requires table and field for column '%s'", column.name, ) return - table_path = table.split(".") - if len(table_path) == 1 and schema: - table_path.insert(0, schema) + fk_target_table_path = fk_target_table.split(".") + if len(fk_target_table_path) == 1 and schema: + fk_target_table_path.insert(0, schema) column.semantic_type = "type/FK" - column.fk_target_table = ".".join([x.strip('"') for x in table_path]) - column.fk_target_field = field.strip('"') + column.fk_target_table = ".".join([x.strip('"') for x in fk_target_table_path]) + column.fk_target_field = fk_target_field.strip('"') _logger.debug( "Relation from '%s' to '%s.%s'", column.name, diff --git a/sandbox/models/schema.yml b/sandbox/models/schema.yml index 5918425..caafabd 100644 --- a/sandbox/models/schema.yml +++ b/sandbox/models/schema.yml @@ -42,6 +42,8 @@ models: columns: - name: order_id + constraints: + - type: primary_key tests: - unique - not_null @@ -49,11 +51,11 @@ models: - name: customer_id description: Foreign key to the customers table + constraints: + - type: foreign_key + expression: customers (customer_id) tests: - not_null - - relationships: - to: ref('customers') - field: customer_id - name: order_date description: Date (UTC) that the order was placed
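For reference, a small sketch of what the `_CONSTRAINT_FK_PARSER` pattern in the patch above accepts: both bare and schema-qualified foreign key targets in the `"model (column)"` form.

```python
import re

# Same pattern as _CONSTRAINT_FK_PARSER in the patch above.
pattern = re.compile(r"(?P<model>.+)\s+\((?P<column>.+)\)")

for expr in ("customers (customer_id)", "other.customers (customer_id)"):
    match = pattern.search(expr)
    assert match is not None
    print(match.group("model"), "->", match.group("column"))
# Output:
# customers -> customer_id
# other.customers -> customer_id
```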
gouline/dbt-metabase
d8788cc994f64730bb2e5f6c7818b5cd9f86c756
diff --git a/tests/fixtures/manifest-v11.json b/tests/fixtures/manifest-v11.json index 8a20dd2..14630c0 100644 --- a/tests/fixtures/manifest-v11.json +++ b/tests/fixtures/manifest-v11.json @@ -1,9 +1,9 @@ { "metadata": { "dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v11.json", - "dbt_version": "1.7.5", - "generated_at": "2024-01-25T01:54:22.285700Z", - "invocation_id": "2a379e22-b3cb-4596-8663-17abbbc84330", + "dbt_version": "1.7.8", + "generated_at": "2024-02-20T01:11:30.963279Z", + "invocation_id": "e0c2a315-8369-44bf-8cad-a6cbb98ce697", "env": {}, "project_name": "sandbox", "project_id": "93bc63e0b4f48fbbff568d9fc0dc3def", @@ -150,7 +150,7 @@ "metabase.display_name": "clients" } }, - "created_at": 1706145555.0016356, + "created_at": 1708330299.0858583, "relation_name": "\"dbtmetabase\".\"public\".\"customers\"", "raw_code": "with customers as (\n\n select * from {{ ref('stg_customers') }}\n\n),\n\norders as (\n\n select * from {{ ref('stg_orders') }}\n\n),\n\npayments as (\n\n select * from {{ ref('stg_payments') }}\n\n),\n\ncustomer_orders as (\n\n select\n customer_id,\n\n min(order_date) as first_order,\n max(order_date) as most_recent_order,\n count(order_id) as number_of_orders\n from orders\n\n group by 1\n\n),\n\ncustomer_payments as (\n\n select\n orders.customer_id,\n sum(amount) as total_amount\n\n from payments\n\n left join orders using (order_id)\n\n group by 1\n\n),\n\nfinal as (\n\n select\n customers.customer_id,\n customers.first_name,\n customers.last_name,\n customer_orders.first_order,\n customer_orders.most_recent_order,\n customer_orders.number_of_orders,\n customer_payments.total_amount as customer_lifetime_value\n\n from customers\n\n left join customer_orders using (customer_id)\n\n left join customer_payments using (customer_id)\n\n)\n\nselect * from final", "language": "sql", @@ -197,23 +197,23 @@ "latest_version": null, "deprecation_date": null }, - "model.sandbox.orders": { + "seed.sandbox.raw_customers": { "database": "dbtmetabase", "schema": "public", - "name": "orders", - "resource_type": "model", + "name": "raw_customers", + "resource_type": "seed", "package_name": "sandbox", - "path": "orders.sql", - "original_file_path": "models/orders.sql", - "unique_id": "model.sandbox.orders", + "path": "raw_customers.csv", + "original_file_path": "seeds/raw_customers.csv", + "unique_id": "seed.sandbox.raw_customers", "fqn": [ "sandbox", - "orders" + "raw_customers" ], - "alias": "orders", + "alias": "raw_customers", "checksum": { "name": "sha256", - "checksum": "9a13423dec138c8cedf1eb7e03f4ad86be3b378ef088ac9ecc09328b76d8986e" + "checksum": "357d173dda65a741ad97d6683502286cc2655bb396ab5f4dfad12b8c39bd2a63" }, "config": { "enabled": true, @@ -221,12 +221,9 @@ "schema": null, "database": null, "tags": [], - "meta": { - "metabase.points_of_interest": "Basic information only", - "metabase.caveats": "Some facts are derived from payments" - }, + "meta": {}, "group": null, - "materialized": "table", + "materialized": "seed", "incremental_strategy": null, "persist_docs": {}, "post-hook": [], @@ -247,171 +244,47 @@ "enforced": false, "alias_types": true }, - "access": "protected" + "delimiter": ",", + "quote_columns": null }, "tags": [], - "description": "This table has basic information about orders, as well as some derived facts based on payments", - "columns": { - "order_id": { - "name": "order_id", - "description": "This is a unique identifier for an order", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - 
"customer_id": { - "name": "customer_id", - "description": "Foreign key to the customers table", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "order_date": { - "name": "order_date", - "description": "Date (UTC) that the order was placed", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "status": { - "name": "status", - "description": "", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "amount": { - "name": "amount", - "description": "Total amount (AUD) of the order", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "credit_card_amount": { - "name": "credit_card_amount", - "description": "Amount of the order (AUD) paid for by credit card", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "coupon_amount": { - "name": "coupon_amount", - "description": "Amount of the order (AUD) paid for by coupon", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "bank_transfer_amount": { - "name": "bank_transfer_amount", - "description": "Amount of the order (AUD) paid for by bank transfer", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - }, - "gift_card_amount": { - "name": "gift_card_amount", - "description": "Amount of the order (AUD) paid for by gift card", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - } - }, - "meta": { - "metabase.points_of_interest": "Basic information only", - "metabase.caveats": "Some facts are derived from payments" - }, + "description": "", + "columns": {}, + "meta": {}, "group": null, "docs": { "show": true, "node_color": null }, - "patch_path": "sandbox://models/schema.yml", - "build_path": "target/run/sandbox/models/orders.sql", + "patch_path": null, + "build_path": null, "deferred": false, - "unrendered_config": { - "materialized": "table", - "meta": { - "metabase.points_of_interest": "Basic information only", - "metabase.caveats": "Some facts are derived from payments" - } - }, - "created_at": 1706145555.0041838, - "relation_name": "\"dbtmetabase\".\"public\".\"orders\"", - "raw_code": "{% set payment_methods = ['credit_card', 'coupon', 'bank_transfer', 'gift_card'] %}\n\nwith orders as (\n\n select * from {{ ref('stg_orders') }}\n\n),\n\npayments as (\n\n select * from {{ ref('stg_payments') }}\n\n),\n\norder_payments as (\n\n select\n order_id,\n\n {% for payment_method in payment_methods -%}\n sum(case when payment_method = '{{ payment_method }}' then amount else 0 end) as {{ payment_method }}_amount,\n {% endfor -%}\n\n sum(amount) as total_amount\n\n from payments\n\n group by 1\n\n),\n\nfinal as (\n\n select\n orders.order_id,\n orders.customer_id,\n orders.order_date,\n orders.status,\n\n {% for payment_method in payment_methods -%}\n\n order_payments.{{ payment_method }}_amount,\n\n {% endfor -%}\n\n order_payments.total_amount as amount\n\n from orders\n\n left join order_payments using (order_id)\n\n)\n\nselect * from final", - "language": "sql", - "refs": [ - { - "name": "stg_orders", - "package": null, - "version": null - }, - { - "name": "stg_payments", - "package": null, - "version": null - } - ], - "sources": [], - "metrics": [], + "unrendered_config": {}, + "created_at": 1708330299.0421724, + "relation_name": "\"dbtmetabase\".\"public\".\"raw_customers\"", + "raw_code": "", + "root_path": "/app/sandbox", 
"depends_on": { - "macros": [], - "nodes": [ - "model.sandbox.stg_orders", - "model.sandbox.stg_payments" - ] - }, - "compiled_path": "target/compiled/sandbox/models/orders.sql", - "compiled": true, - "compiled_code": "\n\nwith orders as (\n\n select * from \"dbtmetabase\".\"public\".\"stg_orders\"\n\n),\n\npayments as (\n\n select * from \"dbtmetabase\".\"public\".\"stg_payments\"\n\n),\n\norder_payments as (\n\n select\n order_id,\n\n sum(case when payment_method = 'credit_card' then amount else 0 end) as credit_card_amount,\n sum(case when payment_method = 'coupon' then amount else 0 end) as coupon_amount,\n sum(case when payment_method = 'bank_transfer' then amount else 0 end) as bank_transfer_amount,\n sum(case when payment_method = 'gift_card' then amount else 0 end) as gift_card_amount,\n sum(amount) as total_amount\n\n from payments\n\n group by 1\n\n),\n\nfinal as (\n\n select\n orders.order_id,\n orders.customer_id,\n orders.order_date,\n orders.status,\n\n order_payments.credit_card_amount,\n\n order_payments.coupon_amount,\n\n order_payments.bank_transfer_amount,\n\n order_payments.gift_card_amount,\n\n order_payments.total_amount as amount\n\n from orders\n\n left join order_payments using (order_id)\n\n)\n\nselect * from final", - "extra_ctes_injected": true, - "extra_ctes": [], - "contract": { - "enforced": false, - "alias_types": true, - "checksum": null - }, - "access": "protected", - "constraints": [], - "version": null, - "latest_version": null, - "deprecation_date": null + "macros": [] + } }, - "model.sandbox.stg_customers": { + "seed.sandbox.raw_orders": { "database": "dbtmetabase", "schema": "public", - "name": "stg_customers", - "resource_type": "model", + "name": "raw_orders", + "resource_type": "seed", "package_name": "sandbox", - "path": "staging/stg_customers.sql", - "original_file_path": "models/staging/stg_customers.sql", - "unique_id": "model.sandbox.stg_customers", + "path": "raw_orders.csv", + "original_file_path": "seeds/raw_orders.csv", + "unique_id": "seed.sandbox.raw_orders", "fqn": [ "sandbox", - "staging", - "stg_customers" + "raw_orders" ], - "alias": "stg_customers", + "alias": "raw_orders", "checksum": { "name": "sha256", - "checksum": "80e3223cd54387e11fa16cd0f4cbe15f8ff74dcd9900b93856b9e39416178c9d" + "checksum": "6228dde8e17b9621f35c13e272ec67d3ff55b55499433f47d303adf2be72c17f" }, "config": { "enabled": true, @@ -421,7 +294,7 @@ "tags": [], "meta": {}, "group": null, - "materialized": "view", + "materialized": "seed", "incremental_strategy": null, "persist_docs": {}, "post-hook": [], @@ -442,86 +315,47 @@ "enforced": false, "alias_types": true }, - "access": "protected" + "delimiter": ",", + "quote_columns": null }, "tags": [], "description": "", - "columns": { - "customer_id": { - "name": "customer_id", - "description": "", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - } - }, + "columns": {}, "meta": {}, "group": null, "docs": { "show": true, "node_color": null }, - "patch_path": "sandbox://models/staging/schema.yml", - "build_path": "target/run/sandbox/models/staging/stg_customers.sql", + "patch_path": null, + "build_path": null, "deferred": false, - "unrendered_config": { - "materialized": "view" - }, - "created_at": 1706145555.0613995, - "relation_name": "\"dbtmetabase\".\"public\".\"stg_customers\"", - "raw_code": "with source as (\n\n {#-\n Normally we would select from the table here, but we are using seeds to load\n our data in this project\n #}\n select * from {{ ref('raw_customers') 
}}\n\n),\n\nrenamed as (\n\n select\n id as customer_id,\n first_name,\n last_name\n\n from source\n\n)\n\nselect * from renamed", - "language": "sql", - "refs": [ - { - "name": "raw_customers", - "package": null, - "version": null - } - ], - "sources": [], - "metrics": [], + "unrendered_config": {}, + "created_at": 1708330299.0437803, + "relation_name": "\"dbtmetabase\".\"public\".\"raw_orders\"", + "raw_code": "", + "root_path": "/app/sandbox", "depends_on": { - "macros": [], - "nodes": [ - "seed.sandbox.raw_customers" - ] - }, - "compiled_path": "target/compiled/sandbox/models/staging/stg_customers.sql", - "compiled": true, - "compiled_code": "with source as (\n select * from \"dbtmetabase\".\"public\".\"raw_customers\"\n\n),\n\nrenamed as (\n\n select\n id as customer_id,\n first_name,\n last_name\n\n from source\n\n)\n\nselect * from renamed", - "extra_ctes_injected": true, - "extra_ctes": [], - "contract": { - "enforced": false, - "alias_types": true, - "checksum": null - }, - "access": "protected", - "constraints": [], - "version": null, - "latest_version": null, - "deprecation_date": null + "macros": [] + } }, - "model.sandbox.stg_payments": { + "seed.sandbox.raw_payments": { "database": "dbtmetabase", "schema": "public", - "name": "stg_payments", - "resource_type": "model", + "name": "raw_payments", + "resource_type": "seed", "package_name": "sandbox", - "path": "staging/stg_payments.sql", - "original_file_path": "models/staging/stg_payments.sql", - "unique_id": "model.sandbox.stg_payments", + "path": "raw_payments.csv", + "original_file_path": "seeds/raw_payments.csv", + "unique_id": "seed.sandbox.raw_payments", "fqn": [ "sandbox", - "staging", - "stg_payments" + "raw_payments" ], - "alias": "stg_payments", + "alias": "raw_payments", "checksum": { "name": "sha256", - "checksum": "e9a45326cc72cf5bdc59163207bac2f3ed3697c1758d916b17327c7720110fcc" + "checksum": "6de0626a8db9c1750eefd1b2e17fac4c2a4b9f778eb50532d8b377b90de395e6" }, "config": { "enabled": true, @@ -531,7 +365,7 @@ "tags": [], "meta": {}, "group": null, - "materialized": "view", + "materialized": "seed", "incremental_strategy": null, "persist_docs": {}, "post-hook": [], @@ -552,49 +386,182 @@ "enforced": false, "alias_types": true }, - "access": "protected" + "delimiter": ",", + "quote_columns": null }, "tags": [], "description": "", - "columns": { - "payment_id": { - "name": "payment_id", - "description": "", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] + "columns": {}, + "meta": {}, + "group": null, + "docs": { + "show": true, + "node_color": null + }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": {}, + "created_at": 1708330299.045119, + "relation_name": "\"dbtmetabase\".\"public\".\"raw_payments\"", + "raw_code": "", + "root_path": "/app/sandbox", + "depends_on": { + "macros": [] + } + }, + "test.sandbox.unique_customers_customer_id.c5af1ff4b1": { + "test_metadata": { + "name": "unique", + "kwargs": { + "column_name": "customer_id", + "model": "{{ get_where_subquery(ref('customers')) }}" }, - "payment_method": { - "name": "payment_method", - "description": "", - "meta": {}, - "data_type": null, - "constraints": [], - "quote": null, - "tags": [] - } + "namespace": null + }, + "database": "dbtmetabase", + "schema": "public_dbt_test__audit", + "name": "unique_customers_customer_id", + "resource_type": "test", + "package_name": "sandbox", + "path": "unique_customers_customer_id.sql", + "original_file_path": "models/schema.yml", + 
"unique_id": "test.sandbox.unique_customers_customer_id.c5af1ff4b1", + "fqn": [ + "sandbox", + "unique_customers_customer_id" + ], + "alias": "unique_customers_customer_id", + "checksum": { + "name": "none", + "checksum": "" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "store_failures_as": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" }, + "tags": [], + "description": "", + "columns": {}, "meta": {}, "group": null, "docs": { "show": true, "node_color": null }, - "patch_path": "sandbox://models/staging/schema.yml", - "build_path": "target/run/sandbox/models/staging/stg_payments.sql", + "patch_path": null, + "build_path": null, "deferred": false, - "unrendered_config": { - "materialized": "view" + "unrendered_config": {}, + "created_at": 1708330299.1369452, + "relation_name": null, + "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", + "language": "sql", + "refs": [ + { + "name": "customers", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt.test_unique" + ], + "nodes": [ + "model.sandbox.customers" + ] }, - "created_at": 1706145555.062218, - "relation_name": "\"dbtmetabase\".\"public\".\"stg_payments\"", - "raw_code": "with source as (\n \n {#-\n Normally we would select from the table here, but we are using seeds to load\n our data in this project\n #}\n select * from {{ ref('raw_payments') }}\n\n),\n\nrenamed as (\n\n select\n id as payment_id,\n order_id,\n payment_method,\n\n --`amount` is currently stored in cents, so we convert it to dollars\n amount / 100 as amount\n\n from source\n\n)\n\nselect * from renamed", + "compiled_path": null, + "contract": { + "enforced": false, + "alias_types": true, + "checksum": null + }, + "column_name": "customer_id", + "file_key_name": "models.customers", + "attached_node": "model.sandbox.customers" + }, + "test.sandbox.not_null_customers_customer_id.5c9bf9911d": { + "test_metadata": { + "name": "not_null", + "kwargs": { + "column_name": "customer_id", + "model": "{{ get_where_subquery(ref('customers')) }}" + }, + "namespace": null + }, + "database": "dbtmetabase", + "schema": "public_dbt_test__audit", + "name": "not_null_customers_customer_id", + "resource_type": "test", + "package_name": "sandbox", + "path": "not_null_customers_customer_id.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_customers_customer_id.5c9bf9911d", + "fqn": [ + "sandbox", + "not_null_customers_customer_id" + ], + "alias": "not_null_customers_customer_id", + "checksum": { + "name": "none", + "checksum": "" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "store_failures_as": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { + "show": true, + "node_color": null + }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": {}, + "created_at": 1708330299.1383352, + "relation_name": null, + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", 
"language": "sql", "refs": [ { - "name": "raw_payments", + "name": "customers", "package": null, "version": null } @@ -602,45 +569,41 @@ "sources": [], "metrics": [], "depends_on": { - "macros": [], + "macros": [ + "macro.dbt.test_not_null" + ], "nodes": [ - "seed.sandbox.raw_payments" + "model.sandbox.customers" ] }, - "compiled_path": "target/compiled/sandbox/models/staging/stg_payments.sql", - "compiled": true, - "compiled_code": "with source as (\n select * from \"dbtmetabase\".\"public\".\"raw_payments\"\n\n),\n\nrenamed as (\n\n select\n id as payment_id,\n order_id,\n payment_method,\n\n --`amount` is currently stored in cents, so we convert it to dollars\n amount / 100 as amount\n\n from source\n\n)\n\nselect * from renamed", - "extra_ctes_injected": true, - "extra_ctes": [], + "compiled_path": null, "contract": { "enforced": false, "alias_types": true, "checksum": null }, - "access": "protected", - "constraints": [], - "version": null, - "latest_version": null, - "deprecation_date": null + "column_name": "customer_id", + "file_key_name": "models.customers", + "attached_node": "model.sandbox.customers" }, - "model.sandbox.stg_orders": { + "model.sandbox.stg_customers": { "database": "dbtmetabase", "schema": "public", - "name": "stg_orders", + "name": "stg_customers", "resource_type": "model", "package_name": "sandbox", - "path": "staging/stg_orders.sql", - "original_file_path": "models/staging/stg_orders.sql", - "unique_id": "model.sandbox.stg_orders", + "path": "staging/stg_customers.sql", + "original_file_path": "models/staging/stg_customers.sql", + "unique_id": "model.sandbox.stg_customers", "fqn": [ "sandbox", "staging", - "stg_orders" + "stg_customers" ], - "alias": "stg_orders", + "alias": "stg_customers", "checksum": { "name": "sha256", - "checksum": "f4f881cb09d2c4162200fc331d7401df6d1abd4fed492554a7db70dede347108" + "checksum": "80e3223cd54387e11fa16cd0f4cbe15f8ff74dcd9900b93856b9e39416178c9d" }, "config": { "enabled": true, @@ -676,8 +639,8 @@ "tags": [], "description": "", "columns": { - "order_id": { - "name": "order_id", + "customer_id": { + "name": "customer_id", "description": "", "meta": {}, "data_type": null, @@ -685,8 +648,17 @@ "quote": null, "tags": [] }, - "status": { - "name": "status", + "first_name": { + "name": "first_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_name": { + "name": "last_name", "description": "", "meta": {}, "data_type": null, @@ -702,18 +674,18 @@ "node_color": null }, "patch_path": "sandbox://models/staging/schema.yml", - "build_path": "target/run/sandbox/models/staging/stg_orders.sql", + "build_path": "target/run/sandbox/models/staging/stg_customers.sql", "deferred": false, "unrendered_config": { "materialized": "view" }, - "created_at": 1706145555.0616965, - "relation_name": "\"dbtmetabase\".\"public\".\"stg_orders\"", - "raw_code": "with source as (\n\n {#-\n Normally we would select from the table here, but we are using seeds to load\n our data in this project\n #}\n select * from {{ ref('raw_orders') }}\n\n),\n\nrenamed as (\n\n select\n id as order_id,\n user_id as customer_id,\n order_date,\n status\n\n from source\n\n)\n\nselect * from renamed", + "created_at": 1708338357.5425656, + "relation_name": "\"dbtmetabase\".\"public\".\"stg_customers\"", + "raw_code": "with source as (\n\n {#-\n Normally we would select from the table here, but we are using seeds to load\n our data in this project\n #}\n select * from {{ ref('raw_customers') 
}}\n\n),\n\nrenamed as (\n\n select\n id as customer_id,\n first_name,\n last_name\n\n from source\n\n)\n\nselect * from renamed", "language": "sql", "refs": [ { - "name": "raw_orders", + "name": "raw_customers", "package": null, "version": null } @@ -723,12 +695,12 @@ "depends_on": { "macros": [], "nodes": [ - "seed.sandbox.raw_orders" + "seed.sandbox.raw_customers" ] }, - "compiled_path": "target/compiled/sandbox/models/staging/stg_orders.sql", + "compiled_path": "target/compiled/sandbox/models/staging/stg_customers.sql", "compiled": true, - "compiled_code": "with source as (\n select * from \"dbtmetabase\".\"public\".\"raw_orders\"\n\n),\n\nrenamed as (\n\n select\n id as order_id,\n user_id as customer_id,\n order_date,\n status\n\n from source\n\n)\n\nselect * from renamed", + "compiled_code": "with source as (\n select * from \"dbtmetabase\".\"public\".\"raw_customers\"\n\n),\n\nrenamed as (\n\n select\n id as customer_id,\n first_name,\n last_name\n\n from source\n\n)\n\nselect * from renamed", "extra_ctes_injected": true, "extra_ctes": [], "contract": { @@ -742,23 +714,24 @@ "latest_version": null, "deprecation_date": null }, - "seed.sandbox.raw_customers": { + "model.sandbox.stg_payments": { "database": "dbtmetabase", "schema": "public", - "name": "raw_customers", - "resource_type": "seed", + "name": "stg_payments", + "resource_type": "model", "package_name": "sandbox", - "path": "raw_customers.csv", - "original_file_path": "seeds/raw_customers.csv", - "unique_id": "seed.sandbox.raw_customers", + "path": "staging/stg_payments.sql", + "original_file_path": "models/staging/stg_payments.sql", + "unique_id": "model.sandbox.stg_payments", "fqn": [ "sandbox", - "raw_customers" + "staging", + "stg_payments" ], - "alias": "raw_customers", + "alias": "stg_payments", "checksum": { "name": "sha256", - "checksum": "357d173dda65a741ad97d6683502286cc2655bb396ab5f4dfad12b8c39bd2a63" + "checksum": "e9a45326cc72cf5bdc59163207bac2f3ed3697c1758d916b17327c7720110fcc" }, "config": { "enabled": true, @@ -768,7 +741,7 @@ "tags": [], "meta": {}, "group": null, - "materialized": "seed", + "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "post-hook": [], @@ -789,118 +762,113 @@ "enforced": false, "alias_types": true }, - "delimiter": ",", - "quote_columns": null + "access": "protected" }, "tags": [], "description": "", - "columns": {}, - "meta": {}, - "group": null, - "docs": { - "show": true, - "node_color": null - }, - "patch_path": null, - "build_path": null, - "deferred": false, - "unrendered_config": {}, - "created_at": 1706145554.9672556, - "relation_name": "\"dbtmetabase\".\"public\".\"raw_customers\"", - "raw_code": "", - "root_path": "/app/sandbox", - "depends_on": { - "macros": [] - } - }, - "seed.sandbox.raw_orders": { - "database": "dbtmetabase", - "schema": "public", - "name": "raw_orders", - "resource_type": "seed", - "package_name": "sandbox", - "path": "raw_orders.csv", - "original_file_path": "seeds/raw_orders.csv", - "unique_id": "seed.sandbox.raw_orders", - "fqn": [ - "sandbox", - "raw_orders" - ], - "alias": "raw_orders", - "checksum": { - "name": "sha256", - "checksum": "6228dde8e17b9621f35c13e272ec67d3ff55b55499433f47d303adf2be72c17f" - }, - "config": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "group": null, - "materialized": "seed", - "incremental_strategy": null, - "persist_docs": {}, - "post-hook": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "full_refresh": null, - 
"unique_key": null, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null + "columns": { + "payment_id": { + "name": "payment_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] }, - "contract": { - "enforced": false, - "alias_types": true + "payment_method": { + "name": "payment_method", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] }, - "delimiter": ",", - "quote_columns": null + "order_id": { + "name": "order_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "amount": { + "name": "amount", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } }, - "tags": [], - "description": "", - "columns": {}, "meta": {}, "group": null, "docs": { "show": true, "node_color": null }, - "patch_path": null, - "build_path": null, + "patch_path": "sandbox://models/staging/schema.yml", + "build_path": "target/run/sandbox/models/staging/stg_payments.sql", "deferred": false, - "unrendered_config": {}, - "created_at": 1706145554.9684632, - "relation_name": "\"dbtmetabase\".\"public\".\"raw_orders\"", - "raw_code": "", - "root_path": "/app/sandbox", + "unrendered_config": { + "materialized": "view" + }, + "created_at": 1708338357.5437949, + "relation_name": "\"dbtmetabase\".\"public\".\"stg_payments\"", + "raw_code": "with source as (\n \n {#-\n Normally we would select from the table here, but we are using seeds to load\n our data in this project\n #}\n select * from {{ ref('raw_payments') }}\n\n),\n\nrenamed as (\n\n select\n id as payment_id,\n order_id,\n payment_method,\n\n --`amount` is currently stored in cents, so we convert it to dollars\n amount / 100 as amount\n\n from source\n\n)\n\nselect * from renamed", + "language": "sql", + "refs": [ + { + "name": "raw_payments", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], "depends_on": { - "macros": [] - } + "macros": [], + "nodes": [ + "seed.sandbox.raw_payments" + ] + }, + "compiled_path": "target/compiled/sandbox/models/staging/stg_payments.sql", + "compiled": true, + "compiled_code": "with source as (\n select * from \"dbtmetabase\".\"public\".\"raw_payments\"\n\n),\n\nrenamed as (\n\n select\n id as payment_id,\n order_id,\n payment_method,\n\n --`amount` is currently stored in cents, so we convert it to dollars\n amount / 100 as amount\n\n from source\n\n)\n\nselect * from renamed", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { + "enforced": false, + "alias_types": true, + "checksum": null + }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null }, - "seed.sandbox.raw_payments": { + "model.sandbox.stg_orders": { "database": "dbtmetabase", "schema": "public", - "name": "raw_payments", - "resource_type": "seed", + "name": "stg_orders", + "resource_type": "model", "package_name": "sandbox", - "path": "raw_payments.csv", - "original_file_path": "seeds/raw_payments.csv", - "unique_id": "seed.sandbox.raw_payments", + "path": "staging/stg_orders.sql", + "original_file_path": "models/staging/stg_orders.sql", + "unique_id": "model.sandbox.stg_orders", "fqn": [ "sandbox", - "raw_payments" + "staging", + "stg_orders" ], - "alias": "raw_payments", + "alias": "stg_orders", "checksum": { "name": "sha256", - 
"checksum": "6de0626a8db9c1750eefd1b2e17fac4c2a4b9f778eb50532d8b377b90de395e6" + "checksum": "f4f881cb09d2c4162200fc331d7401df6d1abd4fed492554a7db70dede347108" }, "config": { "enabled": true, @@ -910,7 +878,7 @@ "tags": [], "meta": {}, "group": null, - "materialized": "seed", + "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "post-hook": [], @@ -931,182 +899,67 @@ "enforced": false, "alias_types": true }, - "delimiter": ",", - "quote_columns": null + "access": "protected" }, "tags": [], "description": "", - "columns": {}, - "meta": {}, - "group": null, - "docs": { - "show": true, - "node_color": null - }, - "patch_path": null, - "build_path": null, - "deferred": false, - "unrendered_config": {}, - "created_at": 1706145554.969442, - "relation_name": "\"dbtmetabase\".\"public\".\"raw_payments\"", - "raw_code": "", - "root_path": "/app/sandbox", - "depends_on": { - "macros": [] - } - }, - "test.sandbox.unique_customers_customer_id.c5af1ff4b1": { - "test_metadata": { - "name": "unique", - "kwargs": { - "column_name": "customer_id", - "model": "{{ get_where_subquery(ref('customers')) }}" + "columns": { + "order_id": { + "name": "order_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] }, - "namespace": null - }, - "database": "dbtmetabase", - "schema": "public_dbt_test__audit", - "name": "unique_customers_customer_id", - "resource_type": "test", - "package_name": "sandbox", - "path": "unique_customers_customer_id.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.unique_customers_customer_id.c5af1ff4b1", - "fqn": [ - "sandbox", - "unique_customers_customer_id" - ], - "alias": "unique_customers_customer_id", - "checksum": { - "name": "none", - "checksum": "" - }, - "config": { - "enabled": true, - "alias": null, - "schema": "dbt_test__audit", - "database": null, - "tags": [], - "meta": {}, - "group": null, - "materialized": "test", - "severity": "ERROR", - "store_failures": null, - "store_failures_as": null, - "where": null, - "limit": null, - "fail_calc": "count(*)", - "warn_if": "!= 0", - "error_if": "!= 0" - }, - "tags": [], - "description": "", - "columns": {}, - "meta": {}, - "group": null, - "docs": { - "show": true, - "node_color": null - }, - "patch_path": null, - "build_path": null, - "deferred": false, - "unrendered_config": {}, - "created_at": 1706145555.0420036, - "relation_name": null, - "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", - "language": "sql", - "refs": [ - { - "name": "customers", - "package": null, - "version": null - } - ], - "sources": [], - "metrics": [], - "depends_on": { - "macros": [ - "macro.dbt.test_unique" - ], - "nodes": [ - "model.sandbox.customers" - ] - }, - "compiled_path": null, - "contract": { - "enforced": false, - "alias_types": true, - "checksum": null - }, - "column_name": "customer_id", - "file_key_name": "models.customers", - "attached_node": "model.sandbox.customers" - }, - "test.sandbox.not_null_customers_customer_id.5c9bf9911d": { - "test_metadata": { - "name": "not_null", - "kwargs": { - "column_name": "customer_id", - "model": "{{ get_where_subquery(ref('customers')) }}" + "status": { + "name": "status", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] }, - "namespace": null - }, - "database": "dbtmetabase", - "schema": "public_dbt_test__audit", - "name": "not_null_customers_customer_id", - "resource_type": "test", - "package_name": "sandbox", - "path": 
"not_null_customers_customer_id.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_customers_customer_id.5c9bf9911d", - "fqn": [ - "sandbox", - "not_null_customers_customer_id" - ], - "alias": "not_null_customers_customer_id", - "checksum": { - "name": "none", - "checksum": "" - }, - "config": { - "enabled": true, - "alias": null, - "schema": "dbt_test__audit", - "database": null, - "tags": [], - "meta": {}, - "group": null, - "materialized": "test", - "severity": "ERROR", - "store_failures": null, - "store_failures_as": null, - "where": null, - "limit": null, - "fail_calc": "count(*)", - "warn_if": "!= 0", - "error_if": "!= 0" + "order_date": { + "name": "order_date", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "customer_id": { + "name": "customer_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } }, - "tags": [], - "description": "", - "columns": {}, "meta": {}, "group": null, "docs": { "show": true, "node_color": null }, - "patch_path": null, - "build_path": null, + "patch_path": "sandbox://models/staging/schema.yml", + "build_path": "target/run/sandbox/models/staging/stg_orders.sql", "deferred": false, - "unrendered_config": {}, - "created_at": 1706145555.043206, - "relation_name": null, - "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "unrendered_config": { + "materialized": "view" + }, + "created_at": 1708338357.5444813, + "relation_name": "\"dbtmetabase\".\"public\".\"stg_orders\"", + "raw_code": "with source as (\n\n {#-\n Normally we would select from the table here, but we are using seeds to load\n our data in this project\n #}\n select * from {{ ref('raw_orders') }}\n\n),\n\nrenamed as (\n\n select\n id as order_id,\n user_id as customer_id,\n order_date,\n status\n\n from source\n\n)\n\nselect * from renamed", "language": "sql", "refs": [ { - "name": "customers", + "name": "raw_orders", "package": null, "version": null } @@ -1114,45 +967,50 @@ "sources": [], "metrics": [], "depends_on": { - "macros": [ - "macro.dbt.test_not_null" - ], + "macros": [], "nodes": [ - "model.sandbox.customers" + "seed.sandbox.raw_orders" ] }, - "compiled_path": null, + "compiled_path": "target/compiled/sandbox/models/staging/stg_orders.sql", + "compiled": true, + "compiled_code": "with source as (\n select * from \"dbtmetabase\".\"public\".\"raw_orders\"\n\n),\n\nrenamed as (\n\n select\n id as order_id,\n user_id as customer_id,\n order_date,\n status\n\n from source\n\n)\n\nselect * from renamed", + "extra_ctes_injected": true, + "extra_ctes": [], "contract": { "enforced": false, "alias_types": true, "checksum": null }, - "column_name": "customer_id", - "file_key_name": "models.customers", - "attached_node": "model.sandbox.customers" + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null }, - "test.sandbox.unique_orders_order_id.fed79b3a6e": { + "test.sandbox.unique_stg_customers_customer_id.c7614daada": { "test_metadata": { "name": "unique", "kwargs": { - "column_name": "order_id", - "model": "{{ get_where_subquery(ref('orders')) }}" + "column_name": "customer_id", + "model": "{{ get_where_subquery(ref('stg_customers')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "unique_orders_order_id", + "name": "unique_stg_customers_customer_id", "resource_type": "test", "package_name": "sandbox", - 
"path": "unique_orders_order_id.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.unique_orders_order_id.fed79b3a6e", + "path": "unique_stg_customers_customer_id.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.unique_stg_customers_customer_id.c7614daada", "fqn": [ "sandbox", - "unique_orders_order_id" + "staging", + "unique_stg_customers_customer_id" ], - "alias": "unique_orders_order_id", + "alias": "unique_stg_customers_customer_id", "checksum": { "name": "none", "checksum": "" @@ -1188,13 +1046,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0442696, + "created_at": 1708338357.5923638, "relation_name": null, "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_customers", "package": null, "version": null } @@ -1206,7 +1064,7 @@ "macro.dbt.test_unique" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_customers" ] }, "compiled_path": null, @@ -1215,32 +1073,33 @@ "alias_types": true, "checksum": null }, - "column_name": "order_id", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "customer_id", + "file_key_name": "models.stg_customers", + "attached_node": "model.sandbox.stg_customers" }, - "test.sandbox.not_null_orders_order_id.cf6c17daed": { + "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa": { "test_metadata": { "name": "not_null", "kwargs": { - "column_name": "order_id", - "model": "{{ get_where_subquery(ref('orders')) }}" + "column_name": "customer_id", + "model": "{{ get_where_subquery(ref('stg_customers')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_orders_order_id", + "name": "not_null_stg_customers_customer_id", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_orders_order_id.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_order_id.cf6c17daed", + "path": "not_null_stg_customers_customer_id.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa", "fqn": [ "sandbox", - "not_null_orders_order_id" + "staging", + "not_null_stg_customers_customer_id" ], - "alias": "not_null_orders_order_id", + "alias": "not_null_stg_customers_customer_id", "checksum": { "name": "none", "checksum": "" @@ -1276,13 +1135,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0451243, + "created_at": 1708338357.5937386, "relation_name": null, "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_customers", "package": null, "version": null } @@ -1294,7 +1153,7 @@ "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_customers" ] }, "compiled_path": null, @@ -1303,32 +1162,33 @@ "alias_types": true, "checksum": null }, - "column_name": "order_id", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "customer_id", + "file_key_name": "models.stg_customers", + "attached_node": "model.sandbox.stg_customers" }, - "test.sandbox.not_null_orders_customer_id.c5f02694af": { + "test.sandbox.unique_stg_payments_payment_id.3744510712": { "test_metadata": { - "name": "not_null", + "name": "unique", "kwargs": { - "column_name": "customer_id", - "model": "{{ 
get_where_subquery(ref('orders')) }}" + "column_name": "payment_id", + "model": "{{ get_where_subquery(ref('stg_payments')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_orders_customer_id", + "name": "unique_stg_payments_payment_id", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_orders_customer_id.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_customer_id.c5f02694af", + "path": "unique_stg_payments_payment_id.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.unique_stg_payments_payment_id.3744510712", "fqn": [ "sandbox", - "not_null_orders_customer_id" + "staging", + "unique_stg_payments_payment_id" ], - "alias": "not_null_orders_customer_id", + "alias": "unique_stg_payments_payment_id", "checksum": { "name": "none", "checksum": "" @@ -1364,13 +1224,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.046102, + "created_at": 1708338357.595022, "relation_name": null, - "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_payments", "package": null, "version": null } @@ -1379,10 +1239,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_not_null" + "macro.dbt.test_unique" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_payments" ] }, "compiled_path": null, @@ -1391,34 +1251,33 @@ "alias_types": true, "checksum": null }, - "column_name": "customer_id", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "payment_id", + "file_key_name": "models.stg_payments", + "attached_node": "model.sandbox.stg_payments" }, - "test.sandbox.relationships_orders_customer_id__customer_id__ref_customers_.c6ec7f58f2": { + "test.sandbox.not_null_stg_payments_payment_id.c19cc50075": { "test_metadata": { - "name": "relationships", + "name": "not_null", "kwargs": { - "to": "ref('customers')", - "field": "customer_id", - "column_name": "customer_id", - "model": "{{ get_where_subquery(ref('orders')) }}" + "column_name": "payment_id", + "model": "{{ get_where_subquery(ref('stg_payments')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "relationships_orders_customer_id__customer_id__ref_customers_", + "name": "not_null_stg_payments_payment_id", "resource_type": "test", "package_name": "sandbox", - "path": "relationships_orders_customer_id__customer_id__ref_customers_.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.relationships_orders_customer_id__customer_id__ref_customers_.c6ec7f58f2", + "path": "not_null_stg_payments_payment_id.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.not_null_stg_payments_payment_id.c19cc50075", "fqn": [ "sandbox", - "relationships_orders_customer_id__customer_id__ref_customers_" + "staging", + "not_null_stg_payments_payment_id" ], - "alias": "relationships_orders_customer_id__customer_id__ref_customers_", + "alias": "not_null_stg_payments_payment_id", "checksum": { "name": "none", "checksum": "" @@ -1454,18 +1313,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0468988, + "created_at": 1708338357.5963225, "relation_name": null, - "raw_code": "{{ test_relationships(**_dbt_generic_test_kwargs) }}", + 
"raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "customers", - "package": null, - "version": null - }, - { - "name": "orders", + "name": "stg_payments", "package": null, "version": null } @@ -1474,12 +1328,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_relationships", - "macro.dbt.get_where_subquery" + "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.customers", - "model.sandbox.orders" + "model.sandbox.stg_payments" ] }, "compiled_path": null, @@ -1488,46 +1340,46 @@ "alias_types": true, "checksum": null }, - "column_name": "customer_id", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "payment_id", + "file_key_name": "models.stg_payments", + "attached_node": "model.sandbox.stg_payments" }, - "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3": { + "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278": { "test_metadata": { "name": "accepted_values", "kwargs": { "values": [ - "placed", - "shipped", - "completed", - "return_pending", - "returned" + "credit_card", + "coupon", + "bank_transfer", + "gift_card" ], - "column_name": "status", - "model": "{{ get_where_subquery(ref('orders')) }}" + "column_name": "payment_method", + "model": "{{ get_where_subquery(ref('stg_payments')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "accepted_values_orders_status__placed__shipped__completed__return_pending__returned", + "name": "accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card", "resource_type": "test", "package_name": "sandbox", - "path": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3", + "path": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278", "fqn": [ "sandbox", - "accepted_values_orders_status__placed__shipped__completed__return_pending__returned" + "staging", + "accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card" ], - "alias": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758", + "alias": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef", "checksum": { "name": "none", "checksum": "" }, "config": { "enabled": true, - "alias": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758", + "alias": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef", "schema": "dbt_test__audit", "database": null, "tags": [], @@ -1556,15 +1408,15 @@ "build_path": null, "deferred": false, "unrendered_config": { - "alias": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758" + "alias": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef" }, - "created_at": 1706145555.0521317, + "created_at": 1708338357.5973942, "relation_name": null, - "raw_code": "{{ test_accepted_values(**_dbt_generic_test_kwargs) }}{{ config(alias=\"accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758\") }}", + "raw_code": "{{ test_accepted_values(**_dbt_generic_test_kwargs) }}{{ 
config(alias=\"accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef\") }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_payments", "package": null, "version": null } @@ -1577,7 +1429,7 @@ "macro.dbt.get_where_subquery" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_payments" ] }, "compiled_path": null, @@ -1586,32 +1438,33 @@ "alias_types": true, "checksum": null }, - "column_name": "status", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "payment_method", + "file_key_name": "models.stg_payments", + "attached_node": "model.sandbox.stg_payments" }, - "test.sandbox.not_null_orders_amount.106140f9fd": { + "test.sandbox.unique_stg_orders_order_id.e3b841c71a": { "test_metadata": { - "name": "not_null", + "name": "unique", "kwargs": { - "column_name": "amount", - "model": "{{ get_where_subquery(ref('orders')) }}" + "column_name": "order_id", + "model": "{{ get_where_subquery(ref('stg_orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_orders_amount", + "name": "unique_stg_orders_order_id", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_orders_amount.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_amount.106140f9fd", + "path": "unique_stg_orders_order_id.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.unique_stg_orders_order_id.e3b841c71a", "fqn": [ "sandbox", - "not_null_orders_amount" + "staging", + "unique_stg_orders_order_id" ], - "alias": "not_null_orders_amount", + "alias": "unique_stg_orders_order_id", "checksum": { "name": "none", "checksum": "" @@ -1647,13 +1500,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0571928, + "created_at": 1708338357.6061413, "relation_name": null, - "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_orders", "package": null, "version": null } @@ -1662,10 +1515,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_not_null" + "macro.dbt.test_unique" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_orders" ] }, "compiled_path": null, @@ -1674,32 +1527,33 @@ "alias_types": true, "checksum": null }, - "column_name": "amount", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "order_id", + "file_key_name": "models.stg_orders", + "attached_node": "model.sandbox.stg_orders" }, - "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59": { + "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64": { "test_metadata": { "name": "not_null", "kwargs": { - "column_name": "credit_card_amount", - "model": "{{ get_where_subquery(ref('orders')) }}" + "column_name": "order_id", + "model": "{{ get_where_subquery(ref('stg_orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_orders_credit_card_amount", + "name": "not_null_stg_orders_order_id", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_orders_credit_card_amount.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59", + "path": "not_null_stg_orders_order_id.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": 
"test.sandbox.not_null_stg_orders_order_id.81cfe2fe64", "fqn": [ "sandbox", - "not_null_orders_credit_card_amount" + "staging", + "not_null_stg_orders_order_id" ], - "alias": "not_null_orders_credit_card_amount", + "alias": "not_null_stg_orders_order_id", "checksum": { "name": "none", "checksum": "" @@ -1735,13 +1589,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0580268, + "created_at": 1708338357.6072319, "relation_name": null, "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_orders", "package": null, "version": null } @@ -1753,7 +1607,7 @@ "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_orders" ] }, "compiled_path": null, @@ -1762,39 +1616,47 @@ "alias_types": true, "checksum": null }, - "column_name": "credit_card_amount", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "order_id", + "file_key_name": "models.stg_orders", + "attached_node": "model.sandbox.stg_orders" }, - "test.sandbox.not_null_orders_coupon_amount.ab90c90625": { + "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad": { "test_metadata": { - "name": "not_null", + "name": "accepted_values", "kwargs": { - "column_name": "coupon_amount", - "model": "{{ get_where_subquery(ref('orders')) }}" + "values": [ + "placed", + "shipped", + "completed", + "return_pending", + "returned" + ], + "column_name": "status", + "model": "{{ get_where_subquery(ref('stg_orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_orders_coupon_amount", + "name": "accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_orders_coupon_amount.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_coupon_amount.ab90c90625", + "path": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58.sql", + "original_file_path": "models/staging/schema.yml", + "unique_id": "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad", "fqn": [ "sandbox", - "not_null_orders_coupon_amount" + "staging", + "accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned" ], - "alias": "not_null_orders_coupon_amount", + "alias": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58", "checksum": { "name": "none", "checksum": "" }, "config": { "enabled": true, - "alias": null, + "alias": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58", "schema": "dbt_test__audit", "database": null, "tags": [], @@ -1822,14 +1684,16 @@ "patch_path": null, "build_path": null, "deferred": false, - "unrendered_config": {}, - "created_at": 1706145555.0588226, + "unrendered_config": { + "alias": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58" + }, + "created_at": 1708338357.6084142, "relation_name": null, - "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_accepted_values(**_dbt_generic_test_kwargs) }}{{ config(alias=\"accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58\") }}", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_orders", "package": null, "version": null } @@ -1838,10 +1702,11 @@ "metrics": [], "depends_on": { "macros": [ - 
"macro.dbt.test_not_null" + "macro.dbt.test_accepted_values", + "macro.dbt.get_where_subquery" ], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_orders" ] }, "compiled_path": null, @@ -1850,74 +1715,194 @@ "alias_types": true, "checksum": null }, - "column_name": "coupon_amount", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "column_name": "status", + "file_key_name": "models.stg_orders", + "attached_node": "model.sandbox.stg_orders" }, - "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49": { - "test_metadata": { - "name": "not_null", - "kwargs": { - "column_name": "bank_transfer_amount", - "model": "{{ get_where_subquery(ref('orders')) }}" - }, - "namespace": null - }, + "model.sandbox.orders": { "database": "dbtmetabase", - "schema": "public_dbt_test__audit", - "name": "not_null_orders_bank_transfer_amount", - "resource_type": "test", + "schema": "public", + "name": "orders", + "resource_type": "model", "package_name": "sandbox", - "path": "not_null_orders_bank_transfer_amount.sql", - "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49", + "path": "orders.sql", + "original_file_path": "models/orders.sql", + "unique_id": "model.sandbox.orders", "fqn": [ "sandbox", - "not_null_orders_bank_transfer_amount" + "orders" ], - "alias": "not_null_orders_bank_transfer_amount", + "alias": "orders", "checksum": { - "name": "none", - "checksum": "" + "name": "sha256", + "checksum": "9a13423dec138c8cedf1eb7e03f4ad86be3b378ef088ac9ecc09328b76d8986e" }, "config": { "enabled": true, "alias": null, - "schema": "dbt_test__audit", + "schema": null, "database": null, "tags": [], - "meta": {}, + "meta": { + "metabase.points_of_interest": "Basic information only", + "metabase.caveats": "Some facts are derived from payments" + }, "group": null, - "materialized": "test", - "severity": "ERROR", - "store_failures": null, - "store_failures_as": null, - "where": null, - "limit": null, - "fail_calc": "count(*)", - "warn_if": "!= 0", - "error_if": "!= 0" + "materialized": "table", + "incremental_strategy": null, + "persist_docs": {}, + "post-hook": [], + "pre-hook": [], + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "contract": { + "enforced": false, + "alias_types": true + }, + "access": "protected" }, "tags": [], - "description": "", - "columns": {}, - "meta": {}, + "description": "This table has basic information about orders, as well as some derived facts based on payments", + "columns": { + "order_id": { + "name": "order_id", + "description": "This is a unique identifier for an order", + "meta": {}, + "data_type": null, + "constraints": [ + { + "type": "primary_key", + "name": null, + "expression": null, + "warn_unenforced": true, + "warn_unsupported": true + } + ], + "quote": null, + "tags": [] + }, + "customer_id": { + "name": "customer_id", + "description": "Foreign key to the customers table", + "meta": {}, + "data_type": null, + "constraints": [ + { + "type": "foreign_key", + "name": null, + "expression": "customers (customer_id)", + "warn_unenforced": true, + "warn_unsupported": true + } + ], + "quote": null, + "tags": [] + }, + "order_date": { + "name": "order_date", + "description": "Date (UTC) that the order was placed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": 
null, + "tags": [] + }, + "status": { + "name": "status", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "amount": { + "name": "amount", + "description": "Total amount (AUD) of the order", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credit_card_amount": { + "name": "credit_card_amount", + "description": "Amount of the order (AUD) paid for by credit card", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "coupon_amount": { + "name": "coupon_amount", + "description": "Amount of the order (AUD) paid for by coupon", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bank_transfer_amount": { + "name": "bank_transfer_amount", + "description": "Amount of the order (AUD) paid for by bank transfer", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "gift_card_amount": { + "name": "gift_card_amount", + "description": "Amount of the order (AUD) paid for by gift card", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": { + "metabase.points_of_interest": "Basic information only", + "metabase.caveats": "Some facts are derived from payments" + }, "group": null, "docs": { "show": true, "node_color": null }, - "patch_path": null, - "build_path": null, + "patch_path": "sandbox://models/schema.yml", + "build_path": "target/run/sandbox/models/orders.sql", "deferred": false, - "unrendered_config": {}, - "created_at": 1706145555.0596151, - "relation_name": null, - "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "unrendered_config": { + "materialized": "table", + "meta": { + "metabase.points_of_interest": "Basic information only", + "metabase.caveats": "Some facts are derived from payments" + } + }, + "created_at": 1708391489.32839, + "relation_name": "\"dbtmetabase\".\"public\".\"orders\"", + "raw_code": "{% set payment_methods = ['credit_card', 'coupon', 'bank_transfer', 'gift_card'] %}\n\nwith orders as (\n\n select * from {{ ref('stg_orders') }}\n\n),\n\npayments as (\n\n select * from {{ ref('stg_payments') }}\n\n),\n\norder_payments as (\n\n select\n order_id,\n\n {% for payment_method in payment_methods -%}\n sum(case when payment_method = '{{ payment_method }}' then amount else 0 end) as {{ payment_method }}_amount,\n {% endfor -%}\n\n sum(amount) as total_amount\n\n from payments\n\n group by 1\n\n),\n\nfinal as (\n\n select\n orders.order_id,\n orders.customer_id,\n orders.order_date,\n orders.status,\n\n {% for payment_method in payment_methods -%}\n\n order_payments.{{ payment_method }}_amount,\n\n {% endfor -%}\n\n order_payments.total_amount as amount\n\n from orders\n\n left join order_payments using (order_id)\n\n)\n\nselect * from final", "language": "sql", "refs": [ { - "name": "orders", + "name": "stg_orders", + "package": null, + "version": null + }, + { + "name": "stg_payments", "package": null, "version": null } @@ -1925,45 +1910,50 @@ "sources": [], "metrics": [], "depends_on": { - "macros": [ - "macro.dbt.test_not_null" - ], + "macros": [], "nodes": [ - "model.sandbox.orders" + "model.sandbox.stg_orders", + "model.sandbox.stg_payments" ] }, - "compiled_path": null, + "compiled_path": "target/compiled/sandbox/models/orders.sql", + "compiled": true, + "compiled_code": "\n\nwith orders as (\n\n select * from \"dbtmetabase\".\"public\".\"stg_orders\"\n\n),\n\npayments as (\n\n 
select * from \"dbtmetabase\".\"public\".\"stg_payments\"\n\n),\n\norder_payments as (\n\n select\n order_id,\n\n sum(case when payment_method = 'credit_card' then amount else 0 end) as credit_card_amount,\n sum(case when payment_method = 'coupon' then amount else 0 end) as coupon_amount,\n sum(case when payment_method = 'bank_transfer' then amount else 0 end) as bank_transfer_amount,\n sum(case when payment_method = 'gift_card' then amount else 0 end) as gift_card_amount,\n sum(amount) as total_amount\n\n from payments\n\n group by 1\n\n),\n\nfinal as (\n\n select\n orders.order_id,\n orders.customer_id,\n orders.order_date,\n orders.status,\n\n order_payments.credit_card_amount,\n\n order_payments.coupon_amount,\n\n order_payments.bank_transfer_amount,\n\n order_payments.gift_card_amount,\n\n order_payments.total_amount as amount\n\n from orders\n\n left join order_payments using (order_id)\n\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], "contract": { "enforced": false, "alias_types": true, "checksum": null }, - "column_name": "bank_transfer_amount", - "file_key_name": "models.orders", - "attached_node": "model.sandbox.orders" + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null }, - "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a": { + "test.sandbox.unique_orders_order_id.fed79b3a6e": { "test_metadata": { - "name": "not_null", + "name": "unique", "kwargs": { - "column_name": "gift_card_amount", + "column_name": "order_id", "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_orders_gift_card_amount", + "name": "unique_orders_order_id", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_orders_gift_card_amount.sql", + "path": "unique_orders_order_id.sql", "original_file_path": "models/schema.yml", - "unique_id": "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a", + "unique_id": "test.sandbox.unique_orders_order_id.fed79b3a6e", "fqn": [ "sandbox", - "not_null_orders_gift_card_amount" + "unique_orders_order_id" ], - "alias": "not_null_orders_gift_card_amount", + "alias": "unique_orders_order_id", "checksum": { "name": "none", "checksum": "" @@ -1999,9 +1989,9 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0603943, + "created_at": 1708391489.3618965, "relation_name": null, - "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { @@ -2014,7 +2004,7 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_not_null" + "macro.dbt.test_unique" ], "nodes": [ "model.sandbox.orders" @@ -2026,33 +2016,32 @@ "alias_types": true, "checksum": null }, - "column_name": "gift_card_amount", + "column_name": "order_id", "file_key_name": "models.orders", "attached_node": "model.sandbox.orders" }, - "test.sandbox.unique_stg_customers_customer_id.c7614daada": { + "test.sandbox.not_null_orders_order_id.cf6c17daed": { "test_metadata": { - "name": "unique", + "name": "not_null", "kwargs": { - "column_name": "customer_id", - "model": "{{ get_where_subquery(ref('stg_customers')) }}" + "column_name": "order_id", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "unique_stg_customers_customer_id", + "name": 
"not_null_orders_order_id", "resource_type": "test", "package_name": "sandbox", - "path": "unique_stg_customers_customer_id.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.unique_stg_customers_customer_id.c7614daada", + "path": "not_null_orders_order_id.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_order_id.cf6c17daed", "fqn": [ "sandbox", - "staging", - "unique_stg_customers_customer_id" + "not_null_orders_order_id" ], - "alias": "unique_stg_customers_customer_id", + "alias": "not_null_orders_order_id", "checksum": { "name": "none", "checksum": "" @@ -2088,13 +2077,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.062559, + "created_at": 1708391489.362866, "relation_name": null, - "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_customers", + "name": "orders", "package": null, "version": null } @@ -2103,10 +2092,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_unique" + "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_customers" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2115,33 +2104,32 @@ "alias_types": true, "checksum": null }, - "column_name": "customer_id", - "file_key_name": "models.stg_customers", - "attached_node": "model.sandbox.stg_customers" + "column_name": "order_id", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa": { + "test.sandbox.not_null_orders_customer_id.c5f02694af": { "test_metadata": { "name": "not_null", "kwargs": { "column_name": "customer_id", - "model": "{{ get_where_subquery(ref('stg_customers')) }}" + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_stg_customers_customer_id", + "name": "not_null_orders_customer_id", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_stg_customers_customer_id.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa", + "path": "not_null_orders_customer_id.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_customer_id.c5f02694af", "fqn": [ "sandbox", - "staging", - "not_null_stg_customers_customer_id" + "not_null_orders_customer_id" ], - "alias": "not_null_stg_customers_customer_id", + "alias": "not_null_orders_customer_id", "checksum": { "name": "none", "checksum": "" @@ -2177,13 +2165,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0633464, + "created_at": 1708391489.3637223, "relation_name": null, "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_customers", + "name": "orders", "package": null, "version": null } @@ -2195,7 +2183,7 @@ "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_customers" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2205,39 +2193,45 @@ "checksum": null }, "column_name": "customer_id", - "file_key_name": "models.stg_customers", - "attached_node": "model.sandbox.stg_customers" + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.unique_stg_orders_order_id.e3b841c71a": { + 
"test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3": { "test_metadata": { - "name": "unique", + "name": "accepted_values", "kwargs": { - "column_name": "order_id", - "model": "{{ get_where_subquery(ref('stg_orders')) }}" + "values": [ + "placed", + "shipped", + "completed", + "return_pending", + "returned" + ], + "column_name": "status", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "unique_stg_orders_order_id", + "name": "accepted_values_orders_status__placed__shipped__completed__return_pending__returned", "resource_type": "test", "package_name": "sandbox", - "path": "unique_stg_orders_order_id.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.unique_stg_orders_order_id.e3b841c71a", + "path": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3", "fqn": [ "sandbox", - "staging", - "unique_stg_orders_order_id" + "accepted_values_orders_status__placed__shipped__completed__return_pending__returned" ], - "alias": "unique_stg_orders_order_id", + "alias": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758", "checksum": { "name": "none", "checksum": "" }, "config": { "enabled": true, - "alias": null, + "alias": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758", "schema": "dbt_test__audit", "database": null, "tags": [], @@ -2265,14 +2259,16 @@ "patch_path": null, "build_path": null, "deferred": false, - "unrendered_config": {}, - "created_at": 1706145555.064111, + "unrendered_config": { + "alias": "accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758" + }, + "created_at": 1708391489.3645678, "relation_name": null, - "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_accepted_values(**_dbt_generic_test_kwargs) }}{{ config(alias=\"accepted_values_orders_1ce6ab157c285f7cd2ac656013faf758\") }}", "language": "sql", "refs": [ { - "name": "stg_orders", + "name": "orders", "package": null, "version": null } @@ -2281,10 +2277,11 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_unique" + "macro.dbt.test_accepted_values", + "macro.dbt.get_where_subquery" ], "nodes": [ - "model.sandbox.stg_orders" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2293,33 +2290,32 @@ "alias_types": true, "checksum": null }, - "column_name": "order_id", - "file_key_name": "models.stg_orders", - "attached_node": "model.sandbox.stg_orders" + "column_name": "status", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64": { + "test.sandbox.not_null_orders_amount.106140f9fd": { "test_metadata": { "name": "not_null", "kwargs": { - "column_name": "order_id", - "model": "{{ get_where_subquery(ref('stg_orders')) }}" + "column_name": "amount", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_stg_orders_order_id", + "name": "not_null_orders_amount", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_stg_orders_order_id.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64", + "path": 
"not_null_orders_amount.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_amount.106140f9fd", "fqn": [ "sandbox", - "staging", - "not_null_stg_orders_order_id" + "not_null_orders_amount" ], - "alias": "not_null_stg_orders_order_id", + "alias": "not_null_orders_amount", "checksum": { "name": "none", "checksum": "" @@ -2355,13 +2351,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.064856, + "created_at": 1708391489.3704727, "relation_name": null, "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_orders", + "name": "orders", "package": null, "version": null } @@ -2373,7 +2369,7 @@ "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_orders" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2382,47 +2378,39 @@ "alias_types": true, "checksum": null }, - "column_name": "order_id", - "file_key_name": "models.stg_orders", - "attached_node": "model.sandbox.stg_orders" + "column_name": "amount", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad": { + "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59": { "test_metadata": { - "name": "accepted_values", + "name": "not_null", "kwargs": { - "values": [ - "placed", - "shipped", - "completed", - "return_pending", - "returned" - ], - "column_name": "status", - "model": "{{ get_where_subquery(ref('stg_orders')) }}" + "column_name": "credit_card_amount", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned", + "name": "not_null_orders_credit_card_amount", "resource_type": "test", "package_name": "sandbox", - "path": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad", + "path": "not_null_orders_credit_card_amount.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59", "fqn": [ "sandbox", - "staging", - "accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned" + "not_null_orders_credit_card_amount" ], - "alias": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58", + "alias": "not_null_orders_credit_card_amount", "checksum": { "name": "none", "checksum": "" }, "config": { "enabled": true, - "alias": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58", + "alias": null, "schema": "dbt_test__audit", "database": null, "tags": [], @@ -2450,16 +2438,14 @@ "patch_path": null, "build_path": null, "deferred": false, - "unrendered_config": { - "alias": "accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58" - }, - "created_at": 1706145555.0657606, + "unrendered_config": {}, + "created_at": 1708391489.3713634, "relation_name": null, - "raw_code": "{{ test_accepted_values(**_dbt_generic_test_kwargs) }}{{ config(alias=\"accepted_values_stg_orders_4f514bf94b77b7ea437830eec4421c58\") }}", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_orders", + "name": "orders", "package": null, "version": 
null } @@ -2468,11 +2454,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_accepted_values", - "macro.dbt.get_where_subquery" + "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_orders" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2481,33 +2466,32 @@ "alias_types": true, "checksum": null }, - "column_name": "status", - "file_key_name": "models.stg_orders", - "attached_node": "model.sandbox.stg_orders" + "column_name": "credit_card_amount", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.unique_stg_payments_payment_id.3744510712": { + "test.sandbox.not_null_orders_coupon_amount.ab90c90625": { "test_metadata": { - "name": "unique", + "name": "not_null", "kwargs": { - "column_name": "payment_id", - "model": "{{ get_where_subquery(ref('stg_payments')) }}" + "column_name": "coupon_amount", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "unique_stg_payments_payment_id", + "name": "not_null_orders_coupon_amount", "resource_type": "test", "package_name": "sandbox", - "path": "unique_stg_payments_payment_id.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.unique_stg_payments_payment_id.3744510712", + "path": "not_null_orders_coupon_amount.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_coupon_amount.ab90c90625", "fqn": [ "sandbox", - "staging", - "unique_stg_payments_payment_id" + "not_null_orders_coupon_amount" ], - "alias": "unique_stg_payments_payment_id", + "alias": "not_null_orders_coupon_amount", "checksum": { "name": "none", "checksum": "" @@ -2543,13 +2527,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0677629, + "created_at": 1708391489.3721504, "relation_name": null, - "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_payments", + "name": "orders", "package": null, "version": null } @@ -2558,10 +2542,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_unique" + "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_payments" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2570,33 +2554,32 @@ "alias_types": true, "checksum": null }, - "column_name": "payment_id", - "file_key_name": "models.stg_payments", - "attached_node": "model.sandbox.stg_payments" + "column_name": "coupon_amount", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.not_null_stg_payments_payment_id.c19cc50075": { + "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49": { "test_metadata": { "name": "not_null", "kwargs": { - "column_name": "payment_id", - "model": "{{ get_where_subquery(ref('stg_payments')) }}" + "column_name": "bank_transfer_amount", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "not_null_stg_payments_payment_id", + "name": "not_null_orders_bank_transfer_amount", "resource_type": "test", "package_name": "sandbox", - "path": "not_null_stg_payments_payment_id.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.not_null_stg_payments_payment_id.c19cc50075", + "path": "not_null_orders_bank_transfer_amount.sql", + "original_file_path": 
"models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49", "fqn": [ "sandbox", - "staging", - "not_null_stg_payments_payment_id" + "not_null_orders_bank_transfer_amount" ], - "alias": "not_null_stg_payments_payment_id", + "alias": "not_null_orders_bank_transfer_amount", "checksum": { "name": "none", "checksum": "" @@ -2632,13 +2615,13 @@ "build_path": null, "deferred": false, "unrendered_config": {}, - "created_at": 1706145555.0685904, + "created_at": 1708391489.3729131, "relation_name": null, "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_payments", + "name": "orders", "package": null, "version": null } @@ -2650,7 +2633,7 @@ "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_payments" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2659,46 +2642,39 @@ "alias_types": true, "checksum": null }, - "column_name": "payment_id", - "file_key_name": "models.stg_payments", - "attached_node": "model.sandbox.stg_payments" + "column_name": "bank_transfer_amount", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" }, - "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278": { + "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a": { "test_metadata": { - "name": "accepted_values", + "name": "not_null", "kwargs": { - "values": [ - "credit_card", - "coupon", - "bank_transfer", - "gift_card" - ], - "column_name": "payment_method", - "model": "{{ get_where_subquery(ref('stg_payments')) }}" + "column_name": "gift_card_amount", + "model": "{{ get_where_subquery(ref('orders')) }}" }, "namespace": null }, "database": "dbtmetabase", "schema": "public_dbt_test__audit", - "name": "accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card", + "name": "not_null_orders_gift_card_amount", "resource_type": "test", "package_name": "sandbox", - "path": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef.sql", - "original_file_path": "models/staging/schema.yml", - "unique_id": "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278", + "path": "not_null_orders_gift_card_amount.sql", + "original_file_path": "models/schema.yml", + "unique_id": "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a", "fqn": [ "sandbox", - "staging", - "accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card" + "not_null_orders_gift_card_amount" ], - "alias": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef", + "alias": "not_null_orders_gift_card_amount", "checksum": { "name": "none", "checksum": "" }, "config": { "enabled": true, - "alias": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef", + "alias": null, "schema": "dbt_test__audit", "database": null, "tags": [], @@ -2726,16 +2702,14 @@ "patch_path": null, "build_path": null, "deferred": false, - "unrendered_config": { - "alias": "accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef" - }, - "created_at": 1706145555.0700967, + "unrendered_config": {}, + "created_at": 1708391489.3737113, "relation_name": null, - "raw_code": "{{ test_accepted_values(**_dbt_generic_test_kwargs) }}{{ config(alias=\"accepted_values_stg_payments_c7909fb19b1f0177c2bf99c7912f06ef\") }}", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", "language": "sql", "refs": [ { - "name": "stg_payments", + "name": "orders", 
"package": null, "version": null } @@ -2744,11 +2718,10 @@ "metrics": [], "depends_on": { "macros": [ - "macro.dbt.test_accepted_values", - "macro.dbt.get_where_subquery" + "macro.dbt.test_not_null" ], "nodes": [ - "model.sandbox.stg_payments" + "model.sandbox.orders" ] }, "compiled_path": null, @@ -2757,9 +2730,9 @@ "alias_types": true, "checksum": null }, - "column_name": "payment_method", - "file_key_name": "models.stg_payments", - "attached_node": "model.sandbox.stg_payments" + "column_name": "gift_card_amount", + "file_key_name": "models.orders", + "attached_node": "model.sandbox.orders" } }, "sources": {}, @@ -2783,7 +2756,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5681622, + "created_at": 1708330298.500109, "supported_languages": null }, "macro.dbt_postgres.postgres__snapshot_string_as_time": { @@ -2805,7 +2778,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5683513, + "created_at": 1708330298.50035, "supported_languages": null }, "macro.dbt_postgres.postgres__snapshot_get_time": { @@ -2829,7 +2802,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.568444, + "created_at": 1708330298.5004683, "supported_languages": null }, "macro.dbt_postgres.postgres__current_timestamp_backcompat": { @@ -2853,7 +2826,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.568544, + "created_at": 1708330298.5006177, "supported_languages": null }, "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat": { @@ -2877,7 +2850,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5686326, + "created_at": 1708330298.500729, "supported_languages": null }, "macro.dbt_postgres.postgres__create_table_as": { @@ -2904,7 +2877,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5763297, + "created_at": 1708330298.5101242, "supported_languages": null }, "macro.dbt_postgres.postgres__get_create_index_sql": { @@ -2926,7 +2899,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5767663, + "created_at": 1708330298.5106356, "supported_languages": null }, "macro.dbt_postgres.postgres__create_schema": { @@ -2950,7 +2923,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5770383, + "created_at": 1708330298.510947, "supported_languages": null }, "macro.dbt_postgres.postgres__drop_schema": { @@ -2974,7 +2947,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5772965, + "created_at": 1708330298.5112536, "supported_languages": null }, "macro.dbt_postgres.postgres__get_columns_in_relation": { @@ -2999,7 +2972,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5776834, + "created_at": 1708330298.5117238, "supported_languages": null }, "macro.dbt_postgres.postgres__list_relations_without_caching": { @@ -3023,7 +2996,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5781384, + "created_at": 1708330298.512267, "supported_languages": null }, "macro.dbt_postgres.postgres__information_schema_name": { @@ -3045,7 +3018,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.578278, + "created_at": 1708330298.5124376, "supported_languages": null }, "macro.dbt_postgres.postgres__list_schemas": { @@ -3069,7 +3042,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5785592, + "created_at": 1708330298.512787, "supported_languages": null }, "macro.dbt_postgres.postgres__check_schema_exists": { @@ -3093,7 +3066,7 @@ }, "patch_path": null, "arguments": [], - 
"created_at": 1706145554.578856, + "created_at": 1708330298.513163, "supported_languages": null }, "macro.dbt_postgres.postgres__make_relation_with_suffix": { @@ -3115,7 +3088,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5795238, + "created_at": 1708330298.5140045, "supported_languages": null }, "macro.dbt_postgres.postgres__make_intermediate_relation": { @@ -3139,7 +3112,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5796766, + "created_at": 1708330298.514195, "supported_languages": null }, "macro.dbt_postgres.postgres__make_temp_relation": { @@ -3163,7 +3136,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5799258, + "created_at": 1708330298.514516, "supported_languages": null }, "macro.dbt_postgres.postgres__make_backup_relation": { @@ -3187,7 +3160,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.580137, + "created_at": 1708330298.5147827, "supported_languages": null }, "macro.dbt_postgres.postgres_escape_comment": { @@ -3209,7 +3182,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5804667, + "created_at": 1708330298.515186, "supported_languages": null }, "macro.dbt_postgres.postgres__alter_relation_comment": { @@ -3233,7 +3206,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5806398, + "created_at": 1708330298.5154004, "supported_languages": null }, "macro.dbt_postgres.postgres__alter_column_comment": { @@ -3257,7 +3230,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5810978, + "created_at": 1708330298.5159972, "supported_languages": null }, "macro.dbt_postgres.postgres__get_show_grant_sql": { @@ -3279,7 +3252,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5812597, + "created_at": 1708330298.5162005, "supported_languages": null }, "macro.dbt_postgres.postgres__copy_grants": { @@ -3301,7 +3274,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5813498, + "created_at": 1708330298.516311, "supported_languages": null }, "macro.dbt_postgres.postgres__get_show_indexes_sql": { @@ -3323,7 +3296,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5815203, + "created_at": 1708330298.5165095, "supported_languages": null }, "macro.dbt_postgres.postgres__get_drop_index_sql": { @@ -3345,7 +3318,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5816274, + "created_at": 1708330298.5166447, "supported_languages": null }, "macro.dbt_postgres.postgres__get_catalog_relations": { @@ -3369,7 +3342,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5828412, + "created_at": 1708330298.5183465, "supported_languages": null }, "macro.dbt_postgres.postgres__get_catalog": { @@ -3393,7 +3366,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.583115, + "created_at": 1708330298.5188854, "supported_languages": null }, "macro.dbt_postgres.postgres__get_relations": { @@ -3417,7 +3390,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5836577, + "created_at": 1708330298.519621, "supported_languages": null }, "macro.dbt_postgres.postgres_get_relations": { @@ -3441,7 +3414,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5837507, + "created_at": 1708330298.519743, "supported_languages": null }, "macro.dbt_postgres.postgres__get_incremental_default_sql": { @@ -3466,7 +3439,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5840359, + "created_at": 1708330298.5201066, 
"supported_languages": null }, "macro.dbt_postgres.postgres__snapshot_merge_sql": { @@ -3488,7 +3461,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5845869, + "created_at": 1708330298.5208235, "supported_languages": null }, "macro.dbt_postgres.postgres__get_replace_view_sql": { @@ -3512,7 +3485,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5850036, + "created_at": 1708330298.5213654, "supported_languages": null }, "macro.dbt_postgres.postgres__drop_view": { @@ -3534,7 +3507,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.585122, + "created_at": 1708330298.5215158, "supported_languages": null }, "macro.dbt_postgres.postgres__get_rename_view_sql": { @@ -3556,7 +3529,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5852513, + "created_at": 1708330298.5216765, "supported_languages": null }, "macro.dbt_postgres.postgres__get_replace_table_sql": { @@ -3582,7 +3555,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5858848, + "created_at": 1708330298.5223463, "supported_languages": null }, "macro.dbt_postgres.postgres__drop_table": { @@ -3604,7 +3577,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.58603, + "created_at": 1708330298.5224812, "supported_languages": null }, "macro.dbt_postgres.postgres__get_rename_table_sql": { @@ -3626,7 +3599,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5861673, + "created_at": 1708330298.5226514, "supported_languages": null }, "macro.dbt_postgres.postgres__refresh_materialized_view": { @@ -3648,7 +3621,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5862722, + "created_at": 1708330298.5227792, "supported_languages": null }, "macro.dbt_postgres.postgres__describe_materialized_view": { @@ -3673,7 +3646,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.586539, + "created_at": 1708330298.523064, "supported_languages": null }, "macro.dbt_postgres.postgres__drop_materialized_view": { @@ -3695,7 +3668,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.586741, + "created_at": 1708330298.52329, "supported_languages": null }, "macro.dbt_postgres.postgres__get_rename_materialized_view_sql": { @@ -3717,7 +3690,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5868738, + "created_at": 1708330298.5234535, "supported_languages": null }, "macro.dbt_postgres.postgres__get_alter_materialized_view_as_sql": { @@ -3742,7 +3715,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5875628, + "created_at": 1708330298.524322, "supported_languages": null }, "macro.dbt_postgres.postgres__update_indexes_on_materialized_view": { @@ -3767,7 +3740,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5879343, + "created_at": 1708330298.5248008, "supported_languages": null }, "macro.dbt_postgres.postgres__get_materialized_view_configuration_changes": { @@ -3791,7 +3764,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5882244, + "created_at": 1708330298.52508, "supported_languages": null }, "macro.dbt_postgres.postgres__get_create_materialized_view_as_sql": { @@ -3815,7 +3788,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5886245, + "created_at": 1708330298.5254297, "supported_languages": null }, "macro.dbt_postgres.postgres__listagg": { @@ -3837,7 +3810,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5891001, + "created_at": 
1708330298.5260458, "supported_languages": null }, "macro.dbt_postgres.postgres__dateadd": { @@ -3859,7 +3832,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5892606, + "created_at": 1708330298.5262516, "supported_languages": null }, "macro.dbt_postgres.postgres__any_value": { @@ -3881,7 +3854,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5893629, + "created_at": 1708330298.5263839, "supported_languages": null }, "macro.dbt_postgres.postgres__split_part": { @@ -3906,7 +3879,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5896747, + "created_at": 1708330298.5267837, "supported_languages": null }, "macro.dbt_postgres.postgres__last_day": { @@ -3932,7 +3905,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5900493, + "created_at": 1708330298.527255, "supported_languages": null }, "macro.dbt_postgres.postgres__datediff": { @@ -3956,7 +3929,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.592488, + "created_at": 1708330298.5305197, "supported_languages": null }, "macro.dbt.generate_database_name": { @@ -3980,7 +3953,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5928009, + "created_at": 1708330298.5309165, "supported_languages": null }, "macro.dbt.default__generate_database_name": { @@ -4002,7 +3975,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.592988, + "created_at": 1708330298.5311565, "supported_languages": null }, "macro.dbt.generate_schema_name": { @@ -4026,7 +3999,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.59339, + "created_at": 1708330298.531699, "supported_languages": null }, "macro.dbt.default__generate_schema_name": { @@ -4048,7 +4021,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5935931, + "created_at": 1708330298.5319543, "supported_languages": null }, "macro.dbt.generate_schema_name_for_env": { @@ -4070,7 +4043,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5938025, + "created_at": 1708330298.5322382, "supported_languages": null }, "macro.dbt.generate_alias_name": { @@ -4094,7 +4067,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5941062, + "created_at": 1708330298.5326614, "supported_languages": null }, "macro.dbt.default__generate_alias_name": { @@ -4116,7 +4089,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5943813, + "created_at": 1708330298.5330265, "supported_languages": null }, "macro.dbt.resolve_model_name": { @@ -4140,7 +4113,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5956953, + "created_at": 1708330298.5347013, "supported_languages": null }, "macro.dbt.default__resolve_model_name": { @@ -4162,7 +4135,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5958927, + "created_at": 1708330298.5348513, "supported_languages": null }, "macro.dbt.build_ref_function": { @@ -4186,7 +4159,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5966134, + "created_at": 1708330298.535658, "supported_languages": null }, "macro.dbt.build_source_function": { @@ -4210,7 +4183,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5969474, + "created_at": 1708330298.536067, "supported_languages": null }, "macro.dbt.build_config_dict": { @@ -4232,7 +4205,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5973961, + "created_at": 1708330298.5366523, "supported_languages": null }, 
"macro.dbt.py_script_postfix": { @@ -4261,7 +4234,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5980356, + "created_at": 1708330298.5373712, "supported_languages": null }, "macro.dbt.py_script_comment": { @@ -4283,7 +4256,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.598095, + "created_at": 1708330298.537449, "supported_languages": null }, "macro.dbt.run_hooks": { @@ -4307,7 +4280,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5989525, + "created_at": 1708330298.5387776, "supported_languages": null }, "macro.dbt.make_hook_config": { @@ -4329,7 +4302,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5991056, + "created_at": 1708330298.5390067, "supported_languages": null }, "macro.dbt.before_begin": { @@ -4353,7 +4326,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.599219, + "created_at": 1708330298.5391488, "supported_languages": null }, "macro.dbt.in_transaction": { @@ -4377,7 +4350,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.599329, + "created_at": 1708330298.539292, "supported_languages": null }, "macro.dbt.after_commit": { @@ -4401,7 +4374,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.599435, + "created_at": 1708330298.5395236, "supported_languages": null }, "macro.dbt.set_sql_header": { @@ -4423,7 +4396,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5997381, + "created_at": 1708330298.5399194, "supported_languages": null }, "macro.dbt.should_full_refresh": { @@ -4445,7 +4418,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.5999718, + "created_at": 1708330298.5402334, "supported_languages": null }, "macro.dbt.should_store_failures": { @@ -4467,7 +4440,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.600199, + "created_at": 1708330298.540542, "supported_languages": null }, "macro.dbt.materialization_test_default": { @@ -4494,7 +4467,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6022599, + "created_at": 1708330298.5431957, "supported_languages": [ "sql" ] @@ -4520,7 +4493,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6026115, + "created_at": 1708330298.5436575, "supported_languages": null }, "macro.dbt.default__get_test_sql": { @@ -4542,7 +4515,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6028411, + "created_at": 1708330298.5439508, "supported_languages": null }, "macro.dbt.get_where_subquery": { @@ -4566,7 +4539,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6031418, + "created_at": 1708330298.5443087, "supported_languages": null }, "macro.dbt.default__get_where_subquery": { @@ -4588,7 +4561,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6034262, + "created_at": 1708330298.5446825, "supported_languages": null }, "macro.dbt.create_columns": { @@ -4612,7 +4585,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6070242, + "created_at": 1708330298.549088, "supported_languages": null }, "macro.dbt.default__create_columns": { @@ -4636,7 +4609,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6072512, + "created_at": 1708330298.54937, "supported_languages": null }, "macro.dbt.post_snapshot": { @@ -4660,7 +4633,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.607383, + "created_at": 1708330298.5495458, "supported_languages": null }, 
"macro.dbt.default__post_snapshot": { @@ -4682,7 +4655,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6074603, + "created_at": 1708330298.549637, "supported_languages": null }, "macro.dbt.get_true_sql": { @@ -4706,7 +4679,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6075723, + "created_at": 1708330298.5497756, "supported_languages": null }, "macro.dbt.default__get_true_sql": { @@ -4728,7 +4701,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6076605, + "created_at": 1708330298.5498874, "supported_languages": null }, "macro.dbt.snapshot_staging_table": { @@ -4752,7 +4725,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6078143, + "created_at": 1708330298.5500886, "supported_languages": null }, "macro.dbt.default__snapshot_staging_table": { @@ -4776,7 +4749,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6085184, + "created_at": 1708330298.5510135, "supported_languages": null }, "macro.dbt.build_snapshot_table": { @@ -4800,7 +4773,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6086595, + "created_at": 1708330298.5511975, "supported_languages": null }, "macro.dbt.default__build_snapshot_table": { @@ -4822,7 +4795,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6088536, + "created_at": 1708330298.5514443, "supported_languages": null }, "macro.dbt.build_snapshot_staging_table": { @@ -4849,7 +4822,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6091673, + "created_at": 1708330298.55188, "supported_languages": null }, "macro.dbt.materialization_snapshot_default": { @@ -4886,7 +4859,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6140904, + "created_at": 1708330298.5586557, "supported_languages": [ "sql" ] @@ -4912,7 +4885,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.614464, + "created_at": 1708330298.5591733, "supported_languages": null }, "macro.dbt.default__snapshot_merge_sql": { @@ -4934,7 +4907,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6146834, + "created_at": 1708330298.5594583, "supported_languages": null }, "macro.dbt.strategy_dispatch": { @@ -4956,7 +4929,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.617663, + "created_at": 1708330298.563218, "supported_languages": null }, "macro.dbt.snapshot_hash_arguments": { @@ -4980,7 +4953,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6177986, + "created_at": 1708330298.5633988, "supported_languages": null }, "macro.dbt.default__snapshot_hash_arguments": { @@ -5002,7 +4975,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6179757, + "created_at": 1708330298.5637388, "supported_languages": null }, "macro.dbt.snapshot_timestamp_strategy": { @@ -5026,7 +4999,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6185966, + "created_at": 1708330298.5645275, "supported_languages": null }, "macro.dbt.snapshot_string_as_time": { @@ -5050,7 +5023,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6187344, + "created_at": 1708330298.564707, "supported_languages": null }, "macro.dbt.default__snapshot_string_as_time": { @@ -5072,7 +5045,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.618874, + "created_at": 1708330298.5648892, "supported_languages": null }, "macro.dbt.snapshot_check_all_get_existing_columns": { @@ -5096,7 +5069,7 @@ }, "patch_path": null, 
"arguments": [], - "created_at": 1706145554.6200058, + "created_at": 1708330298.5663006, "supported_languages": null }, "macro.dbt.snapshot_check_strategy": { @@ -5123,7 +5096,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6210408, + "created_at": 1708330298.5675998, "supported_languages": null }, "macro.dbt.create_csv_table": { @@ -5147,7 +5120,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6250587, + "created_at": 1708330298.572908, "supported_languages": null }, "macro.dbt.default__create_csv_table": { @@ -5171,7 +5144,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6257298, + "created_at": 1708330298.5737932, "supported_languages": null }, "macro.dbt.reset_csv_table": { @@ -5195,7 +5168,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6259162, + "created_at": 1708330298.5740166, "supported_languages": null }, "macro.dbt.default__reset_csv_table": { @@ -5219,7 +5192,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6262684, + "created_at": 1708330298.5744674, "supported_languages": null }, "macro.dbt.get_csv_sql": { @@ -5243,7 +5216,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.626413, + "created_at": 1708330298.5746727, "supported_languages": null }, "macro.dbt.default__get_csv_sql": { @@ -5265,7 +5238,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.626523, + "created_at": 1708330298.574799, "supported_languages": null }, "macro.dbt.get_binding_char": { @@ -5289,7 +5262,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6266289, + "created_at": 1708330298.5749333, "supported_languages": null }, "macro.dbt.default__get_binding_char": { @@ -5311,7 +5284,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6267116, + "created_at": 1708330298.5750396, "supported_languages": null }, "macro.dbt.get_batch_size": { @@ -5335,7 +5308,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.626845, + "created_at": 1708330298.5751874, "supported_languages": null }, "macro.dbt.default__get_batch_size": { @@ -5357,7 +5330,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6269305, + "created_at": 1708330298.5752983, "supported_languages": null }, "macro.dbt.get_seed_column_quoted_csv": { @@ -5379,7 +5352,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6273472, + "created_at": 1708330298.575762, "supported_languages": null }, "macro.dbt.load_csv_rows": { @@ -5403,7 +5376,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6275103, + "created_at": 1708330298.5759432, "supported_languages": null }, "macro.dbt.default__load_csv_rows": { @@ -5429,7 +5402,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6285172, + "created_at": 1708330298.5772634, "supported_languages": null }, "macro.dbt.materialization_seed_default": { @@ -5463,7 +5436,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.630931, + "created_at": 1708330298.5807245, "supported_languages": [ "sql" ] @@ -5499,7 +5472,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.633134, + "created_at": 1708330298.583626, "supported_languages": [ "sql" ] @@ -5534,7 +5507,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6353793, + "created_at": 1708330298.5865545, "supported_languages": [ "sql" ] @@ -5567,7 +5540,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 
1706145554.6393204, + "created_at": 1708330298.5918787, "supported_languages": [ "sql" ] @@ -5595,7 +5568,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.639623, + "created_at": 1708330298.5922709, "supported_languages": null }, "macro.dbt.materialized_view_teardown": { @@ -5620,7 +5593,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6398118, + "created_at": 1708330298.5925407, "supported_languages": null }, "macro.dbt.materialized_view_get_build_sql": { @@ -5649,7 +5622,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.640766, + "created_at": 1708330298.593817, "supported_languages": null }, "macro.dbt.materialized_view_execute_no_op": { @@ -5671,7 +5644,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6409376, + "created_at": 1708330298.5940437, "supported_languages": null }, "macro.dbt.materialized_view_execute_build_sql": { @@ -5699,7 +5672,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6414285, + "created_at": 1708330298.5946872, "supported_languages": null }, "macro.dbt.incremental_validate_on_schema_change": { @@ -5721,7 +5694,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6458056, + "created_at": 1708330298.6007638, "supported_languages": null }, "macro.dbt.check_for_schema_changes": { @@ -5746,7 +5719,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6467223, + "created_at": 1708330298.6020534, "supported_languages": null }, "macro.dbt.sync_column_schemas": { @@ -5771,7 +5744,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.647604, + "created_at": 1708330298.6033158, "supported_languages": null }, "macro.dbt.process_schema_changes": { @@ -5796,7 +5769,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.64823, + "created_at": 1708330298.6042094, "supported_languages": null }, "macro.dbt.get_merge_sql": { @@ -5820,7 +5793,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6534753, + "created_at": 1708330298.6113167, "supported_languages": null }, "macro.dbt.default__get_merge_sql": { @@ -5845,7 +5818,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6546695, + "created_at": 1708330298.612972, "supported_languages": null }, "macro.dbt.get_delete_insert_merge_sql": { @@ -5869,7 +5842,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.654864, + "created_at": 1708330298.6132426, "supported_languages": null }, "macro.dbt.default__get_delete_insert_merge_sql": { @@ -5893,7 +5866,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6556215, + "created_at": 1708330298.614259, "supported_languages": null }, "macro.dbt.get_insert_overwrite_merge_sql": { @@ -5917,7 +5890,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.65582, + "created_at": 1708330298.6145384, "supported_languages": null }, "macro.dbt.default__get_insert_overwrite_merge_sql": { @@ -5941,7 +5914,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6562977, + "created_at": 1708330298.615291, "supported_languages": null }, "macro.dbt.get_quoted_csv": { @@ -5963,7 +5936,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6575282, + "created_at": 1708330298.6169353, "supported_languages": null }, "macro.dbt.diff_columns": { @@ -5985,7 +5958,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6579359, + "created_at": 1708330298.6174629, "supported_languages": null 
}, "macro.dbt.diff_column_data_types": { @@ -6007,7 +5980,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6585205, + "created_at": 1708330298.6182358, "supported_languages": null }, "macro.dbt.get_merge_update_columns": { @@ -6031,7 +6004,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6586964, + "created_at": 1708330298.618634, "supported_languages": null }, "macro.dbt.default__get_merge_update_columns": { @@ -6053,7 +6026,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6592205, + "created_at": 1708330298.6195307, "supported_languages": null }, "macro.dbt.is_incremental": { @@ -6077,7 +6050,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6597126, + "created_at": 1708330298.6202323, "supported_languages": null }, "macro.dbt.materialization_incremental_default": { @@ -6116,7 +6089,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6634784, + "created_at": 1708330298.6252937, "supported_languages": [ "sql" ] @@ -6142,7 +6115,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6641579, + "created_at": 1708330298.6262195, "supported_languages": null }, "macro.dbt.default__get_incremental_append_sql": { @@ -6166,7 +6139,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6643257, + "created_at": 1708330298.6264565, "supported_languages": null }, "macro.dbt.get_incremental_delete_insert_sql": { @@ -6190,7 +6163,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6644704, + "created_at": 1708330298.6266503, "supported_languages": null }, "macro.dbt.default__get_incremental_delete_insert_sql": { @@ -6214,7 +6187,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.664682, + "created_at": 1708330298.6269372, "supported_languages": null }, "macro.dbt.get_incremental_merge_sql": { @@ -6238,7 +6211,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6648176, + "created_at": 1708330298.6271317, "supported_languages": null }, "macro.dbt.default__get_incremental_merge_sql": { @@ -6262,7 +6235,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6650286, + "created_at": 1708330298.6274204, "supported_languages": null }, "macro.dbt.get_incremental_insert_overwrite_sql": { @@ -6286,7 +6259,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6651597, + "created_at": 1708330298.62761, "supported_languages": null }, "macro.dbt.default__get_incremental_insert_overwrite_sql": { @@ -6310,7 +6283,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6653452, + "created_at": 1708330298.6278706, "supported_languages": null }, "macro.dbt.get_incremental_default_sql": { @@ -6334,7 +6307,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6654878, + "created_at": 1708330298.6280508, "supported_languages": null }, "macro.dbt.default__get_incremental_default_sql": { @@ -6358,7 +6331,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6655974, + "created_at": 1708330298.628201, "supported_languages": null }, "macro.dbt.get_insert_into_sql": { @@ -6382,7 +6355,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6658008, + "created_at": 1708330298.6284833, "supported_languages": null }, "macro.dbt.can_clone_table": { @@ -6406,7 +6379,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.665979, + "created_at": 1708330298.6287339, "supported_languages": null }, 
"macro.dbt.default__can_clone_table": { @@ -6428,7 +6401,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.666064, + "created_at": 1708330298.6288488, "supported_languages": null }, "macro.dbt.create_or_replace_clone": { @@ -6452,7 +6425,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.666278, + "created_at": 1708330298.6292129, "supported_languages": null }, "macro.dbt.default__create_or_replace_clone": { @@ -6474,7 +6447,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6663783, + "created_at": 1708330298.6293995, "supported_languages": null }, "macro.dbt.materialization_clone_default": { @@ -6505,7 +6478,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6691573, + "created_at": 1708330298.6333277, "supported_languages": [ "sql" ] @@ -6531,7 +6504,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6698363, + "created_at": 1708330298.6342292, "supported_languages": null }, "macro.dbt.default__get_replace_sql": { @@ -6563,7 +6536,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6707203, + "created_at": 1708330298.6354191, "supported_languages": null }, "macro.dbt.get_create_backup_sql": { @@ -6587,7 +6560,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6709929, + "created_at": 1708330298.6358008, "supported_languages": null }, "macro.dbt.default__get_create_backup_sql": { @@ -6613,7 +6586,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.671187, + "created_at": 1708330298.6360586, "supported_languages": null }, "macro.dbt.get_drop_sql": { @@ -6637,7 +6610,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6717353, + "created_at": 1708330298.6367738, "supported_languages": null }, "macro.dbt.default__get_drop_sql": { @@ -6663,7 +6636,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6719952, + "created_at": 1708330298.6371417, "supported_languages": null }, "macro.dbt.drop_relation": { @@ -6687,7 +6660,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6721287, + "created_at": 1708330298.6373215, "supported_languages": null }, "macro.dbt.default__drop_relation": { @@ -6712,7 +6685,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.672272, + "created_at": 1708330298.6375327, "supported_languages": null }, "macro.dbt.drop_relation_if_exists": { @@ -6734,7 +6707,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6724102, + "created_at": 1708330298.6377306, "supported_languages": null }, "macro.dbt.get_rename_sql": { @@ -6758,7 +6731,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6729453, + "created_at": 1708330298.6384318, "supported_languages": null }, "macro.dbt.default__get_rename_sql": { @@ -6784,7 +6757,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6732552, + "created_at": 1708330298.6388555, "supported_languages": null }, "macro.dbt.rename_relation": { @@ -6808,7 +6781,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.673405, + "created_at": 1708330298.6390538, "supported_languages": null }, "macro.dbt.default__rename_relation": { @@ -6832,7 +6805,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6736226, + "created_at": 1708330298.6393352, "supported_languages": null }, "macro.dbt.get_create_intermediate_sql": { @@ -6856,7 +6829,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.673898, + 
"created_at": 1708330298.6397078, "supported_languages": null }, "macro.dbt.default__get_create_intermediate_sql": { @@ -6882,7 +6855,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6740813, + "created_at": 1708330298.6399567, "supported_languages": null }, "macro.dbt.get_drop_backup_sql": { @@ -6906,7 +6879,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6743135, + "created_at": 1708330298.640266, "supported_languages": null }, "macro.dbt.default__get_drop_backup_sql": { @@ -6931,7 +6904,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6744716, + "created_at": 1708330298.6404645, "supported_languages": null }, "macro.dbt.get_create_sql": { @@ -6955,7 +6928,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6748202, + "created_at": 1708330298.6409438, "supported_languages": null }, "macro.dbt.default__get_create_sql": { @@ -6981,7 +6954,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.675137, + "created_at": 1708330298.6413696, "supported_languages": null }, "macro.dbt.get_rename_intermediate_sql": { @@ -7005,7 +6978,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6753833, + "created_at": 1708330298.641697, "supported_languages": null }, "macro.dbt.default__get_rename_intermediate_sql": { @@ -7030,7 +7003,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.675544, + "created_at": 1708330298.6418974, "supported_languages": null }, "macro.dbt.get_table_columns_and_constraints": { @@ -7054,7 +7027,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.676307, + "created_at": 1708330298.6429393, "supported_languages": null }, "macro.dbt.default__get_table_columns_and_constraints": { @@ -7078,7 +7051,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.676395, + "created_at": 1708330298.6430545, "supported_languages": null }, "macro.dbt.table_columns_and_constraints": { @@ -7100,7 +7073,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.676791, + "created_at": 1708330298.6436424, "supported_languages": null }, "macro.dbt.get_assert_columns_equivalent": { @@ -7124,7 +7097,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6769798, + "created_at": 1708330298.6438968, "supported_languages": null }, "macro.dbt.default__get_assert_columns_equivalent": { @@ -7148,7 +7121,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.677078, + "created_at": 1708330298.644028, "supported_languages": null }, "macro.dbt.assert_columns_equivalent": { @@ -7174,7 +7147,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6780024, + "created_at": 1708330298.6452618, "supported_languages": null }, "macro.dbt.format_columns": { @@ -7198,7 +7171,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6782806, + "created_at": 1708330298.6456428, "supported_languages": null }, "macro.dbt.default__format_column": { @@ -7220,7 +7193,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6785462, + "created_at": 1708330298.6459916, "supported_languages": null }, "macro.dbt.get_replace_view_sql": { @@ -7244,7 +7217,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6792011, + "created_at": 1708330298.6468515, "supported_languages": null }, "macro.dbt.default__get_replace_view_sql": { @@ -7266,7 +7239,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6793077, + "created_at": 
1708330298.6469932, "supported_languages": null }, "macro.dbt.create_or_replace_view": { @@ -7296,7 +7269,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6801229, + "created_at": 1708330298.6480694, "supported_languages": null }, "macro.dbt.handle_existing_table": { @@ -7320,7 +7293,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6802635, + "created_at": 1708330298.648259, "supported_languages": null }, "macro.dbt.default__handle_existing_table": { @@ -7342,7 +7315,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.680422, + "created_at": 1708330298.6484735, "supported_languages": null }, "macro.dbt.drop_view": { @@ -7366,7 +7339,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6806257, + "created_at": 1708330298.6487405, "supported_languages": null }, "macro.dbt.default__drop_view": { @@ -7388,7 +7361,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6807032, + "created_at": 1708330298.6488426, "supported_languages": null }, "macro.dbt.get_rename_view_sql": { @@ -7412,7 +7385,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6809, + "created_at": 1708330298.649106, "supported_languages": null }, "macro.dbt.default__get_rename_view_sql": { @@ -7434,7 +7407,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6810055, + "created_at": 1708330298.6492453, "supported_languages": null }, "macro.dbt.get_create_view_as_sql": { @@ -7458,7 +7431,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.681312, + "created_at": 1708330298.6496537, "supported_languages": null }, "macro.dbt.default__get_create_view_as_sql": { @@ -7482,7 +7455,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.68143, + "created_at": 1708330298.649803, "supported_languages": null }, "macro.dbt.create_view_as": { @@ -7506,7 +7479,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.681569, + "created_at": 1708330298.6499746, "supported_languages": null }, "macro.dbt.default__create_view_as": { @@ -7530,7 +7503,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6818724, + "created_at": 1708330298.6503804, "supported_languages": null }, "macro.dbt.get_replace_table_sql": { @@ -7554,7 +7527,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.682073, + "created_at": 1708330298.6506543, "supported_languages": null }, "macro.dbt.default__get_replace_table_sql": { @@ -7576,7 +7549,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6821785, + "created_at": 1708330298.650794, "supported_languages": null }, "macro.dbt.drop_table": { @@ -7600,7 +7573,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.682369, + "created_at": 1708330298.6510668, "supported_languages": null }, "macro.dbt.default__drop_table": { @@ -7622,7 +7595,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6824481, + "created_at": 1708330298.6512222, "supported_languages": null }, "macro.dbt.get_rename_table_sql": { @@ -7646,7 +7619,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6826518, + "created_at": 1708330298.651542, "supported_languages": null }, "macro.dbt.default__get_rename_table_sql": { @@ -7668,7 +7641,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.682761, + "created_at": 1708330298.6517115, "supported_languages": null }, "macro.dbt.get_create_table_as_sql": { @@ -7692,7 +7665,7 @@ }, 
"patch_path": null, "arguments": [], - "created_at": 1706145554.683403, + "created_at": 1708330298.6526318, "supported_languages": null }, "macro.dbt.default__get_create_table_as_sql": { @@ -7716,7 +7689,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6835518, + "created_at": 1708330298.6528091, "supported_languages": null }, "macro.dbt.create_table_as": { @@ -7740,7 +7713,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6839387, + "created_at": 1708330298.6533344, "supported_languages": null }, "macro.dbt.default__create_table_as": { @@ -7766,7 +7739,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.684438, + "created_at": 1708330298.654016, "supported_languages": null }, "macro.dbt.default__get_column_names": { @@ -7788,7 +7761,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6847801, + "created_at": 1708330298.6544642, "supported_languages": null }, "macro.dbt.get_select_subquery": { @@ -7812,7 +7785,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6849122, + "created_at": 1708330298.6546543, "supported_languages": null }, "macro.dbt.default__get_select_subquery": { @@ -7836,7 +7809,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.685039, + "created_at": 1708330298.6548293, "supported_languages": null }, "macro.dbt.refresh_materialized_view": { @@ -7860,7 +7833,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6852703, + "created_at": 1708330298.6551301, "supported_languages": null }, "macro.dbt.default__refresh_materialized_view": { @@ -7882,7 +7855,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.685366, + "created_at": 1708330298.6552625, "supported_languages": null }, "macro.dbt.get_replace_materialized_view_sql": { @@ -7906,7 +7879,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6855707, + "created_at": 1708330298.655537, "supported_languages": null }, "macro.dbt.default__get_replace_materialized_view_sql": { @@ -7928,7 +7901,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.685677, + "created_at": 1708330298.6556823, "supported_languages": null }, "macro.dbt.drop_materialized_view": { @@ -7952,7 +7925,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6858692, + "created_at": 1708330298.6559346, "supported_languages": null }, "macro.dbt.default__drop_materialized_view": { @@ -7974,7 +7947,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.685948, + "created_at": 1708330298.6560376, "supported_languages": null }, "macro.dbt.get_rename_materialized_view_sql": { @@ -7998,7 +7971,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6861484, + "created_at": 1708330298.6562972, "supported_languages": null }, "macro.dbt.default__get_rename_materialized_view_sql": { @@ -8020,7 +7993,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6862562, + "created_at": 1708330298.65644, "supported_languages": null }, "macro.dbt.get_alter_materialized_view_as_sql": { @@ -8044,7 +8017,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6867146, + "created_at": 1708330298.6570497, "supported_languages": null }, "macro.dbt.default__get_alter_materialized_view_as_sql": { @@ -8066,7 +8039,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6868513, + "created_at": 1708330298.6572325, "supported_languages": null }, 
"macro.dbt.get_materialized_view_configuration_changes": { @@ -8090,7 +8063,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.68706, + "created_at": 1708330298.6575205, "supported_languages": null }, "macro.dbt.default__get_materialized_view_configuration_changes": { @@ -8112,7 +8085,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.687166, + "created_at": 1708330298.6576629, "supported_languages": null }, "macro.dbt.get_create_materialized_view_as_sql": { @@ -8136,7 +8109,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6873653, + "created_at": 1708330298.6579247, "supported_languages": null }, "macro.dbt.default__get_create_materialized_view_as_sql": { @@ -8158,7 +8131,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6874814, + "created_at": 1708330298.6580632, "supported_languages": null }, "macro.dbt.default__test_accepted_values": { @@ -8180,7 +8153,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6879034, + "created_at": 1708330298.6586328, "supported_languages": null }, "macro.dbt.default__test_not_null": { @@ -8204,7 +8177,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6881247, + "created_at": 1708330298.658927, "supported_languages": null }, "macro.dbt.default__test_relationships": { @@ -8226,7 +8199,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6883876, + "created_at": 1708330298.6592968, "supported_languages": null }, "macro.dbt.default__test_unique": { @@ -8248,7 +8221,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6885736, + "created_at": 1708330298.6595511, "supported_languages": null }, "macro.dbt.listagg": { @@ -8272,7 +8245,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6890483, + "created_at": 1708330298.6601758, "supported_languages": null }, "macro.dbt.default__listagg": { @@ -8294,7 +8267,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.689351, + "created_at": 1708330298.6605818, "supported_languages": null }, "macro.dbt.safe_cast": { @@ -8318,7 +8291,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6895661, + "created_at": 1708330298.6608617, "supported_languages": null }, "macro.dbt.default__safe_cast": { @@ -8340,7 +8313,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6896698, + "created_at": 1708330298.661014, "supported_languages": null }, "macro.dbt.intersect": { @@ -8364,7 +8337,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6898313, + "created_at": 1708330298.6612434, "supported_languages": null }, "macro.dbt.default__intersect": { @@ -8386,7 +8359,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6898885, + "created_at": 1708330298.6613193, "supported_languages": null }, "macro.dbt.except": { @@ -8410,7 +8383,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6901135, + "created_at": 1708330298.6617448, "supported_languages": null }, "macro.dbt.default__except": { @@ -8432,7 +8405,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6901712, + "created_at": 1708330298.661872, "supported_languages": null }, "macro.dbt.get_powers_of_two": { @@ -8456,7 +8429,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.69081, + "created_at": 1708330298.6627808, "supported_languages": null }, "macro.dbt.default__get_powers_of_two": { @@ -8478,7 +8451,7 @@ }, "patch_path": null, "arguments": 
[], - "created_at": 1706145554.6911185, + "created_at": 1708330298.6632087, "supported_languages": null }, "macro.dbt.generate_series": { @@ -8502,7 +8475,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6912532, + "created_at": 1708330298.6633935, "supported_languages": null }, "macro.dbt.default__generate_series": { @@ -8526,7 +8499,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6916382, + "created_at": 1708330298.6639416, "supported_languages": null }, "macro.dbt.replace": { @@ -8550,7 +8523,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6918845, + "created_at": 1708330298.6642861, "supported_languages": null }, "macro.dbt.default__replace": { @@ -8572,7 +8545,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6920025, + "created_at": 1708330298.664454, "supported_languages": null }, "macro.dbt.get_intervals_between": { @@ -8596,7 +8569,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6925623, + "created_at": 1708330298.6652257, "supported_languages": null }, "macro.dbt.default__get_intervals_between": { @@ -8621,7 +8594,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6929936, + "created_at": 1708330298.6658232, "supported_languages": null }, "macro.dbt.date_spine": { @@ -8645,7 +8618,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6931622, + "created_at": 1708330298.6660545, "supported_languages": null }, "macro.dbt.default__date_spine": { @@ -8671,7 +8644,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.693432, + "created_at": 1708330298.6664183, "supported_languages": null }, "macro.dbt.hash": { @@ -8695,7 +8668,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6936297, + "created_at": 1708330298.6666849, "supported_languages": null }, "macro.dbt.default__hash": { @@ -8717,7 +8690,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6937442, + "created_at": 1708330298.6668458, "supported_languages": null }, "macro.dbt.concat": { @@ -8741,7 +8714,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6939178, + "created_at": 1708330298.6671133, "supported_languages": null }, "macro.dbt.default__concat": { @@ -8763,7 +8736,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6940048, + "created_at": 1708330298.6672306, "supported_languages": null }, "macro.dbt.string_literal": { @@ -8787,7 +8760,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6941786, + "created_at": 1708330298.6675715, "supported_languages": null }, "macro.dbt.default__string_literal": { @@ -8809,7 +8782,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6942525, + "created_at": 1708330298.667697, "supported_languages": null }, "macro.dbt.type_string": { @@ -8833,7 +8806,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6950414, + "created_at": 1708330298.6687655, "supported_languages": null }, "macro.dbt.default__type_string": { @@ -8855,7 +8828,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.695152, + "created_at": 1708330298.668916, "supported_languages": null }, "macro.dbt.type_timestamp": { @@ -8879,7 +8852,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.69527, + "created_at": 1708330298.6690693, "supported_languages": null }, "macro.dbt.default__type_timestamp": { @@ -8901,7 +8874,7 @@ }, "patch_path": null, "arguments": [], - 
"created_at": 1706145554.6953776, + "created_at": 1708330298.669215, "supported_languages": null }, "macro.dbt.type_float": { @@ -8925,7 +8898,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6955047, + "created_at": 1708330298.6693678, "supported_languages": null }, "macro.dbt.default__type_float": { @@ -8947,7 +8920,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6956182, + "created_at": 1708330298.6695323, "supported_languages": null }, "macro.dbt.type_numeric": { @@ -8971,7 +8944,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6957312, + "created_at": 1708330298.6696868, "supported_languages": null }, "macro.dbt.default__type_numeric": { @@ -8993,7 +8966,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.695857, + "created_at": 1708330298.6698532, "supported_languages": null }, "macro.dbt.type_bigint": { @@ -9017,7 +8990,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6959705, + "created_at": 1708330298.6700027, "supported_languages": null }, "macro.dbt.default__type_bigint": { @@ -9039,7 +9012,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6961434, + "created_at": 1708330298.6702461, "supported_languages": null }, "macro.dbt.type_int": { @@ -9063,7 +9036,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6962574, + "created_at": 1708330298.670405, "supported_languages": null }, "macro.dbt.default__type_int": { @@ -9085,7 +9058,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6963584, + "created_at": 1708330298.670555, "supported_languages": null }, "macro.dbt.type_boolean": { @@ -9109,7 +9082,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.696479, + "created_at": 1708330298.6707082, "supported_languages": null }, "macro.dbt.default__type_boolean": { @@ -9131,7 +9104,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6965823, + "created_at": 1708330298.6708455, "supported_languages": null }, "macro.dbt.array_construct": { @@ -9155,7 +9128,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.696855, + "created_at": 1708330298.6712036, "supported_languages": null }, "macro.dbt.default__array_construct": { @@ -9177,7 +9150,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6970685, + "created_at": 1708330298.6714365, "supported_languages": null }, "macro.dbt.cast_bool_to_text": { @@ -9201,7 +9174,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.697262, + "created_at": 1708330298.6717927, "supported_languages": null }, "macro.dbt.default__cast_bool_to_text": { @@ -9223,7 +9196,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6973813, + "created_at": 1708330298.671995, "supported_languages": null }, "macro.dbt.bool_or": { @@ -9247,7 +9220,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.697574, + "created_at": 1708330298.6722603, "supported_languages": null }, "macro.dbt.default__bool_or": { @@ -9269,7 +9242,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6976514, + "created_at": 1708330298.6723726, "supported_languages": null }, "macro.dbt.dateadd": { @@ -9293,7 +9266,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.697899, + "created_at": 1708330298.6727378, "supported_languages": null }, "macro.dbt.default__dateadd": { @@ -9315,7 +9288,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 
1706145554.6980197, + "created_at": 1708330298.6729088, "supported_languages": null }, "macro.dbt.any_value": { @@ -9339,7 +9312,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6981988, + "created_at": 1708330298.673148, "supported_languages": null }, "macro.dbt.default__any_value": { @@ -9361,7 +9334,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6982768, + "created_at": 1708330298.6732557, "supported_languages": null }, "macro.dbt.split_part": { @@ -9385,7 +9358,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6986735, + "created_at": 1708330298.6738064, "supported_languages": null }, "macro.dbt.default__split_part": { @@ -9407,7 +9380,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6987917, + "created_at": 1708330298.673971, "supported_languages": null }, "macro.dbt._split_part_negative": { @@ -9429,7 +9402,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6989524, + "created_at": 1708330298.6741853, "supported_languages": null }, "macro.dbt.length": { @@ -9453,7 +9426,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6991348, + "created_at": 1708330298.674433, "supported_languages": null }, "macro.dbt.default__length": { @@ -9475,7 +9448,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6992104, + "created_at": 1708330298.6745496, "supported_languages": null }, "macro.dbt.right": { @@ -9499,7 +9472,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6994207, + "created_at": 1708330298.6748302, "supported_languages": null }, "macro.dbt.default__right": { @@ -9521,7 +9494,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.699532, + "created_at": 1708330298.6749709, "supported_languages": null }, "macro.dbt.position": { @@ -9545,7 +9518,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.699738, + "created_at": 1708330298.6752727, "supported_languages": null }, "macro.dbt.default__position": { @@ -9567,7 +9540,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.6998453, + "created_at": 1708330298.675453, "supported_languages": null }, "macro.dbt.date_trunc": { @@ -9591,7 +9564,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7000463, + "created_at": 1708330298.6757562, "supported_languages": null }, "macro.dbt.default__date_trunc": { @@ -9613,7 +9586,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7001402, + "created_at": 1708330298.6758823, "supported_languages": null }, "macro.dbt.last_day": { @@ -9637,7 +9610,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7003872, + "created_at": 1708330298.676216, "supported_languages": null }, "macro.dbt.default_last_day": { @@ -9662,7 +9635,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7006617, + "created_at": 1708330298.676614, "supported_languages": null }, "macro.dbt.default__last_day": { @@ -9686,7 +9659,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7007701, + "created_at": 1708330298.6767573, "supported_languages": null }, "macro.dbt.escape_single_quotes": { @@ -9710,7 +9683,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7009664, + "created_at": 1708330298.6770108, "supported_languages": null }, "macro.dbt.default__escape_single_quotes": { @@ -9732,7 +9705,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7010655, + "created_at": 
1708330298.6771474, "supported_languages": null }, "macro.dbt.datediff": { @@ -9756,7 +9729,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7013066, + "created_at": 1708330298.677466, "supported_languages": null }, "macro.dbt.default__datediff": { @@ -9778,7 +9751,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7014256, + "created_at": 1708330298.6776445, "supported_languages": null }, "macro.dbt.array_append": { @@ -9802,7 +9775,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7016346, + "created_at": 1708330298.6779194, "supported_languages": null }, "macro.dbt.default__array_append": { @@ -9824,7 +9797,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7017322, + "created_at": 1708330298.678047, "supported_languages": null }, "macro.dbt.array_concat": { @@ -9848,7 +9821,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7019274, + "created_at": 1708330298.6783056, "supported_languages": null }, "macro.dbt.default__array_concat": { @@ -9870,7 +9843,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7020214, + "created_at": 1708330298.678443, "supported_languages": null }, "macro.dbt.convert_datetime": { @@ -9892,7 +9865,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7034445, + "created_at": 1708330298.680482, "supported_languages": null }, "macro.dbt.dates_in_range": { @@ -9916,7 +9889,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7043116, + "created_at": 1708330298.6818788, "supported_languages": null }, "macro.dbt.partition_range": { @@ -9940,7 +9913,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7048607, + "created_at": 1708330298.6826746, "supported_languages": null }, "macro.dbt.py_current_timestring": { @@ -9962,7 +9935,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7050266, + "created_at": 1708330298.6829095, "supported_languages": null }, "macro.dbt.statement": { @@ -9984,7 +9957,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7061265, + "created_at": 1708330298.6844487, "supported_languages": null }, "macro.dbt.noop_statement": { @@ -10006,7 +9979,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7065449, + "created_at": 1708330298.6850069, "supported_languages": null }, "macro.dbt.run_query": { @@ -10030,7 +10003,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7067552, + "created_at": 1708330298.6852882, "supported_languages": null }, "macro.dbt.current_timestamp": { @@ -10054,7 +10027,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.707132, + "created_at": 1708330298.6857924, "supported_languages": null }, "macro.dbt.default__current_timestamp": { @@ -10076,7 +10049,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7072425, + "created_at": 1708330298.6859424, "supported_languages": null }, "macro.dbt.snapshot_get_time": { @@ -10100,7 +10073,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.707349, + "created_at": 1708330298.6860795, "supported_languages": null }, "macro.dbt.default__snapshot_get_time": { @@ -10124,7 +10097,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7074232, + "created_at": 1708330298.6861804, "supported_languages": null }, "macro.dbt.current_timestamp_backcompat": { @@ -10148,7 +10121,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7075512, + 
"created_at": 1708330298.686342, "supported_languages": null }, "macro.dbt.default__current_timestamp_backcompat": { @@ -10170,7 +10143,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7076056, + "created_at": 1708330298.686416, "supported_languages": null }, "macro.dbt.current_timestamp_in_utc_backcompat": { @@ -10194,7 +10167,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7077236, + "created_at": 1708330298.686591, "supported_languages": null }, "macro.dbt.default__current_timestamp_in_utc_backcompat": { @@ -10219,7 +10192,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7078428, + "created_at": 1708330298.686751, "supported_languages": null }, "macro.dbt.create_schema": { @@ -10243,7 +10216,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7081206, + "created_at": 1708330298.6871312, "supported_languages": null }, "macro.dbt.default__create_schema": { @@ -10267,7 +10240,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.708255, + "created_at": 1708330298.6873097, "supported_languages": null }, "macro.dbt.drop_schema": { @@ -10291,7 +10264,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7083747, + "created_at": 1708330298.6874635, "supported_languages": null }, "macro.dbt.default__drop_schema": { @@ -10315,7 +10288,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7085874, + "created_at": 1708330298.687781, "supported_languages": null }, "macro.dbt.alter_column_comment": { @@ -10339,7 +10312,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7091265, + "created_at": 1708330298.6884515, "supported_languages": null }, "macro.dbt.default__alter_column_comment": { @@ -10361,7 +10334,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7092514, + "created_at": 1708330298.6886406, "supported_languages": null }, "macro.dbt.alter_relation_comment": { @@ -10385,7 +10358,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7093942, + "created_at": 1708330298.6888337, "supported_languages": null }, "macro.dbt.default__alter_relation_comment": { @@ -10407,7 +10380,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7095273, + "created_at": 1708330298.689002, "supported_languages": null }, "macro.dbt.persist_docs": { @@ -10431,7 +10404,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.709718, + "created_at": 1708330298.6893237, "supported_languages": null }, "macro.dbt.default__persist_docs": { @@ -10457,7 +10430,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7100742, + "created_at": 1708330298.6899436, "supported_languages": null }, "macro.dbt.get_show_sql": { @@ -10481,7 +10454,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7104666, + "created_at": 1708330298.6905391, "supported_languages": null }, "macro.dbt.get_limit_subquery_sql": { @@ -10505,7 +10478,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7106028, + "created_at": 1708330298.690736, "supported_languages": null }, "macro.dbt.default__get_limit_subquery_sql": { @@ -10527,7 +10500,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.710701, + "created_at": 1708330298.6908708, "supported_languages": null }, "macro.dbt.get_catalog_relations": { @@ -10551,7 +10524,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7128594, + "created_at": 1708330298.6937242, 
"supported_languages": null }, "macro.dbt.default__get_catalog_relations": { @@ -10573,7 +10546,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7130463, + "created_at": 1708330298.6939764, "supported_languages": null }, "macro.dbt.get_catalog": { @@ -10597,7 +10570,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.713192, + "created_at": 1708330298.6941724, "supported_languages": null }, "macro.dbt.default__get_catalog": { @@ -10619,7 +10592,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.713377, + "created_at": 1708330298.6944196, "supported_languages": null }, "macro.dbt.information_schema_name": { @@ -10643,7 +10616,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.713514, + "created_at": 1708330298.6946058, "supported_languages": null }, "macro.dbt.default__information_schema_name": { @@ -10665,7 +10638,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7136285, + "created_at": 1708330298.6947546, "supported_languages": null }, "macro.dbt.list_schemas": { @@ -10689,7 +10662,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.713758, + "created_at": 1708330298.6949258, "supported_languages": null }, "macro.dbt.default__list_schemas": { @@ -10714,7 +10687,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7139344, + "created_at": 1708330298.6951747, "supported_languages": null }, "macro.dbt.check_schema_exists": { @@ -10738,7 +10711,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.71408, + "created_at": 1708330298.695369, "supported_languages": null }, "macro.dbt.default__check_schema_exists": { @@ -10763,7 +10736,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7143009, + "created_at": 1708330298.6956708, "supported_languages": null }, "macro.dbt.list_relations_without_caching": { @@ -10787,7 +10760,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7144334, + "created_at": 1708330298.69585, "supported_languages": null }, "macro.dbt.default__list_relations_without_caching": { @@ -10809,7 +10782,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7145672, + "created_at": 1708330298.6960087, "supported_languages": null }, "macro.dbt.get_relations": { @@ -10833,7 +10806,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7146888, + "created_at": 1708330298.6961653, "supported_languages": null }, "macro.dbt.default__get_relations": { @@ -10855,7 +10828,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7148013, + "created_at": 1708330298.6963146, "supported_languages": null }, "macro.dbt.get_relation_last_modified": { @@ -10879,7 +10852,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.714948, + "created_at": 1708330298.6965284, "supported_languages": null }, "macro.dbt.default__get_relation_last_modified": { @@ -10901,7 +10874,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7150745, + "created_at": 1708330298.6967025, "supported_languages": null }, "macro.dbt.validate_sql": { @@ -10925,7 +10898,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7152803, + "created_at": 1708330298.6969883, "supported_languages": null }, "macro.dbt.default__validate_sql": { @@ -10949,7 +10922,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.715514, + "created_at": 1708330298.6973152, "supported_languages": null }, 
"macro.dbt.make_intermediate_relation": { @@ -10973,7 +10946,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7170892, + "created_at": 1708330298.6996481, "supported_languages": null }, "macro.dbt.default__make_intermediate_relation": { @@ -10997,7 +10970,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7172108, + "created_at": 1708330298.6998343, "supported_languages": null }, "macro.dbt.make_temp_relation": { @@ -11021,7 +10994,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7173676, + "created_at": 1708330298.7000697, "supported_languages": null }, "macro.dbt.default__make_temp_relation": { @@ -11043,7 +11016,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7175884, + "created_at": 1708330298.700396, "supported_languages": null }, "macro.dbt.make_backup_relation": { @@ -11067,7 +11040,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7177603, + "created_at": 1708330298.7006702, "supported_languages": null }, "macro.dbt.default__make_backup_relation": { @@ -11089,7 +11062,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.717989, + "created_at": 1708330298.7010062, "supported_languages": null }, "macro.dbt.truncate_relation": { @@ -11113,7 +11086,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7181194, + "created_at": 1708330298.7011907, "supported_languages": null }, "macro.dbt.default__truncate_relation": { @@ -11137,7 +11110,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7182357, + "created_at": 1708330298.701359, "supported_languages": null }, "macro.dbt.get_or_create_relation": { @@ -11161,7 +11134,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7184162, + "created_at": 1708330298.701628, "supported_languages": null }, "macro.dbt.default__get_or_create_relation": { @@ -11183,7 +11156,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7188413, + "created_at": 1708330298.7022717, "supported_languages": null }, "macro.dbt.load_cached_relation": { @@ -11205,7 +11178,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.71901, + "created_at": 1708330298.7025523, "supported_languages": null }, "macro.dbt.load_relation": { @@ -11229,7 +11202,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7191117, + "created_at": 1708330298.7026973, "supported_languages": null }, "macro.dbt.get_create_index_sql": { @@ -11253,7 +11226,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7197466, + "created_at": 1708330298.703569, "supported_languages": null }, "macro.dbt.default__get_create_index_sql": { @@ -11275,7 +11248,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7198482, + "created_at": 1708330298.7037008, "supported_languages": null }, "macro.dbt.create_indexes": { @@ -11299,7 +11272,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7199607, + "created_at": 1708330298.703851, "supported_languages": null }, "macro.dbt.default__create_indexes": { @@ -11324,7 +11297,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7202456, + "created_at": 1708330298.7042568, "supported_languages": null }, "macro.dbt.get_drop_index_sql": { @@ -11348,7 +11321,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7203767, + "created_at": 1708330298.7044408, "supported_languages": null }, "macro.dbt.default__get_drop_index_sql": { @@ -11370,7 +11343,7 
@@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.720484, + "created_at": 1708330298.704594, "supported_languages": null }, "macro.dbt.get_show_indexes_sql": { @@ -11394,7 +11367,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7205935, + "created_at": 1708330298.7047448, "supported_languages": null }, "macro.dbt.default__get_show_indexes_sql": { @@ -11416,7 +11389,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7206821, + "created_at": 1708330298.7048671, "supported_languages": null }, "macro.dbt.collect_freshness": { @@ -11440,7 +11413,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7209866, + "created_at": 1708330298.7052772, "supported_languages": null }, "macro.dbt.default__collect_freshness": { @@ -11465,7 +11438,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7212818, + "created_at": 1708330298.7057054, "supported_languages": null }, "macro.dbt.copy_grants": { @@ -11489,7 +11462,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7226195, + "created_at": 1708330298.7075596, "supported_languages": null }, "macro.dbt.default__copy_grants": { @@ -11511,7 +11484,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7227015, + "created_at": 1708330298.70769, "supported_languages": null }, "macro.dbt.support_multiple_grantees_per_dcl_statement": { @@ -11535,7 +11508,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7228231, + "created_at": 1708330298.7078502, "supported_languages": null }, "macro.dbt.default__support_multiple_grantees_per_dcl_statement": { @@ -11557,7 +11530,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7229025, + "created_at": 1708330298.7079563, "supported_languages": null }, "macro.dbt.should_revoke": { @@ -11581,7 +11554,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7233257, + "created_at": 1708330298.708632, "supported_languages": null }, "macro.dbt.get_show_grant_sql": { @@ -11605,7 +11578,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7234676, + "created_at": 1708330298.7088225, "supported_languages": null }, "macro.dbt.default__get_show_grant_sql": { @@ -11627,7 +11600,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7235425, + "created_at": 1708330298.7089264, "supported_languages": null }, "macro.dbt.get_grant_sql": { @@ -11651,7 +11624,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7237387, + "created_at": 1708330298.7091422, "supported_languages": null }, "macro.dbt.default__get_grant_sql": { @@ -11673,7 +11646,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.72389, + "created_at": 1708330298.7094028, "supported_languages": null }, "macro.dbt.get_revoke_sql": { @@ -11697,7 +11670,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7240582, + "created_at": 1708330298.7096798, "supported_languages": null }, "macro.dbt.default__get_revoke_sql": { @@ -11719,7 +11692,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.724196, + "created_at": 1708330298.7098777, "supported_languages": null }, "macro.dbt.get_dcl_statement_list": { @@ -11743,7 +11716,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7243588, + "created_at": 1708330298.7101038, "supported_languages": null }, "macro.dbt.default__get_dcl_statement_list": { @@ -11767,7 +11740,7 @@ }, "patch_path": null, "arguments": [], - 
"created_at": 1706145554.7248604, + "created_at": 1708330298.7108228, "supported_languages": null }, "macro.dbt.call_dcl_statements": { @@ -11791,7 +11764,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7250001, + "created_at": 1708330298.7110174, "supported_languages": null }, "macro.dbt.default__call_dcl_statements": { @@ -11815,7 +11788,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7251859, + "created_at": 1708330298.7112856, "supported_languages": null }, "macro.dbt.apply_grants": { @@ -11839,7 +11812,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7253506, + "created_at": 1708330298.7115273, "supported_languages": null }, "macro.dbt.default__apply_grants": { @@ -11866,7 +11839,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7261891, + "created_at": 1708330298.7127564, "supported_languages": null }, "macro.dbt.get_columns_in_relation": { @@ -11890,7 +11863,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7279563, + "created_at": 1708330298.715107, "supported_languages": null }, "macro.dbt.default__get_columns_in_relation": { @@ -11912,7 +11885,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7280762, + "created_at": 1708330298.715303, "supported_languages": null }, "macro.dbt.sql_convert_columns_in_relation": { @@ -11934,7 +11907,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7283032, + "created_at": 1708330298.7156436, "supported_languages": null }, "macro.dbt.get_empty_subquery_sql": { @@ -11958,7 +11931,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7284687, + "created_at": 1708330298.7158732, "supported_languages": null }, "macro.dbt.default__get_empty_subquery_sql": { @@ -11980,7 +11953,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7286265, + "created_at": 1708330298.7160895, "supported_languages": null }, "macro.dbt.get_empty_schema_sql": { @@ -12004,7 +11977,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7287583, + "created_at": 1708330298.716269, "supported_languages": null }, "macro.dbt.default__get_empty_schema_sql": { @@ -12026,7 +11999,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7296267, + "created_at": 1708330298.7174451, "supported_languages": null }, "macro.dbt.get_column_schema_from_query": { @@ -12050,7 +12023,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7298827, + "created_at": 1708330298.7178178, "supported_languages": null }, "macro.dbt.get_columns_in_query": { @@ -12074,7 +12047,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7300174, + "created_at": 1708330298.7180011, "supported_languages": null }, "macro.dbt.default__get_columns_in_query": { @@ -12099,7 +12072,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7302794, + "created_at": 1708330298.7183414, "supported_languages": null }, "macro.dbt.alter_column_type": { @@ -12123,7 +12096,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7304437, + "created_at": 1708330298.7185905, "supported_languages": null }, "macro.dbt.default__alter_column_type": { @@ -12147,7 +12120,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.730887, + "created_at": 1708330298.7191694, "supported_languages": null }, "macro.dbt.alter_relation_add_remove_columns": { @@ -12171,7 +12144,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 
1706145554.7310727, + "created_at": 1708330298.7194304, "supported_languages": null }, "macro.dbt.default__alter_relation_add_remove_columns": { @@ -12195,7 +12168,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.75984, + "created_at": 1708330298.753678, "supported_languages": null }, "macro.dbt.test_unique": { @@ -12219,7 +12192,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7602737, + "created_at": 1708330298.754263, "supported_languages": null }, "macro.dbt.test_not_null": { @@ -12243,7 +12216,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7604444, + "created_at": 1708330298.7545114, "supported_languages": null }, "macro.dbt.test_accepted_values": { @@ -12267,7 +12240,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7606668, + "created_at": 1708330298.7547958, "supported_languages": null }, "macro.dbt.test_relationships": { @@ -12291,7 +12264,7 @@ }, "patch_path": null, "arguments": [], - "created_at": 1706145554.7608657, + "created_at": 1708330298.7550614, "supported_languages": null } }, @@ -12307,25 +12280,25 @@ } }, "exposures": { - "exposure.sandbox.customers": { - "name": "customers", + "exposure.sandbox.clients": { + "name": "clients", "resource_type": "exposure", "package_name": "sandbox", "path": "exposures/our_analytics.yml", "original_file_path": "models/exposures/our_analytics.yml", - "unique_id": "exposure.sandbox.customers", + "unique_id": "exposure.sandbox.clients", "fqn": [ "sandbox", "exposures", - "customers" + "clients" ], "type": "analysis", "owner": { "email": "[email protected]", "name": "dbtmetabase" }, - "description": "### Visualization: Line\n\nCustomers test\n\n#### Metadata\n\nMetabase ID: __1__\n\nCreated On: __2024-01-24T05:33:25.647584__", - "label": "Customers", + "description": "### Visualization: Table\n\nNo description provided in Metabase\n\n#### Metadata\n\nMetabase ID: __3__\n\nCreated On: __2024-02-19T03:48:22.030934__", + "label": "clients", "maturity": "medium", "meta": {}, "tags": [], @@ -12333,7 +12306,7 @@ "enabled": true }, "unrendered_config": {}, - "url": "http://localhost:3000/card/1", + "url": "http://localhost:3000/card/3", "depends_on": { "macros": [], "nodes": [ @@ -12349,27 +12322,27 @@ ], "sources": [], "metrics": [], - "created_at": 1706145555.0796468 + "created_at": 1708330299.1868012 }, - "exposure.sandbox.orders": { - "name": "orders", + "exposure.sandbox.customers": { + "name": "customers", "resource_type": "exposure", "package_name": "sandbox", "path": "exposures/our_analytics.yml", "original_file_path": "models/exposures/our_analytics.yml", - "unique_id": "exposure.sandbox.orders", + "unique_id": "exposure.sandbox.customers", "fqn": [ "sandbox", "exposures", - "orders" + "customers" ], "type": "analysis", "owner": { "email": "[email protected]", "name": "dbtmetabase" }, - "description": "### Visualization: Table\n\nNo description provided in Metabase\n\n#### Metadata\n\nMetabase ID: __2__\n\nCreated On: __2024-01-24T05:58:57.73953__", - "label": "Orders", + "description": "### Visualization: Line\n\nCustomers test\n\n#### Metadata\n\nMetabase ID: __1__\n\nCreated On: __2024-02-19T03:42:12.127181__", + "label": "Customers", "maturity": "medium", "meta": {}, "tags": [], @@ -12377,43 +12350,43 @@ "enabled": true }, "unrendered_config": {}, - "url": "http://localhost:3000/card/2", + "url": "http://localhost:3000/card/1", "depends_on": { "macros": [], "nodes": [ - "model.sandbox.orders" + "model.sandbox.customers" ] }, "refs": [ { - 
"name": "orders", + "name": "customers", "package": null, "version": null } ], "sources": [], "metrics": [], - "created_at": 1706145555.0805569 + "created_at": 1708330299.1877074 }, - "exposure.sandbox.stg_payments": { - "name": "stg_payments", + "exposure.sandbox.orders": { + "name": "orders", "resource_type": "exposure", "package_name": "sandbox", "path": "exposures/our_analytics.yml", "original_file_path": "models/exposures/our_analytics.yml", - "unique_id": "exposure.sandbox.stg_payments", + "unique_id": "exposure.sandbox.orders", "fqn": [ "sandbox", "exposures", - "stg_payments" + "orders" ], "type": "analysis", "owner": { "email": "[email protected]", "name": "dbtmetabase" }, - "description": "### Visualization: Table\n\nNo description provided in Metabase\n\n#### Metadata\n\nMetabase ID: __3__\n\nCreated On: __2024-01-24T05:59:13.224303__", - "label": "Stg Payments", + "description": "### Visualization: Table\n\nNo description provided in Metabase\n\n#### Metadata\n\nMetabase ID: __2__\n\nCreated On: __2024-02-19T03:48:08.251792__", + "label": "Orders", "maturity": "medium", "meta": {}, "tags": [], @@ -12421,23 +12394,23 @@ "enabled": true }, "unrendered_config": {}, - "url": "http://localhost:3000/card/3", + "url": "http://localhost:3000/card/2", "depends_on": { "macros": [], "nodes": [ - "model.sandbox.stg_payments" + "model.sandbox.orders" ] }, "refs": [ { - "name": "stg_payments", + "name": "orders", "package": null, "version": null } ], "sources": [], "metrics": [], - "created_at": 1706145555.0812972 + "created_at": 1708330299.1887019 } }, "metrics": {}, @@ -12450,9 +12423,14 @@ "model.sandbox.stg_orders", "model.sandbox.stg_payments" ], - "model.sandbox.orders": [ - "model.sandbox.stg_orders", - "model.sandbox.stg_payments" + "seed.sandbox.raw_customers": [], + "seed.sandbox.raw_orders": [], + "seed.sandbox.raw_payments": [], + "test.sandbox.unique_customers_customer_id.c5af1ff4b1": [ + "model.sandbox.customers" + ], + "test.sandbox.not_null_customers_customer_id.5c9bf9911d": [ + "model.sandbox.customers" ], "model.sandbox.stg_customers": [ "seed.sandbox.raw_customers" @@ -12463,14 +12441,33 @@ "model.sandbox.stg_orders": [ "seed.sandbox.raw_orders" ], - "seed.sandbox.raw_customers": [], - "seed.sandbox.raw_orders": [], - "seed.sandbox.raw_payments": [], - "test.sandbox.unique_customers_customer_id.c5af1ff4b1": [ - "model.sandbox.customers" + "test.sandbox.unique_stg_customers_customer_id.c7614daada": [ + "model.sandbox.stg_customers" ], - "test.sandbox.not_null_customers_customer_id.5c9bf9911d": [ - "model.sandbox.customers" + "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa": [ + "model.sandbox.stg_customers" + ], + "test.sandbox.unique_stg_payments_payment_id.3744510712": [ + "model.sandbox.stg_payments" + ], + "test.sandbox.not_null_stg_payments_payment_id.c19cc50075": [ + "model.sandbox.stg_payments" + ], + "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278": [ + "model.sandbox.stg_payments" + ], + "test.sandbox.unique_stg_orders_order_id.e3b841c71a": [ + "model.sandbox.stg_orders" + ], + "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64": [ + "model.sandbox.stg_orders" + ], + "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad": [ + "model.sandbox.stg_orders" + ], + "model.sandbox.orders": [ + "model.sandbox.stg_orders", + "model.sandbox.stg_payments" ], "test.sandbox.unique_orders_order_id.fed79b3a6e": [ "model.sandbox.orders" @@ 
-12481,10 +12478,6 @@ "test.sandbox.not_null_orders_customer_id.c5f02694af": [ "model.sandbox.orders" ], - "test.sandbox.relationships_orders_customer_id__customer_id__ref_customers_.c6ec7f58f2": [ - "model.sandbox.customers", - "model.sandbox.orders" - ], "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3": [ "model.sandbox.orders" ], @@ -12503,67 +12496,40 @@ "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a": [ "model.sandbox.orders" ], - "test.sandbox.unique_stg_customers_customer_id.c7614daada": [ - "model.sandbox.stg_customers" - ], - "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa": [ - "model.sandbox.stg_customers" - ], - "test.sandbox.unique_stg_orders_order_id.e3b841c71a": [ - "model.sandbox.stg_orders" - ], - "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64": [ - "model.sandbox.stg_orders" - ], - "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad": [ - "model.sandbox.stg_orders" - ], - "test.sandbox.unique_stg_payments_payment_id.3744510712": [ - "model.sandbox.stg_payments" - ], - "test.sandbox.not_null_stg_payments_payment_id.c19cc50075": [ - "model.sandbox.stg_payments" - ], - "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278": [ - "model.sandbox.stg_payments" + "exposure.sandbox.clients": [ + "model.sandbox.customers" ], "exposure.sandbox.customers": [ "model.sandbox.customers" ], "exposure.sandbox.orders": [ "model.sandbox.orders" - ], - "exposure.sandbox.stg_payments": [ - "model.sandbox.stg_payments" ] }, "child_map": { "model.sandbox.customers": [ + "exposure.sandbox.clients", "exposure.sandbox.customers", "test.sandbox.not_null_customers_customer_id.5c9bf9911d", - "test.sandbox.relationships_orders_customer_id__customer_id__ref_customers_.c6ec7f58f2", "test.sandbox.unique_customers_customer_id.c5af1ff4b1" ], - "model.sandbox.orders": [ - "exposure.sandbox.orders", - "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3", - "test.sandbox.not_null_orders_amount.106140f9fd", - "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49", - "test.sandbox.not_null_orders_coupon_amount.ab90c90625", - "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59", - "test.sandbox.not_null_orders_customer_id.c5f02694af", - "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a", - "test.sandbox.not_null_orders_order_id.cf6c17daed", - "test.sandbox.relationships_orders_customer_id__customer_id__ref_customers_.c6ec7f58f2", - "test.sandbox.unique_orders_order_id.fed79b3a6e" + "seed.sandbox.raw_customers": [ + "model.sandbox.stg_customers" + ], + "seed.sandbox.raw_orders": [ + "model.sandbox.stg_orders" + ], + "seed.sandbox.raw_payments": [ + "model.sandbox.stg_payments" ], + "test.sandbox.unique_customers_customer_id.c5af1ff4b1": [], + "test.sandbox.not_null_customers_customer_id.5c9bf9911d": [], "model.sandbox.stg_customers": [ "model.sandbox.customers", "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa", "test.sandbox.unique_stg_customers_customer_id.c7614daada" ], "model.sandbox.stg_payments": [ - "exposure.sandbox.stg_payments", "model.sandbox.customers", "model.sandbox.orders", "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278", @@ -12577,38 +12543,38 @@ "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64", 
"test.sandbox.unique_stg_orders_order_id.e3b841c71a" ], - "seed.sandbox.raw_customers": [ - "model.sandbox.stg_customers" - ], - "seed.sandbox.raw_orders": [ - "model.sandbox.stg_orders" - ], - "seed.sandbox.raw_payments": [ - "model.sandbox.stg_payments" + "test.sandbox.unique_stg_customers_customer_id.c7614daada": [], + "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa": [], + "test.sandbox.unique_stg_payments_payment_id.3744510712": [], + "test.sandbox.not_null_stg_payments_payment_id.c19cc50075": [], + "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278": [], + "test.sandbox.unique_stg_orders_order_id.e3b841c71a": [], + "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64": [], + "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad": [], + "model.sandbox.orders": [ + "exposure.sandbox.orders", + "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3", + "test.sandbox.not_null_orders_amount.106140f9fd", + "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49", + "test.sandbox.not_null_orders_coupon_amount.ab90c90625", + "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59", + "test.sandbox.not_null_orders_customer_id.c5f02694af", + "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a", + "test.sandbox.not_null_orders_order_id.cf6c17daed", + "test.sandbox.unique_orders_order_id.fed79b3a6e" ], - "test.sandbox.unique_customers_customer_id.c5af1ff4b1": [], - "test.sandbox.not_null_customers_customer_id.5c9bf9911d": [], "test.sandbox.unique_orders_order_id.fed79b3a6e": [], "test.sandbox.not_null_orders_order_id.cf6c17daed": [], "test.sandbox.not_null_orders_customer_id.c5f02694af": [], - "test.sandbox.relationships_orders_customer_id__customer_id__ref_customers_.c6ec7f58f2": [], "test.sandbox.accepted_values_orders_status__placed__shipped__completed__return_pending__returned.be6b5b5ec3": [], "test.sandbox.not_null_orders_amount.106140f9fd": [], "test.sandbox.not_null_orders_credit_card_amount.d3ca593b59": [], "test.sandbox.not_null_orders_coupon_amount.ab90c90625": [], "test.sandbox.not_null_orders_bank_transfer_amount.7743500c49": [], "test.sandbox.not_null_orders_gift_card_amount.413a0d2d7a": [], - "test.sandbox.unique_stg_customers_customer_id.c7614daada": [], - "test.sandbox.not_null_stg_customers_customer_id.e2cfb1f9aa": [], - "test.sandbox.unique_stg_orders_order_id.e3b841c71a": [], - "test.sandbox.not_null_stg_orders_order_id.81cfe2fe64": [], - "test.sandbox.accepted_values_stg_orders_status__placed__shipped__completed__return_pending__returned.080fb20aad": [], - "test.sandbox.unique_stg_payments_payment_id.3744510712": [], - "test.sandbox.not_null_stg_payments_payment_id.c19cc50075": [], - "test.sandbox.accepted_values_stg_payments_payment_method__credit_card__coupon__bank_transfer__gift_card.3c3820f278": [], + "exposure.sandbox.clients": [], "exposure.sandbox.customers": [], - "exposure.sandbox.orders": [], - "exposure.sandbox.stg_payments": [] + "exposure.sandbox.orders": [] }, "group_map": {}, "saved_queries": {}, diff --git a/tests/test_manifest.py b/tests/test_manifest.py index 0b0c0a1..be5eda9 100644 --- a/tests/test_manifest.py +++ b/tests/test_manifest.py @@ -1,4 +1,5 @@ import unittest +from operator import attrgetter from typing import Optional, Sequence from dbtmetabase.manifest import Column, Group, Manifest, Model @@ -20,7 +21,7 @@ class 
TestManifest(unittest.TestCase): def test_v11(self): models = Manifest(FIXTURES_PATH / "manifest-v11.json").read_models() - self.assertEqual( + self._assertModelsEqual( models, [ Model( @@ -78,6 +79,7 @@ class TestManifest(unittest.TestCase): Column( name="order_id", description="This is a unique identifier for an order", + semantic_type="type/PK", ), Column( name="customer_id", @@ -128,7 +130,15 @@ class TestManifest(unittest.TestCase): Column( name="customer_id", description="", - ) + ), + Column( + name="first_name", + description="", + ), + Column( + name="last_name", + description="", + ), ], ), Model( @@ -148,6 +158,14 @@ class TestManifest(unittest.TestCase): name="payment_method", description="", ), + Column( + name="order_id", + description="", + ), + Column( + name="amount", + description="", + ), ], ), Model( @@ -167,6 +185,14 @@ class TestManifest(unittest.TestCase): name="status", description="", ), + Column( + name="order_date", + description="", + ), + Column( + name="customer_id", + description="", + ), ], ), ], @@ -174,7 +200,7 @@ class TestManifest(unittest.TestCase): def test_v2(self): models = Manifest(FIXTURES_PATH / "manifest-v2.json").read_models() - self.assertEqual( + self._assertModelsEqual( models, [ Model( @@ -324,6 +350,44 @@ class TestManifest(unittest.TestCase): ], ) + def _assertModelsEqual( + self, + first: Sequence[Model], + second: Sequence[Model], + ): + self.assertEqual(len(first), len(second), "mismatched model count") + + first = sorted(first, key=attrgetter("name")) + second = sorted(second, key=attrgetter("name")) + + for i, first_model in enumerate(first): + second_model = second[i] + self.assertEqual(first_model.name, second_model.name, "wrong model") + self.assertEqual( + len(first_model.columns), + len(second_model.columns), + f"mismatched column count in {first_model.name}", + ) + for j, first_column in enumerate(first_model.columns): + second_column = second_model.columns[j] + self.assertEqual( + first_column.name, + second_column.name, + f"wrong column in model {first_model.name}", + ) + self.assertEqual( + first_column, + second_column, + f"mismatched column {first_model.name}.{first_column.name}", + ) + self.assertEqual( + first_model, + second_model, + f"mismatched model {first_model.name}", + ) + + self.assertEqual(first, second) + @staticmethod def _find_model(models: Sequence[Model], model_name: str) -> Optional[Model]: filtered = [m for m in models if m.name == model_name]
Add support for constraints [Constraints](https://docs.getdbt.com/reference/resource-properties/constraints) seem to be the modern way to define relationships in dbt. Plugins such as [dbt_constraints](https://hub.getdbt.com/Snowflake-Labs/dbt_constraints/latest/) make use of this metadata. Should `dbt-metabase` support constraints, as well as tests, for defining relationships?
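For context, a minimal sketch of how relationship hints could be pulled from such constraint metadata in a compiled dbt manifest — the manifest path, node shapes, and the `primary_key`/`foreign_key` → semantic-type mapping here are assumptions for illustration, not dbt-metabase's actual parser:

```python
import json

# Assumed location: dbt writes manifest.json under target/ after a run.
with open("target/manifest.json") as f:
    manifest = json.load(f)

for unique_id, node in manifest["nodes"].items():
    if node["resource_type"] != "model":
        continue
    for col_name, column in node.get("columns", {}).items():
        for constraint in column.get("constraints", []):
            # Recent manifest schemas carry entries shaped like
            # {"type": "primary_key"} or {"type": "foreign_key", ...}.
            if constraint.get("type") == "primary_key":
                print(f"{unique_id}.{col_name}: candidate type/PK")
            elif constraint.get("type") == "foreign_key":
                print(f"{unique_id}.{col_name}: candidate type/FK")
```

This matches the direction of the test change above, where `order_id` gains `semantic_type="type/PK"`.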
0.0
d8788cc994f64730bb2e5f6c7818b5cd9f86c756
[ "tests/test_manifest.py::TestManifest::test_v11" ]
[ "tests/test_manifest.py::TestManifest::test_v11_disabled", "tests/test_manifest.py::TestManifest::test_v2" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-02-20 04:30:52+00:00
mit
2,660
grabbles__grabbit-6
diff --git a/grabbit/core.py b/grabbit/core.py index 84009db..a2a87be 100644 --- a/grabbit/core.py +++ b/grabbit/core.py @@ -197,7 +197,7 @@ class Layout(object): return_type (str): Type of result to return. Valid values: 'tuple': returns a list of namedtuples containing file name as well as attribute/value pairs for all named entities. - 'file': returns a list of File instances. + 'file': returns a list of matching filenames. 'dir': returns a list of directories. 'id': returns a list of unique IDs. Must be used together with a valid target. @@ -222,7 +222,7 @@ class Layout(object): result.append(file) if return_type == 'file': - return result + return natural_sort([f.path for f in result]) if return_type == 'tuple': result = [r.as_named_tuple() for r in result]
grabbles/grabbit
afe361809ca5c040a46caa9f8a9bae017bcc706e
diff --git a/grabbit/tests/test_core.py b/grabbit/tests/test_core.py index 0c92377..11da286 100644 --- a/grabbit/tests/test_core.py +++ b/grabbit/tests/test_core.py @@ -127,6 +127,8 @@ class TestLayout: result = layout.get(target='subject', return_type='dir') assert os.path.exists(result[0]) assert os.path.isdir(result[0]) + result = layout.get(target='subject', type='phasediff', return_type='file') + assert all([os.path.exists(f) for f in result]) def test_unique_and_count(self, layout): result = layout.unique('subject')
Redefining File class is confusing Returning File objects, which are something different from Python's built-in [file object](https://docs.python.org/3/glossary.html#term-file-object), is confusing.
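A minimal sketch of the behaviour the fix above lands on, assuming a hypothetical project directory and grabbit config (the `Layout` constructor has changed across grabbit versions, so treat the setup lines as placeholders):

```python
import os
from grabbit import Layout

# Placeholder project/config paths; adjust to the constructor signature
# of the grabbit version you have installed.
layout = Layout("/data/project", "/data/project/config.json")

# With the fix, return_type='file' yields naturally sorted filename
# strings rather than grabbit's internal File wrapper objects.
files = layout.get(target="subject", type="phasediff", return_type="file")
assert all(isinstance(f, str) and os.path.exists(f) for f in files)
```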
0.0
afe361809ca5c040a46caa9f8a9bae017bcc706e
[ "grabbit/tests/test_core.py::TestLayout::test_querying" ]
[ "grabbit/tests/test_core.py::TestFile::test_init", "grabbit/tests/test_core.py::TestFile::test_matches", "grabbit/tests/test_core.py::TestFile::test_named_tuple", "grabbit/tests/test_core.py::TestEntity::test_init", "grabbit/tests/test_core.py::TestEntity::test_matches", "grabbit/tests/test_core.py::TestEntity::test_unique_and_count", "grabbit/tests/test_core.py::TestEntity::test_add_file", "grabbit/tests/test_core.py::TestLayout::test_init", "grabbit/tests/test_core.py::TestLayout::test_absolute_paths", "grabbit/tests/test_core.py::TestLayout::test_dynamic_getters", "grabbit/tests/test_core.py::TestLayout::test_unique_and_count" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2016-08-18 05:30:39+00:00
mit
2,661
grabbles__grabbit-81
diff --git a/.gitignore b/.gitignore index ae8d540..51e101d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ historical/ -# +# # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -92,3 +92,5 @@ ENV/ *.DS_Store *.orig + +.pytest_cache diff --git a/grabbit/core.py b/grabbit/core.py index 77616e7..0b77c3d 100644 --- a/grabbit/core.py +++ b/grabbit/core.py @@ -273,10 +273,7 @@ class Entity(object): m = self.regex.search(f.path) val = m.group(1) if m is not None else None - if val is not None and self.dtype is not None: - val = self.dtype(val) - - return val + return self._astype(val) def add_file(self, filename, value): """ Adds the specified filename to tracking. """ @@ -296,6 +293,11 @@ class Entity(object): """ return len(self.files) if files else len(self.unique()) + def _astype(self, val): + if val is not None and self.dtype is not None: + val = self.dtype(val) + return val + class Layout(object): @@ -860,7 +862,7 @@ class Layout(object): for ent in self.entities.values(): m = ent.regex.search(path) if m: - entities[ent.name] = m.group(1) + entities[ent.name] = ent._astype(m.group(1)) # Remove any entities we want to ignore when strict matching is on if strict and ignore_strict_entities is not None: diff --git a/grabbit/utils.py b/grabbit/utils.py index cb4bf64..7e4d143 100644 --- a/grabbit/utils.py +++ b/grabbit/utils.py @@ -13,6 +13,8 @@ def natural_sort(l, field=None): def alphanum_key(key): if field is not None: key = getattr(key, field) + if not isinstance(key, str): + key = str(key) return [convert(c) for c in re.split('([0-9]+)', key)] return sorted(l, key=alphanum_key)
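A standalone sketch of the guard this diff adds to `natural_sort`: the `isinstance` check coerces non-string keys (for example run numbers stored with `dtype=int`) to `str` before `re.split`, which only accepts strings:

```python
import re

def natural_sort(items, field=None):
    convert = lambda text: int(text) if text.isdigit() else text.lower()

    def alphanum_key(key):
        if field is not None:
            key = getattr(key, field)
        if not isinstance(key, str):
            # The fix: ints are coerced to str so re.split does not raise.
            key = str(key)
        return [convert(c) for c in re.split("([0-9]+)", key)]

    return sorted(items, key=alphanum_key)

print(natural_sort([10, 2, 1]))  # [1, 2, 10]; raised TypeError before the fix
```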
grabbles/grabbit
9ac2624e836805699f657970483f55de7ee33c82
diff --git a/grabbit/tests/test_core.py b/grabbit/tests/test_core.py index 61dcb0b..29eb02a 100644 --- a/grabbit/tests/test_core.py +++ b/grabbit/tests/test_core.py @@ -243,6 +243,7 @@ class TestLayout: layout = Layout([(data_dir, config)], dynamic_getters=True) assert hasattr(layout, 'get_subjects') assert '01' in getattr(layout, 'get_subjects')() + assert 1 in getattr(layout, 'get_runs')() def test_querying(self, bids_layout): @@ -312,6 +313,12 @@ class TestLayout: assert len(nearest) == 3 assert nearest[0].subject == '01' + # Check for file with matching run (fails if types don't match) + nearest = bids_layout.get_nearest( + result, type='phasediff', extensions='.nii.gz') + assert nearest is not None + assert os.path.basename(nearest) == 'sub-01_ses-1_run-1_phasediff.nii.gz' + def test_index_regex(self, bids_layout, layout_include): targ = join('derivatives', 'excluded.json') assert targ not in bids_layout.files
"natural_sort" does not work if Entity is not string Related to: https://github.com/INCF/pybids/issues/237 If the dtype of an entity is set to `int`, the natural sort function (used in `get_runs` for example), will fail because it tries to use regex on an int.
0.0
9ac2624e836805699f657970483f55de7ee33c82
[ "grabbit/tests/test_core.py::TestLayout::test_get_nearest[local]", "grabbit/tests/test_core.py::TestLayout::test_dynamic_getters[/root/data/temp_dir/tmpccfrnzaz/grabbles__grabbit__0.0/grabbit/tests/data/7t_trt-/root/data/temp_dir/tmpccfrnzaz/grabbles__grabbit__0.0/grabbit/tests/specs/test.json]" ]
[ "grabbit/tests/test_core.py::TestFile::test_init", "grabbit/tests/test_core.py::TestFile::test_matches", "grabbit/tests/test_core.py::TestFile::test_named_tuple", "grabbit/tests/test_core.py::TestFile::test_named_tuple_with_reserved_name", "grabbit/tests/test_core.py::TestEntity::test_init", "grabbit/tests/test_core.py::TestEntity::test_matches", "grabbit/tests/test_core.py::TestEntity::test_unique_and_count", "grabbit/tests/test_core.py::TestEntity::test_add_file", "grabbit/tests/test_core.py::TestLayout::test_init[local]", "grabbit/tests/test_core.py::TestLayout::test_init_with_include_arg[local]", "grabbit/tests/test_core.py::TestLayout::test_init_with_exclude_arg[local]", "grabbit/tests/test_core.py::TestLayout::test_absolute_paths[local]", "grabbit/tests/test_core.py::TestLayout::test_querying[local]", "grabbit/tests/test_core.py::TestLayout::test_natsort[local]", "grabbit/tests/test_core.py::TestLayout::test_unique_and_count[local]", "grabbit/tests/test_core.py::TestLayout::test_index_regex[local]", "grabbit/tests/test_core.py::TestLayout::test_save_index[local]", "grabbit/tests/test_core.py::TestLayout::test_load_index[local]", "grabbit/tests/test_core.py::TestLayout::test_clone[local]", "grabbit/tests/test_core.py::TestLayout::test_parse_file_entities[local]", "grabbit/tests/test_core.py::test_merge_layouts[local]", "grabbit/tests/test_core.py::TestLayout::test_init_with_config_options", "grabbit/tests/test_core.py::TestLayout::test_entity_mapper", "grabbit/tests/test_core.py::TestLayout::test_excludes", "grabbit/tests/test_core.py::TestLayout::test_multiple_domains", "grabbit/tests/test_core.py::TestLayout::test_get_by_domain" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-08-20 22:08:32+00:00
mit
2,662
gradio-app__gradio-1437
diff --git a/gradio/blocks.py b/gradio/blocks.py index 6cb19350f..0b0a434e2 100644 --- a/gradio/blocks.py +++ b/gradio/blocks.py @@ -1,11 +1,13 @@ from __future__ import annotations +import copy import getpass import inspect import os import random import sys import time +import warnings import webbrowser from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple @@ -310,8 +312,91 @@ class Blocks(BlockContext): event_method = getattr(original_mapping[target], trigger) event_method(fn=fn, **dependency) + # Allows some use of Interface-specific methods with loaded Spaces + blocks.predict = [fns[0]] + dependency = blocks.dependencies[0] + blocks.input_components = [blocks.blocks[i] for i in dependency["inputs"]] + blocks.output_components = [blocks.blocks[o] for o in dependency["outputs"]] + + blocks.api_mode = True return blocks + def __call__(self, *params, fn_index=0): + """ + Allows Blocks objects to be called as functions + Parameters: + *params: the parameters to pass to the function + fn_index: the index of the function to call (defaults to 0, which for Interfaces, is the default prediction function) + """ + dependency = self.dependencies[fn_index] + block_fn = self.fns[fn_index] + + if self.api_mode: + serialized_params = [] + for i, input_id in enumerate(dependency["inputs"]): + block = self.blocks[input_id] + if getattr(block, "stateful", False): + raise ValueError( + "Cannot call Blocks object as a function if any of" + " the inputs are stateful." + ) + else: + serialized_input = block.serialize(params[i], True) + serialized_params.append(serialized_input) + else: + serialized_params = params + + processed_input = self.preprocess_data(fn_index, serialized_params, None) + + if inspect.iscoroutinefunction(block_fn.fn): + raise ValueError( + "Cannot call Blocks object as a function if the function is a coroutine" + ) + else: + predictions = block_fn.fn(*processed_input) + + output = self.postprocess_data(fn_index, predictions, None) + + if self.api_mode: + output_copy = copy.deepcopy(output) + deserialized_output = [] + for o, output_id in enumerate(dependency["outputs"]): + block = self.blocks[output_id] + if getattr(block, "stateful", False): + raise ValueError( + "Cannot call Blocks object as a function if any of" + " the outputs are stateful." 
+ ) + else: + deserialized = block.deserialize(output_copy[o]) + deserialized_output.append(deserialized) + else: + deserialized_output = output + + if len(deserialized_output) == 1: + return deserialized_output[0] + return deserialized_output + + def __str__(self): + return self.__repr__() + + def __repr__(self): + num_backend_fns = len([d for d in self.dependencies if d["backend_fn"]]) + repr = f"Gradio Blocks instance: {num_backend_fns} backend functions" + repr += "\n" + "-" * len(repr) + for d, dependency in enumerate(self.dependencies): + if dependency["backend_fn"]: + repr += f"\nfn_index={d}" + repr += "\n inputs:" + for input_id in dependency["inputs"]: + block = self.blocks[input_id] + repr += "\n |-{}".format(str(block)) + repr += "\n outputs:" + for output_id in dependency["outputs"]: + block = self.blocks[output_id] + repr += "\n |-{}".format(str(block)) + return repr + def render(self): if Context.root_block is not None: Context.root_block.blocks.update(self.blocks) @@ -320,22 +405,7 @@ class Blocks(BlockContext): if Context.block is not None: Context.block.children.extend(self.children) - async def process_api( - self, - data: PredictBody, - username: str = None, - state: Optional[Dict[int, any]] = None, - ) -> Dict[str, Any]: - """ - Processes API calls from the frontend. - Parameters: - data: data recieved from the frontend - username: name of user if authentication is set up - state: data stored from stateful components for session - Returns: None - """ - raw_input = data.data - fn_index = data.fn_index + def preprocess_data(self, fn_index, raw_input, state): block_fn = self.fns[fn_index] dependency = self.dependencies[fn_index] @@ -349,14 +419,24 @@ class Blocks(BlockContext): processed_input.append(block.preprocess(raw_input[i])) else: processed_input = raw_input + return processed_input + + async def call_function(self, fn_index, processed_input): + """Calls and times function with given index and preprocessed input.""" + block_fn = self.fns[fn_index] + start = time.time() if inspect.iscoroutinefunction(block_fn.fn): - predictions = await block_fn.fn(*processed_input) + prediction = await block_fn.fn(*processed_input) else: - predictions = await run_in_threadpool(block_fn.fn, *processed_input) + prediction = await run_in_threadpool(block_fn.fn, *processed_input) duration = time.time() - start - block_fn.total_runtime += duration - block_fn.total_runs += 1 + return prediction, duration + + def postprocess_data(self, fn_index, predictions, state): + block_fn = self.fns[fn_index] + dependency = self.dependencies[fn_index] + if type(predictions) is dict and len(predictions) > 0: keys_are_blocks = [isinstance(key, Block) for key in predictions.keys()] if all(keys_are_blocks): @@ -375,6 +455,7 @@ class Blocks(BlockContext): ) if len(dependency["outputs"]) == 1: predictions = (predictions,) + if block_fn.postprocess: output = [] for i, output_id in enumerate(dependency["outputs"]): @@ -410,6 +491,34 @@ class Blocks(BlockContext): else: output = predictions + return output + + async def process_api( + self, + fn_index: int, + raw_input: List[Any], + username: str = None, + state: Optional[Dict[int, any]] = None, + ) -> Dict[str, Any]: + """ + Processes API calls from the frontend. First preprocesses the data, + then runs the relevant function, then postprocesses the output. 
+ Parameters: + data: data recieved from the frontend + username: name of user if authentication is set up + state: data stored from stateful components for session + Returns: None + """ + block_fn = self.fns[fn_index] + + processed_input = self.preprocess_data(fn_index, raw_input, state) + + predictions, duration = await self.call_function(fn_index, processed_input) + block_fn.total_runtime += duration + block_fn.total_runs += 1 + + output = self.postprocess_data(fn_index, predictions, state) + return { "data": output, "duration": duration, diff --git a/gradio/components.py b/gradio/components.py index 982378836..8bff6374c 100644 --- a/gradio/components.py +++ b/gradio/components.py @@ -11,12 +11,11 @@ import numbers import operator import os import shutil -import sys import tempfile import warnings from copy import deepcopy from types import ModuleType -from typing import Any, Callable, Dict, List, Optional, Tuple, Type +from typing import Any, Callable, Dict, List, Optional, Tuple import matplotlib.figure import numpy as np @@ -26,7 +25,7 @@ from ffmpy import FFmpeg from markdown_it import MarkdownIt from gradio import media_data, processing_utils -from gradio.blocks import Block, BlockContext +from gradio.blocks import Block from gradio.events import ( Changeable, Clearable, @@ -889,7 +888,7 @@ class CheckboxGroup(Changeable, IOComponent): def __init__( self, - choices: List[str], + choices: List[str] = None, *, value: List[str] = None, type: str = "value", @@ -909,7 +908,7 @@ class CheckboxGroup(Changeable, IOComponent): show_label (bool): if True, will display label. visible (bool): If False, component will be hidden. """ - self.choices = choices + self.choices = choices or [] self.cleared_value = [] self.type = type self.value = self.postprocess(value) @@ -1052,7 +1051,7 @@ class Radio(Changeable, IOComponent): def __init__( self, - choices: List[str], + choices: List[str] = None, *, value: Optional[str] = None, type: str = "value", @@ -1072,7 +1071,7 @@ class Radio(Changeable, IOComponent): show_label (bool): if True, will display label. visible (bool): If False, component will be hidden. 
""" - self.choices = choices + self.choices = choices or [] self.type = type self.test_input = self.choices[0] if len(self.choices) else None self.value = self.postprocess(value) @@ -1197,7 +1196,7 @@ class Dropdown(Radio): def __init__( self, - choices: List[str], + choices: List[str] = None, *, value: Optional[str] = None, type: str = "value", @@ -1678,7 +1677,8 @@ class Video(Changeable, Clearable, Playable, IOComponent): return file_name def serialize(self, x, called_directly): - raise NotImplementedError() + data = processing_utils.encode_url_or_file_to_base64(x) + return {"name": x, "data": data, "is_example": False} def save_flagged(self, dir, label, data, encryption_key): """ @@ -1712,7 +1712,8 @@ class Video(Changeable, Clearable, Playable, IOComponent): } def deserialize(self, x): - return processing_utils.decode_base64_to_file(x).name + file = processing_utils.decode_base64_to_file(x["data"]) + return file.name def style( self, @@ -2001,7 +2002,8 @@ class Audio(Changeable, Clearable, Playable, Streamable, IOComponent): return processing_utils.encode_url_or_file_to_base64(y) def deserialize(self, x): - return processing_utils.decode_base64_to_file(x).name + file = processing_utils.decode_base64_to_file(x["data"]) + return file.name def stream( self, @@ -2209,7 +2211,8 @@ class File(Changeable, Clearable, IOComponent): } def deserialize(self, x): - return processing_utils.decode_base64_to_file(x).name + file = processing_utils.decode_base64_to_file(x["data"]) + return file.name def restore_flagged(self, dir, data, encryption_key): return self.restore_flagged_file(dir, data, encryption_key) diff --git a/gradio/external.py b/gradio/external.py index b545cf216..618d0f285 100644 --- a/gradio/external.py +++ b/gradio/external.py @@ -323,12 +323,12 @@ def get_spaces_blocks(model_name, config): headers = {"Content-Type": "application/json"} fns = [] - for _dependency in config["dependencies"]: - if _dependency["backend_fn"]: + for d, dependency in enumerate(config["dependencies"]): + if dependency["backend_fn"]: - def get_fn(dependency): + def get_fn(outputs, fn_index): def fn(*data): - data = json.dumps({"data": data}) + data = json.dumps({"data": data, "fn_index": fn_index}) response = requests.post(api_url, headers=headers, data=data) result = json.loads(response.content.decode("utf-8")) try: @@ -337,13 +337,14 @@ def get_spaces_blocks(model_name, config): raise KeyError( f"Could not find 'data' key in response from external Space. Response received: {result}" ) - if len(dependency["outputs"]) == 1: + if len(outputs) == 1: output = output[0] return output return fn - fns.append(get_fn(deepcopy(_dependency))) + fn = get_fn(deepcopy(dependency["outputs"]), d) + fns.append(fn) else: fns.append(None) return gradio.Blocks.from_config(config, fns) diff --git a/gradio/mix.py b/gradio/mix.py index 81f5cebbc..9eb6e9bf7 100644 --- a/gradio/mix.py +++ b/gradio/mix.py @@ -1,6 +1,8 @@ """ Ways to transform interfaces to produce new interfaces """ +import warnings + import gradio @@ -22,6 +24,10 @@ class Parallel(gradio.Interface): outputs = [] for io in interfaces: + if not (isinstance(io, gradio.Interface)): + warnings.warn( + "Parallel may not work properly with non-Interface objects." 
+ ) fns.extend(io.predict) outputs.extend(io.output_components) @@ -52,7 +58,13 @@ class Series(gradio.Interface): Returns: (Interface): an Interface object connecting the given models """ - fns = [io.predict for io in interfaces] + fns = [] + for io in interfaces: + if not (isinstance(io, gradio.Interface)): + warnings.warn( + "Series may not work properly with non-Interface objects." + ) + fns.append(io.predict) def connected_fn( *data, diff --git a/gradio/routes.py b/gradio/routes.py index ebee2c8a2..32ca616b6 100644 --- a/gradio/routes.py +++ b/gradio/routes.py @@ -278,7 +278,11 @@ class App(FastAPI): else: session_state = {} try: - output = await app.blocks.process_api(body, username, session_state) + raw_input = body.data + fn_index = body.fn_index + output = await app.blocks.process_api( + fn_index, raw_input, username, session_state + ) except BaseException as error: if app.blocks.show_error: traceback.print_exc() diff --git a/ui/packages/upload/src/Upload.svelte b/ui/packages/upload/src/Upload.svelte index 5281171a9..708154f1d 100644 --- a/ui/packages/upload/src/Upload.svelte +++ b/ui/packages/upload/src/Upload.svelte @@ -10,7 +10,7 @@ export let click: boolean = true; export let center: boolean = true; export let flex: boolean = true; - export let file_count: string; + export let file_count: string = "single"; let hidden_upload: HTMLInputElement; @@ -46,7 +46,10 @@ } : (this.result as string); if (all_file_data.length === files.length) { - dispatch("load", all_file_data); + dispatch( + "load", + file_count == "single" ? all_file_data[0] : all_file_data + ); } }; });
gradio-app/gradio
74ccf3e957704f793ada88f4786633e847fc8a03
diff --git a/test/test_components.py b/test/test_components.py index f8a24e386..67948975a 100644 --- a/test/test_components.py +++ b/test/test_components.py @@ -843,9 +843,7 @@ class TestAudio(unittest.TestCase): }, ) self.assertTrue( - audio_output.deserialize( - deepcopy(media_data.BASE64_AUDIO)["data"] - ).endswith(".wav") + audio_output.deserialize(deepcopy(media_data.BASE64_AUDIO)).endswith(".wav") ) with tempfile.TemporaryDirectory() as tmpdirname: to_save = audio_output.save_flagged( @@ -1179,8 +1177,10 @@ class TestVideo(unittest.TestCase): self.assertIsNotNone(video_input.preprocess(x_video)) video_input = gr.Video(format="avi") self.assertEqual(video_input.preprocess(x_video)[-3:], "avi") - with self.assertRaises(NotImplementedError): - video_input.serialize(x_video, True) + + self.assertEqual( + video_input.serialize(x_video["name"], True)["data"], x_video["data"] + ) # Output functionalities y_vid_path = "test/test_files/video_sample.mp4" @@ -1191,9 +1191,7 @@ class TestVideo(unittest.TestCase): ) ) self.assertTrue( - video_output.deserialize( - deepcopy(media_data.BASE64_VIDEO)["data"] - ).endswith(".mp4") + video_output.deserialize(deepcopy(media_data.BASE64_VIDEO)).endswith(".mp4") ) with tempfile.TemporaryDirectory() as tmpdirname: to_save = video_output.save_flagged(
Gradio 3.0 cannot load interfaces from Gradio 2 ### Describe the bug Loading an interface of a Space that runs Gradio < 3.0 from a Space with Gradio 3.0 breaks ### Is there an existing issue for this? - [X] I have searched the existing issues ### Reproduction https://huggingface.co/spaces/osanseviero/mix_match_gradio ### Screenshot _No response_ ### Logs ```shell Fetching interface from: https://huggingface.co/spaces/mrm8488/GPT-J-6B /home/user/.local/lib/python3.8/site-packages/gradio/deprecation.py:40: UserWarning: `optional` parameter is deprecated, and it has no effect warnings.warn(value) /home/user/.local/lib/python3.8/site-packages/gradio/deprecation.py:43: UserWarning: You have unused kwarg parameters in Textbox, please remove them: {'default': ''} warnings.warn( /home/user/.local/lib/python3.8/site-packages/gradio/interface.py:282: UserWarning: Currently, only the 'default' theme is supported. warnings.warn("Currently, only the 'default' theme is supported.") Fetching interface from: https://huggingface.co/spaces/akhaliq/T0pp Traceback (most recent call last): File "app.py", line 4, in <module> iface2 = gr.Interface.load("spaces/akhaliq/T0pp") File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 90, in load return super().load(name=name, src=src, api_key=api_key, alias=alias, **kwargs) File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 518, in load return external.load_blocks_from_repo(name, src, api_key, alias, **kwargs) File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 34, in load_blocks_from_repo blocks: gradio.Blocks = factory_methods[src](name, api_key, alias, **kwargs) File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 309, in get_spaces return get_spaces_blocks(model_name, config) File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 349, in get_spaces_blocks return gradio.Blocks.from_config(config, fns) File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 311, in from_config event_method(fn=fn, **dependency) File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 471, in __exit__ self.config = self.get_config_file() File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 440, in get_config_file "props": utils.delete_none(block.get_config()) File "/home/user/.local/lib/python3.8/site-packages/gradio/components.py", line 3590, in get_config "components": [component.get_block_name() for component in self.components], File "/home/user/.local/lib/python3.8/site-packages/gradio/components.py", line 3590, in <listcomp> "components": [component.get_block_name() for component in self.components], AttributeError: 'str' object has no attribute 'get_block_name' ``` ### System Info ```shell Google Chrome, Space using Gradio 3.0.9 ``` ### Severity serious, but I can work around it
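Beyond fixing the load path, the patch above also makes a loaded Blocks callable like a plain function (inputs are serialized, posted to the hosted Space, and deserialized back). A sketch of the intended round trip — it needs network access to the Space, and the prompt text is made up:

```python
import gradio as gr

io = gr.Interface.load("spaces/akhaliq/T0pp")
print(io)  # the new __repr__ lists each backend fn with its inputs/outputs

# __call__ dispatches to fn_index=0, the default prediction function.
result = io("Is this review positive or negative? Review: best film ever")
print(result)
```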
0.0
74ccf3e957704f793ada88f4786633e847fc8a03
[ "test/test_components.py::TestAudio::test_component_functions" ]
[ "test/test_components.py::TestComponent::test_component_functions", "test/test_components.py::TestTextbox::test_component_functions", "test/test_components.py::TestTextbox::test_in_interface_as_input", "test/test_components.py::TestTextbox::test_in_interface_as_output", "test/test_components.py::TestTextbox::test_static", "test/test_components.py::TestNumber::test_component_functions", "test/test_components.py::TestNumber::test_component_functions_integer", "test/test_components.py::TestNumber::test_component_functions_precision", "test/test_components.py::TestNumber::test_in_interface_as_input", "test/test_components.py::TestNumber::test_in_interface_as_output", "test/test_components.py::TestNumber::test_precision_0_in_interface", "test/test_components.py::TestNumber::test_static", "test/test_components.py::TestSlider::test_component_functions", "test/test_components.py::TestSlider::test_in_interface", "test/test_components.py::TestSlider::test_static", "test/test_components.py::TestCheckbox::test_component_functions", "test/test_components.py::TestCheckbox::test_in_interface", "test/test_components.py::TestCheckboxGroup::test_component_functions", "test/test_components.py::TestCheckboxGroup::test_in_interface", "test/test_components.py::TestRadio::test_component_functions", "test/test_components.py::TestRadio::test_in_interface", "test/test_components.py::TestImage::test_static", "test/test_components.py::TestPlot::test_in_interface_as_output", "test/test_components.py::TestPlot::test_static", "test/test_components.py::TestAudio::test_in_interface", "test/test_components.py::TestAudio::test_in_interface_as_output", "test/test_components.py::TestAudio::test_tokenize", "test/test_components.py::TestFile::test_as_component_as_output", "test/test_components.py::TestFile::test_component_functions", "test/test_components.py::TestFile::test_in_interface_as_input", "test/test_components.py::TestDataframe::test_component_functions", "test/test_components.py::TestDataframe::test_in_interface_as_output", "test/test_components.py::TestVideo::test_in_interface", "test/test_components.py::TestTimeseries::test_component_functions", "test/test_components.py::TestTimeseries::test_in_interface_as_input", "test/test_components.py::TestTimeseries::test_in_interface_as_output", "test/test_components.py::TestNames::test_no_duplicate_uncased_names", "test/test_components.py::TestLabel::test_component_functions", "test/test_components.py::TestLabel::test_in_interface", "test/test_components.py::TestHighlightedText::test_component_functions", "test/test_components.py::TestHighlightedText::test_in_interface", "test/test_components.py::TestJSON::test_component_functions", "test/test_components.py::TestHTML::test_component_functions", "test/test_components.py::TestHTML::test_in_interface", "test/test_components.py::TestModel3D::test_in_interface" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-06-01 03:00:38+00:00
apache-2.0
2,663
gradio-app__gradio-1667
diff --git a/gradio/components.py b/gradio/components.py index dad746069..f977fdca9 100644 --- a/gradio/components.py +++ b/gradio/components.py @@ -2119,7 +2119,7 @@ class Audio(Changeable, Clearable, Playable, Streamable, IOComponent): return processing_utils.encode_url_or_file_to_base64(y) def deserialize(self, x): - file = processing_utils.decode_base64_to_file(x["data"]) + file = processing_utils.decode_base64_to_file(x) return file.name def stream(
gradio-app/gradio
8c9a9a9696c54d109a8a2ca808b34221f56b0a90
diff --git a/test/test_components.py b/test/test_components.py index eec3de10e..d0ff01735 100644 --- a/test/test_components.py +++ b/test/test_components.py @@ -843,7 +843,9 @@ class TestAudio(unittest.TestCase): }, ) self.assertTrue( - audio_output.deserialize(deepcopy(media_data.BASE64_AUDIO)).endswith(".wav") + audio_output.deserialize( + deepcopy(media_data.BASE64_AUDIO)["data"] + ).endswith(".wav") ) with tempfile.TemporaryDirectory() as tmpdirname: to_save = audio_output.save_flagged(
TypeError in TTS demo ### Describe the bug Looking to demo a text-to-speech model using the [fastspeech2 checkpoint](https://huggingface.co/facebook/fastspeech2-en-ljspeech) from the HF hub as follows: ```python import gradio as gr gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech").launch(); ``` This yields the following demo: https://45412.gradio.app/ However, when I try and run the demo with any sort of input I just get `ERROR`. The demo should work as it does on the fastspeech2 model card: https://huggingface.co/facebook/fastspeech2-en-ljspeech cc @AK391 ### Is there an existing issue for this? - [X] I have searched the existing issues ### Reproduction ```python import gradio as gr gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech").launch(debug=True); ``` ### Screenshot _No response_ ### Logs ```shell Traceback (most recent call last): File "/usr/local/lib/python3.7/dist-packages/gradio/routes.py", line 256, in run_predict fn_index, raw_input, username, session_state File "/usr/local/lib/python3.7/dist-packages/gradio/blocks.py", line 546, in process_api predictions, duration = await self.call_function(fn_index, processed_input) File "/usr/local/lib/python3.7/dist-packages/gradio/blocks.py", line 462, in call_function block_fn.fn, *processed_input, limiter=self.limiter File "/usr/local/lib/python3.7/dist-packages/anyio/to_thread.py", line 32, in run_sync func, *args, cancellable=cancellable, limiter=limiter File "/usr/local/lib/python3.7/dist-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread return await future File "/usr/local/lib/python3.7/dist-packages/anyio/_backends/_asyncio.py", line 867, in run result = context.run(func, *args) File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 509, in <lambda> if len(self.output_components) == 1 File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 725, in run_prediction pred File "/usr/local/lib/python3.7/dist-packages/gradio/components.py", line 2103, in deserialize file = processing_utils.decode_base64_to_file(x["data"]) TypeError: string indices must be integers ``` ### System Info ```shell Gradio version: 3.0.20 System: G-colab (fresh install) ``` ### Severity blocking all usage of gradio
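The one-line fix above means `Audio.deserialize` once again accepts the raw base64 data-URI string that the hosted inference API returns, rather than a `{"data": ...}` dict. A sketch with a fabricated payload (the bytes are not a real waveform, just enough to exercise the call):

```python
import base64
import gradio as gr

audio = gr.Audio()
fake = base64.b64encode(b"\x00" * 32).decode()
path = audio.deserialize("data:audio/wav;base64," + fake)
print(path)  # path to a temporary .wav file on disk
```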
0.0
8c9a9a9696c54d109a8a2ca808b34221f56b0a90
[ "test/test_components.py::TestAudio::test_component_functions" ]
[ "test/test_components.py::TestComponent::test_component_functions", "test/test_components.py::TestTextbox::test_component_functions", "test/test_components.py::TestTextbox::test_in_interface_as_input", "test/test_components.py::TestTextbox::test_in_interface_as_output", "test/test_components.py::TestTextbox::test_static", "test/test_components.py::TestNumber::test_component_functions", "test/test_components.py::TestNumber::test_component_functions_integer", "test/test_components.py::TestNumber::test_component_functions_precision", "test/test_components.py::TestNumber::test_in_interface_as_input", "test/test_components.py::TestNumber::test_in_interface_as_output", "test/test_components.py::TestNumber::test_precision_0_in_interface", "test/test_components.py::TestNumber::test_static", "test/test_components.py::TestSlider::test_component_functions", "test/test_components.py::TestSlider::test_in_interface", "test/test_components.py::TestSlider::test_static", "test/test_components.py::TestCheckbox::test_component_functions", "test/test_components.py::TestCheckbox::test_in_interface", "test/test_components.py::TestCheckboxGroup::test_component_functions", "test/test_components.py::TestCheckboxGroup::test_in_interface", "test/test_components.py::TestRadio::test_component_functions", "test/test_components.py::TestRadio::test_in_interface", "test/test_components.py::TestImage::test_static", "test/test_components.py::TestPlot::test_in_interface_as_output", "test/test_components.py::TestPlot::test_static", "test/test_components.py::TestAudio::test_in_interface", "test/test_components.py::TestAudio::test_in_interface_as_output", "test/test_components.py::TestAudio::test_tokenize", "test/test_components.py::TestFile::test_as_component_as_output", "test/test_components.py::TestFile::test_component_functions", "test/test_components.py::TestFile::test_in_interface_as_input", "test/test_components.py::TestDataframe::test_component_functions", "test/test_components.py::TestDataframe::test_in_interface_as_output", "test/test_components.py::TestVideo::test_in_interface", "test/test_components.py::TestTimeseries::test_component_functions", "test/test_components.py::TestTimeseries::test_in_interface_as_input", "test/test_components.py::TestTimeseries::test_in_interface_as_output", "test/test_components.py::TestNames::test_no_duplicate_uncased_names", "test/test_components.py::TestLabel::test_component_functions", "test/test_components.py::TestLabel::test_in_interface", "test/test_components.py::TestHighlightedText::test_component_functions", "test/test_components.py::TestHighlightedText::test_in_interface", "test/test_components.py::TestJSON::test_component_functions", "test/test_components.py::TestHTML::test_component_functions", "test/test_components.py::TestHTML::test_in_interface", "test/test_components.py::TestModel3D::test_in_interface" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2022-06-29 17:02:27+00:00
apache-2.0
2,664
gradio-app__gradio-1684
diff --git a/demo/blocks_outputs/run.py b/demo/blocks_outputs/run.py index 4af996dbc..084be0da9 100644 --- a/demo/blocks_outputs/run.py +++ b/demo/blocks_outputs/run.py @@ -1,5 +1,26 @@ import gradio as gr + +def make_markdown(): + return [ + [ + "# hello again", + "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", + '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>', + ], + [ + "## hello again again", + "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", + '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>', + ], + [ + "### hello thrice", + "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", + '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>', + ], + ] + + with gr.Blocks() as demo: with gr.Column(): txt = gr.Textbox(label="Small Textbox", lines=1, show_label=False) @@ -43,27 +64,31 @@ with gr.Blocks() as demo: gr.Dataframe( interactive=True, headers=["One", "Two", "Three", "Four"], col_count=4 ) - gr.DataFrame( + df = gr.DataFrame( [ [ + "# hello", "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", - "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", - "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", + '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>', ], [ + "## hello", "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", - "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", - "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", + '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>', ], [ + "### hello", "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", - "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.", - "Hello my name is frank, I am liking the small turtle you have there. 
It would be a shame if it went missing.", + '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>', ], ], headers=["One", "Two", "Three"], wrap=True, + datatype=["markdown", "markdown", "html"], + interactive=True, ) + btn = gr.Button("Run") + btn.click(fn=make_markdown, inputs=None, outputs=df) if __name__ == "__main__": diff --git a/gradio/blocks.py b/gradio/blocks.py index 73b8f7bef..4910c8546 100644 --- a/gradio/blocks.py +++ b/gradio/blocks.py @@ -288,6 +288,7 @@ class Blocks(BlockContext): @classmethod def from_config(cls, config: dict, fns: List[Callable]) -> Blocks: """Factory method that creates a Blocks from a config and list of functions.""" + config = copy.deepcopy(config) components_config = config["components"] original_mapping: Dict[int, Block] = {} @@ -325,6 +326,7 @@ class Blocks(BlockContext): targets = dependency.pop("targets") trigger = dependency.pop("trigger") dependency.pop("backend_fn") + dependency.pop("documentation", None) dependency["inputs"] = [ original_mapping[i] for i in dependency["inputs"] ] diff --git a/gradio/components.py b/gradio/components.py index f977fdca9..3a07fd38d 100644 --- a/gradio/components.py +++ b/gradio/components.py @@ -1233,9 +1233,7 @@ class Radio(Changeable, IOComponent): Returns: (str): string of choice """ - return ( - y if y is not None else self.choices[0] if len(self.choices) > 0 else None - ) + return y def deserialize(self, x): """ @@ -2356,6 +2354,8 @@ class Dataframe(Changeable, IOComponent): Demos: filter_records, matrix_transpose, tax_calculator """ + markdown_parser = None + def __init__( self, value: Optional[List[List[Any]]] = None, @@ -2405,13 +2405,17 @@ class Dataframe(Changeable, IOComponent): self.__validate_headers(headers, self.col_count[0]) self.headers = headers - self.datatype = datatype + self.datatype = ( + datatype if isinstance(datatype, list) else [datatype] * self.col_count[0] + ) self.type = type values = { "str": "", "number": 0, "bool": False, "date": "01/01/1970", + "markdown": "", + "html": "", } column_dtypes = ( [datatype] * self.col_count[0] if isinstance(datatype, str) else datatype @@ -2419,7 +2423,10 @@ class Dataframe(Changeable, IOComponent): self.test_input = [ [values[c] for c in column_dtypes] for _ in range(self.row_count[0]) ] + self.value = value if value is not None else self.test_input + self.value = self.__process_markdown(self.value, datatype) + self.max_rows = max_rows self.max_cols = max_cols self.overflow_row_behaviour = overflow_row_behaviour @@ -2520,16 +2527,24 @@ class Dataframe(Changeable, IOComponent): if y is None: return y if isinstance(y, str): - y = pd.read_csv(str) - return {"headers": list(y.columns), "data": y.values.tolist()} + y = pd.read_csv(y) + return { + "headers": list(y.columns), + "data": Dataframe.__process_markdown(y.values.tolist(), self.datatype), + } if isinstance(y, pd.DataFrame): - return {"headers": list(y.columns), "data": y.values.tolist()} + return { + "headers": list(y.columns), + "data": Dataframe.__process_markdown(y.values.tolist(), self.datatype), + } if isinstance(y, (np.ndarray, list)): if isinstance(y, np.ndarray): y = y.tolist() if len(y) == 0 or not isinstance(y[0], list): y = [y] - return {"data": y} + return { + "data": Dataframe.__process_markdown(y, self.datatype), + } raise ValueError("Cannot process value as a Dataframe") @staticmethod @@ -2550,10 +2565,24 @@ class Dataframe(Changeable, IOComponent): ) ) + @classmethod + def __process_markdown(cls, data: List[List[Any]], datatype: List[str]): 
+ if "markdown" not in datatype: + return data + + if cls.markdown_parser is None: + cls.markdown_parser = MarkdownIt() + + for i in range(len(data)): + for j in range(len(data[i])): + if datatype[j] == "markdown": + data[i][j] = Dataframe.markdown_parser.render(data[i][j]) + + return data + def style( self, rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None, - border: Optional[bool | Tuple[bool, bool, bool, bool]] = None, ): return IOComponent.style( self, @@ -2695,7 +2724,6 @@ class Timeseries(Changeable, IOComponent): def style( self, rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None, - border: Optional[bool | Tuple[bool, bool, bool, bool]] = None, ): return IOComponent.style( self, diff --git a/ui/packages/app/src/Blocks.svelte b/ui/packages/app/src/Blocks.svelte index 752d9b5e4..0243e1548 100644 --- a/ui/packages/app/src/Blocks.svelte +++ b/ui/packages/app/src/Blocks.svelte @@ -119,12 +119,22 @@ const is_input = is_dep(id, "inputs", dependencies); const is_output = is_dep(id, "outputs", dependencies); - if (!is_input && !is_output && !props.value) acc.add(id); // default dynamic + if (!is_input && !is_output && has_no_default_value(props.value)) + acc.add(id); // default dynamic if (is_input) acc.add(id); return acc; }, new Set()); + function has_no_default_value(value: any) { + return ( + (Array.isArray(value) && value.length === 0) || + value === "" || + value === 0 || + !value + ); + } + let instance_map = components.reduce((acc, next) => { acc[next.id] = next; return acc; diff --git a/ui/packages/app/src/components/DataFrame/DataFrame.svelte b/ui/packages/app/src/components/DataFrame/DataFrame.svelte index c09fae421..9f4f87c7f 100644 --- a/ui/packages/app/src/components/DataFrame/DataFrame.svelte +++ b/ui/packages/app/src/components/DataFrame/DataFrame.svelte @@ -7,6 +7,7 @@ type Headers = Array<string>; type Data = Array<Array<string | number>>; + type Datatype = "str" | "markdown" | "html" | "number" | "bool" | "date"; export let headers: Headers = []; export let elem_id: string = ""; @@ -19,6 +20,7 @@ export let style: Styles = {}; export let label: string | null = null; export let wrap: boolean; + export let datatype: Datatype | Array<Datatype>; $: { if (value && !Array.isArray(value)) { @@ -60,5 +62,6 @@ editable={mode === "dynamic"} {style} {wrap} + {datatype} /> </div> diff --git a/ui/packages/table/src/EditableCell.svelte b/ui/packages/table/src/EditableCell.svelte index adaa2e938..d111140fd 100644 --- a/ui/packages/table/src/EditableCell.svelte +++ b/ui/packages/table/src/EditableCell.svelte @@ -3,6 +3,7 @@ export let value: string | number = ""; export let el: HTMLInputElement | null; export let header: boolean = false; + export let datatype: "str" | "markdown" | "html" | "number" | "bool" | "date"; </script> {#if edit} @@ -24,5 +25,11 @@ role="button" class:opacity-0={edit} class:pointer-events-none={edit} - class="p-2 outline-none border-0 flex-1">{value}</span + class="p-2 outline-none border-0 flex-1" > + {#if datatype === "markdown" || datatype === "html"} + {@html value} + {:else} + {value} + {/if} +</span> diff --git a/ui/packages/table/src/Table.svelte b/ui/packages/table/src/Table.svelte index f4a40278c..cd91052be 100644 --- a/ui/packages/table/src/Table.svelte +++ b/ui/packages/table/src/Table.svelte @@ -9,6 +9,9 @@ import { Upload } from "@gradio/upload"; import EditableCell from "./EditableCell.svelte"; + type Datatype = "str" | "markdown" | "html" | "number" | "bool" | "date"; + + export let datatype: Datatype | Array<Datatype>; 
export let label: string | null = null; export let headers: Array<string> = []; export let values: Array<Array<string | number>> = [[]]; @@ -567,6 +570,9 @@ bind:value bind:el={els[id].input} edit={editing === id} + datatype={Array.isArray(datatype) + ? datatype[j] + : datatype} /> </div> </td>
gradio-app/gradio
eb42fc3cf874e2252536623462eab02d3d27f07f
diff --git a/test/test_blocks.py b/test/test_blocks.py index 400e14702..f1feb9c4f 100644 --- a/test/test_blocks.py +++ b/test/test_blocks.py @@ -88,6 +88,23 @@ class TestBlocks(unittest.TestCase): config.pop("version") # remove version key self.assertTrue(assert_configs_are_equivalent_besides_ids(XRAY_CONFIG, config)) + def test_load_from_config(self): + def update(name): + return f"Welcome to Gradio, {name}!" + + with gr.Blocks() as demo1: + inp = gr.Textbox(placeholder="What is your name?") + out = gr.Textbox() + + inp.submit(fn=update, inputs=inp, outputs=out, api_name="greet") + + gr.Image().style(height=54, width=240) + + config1 = demo1.get_config_file() + demo2 = gr.Blocks.from_config(config1, [update]) + config2 = demo2.get_config_file() + self.assertTrue(assert_configs_are_equivalent_besides_ids(config1, config2)) + @pytest.mark.asyncio async def test_async_function(self): async def wait(): diff --git a/test/test_components.py b/test/test_components.py index d0ff01735..276ce37d5 100644 --- a/test/test_components.py +++ b/test/test_components.py @@ -537,7 +537,7 @@ class TestRadio(unittest.TestCase): radio_input.get_config(), { "choices": ["a", "b", "c"], - "value": "a", + "value": None, "name": "radio", "show_label": True, "label": "Pick Your One Input", @@ -1022,7 +1022,7 @@ class TestDataframe(unittest.TestCase): dataframe_input.get_config(), { "headers": ["Name", "Age", "Member"], - "datatype": "str", + "datatype": ["str", "str", "str"], "row_count": (3, "dynamic"), "col_count": (3, "dynamic"), "value": [ @@ -1079,7 +1079,7 @@ class TestDataframe(unittest.TestCase): "style": {}, "elem_id": None, "visible": True, - "datatype": "str", + "datatype": ["str", "str", "str"], "row_count": (3, "dynamic"), "col_count": (3, "dynamic"), "value": [
Dataframe: allow Markdown or HTML content

- [X] I have searched to see if a similar issue already exists.

**Is your feature request related to a problem? Please describe.**

Simple example here: https://huggingface.co/spaces/Gradio-Blocks/Leaderboard. Currently it's not possible to create hyperlinks inside a Dataframe table. I can see other use cases beyond `<a>` elements, maybe images or simple video previews? I'm thinking it would be handy to preview rich-content Dataframe outputs. I understand the performance concerns with large tables, but I guess it needs lazy loading anyway.
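Under the patch above, per-column rendering is selected through the `datatype` argument. A minimal usage sketch, modeled on the demo added in the diff (the component arguments are from the patched gradio API; the cell contents are made up):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Dataframe(
        value=[["[A link](https://gradio.app)", "<b>bold</b>"]],
        headers=["Markdown", "HTML"],
        # Per-column types: "markdown" cells are rendered server-side by
        # MarkdownIt; "html" cells are injected as-is by the frontend.
        datatype=["markdown", "html"],
    )

demo.launch()
```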
0.0
eb42fc3cf874e2252536623462eab02d3d27f07f
[ "test/test_blocks.py::TestBlocks::test_load_from_config", "test/test_components.py::TestRadio::test_component_functions", "test/test_components.py::TestDataframe::test_component_functions" ]
[ "test/test_blocks.py::TestBlocks::test_async_function", "test/test_blocks.py::TestBlocks::test_set_share", "test/test_blocks.py::TestBlocks::test_set_share_in_colab", "test/test_blocks.py::TestBlocks::test_xray", "test/test_components.py::TestComponent::test_component_functions", "test/test_components.py::TestTextbox::test_component_functions", "test/test_components.py::TestTextbox::test_in_interface_as_input", "test/test_components.py::TestTextbox::test_in_interface_as_output", "test/test_components.py::TestTextbox::test_static", "test/test_components.py::TestNumber::test_component_functions", "test/test_components.py::TestNumber::test_component_functions_integer", "test/test_components.py::TestNumber::test_component_functions_precision", "test/test_components.py::TestNumber::test_in_interface_as_input", "test/test_components.py::TestNumber::test_in_interface_as_output", "test/test_components.py::TestNumber::test_precision_0_in_interface", "test/test_components.py::TestNumber::test_static", "test/test_components.py::TestSlider::test_component_functions", "test/test_components.py::TestSlider::test_in_interface", "test/test_components.py::TestSlider::test_static", "test/test_components.py::TestCheckbox::test_component_functions", "test/test_components.py::TestCheckbox::test_in_interface", "test/test_components.py::TestCheckboxGroup::test_component_functions", "test/test_components.py::TestCheckboxGroup::test_in_interface", "test/test_components.py::TestRadio::test_in_interface", "test/test_components.py::TestImage::test_static", "test/test_components.py::TestPlot::test_in_interface_as_output", "test/test_components.py::TestPlot::test_static", "test/test_components.py::TestAudio::test_component_functions", "test/test_components.py::TestAudio::test_in_interface", "test/test_components.py::TestAudio::test_in_interface_as_output", "test/test_components.py::TestAudio::test_tokenize", "test/test_components.py::TestFile::test_as_component_as_output", "test/test_components.py::TestFile::test_component_functions", "test/test_components.py::TestFile::test_in_interface_as_input", "test/test_components.py::TestDataframe::test_in_interface_as_output", "test/test_components.py::TestVideo::test_in_interface", "test/test_components.py::TestTimeseries::test_component_functions", "test/test_components.py::TestTimeseries::test_in_interface_as_input", "test/test_components.py::TestTimeseries::test_in_interface_as_output", "test/test_components.py::TestNames::test_no_duplicate_uncased_names", "test/test_components.py::TestLabel::test_component_functions", "test/test_components.py::TestLabel::test_in_interface", "test/test_components.py::TestHighlightedText::test_component_functions", "test/test_components.py::TestHighlightedText::test_in_interface", "test/test_components.py::TestJSON::test_component_functions", "test/test_components.py::TestHTML::test_component_functions", "test/test_components.py::TestHTML::test_in_interface", "test/test_components.py::TestModel3D::test_in_interface" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-01 13:48:28+00:00
apache-2.0
2,665
gradio-app__gradio-1685
diff --git a/gradio/components.py b/gradio/components.py index f977fdca9..246016f70 100644 --- a/gradio/components.py +++ b/gradio/components.py @@ -1233,9 +1233,7 @@ class Radio(Changeable, IOComponent): Returns: (str): string of choice """ - return ( - y if y is not None else self.choices[0] if len(self.choices) > 0 else None - ) + return y def deserialize(self, x): """ diff --git a/ui/packages/app/src/Blocks.svelte b/ui/packages/app/src/Blocks.svelte index 752d9b5e4..0243e1548 100644 --- a/ui/packages/app/src/Blocks.svelte +++ b/ui/packages/app/src/Blocks.svelte @@ -119,12 +119,22 @@ const is_input = is_dep(id, "inputs", dependencies); const is_output = is_dep(id, "outputs", dependencies); - if (!is_input && !is_output && !props.value) acc.add(id); // default dynamic + if (!is_input && !is_output && has_no_default_value(props.value)) + acc.add(id); // default dynamic if (is_input) acc.add(id); return acc; }, new Set()); + function has_no_default_value(value: any) { + return ( + (Array.isArray(value) && value.length === 0) || + value === "" || + value === 0 || + !value + ); + } + let instance_map = components.reduce((acc, next) => { acc[next.id] = next; return acc;
gradio-app/gradio
eb42fc3cf874e2252536623462eab02d3d27f07f
diff --git a/test/test_components.py b/test/test_components.py index d0ff01735..049fdc3a5 100644 --- a/test/test_components.py +++ b/test/test_components.py @@ -537,7 +537,7 @@ class TestRadio(unittest.TestCase): radio_input.get_config(), { "choices": ["a", "b", "c"], - "value": "a", + "value": None, "name": "radio", "show_label": True, "label": "Pick Your One Input",
Dropdown is disabled when created with blocks

### Describe the bug

Hi, I tried creating a gradio app with Blocks; the Dropdown was disabled after launch.

### Is there an existing issue for this?

- [x] I have searched the existing issues

### Reproduction

```
import gradio as gr

demo = gr.Blocks()

with demo:
    with gr.Tabs():
        with gr.TabItem("Record Video"):
            with gr.Row():
                inp1=gr.inputs.Video(source="webcam",optional=False,label='Capture Video')
                inp2=gr.inputs.Video(source="webcam",optional=False,label='Capture Video')
            with gr.Row():
                subject_id = gr.inputs.Dropdown(["cat", "dog", "bird"])

demo.launch()
```

### Screenshot

<img width="1283" alt="image" src="https://user-images.githubusercontent.com/102731859/173801150-712bc70e-479a-456e-a12b-7e0bb293bdf5.png">
<img width="915" alt="image" src="https://user-images.githubusercontent.com/102731859/173801336-8f571c6d-026e-457e-8269-192f5d4ecc4d.png">

### Logs

```shell
NA
```

### System Info

```shell
Browser: Chrome
Gradio Version: 3.0.17
```

### Severity

blocking all usage of gradio
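The backend half of the fix stops `Radio.postprocess` from injecting the first choice as a default, so a component with no value stays dynamic (and therefore enabled) in Blocks. A minimal check mirroring the updated test:

```python
import gradio as gr

radio = gr.Radio(["a", "b", "c"])

# With the fix, no choice is silently pre-selected:
assert radio.get_config()["value"] is None
assert radio.postprocess(None) is None
```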
0.0
eb42fc3cf874e2252536623462eab02d3d27f07f
[ "test/test_components.py::TestRadio::test_component_functions" ]
[ "test/test_components.py::TestComponent::test_component_functions", "test/test_components.py::TestTextbox::test_component_functions", "test/test_components.py::TestTextbox::test_in_interface_as_input", "test/test_components.py::TestTextbox::test_in_interface_as_output", "test/test_components.py::TestTextbox::test_static", "test/test_components.py::TestNumber::test_component_functions", "test/test_components.py::TestNumber::test_component_functions_integer", "test/test_components.py::TestNumber::test_component_functions_precision", "test/test_components.py::TestNumber::test_in_interface_as_input", "test/test_components.py::TestNumber::test_in_interface_as_output", "test/test_components.py::TestNumber::test_precision_0_in_interface", "test/test_components.py::TestNumber::test_static", "test/test_components.py::TestSlider::test_component_functions", "test/test_components.py::TestSlider::test_in_interface", "test/test_components.py::TestSlider::test_static", "test/test_components.py::TestCheckbox::test_component_functions", "test/test_components.py::TestCheckbox::test_in_interface", "test/test_components.py::TestCheckboxGroup::test_component_functions", "test/test_components.py::TestCheckboxGroup::test_in_interface", "test/test_components.py::TestRadio::test_in_interface", "test/test_components.py::TestImage::test_static", "test/test_components.py::TestPlot::test_in_interface_as_output", "test/test_components.py::TestPlot::test_static", "test/test_components.py::TestAudio::test_component_functions", "test/test_components.py::TestAudio::test_in_interface", "test/test_components.py::TestAudio::test_in_interface_as_output", "test/test_components.py::TestAudio::test_tokenize", "test/test_components.py::TestFile::test_as_component_as_output", "test/test_components.py::TestFile::test_component_functions", "test/test_components.py::TestFile::test_in_interface_as_input", "test/test_components.py::TestDataframe::test_component_functions", "test/test_components.py::TestDataframe::test_in_interface_as_output", "test/test_components.py::TestVideo::test_in_interface", "test/test_components.py::TestTimeseries::test_component_functions", "test/test_components.py::TestTimeseries::test_in_interface_as_input", "test/test_components.py::TestTimeseries::test_in_interface_as_output", "test/test_components.py::TestNames::test_no_duplicate_uncased_names", "test/test_components.py::TestLabel::test_component_functions", "test/test_components.py::TestLabel::test_in_interface", "test/test_components.py::TestHighlightedText::test_component_functions", "test/test_components.py::TestHighlightedText::test_in_interface", "test/test_components.py::TestJSON::test_component_functions", "test/test_components.py::TestHTML::test_component_functions", "test/test_components.py::TestHTML::test_in_interface", "test/test_components.py::TestModel3D::test_in_interface" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-07-01 14:10:00+00:00
apache-2.0
2,666
graphql-python__graphene-641
diff --git a/docs/relay/index.rst b/docs/relay/index.rst index e3a87d0..7eb418d 100644 --- a/docs/relay/index.rst +++ b/docs/relay/index.rst @@ -21,9 +21,9 @@ Useful links - `Relay Cursor Connection Specification`_ - `Relay input Object Mutation`_ -.. _Relay: https://facebook.github.io/relay/docs/graphql-relay-specification.html +.. _Relay: https://facebook.github.io/relay/docs/en/graphql-server-specification.html .. _Relay specification: https://facebook.github.io/relay/graphql/objectidentification.htm#sec-Node-root-field -.. _Getting started with Relay: https://facebook.github.io/relay/docs/graphql-relay-specification.html +.. _Getting started with Relay: https://facebook.github.io/relay/docs/en/quick-start-guide.html .. _Relay Global Identification Specification: https://facebook.github.io/relay/graphql/objectidentification.htm .. _Relay Cursor Connection Specification: https://facebook.github.io/relay/graphql/connections.htm .. _Relay input Object Mutation: https://facebook.github.io/relay/graphql/mutations.htm diff --git a/graphene/relay/connection.py b/graphene/relay/connection.py index afe6ffb..3e2e9ad 100644 --- a/graphene/relay/connection.py +++ b/graphene/relay/connection.py @@ -73,7 +73,7 @@ class Connection(ObjectType): edge = type(edge_name, edge_bases, {}) cls.Edge = edge - _meta.name = name + options['name'] = name _meta.node = node _meta.fields = OrderedDict([ ('page_info', Field(PageInfo, name='pageInfo', required=True)), diff --git a/graphene/utils/str_converters.py b/graphene/utils/str_converters.py index ae8ceff..6fcdfb7 100644 --- a/graphene/utils/str_converters.py +++ b/graphene/utils/str_converters.py @@ -1,13 +1,13 @@ import re -# From this response in Stackoverflow +# Adapted from this response in Stackoverflow # http://stackoverflow.com/a/19053800/1072990 def to_camel_case(snake_str): components = snake_str.split('_') # We capitalize the first letter of each component except the first one - # with the 'title' method and join them together. - return components[0] + "".join(x.title() if x else '_' for x in components[1:]) + # with the 'capitalize' method and join them together. + return components[0] + ''.join(x.capitalize() if x else '_' for x in components[1:]) # From this response in Stackoverflow
graphql-python/graphene
38db32e4f2d57f54a77879f9277ad4408792c881
diff --git a/graphene/relay/tests/test_connection.py b/graphene/relay/tests/test_connection.py index b6a26df..3697888 100644 --- a/graphene/relay/tests/test_connection.py +++ b/graphene/relay/tests/test_connection.py @@ -52,6 +52,21 @@ def test_connection_inherit_abstracttype(): assert list(fields.keys()) == ['page_info', 'edges', 'extra'] +def test_connection_name(): + custom_name = "MyObjectCustomNameConnection" + + class BaseConnection(object): + extra = String() + + class MyObjectConnection(BaseConnection, Connection): + + class Meta: + node = MyObject + name = custom_name + + assert MyObjectConnection._meta.name == custom_name + + def test_edge(): class MyObjectConnection(Connection): @@ -122,9 +137,10 @@ def test_connectionfield_node_deprecated(): field = ConnectionField(MyObject) with pytest.raises(Exception) as exc_info: field.type - + assert "ConnectionField's now need a explicit ConnectionType for Nodes." in str(exc_info.value) + def test_connectionfield_custom_args(): class MyObjectConnection(Connection): diff --git a/graphene/utils/tests/test_str_converters.py b/graphene/utils/tests/test_str_converters.py index 2ee7d7a..11f7e15 100644 --- a/graphene/utils/tests/test_str_converters.py +++ b/graphene/utils/tests/test_str_converters.py @@ -16,6 +16,7 @@ def test_camel_case(): assert to_camel_case('snakes_on_a_plane') == 'snakesOnAPlane' assert to_camel_case('snakes_on_a__plane') == 'snakesOnA_Plane' assert to_camel_case('i_phone_hysteria') == 'iPhoneHysteria' + assert to_camel_case('field_i18n') == 'fieldI18n' def test_to_const():
Bug: Name of a connection is always the name of the class

```
class BranchesOnClient(relay.Connection):
    pass
```

Based on the implementation of `relay.Connection` I expect the default name to be `BranchesOnClientConnection`.

```
class BranchesOnClient(relay.Connection, name="Foo"):
    pass
```

Now I expect the type name to be `Foo`. In fact, in both cases the name will be `BranchesOnClient`. This is because `BaseType` sets `_meta.name` to the class name if no `name` argument is passed, and no `name` argument is passed because `relay.Connection` already caught it, assigned it to `_meta`, and removed it from the `**kwargs` dict that is passed on through the `__init_subclass_with_meta__` chain.
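With the one-line fix in `connection.py` (`options['name'] = name`), an explicit name is passed on through the `__init_subclass_with_meta__` chain instead of being swallowed. A sketch mirroring the regression test added for this issue:

```python
from graphene import ObjectType, String, relay

class MyObject(ObjectType):
    field = String()

class MyObjectConnection(relay.Connection):
    class Meta:
        node = MyObject
        name = "MyObjectCustomNameConnection"

# The explicit name now wins over the class name:
assert MyObjectConnection._meta.name == "MyObjectCustomNameConnection"
```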
0.0
38db32e4f2d57f54a77879f9277ad4408792c881
[ "graphene/relay/tests/test_connection.py::test_connection_name", "graphene/utils/tests/test_str_converters.py::test_camel_case" ]
[ "graphene/relay/tests/test_connection.py::test_connection", "graphene/relay/tests/test_connection.py::test_connection_inherit_abstracttype", "graphene/relay/tests/test_connection.py::test_edge", "graphene/relay/tests/test_connection.py::test_edge_with_bases", "graphene/relay/tests/test_connection.py::test_pageinfo", "graphene/relay/tests/test_connection.py::test_connectionfield", "graphene/relay/tests/test_connection.py::test_connectionfield_node_deprecated", "graphene/relay/tests/test_connection.py::test_connectionfield_custom_args", "graphene/utils/tests/test_str_converters.py::test_snake_case", "graphene/utils/tests/test_str_converters.py::test_to_const" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-01-08 17:42:57+00:00
mit
2,667
graphql-python__graphene-644
diff --git a/graphene/utils/str_converters.py b/graphene/utils/str_converters.py index ae8ceff..6fcdfb7 100644 --- a/graphene/utils/str_converters.py +++ b/graphene/utils/str_converters.py @@ -1,13 +1,13 @@ import re -# From this response in Stackoverflow +# Adapted from this response in Stackoverflow # http://stackoverflow.com/a/19053800/1072990 def to_camel_case(snake_str): components = snake_str.split('_') # We capitalize the first letter of each component except the first one - # with the 'title' method and join them together. - return components[0] + "".join(x.title() if x else '_' for x in components[1:]) + # with the 'capitalize' method and join them together. + return components[0] + ''.join(x.capitalize() if x else '_' for x in components[1:]) # From this response in Stackoverflow
graphql-python/graphene
38db32e4f2d57f54a77879f9277ad4408792c881
diff --git a/graphene/utils/tests/test_str_converters.py b/graphene/utils/tests/test_str_converters.py index 2ee7d7a..11f7e15 100644 --- a/graphene/utils/tests/test_str_converters.py +++ b/graphene/utils/tests/test_str_converters.py @@ -16,6 +16,7 @@ def test_camel_case(): assert to_camel_case('snakes_on_a_plane') == 'snakesOnAPlane' assert to_camel_case('snakes_on_a__plane') == 'snakesOnA_Plane' assert to_camel_case('i_phone_hysteria') == 'iPhoneHysteria' + assert to_camel_case('field_i18n') == 'fieldI18n' def test_to_const():
Fields with numbers in names do not capitalize correctly

I noticed that fields with names containing numbers (e.g. `field_i18n`) are not capitalized correctly. For example: `correct_field` becomes `correctField`, but `field_i18n` becomes `field_I18N` and `t1e2s3t` becomes `t1E2S3T`, which is obviously incorrect. This is caused by using the `.title` method in `str_converter.py` instead of `.capitalize`. The `title` method is suitable for multi-word strings, but each field-name component is a single word.
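The difference is easy to see in isolation: `str.title()` starts a new "word" after every non-letter character, including digits, while `str.capitalize()` only touches the first character. A quick demonstration matching the added test case:

```python
# title() restarts capitalization after the digits:
assert "i18n".title() == "I18N"
# capitalize() only uppercases the first character:
assert "i18n".capitalize() == "I18n"

from graphene.utils.str_converters import to_camel_case
# With the fix, numeric suffixes survive the conversion:
assert to_camel_case("field_i18n") == "fieldI18n"
```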
0.0
38db32e4f2d57f54a77879f9277ad4408792c881
[ "graphene/utils/tests/test_str_converters.py::test_camel_case" ]
[ "graphene/utils/tests/test_str_converters.py::test_snake_case", "graphene/utils/tests/test_str_converters.py::test_to_const" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2018-01-10 14:24:14+00:00
mit
2,668
graphql-python__graphene-751
diff --git a/graphene/relay/node.py b/graphene/relay/node.py index 6596757..5c787ff 100644 --- a/graphene/relay/node.py +++ b/graphene/relay/node.py @@ -101,8 +101,8 @@ class Node(AbstractNode): if only_type: assert graphene_type == only_type, ( - 'Must receive an {} id.' - ).format(graphene_type._meta.name) + 'Must receive a {} id.' + ).format(only_type._meta.name) # We make sure the ObjectType implements the "Node" interface if cls not in graphene_type._meta.interfaces:
graphql-python/graphene
7bd77a0817677656e2ed8e8ac235ab5e8d557487
diff --git a/graphene/relay/tests/test_node.py b/graphene/relay/tests/test_node.py index 10dc5d9..df44fcb 100644 --- a/graphene/relay/tests/test_node.py +++ b/graphene/relay/tests/test_node.py @@ -115,7 +115,7 @@ def test_node_field_only_type_wrong(): '{ onlyNode(id:"%s") { __typename, name } } ' % Node.to_global_id("MyOtherNode", 1) ) assert len(executed.errors) == 1 - assert str(executed.errors[0]) == 'Must receive an MyOtherNode id.' + assert str(executed.errors[0]) == 'Must receive a MyNode id.' assert executed.data == {'onlyNode': None} @@ -132,7 +132,7 @@ def test_node_field_only_lazy_type_wrong(): '{ onlyNodeLazy(id:"%s") { __typename, name } } ' % Node.to_global_id("MyOtherNode", 1) ) assert len(executed.errors) == 1 - assert str(executed.errors[0]) == 'Must receive an MyOtherNode id.' + assert str(executed.errors[0]) == 'Must receive a MyNode id.' assert executed.data == {'onlyNodeLazy': None}
Incorrectly formatted error message.

The error message generated here is incorrect. It should be `only_type._meta.name` instead of `graphene_type._meta.name`. This way the expected value will be reported, which is what the message says it is. https://github.com/graphql-python/graphene/blob/7bd77a0817677656e2ed8e8ac235ab5e8d557487/graphene/relay/node.py#L105

Also, it's better to use "a" instead of "an" as the default article in messages. https://github.com/graphql-python/graphene/blob/7bd77a0817677656e2ed8e8ac235ab5e8d557487/graphene/relay/node.py#L104
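A sketch of the corrected behavior, modeled on the schema in the project's node tests (the type names and trivial resolvers here are illustrative):

```python
from graphene import ObjectType, Schema, String, relay

class MyNode(ObjectType):
    class Meta:
        interfaces = (relay.Node,)
    name = String()

    @classmethod
    def get_node(cls, info, id):
        return cls(name=str(id))

class MyOtherNode(ObjectType):
    class Meta:
        interfaces = (relay.Node,)
    name = String()

class RootQuery(ObjectType):
    only_node = relay.Node.Field(MyNode)

schema = Schema(query=RootQuery, types=[MyNode, MyOtherNode])

executed = schema.execute(
    '{ onlyNode(id:"%s") { name } }' % relay.Node.to_global_id("MyOtherNode", 1)
)
# The message now names the *expected* type, not the received one:
assert str(executed.errors[0]) == "Must receive a MyNode id."
```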
0.0
7bd77a0817677656e2ed8e8ac235ab5e8d557487
[ "graphene/relay/tests/test_node.py::test_node_field_only_type_wrong", "graphene/relay/tests/test_node.py::test_node_field_only_lazy_type_wrong" ]
[ "graphene/relay/tests/test_node.py::test_node_good", "graphene/relay/tests/test_node.py::test_node_query", "graphene/relay/tests/test_node.py::test_subclassed_node_query", "graphene/relay/tests/test_node.py::test_node_requesting_non_node", "graphene/relay/tests/test_node.py::test_node_query_incorrect_id", "graphene/relay/tests/test_node.py::test_node_field", "graphene/relay/tests/test_node.py::test_node_field_custom", "graphene/relay/tests/test_node.py::test_node_field_only_type", "graphene/relay/tests/test_node.py::test_node_field_only_lazy_type", "graphene/relay/tests/test_node.py::test_str_schema" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2018-05-30 22:59:46+00:00
mit
2,669
graphql-python__graphene-752
diff --git a/graphene/types/inputobjecttype.py b/graphene/types/inputobjecttype.py index dbfccc4..b84fc0f 100644 --- a/graphene/types/inputobjecttype.py +++ b/graphene/types/inputobjecttype.py @@ -50,7 +50,10 @@ class InputObjectType(UnmountedType, BaseType): yank_fields_from_attrs(base.__dict__, _as=InputField) ) - _meta.fields = fields + if _meta.fields: + _meta.fields.update(fields) + else: + _meta.fields = fields if container is None: container = type(cls.__name__, (InputObjectTypeContainer, cls), {}) _meta.container = container
graphql-python/graphene
332214ba9c545b6d899e181a34666540f02848fe
diff --git a/graphene/tests/issues/test_720.py b/graphene/tests/issues/test_720.py new file mode 100644 index 0000000..8cd99bd --- /dev/null +++ b/graphene/tests/issues/test_720.py @@ -0,0 +1,44 @@ +# https://github.com/graphql-python/graphene/issues/720 +# InputObjectTypes overwrite the "fields" attribute of the provided +# _meta object, so even if dynamic fields are provided with a standard +# InputObjectTypeOptions, they are ignored. + +import graphene + + +class MyInputClass(graphene.InputObjectType): + + @classmethod + def __init_subclass_with_meta__( + cls, container=None, _meta=None, fields=None, **options): + if _meta is None: + _meta = graphene.types.inputobjecttype.InputObjectTypeOptions(cls) + _meta.fields = fields + super(MyInputClass, cls).__init_subclass_with_meta__( + container=container, _meta=_meta, **options) + + +class MyInput(MyInputClass): + + class Meta: + fields = dict(x=graphene.Field(graphene.Int)) + + +class Query(graphene.ObjectType): + myField = graphene.Field(graphene.String, input=graphene.Argument(MyInput)) + + def resolve_myField(parent, info, input): + return 'ok' + + +def test_issue(): + query_string = ''' + query myQuery { + myField(input: {x: 1}) + } + ''' + + schema = graphene.Schema(query=Query) + result = schema.execute(query_string) + + assert not result.errors
InputObjectType.__init_subclass_with_meta__ overwrites passed _meta.fields

In `InputObjectType.__init_subclass_with_meta__`, the `fields` of the `_meta` arg are overwritten, which can cause complications for subclassing.

```python
@classmethod
def __init_subclass_with_meta__(cls, container=None, _meta=None, **options):
    if not _meta:
        _meta = InputObjectTypeOptions(cls)

    fields = OrderedDict()
    for base in reversed(cls.__mro__):
        fields.update(
            yank_fields_from_attrs(base.__dict__, _as=InputField)
        )

    _meta.fields = fields
    # should this be:
    # if _meta.fields:
    #     _meta.fields.update(fields)
    # else:
    #     _meta.fields = fields
```
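The regression test for this issue relies on exactly that `update` behavior: a base class that injects dynamic fields through a standard `InputObjectTypeOptions`. A sketch of the pattern it exercises:

```python
import graphene
from graphene.types.inputobjecttype import InputObjectTypeOptions

class MyInputClass(graphene.InputObjectType):
    @classmethod
    def __init_subclass_with_meta__(cls, container=None, _meta=None,
                                    fields=None, **options):
        if _meta is None:
            _meta = InputObjectTypeOptions(cls)
        # Dynamic fields supplied by the subclass's Meta.
        _meta.fields = fields
        super(MyInputClass, cls).__init_subclass_with_meta__(
            container=container, _meta=_meta, **options)

class MyInput(MyInputClass):
    class Meta:
        fields = dict(x=graphene.Field(graphene.Int))

# Before the fix these Meta-provided fields were silently discarded;
# with it, MyInput exposes the dynamic field "x".
assert "x" in MyInput._meta.fields
```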
0.0
332214ba9c545b6d899e181a34666540f02848fe
[ "graphene/tests/issues/test_720.py::test_issue" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2018-06-01 01:53:33+00:00
mit
2,670
graphql-python__graphene-864
diff --git a/docs/execution/execute.rst b/docs/execution/execute.rst index 21345aa..1c28548 100644 --- a/docs/execution/execute.rst +++ b/docs/execution/execute.rst @@ -24,7 +24,7 @@ You can pass context to a query via ``context``. class Query(graphene.ObjectType): name = graphene.String() - def resolve_name(self, info): + def resolve_name(root, info): return info.context.get('name') schema = graphene.Schema(Query) @@ -33,7 +33,7 @@ You can pass context to a query via ``context``. Variables -_______ +_________ You can pass variables to a query via ``variables``. @@ -41,10 +41,10 @@ You can pass variables to a query via ``variables``. .. code:: python class Query(graphene.ObjectType): - user = graphene.Field(User) + user = graphene.Field(User, id=graphene.ID(required=True)) - def resolve_user(self, info): - return info.context.get('user') + def resolve_user(root, info, id): + return get_user_by_id(id) schema = graphene.Schema(Query) result = schema.execute( diff --git a/docs/index.rst b/docs/index.rst index 3e9577a..aff3960 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -19,3 +19,5 @@ Integrations * `Graphene-SQLAlchemy <http://docs.graphene-python.org/projects/sqlalchemy/en/latest/>`_ (`source <https://github.com/graphql-python/graphene-sqlalchemy/>`_) * `Graphene-GAE <http://docs.graphene-python.org/projects/gae/en/latest/>`_ (`source <https://github.com/graphql-python/graphene-gae/>`_) * `Graphene-Mongo <http://graphene-mongo.readthedocs.io/en/latest/>`_ (`source <https://github.com/graphql-python/graphene-mongo>`_) +* `Starlette <https://www.starlette.io/graphql/>`_ (`source <https://github.com/encode/starlette>`_) +* `FastAPI <https://fastapi.tiangolo.com/tutorial/graphql/>`_ (`source <https://github.com/tiangolo/fastapi>`_) diff --git a/docs/types/objecttypes.rst b/docs/types/objecttypes.rst index b6eb308..18f91bd 100644 --- a/docs/types/objecttypes.rst +++ b/docs/types/objecttypes.rst @@ -57,8 +57,8 @@ so the first argument to the resolver method ``self`` (or ``root``) need not be an actual instance of the ``ObjectType``. If an explicit resolver is not defined on the ``ObjectType`` then Graphene will -attempt to use a property with the same name on the object that is passed to the -``ObjectType``. +attempt to use a property with the same name on the object or dict that is +passed to the ``ObjectType``. .. code:: python @@ -70,54 +70,18 @@ attempt to use a property with the same name on the object that is passed to the class Query(graphene.ObjectType): me = graphene.Field(Person) + best_friend = graphene.Field(Person) def resolve_me(_, info): # returns an object that represents a Person return get_human(name='Luke Skywalker') -If you are passing a dict instead of an object to your ``ObjectType`` you can -change the default resolver in the ``Meta`` class like this: - -.. code:: python - - import graphene - from graphene.types.resolver import dict_resolver - - class Person(graphene.ObjectType): - class Meta: - default_resolver = dict_resolver - - first_name = graphene.String() - last_name = graphene.String() - - class Query(graphene.ObjectType): - me = graphene.Field(Person) - - def resolve_me(_, info): + def resolve_best_friend(_, info): return { - "first_name": "Luke", - "last_name": "Skywalker", + "first_name": "R2", + "last_name": "D2", } -Or you can change the default resolver globally by calling ``set_default_resolver`` -before executing a query. - -.. 
code:: python - - import graphene - from graphene.types.resolver import dict_resolver, set_default_resolver - - set_default_resolver(dict_resolver) - - schema = graphene.Schema(query=Query) - result = schema.execute(''' - query { - me { - firstName - } - } - ''') - Resolvers with arguments ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -230,4 +194,17 @@ previous example you could do: peter.first_name # prints "Peter" peter.last_name # prints "Griffin" +Changing the name +----------------- + +By default the type name in the GraphQL schema will the same as the class name +that defines the ``ObjectType``. This can be changed by setting the ``name`` +property on the ``Meta`` class: + +.. code:: python + + class MyGraphQlSong(graphene.ObjectType): + class Meta: + name = 'Song' + .. _Interface: /docs/interfaces/ diff --git a/graphene/types/datetime.py b/graphene/types/datetime.py index 739032b..3519d76 100644 --- a/graphene/types/datetime.py +++ b/graphene/types/datetime.py @@ -4,6 +4,7 @@ import datetime from aniso8601 import parse_date, parse_datetime, parse_time from graphql.language import ast +from six import string_types from .scalars import Scalar @@ -32,7 +33,10 @@ class Date(Scalar): @staticmethod def parse_value(value): try: - return parse_date(value) + if isinstance(value, datetime.date): + return value + elif isinstance(value, string_types): + return parse_date(value) except ValueError: return None @@ -59,7 +63,10 @@ class DateTime(Scalar): @staticmethod def parse_value(value): try: - return parse_datetime(value) + if isinstance(value, datetime.datetime): + return value + elif isinstance(value, string_types): + return parse_datetime(value) except ValueError: return None @@ -86,6 +93,9 @@ class Time(Scalar): @classmethod def parse_value(cls, value): try: - return parse_time(value) + if isinstance(value, datetime.time): + return value + elif isinstance(value, string_types): + return parse_time(value) except ValueError: return None diff --git a/graphene/types/resolver.py b/graphene/types/resolver.py index 888aba8..6a8ea02 100644 --- a/graphene/types/resolver.py +++ b/graphene/types/resolver.py @@ -6,7 +6,14 @@ def dict_resolver(attname, default_value, root, info, **args): return root.get(attname, default_value) -default_resolver = attr_resolver +def dict_or_attr_resolver(attname, default_value, root, info, **args): + resolver = attr_resolver + if isinstance(root, dict): + resolver = dict_resolver + return resolver(attname, default_value, root, info, **args) + + +default_resolver = dict_or_attr_resolver def set_default_resolver(resolver):
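Besides the datetime scalars, the diff above swaps the default resolver for a `dict_or_attr_resolver`, so plain dicts returned from resolvers work without per-type configuration. A quick sketch mirroring the resolver tests (the `info` argument is unused by these resolvers, so `None` stands in):

```python
from graphene.types.resolver import dict_or_attr_resolver

class Demo:
    attr = "value"

# The combined resolver dispatches on the root's type:
assert dict_or_attr_resolver("attr", None, Demo(), None) == "value"
assert dict_or_attr_resolver("attr", None, {"attr": "value"}, None) == "value"
```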
graphql-python/graphene
96d497c2b8f5495fe603ffb1a7675e95f342dd2c
diff --git a/graphene/types/tests/test_datetime.py b/graphene/types/tests/test_datetime.py index 98e5e7a..0d9ee11 100644 --- a/graphene/types/tests/test_datetime.py +++ b/graphene/types/tests/test_datetime.py @@ -2,6 +2,7 @@ import datetime import pytz from graphql import GraphQLError +import pytest from ..datetime import Date, DateTime, Time from ..objecttype import ObjectType @@ -88,6 +89,15 @@ def test_datetime_query_variable(): now = datetime.datetime.now().replace(tzinfo=pytz.utc) isoformat = now.isoformat() + # test datetime variable provided as Python datetime + result = schema.execute( + """query Test($date: DateTime){ datetime(in: $date) }""", + variables={"date": now}, + ) + assert not result.errors + assert result.data == {"datetime": isoformat} + + # test datetime variable in string representation result = schema.execute( """query Test($date: DateTime){ datetime(in: $date) }""", variables={"date": isoformat}, @@ -100,6 +110,14 @@ def test_date_query_variable(): now = datetime.datetime.now().replace(tzinfo=pytz.utc).date() isoformat = now.isoformat() + # test date variable provided as Python date + result = schema.execute( + """query Test($date: Date){ date(in: $date) }""", variables={"date": now} + ) + assert not result.errors + assert result.data == {"date": isoformat} + + # test date variable in string representation result = schema.execute( """query Test($date: Date){ date(in: $date) }""", variables={"date": isoformat} ) @@ -112,8 +130,57 @@ def test_time_query_variable(): time = datetime.time(now.hour, now.minute, now.second, now.microsecond, now.tzinfo) isoformat = time.isoformat() + # test time variable provided as Python time + result = schema.execute( + """query Test($time: Time){ time(at: $time) }""", variables={"time": time} + ) + assert not result.errors + assert result.data == {"time": isoformat} + + # test time variable in string representation result = schema.execute( """query Test($time: Time){ time(at: $time) }""", variables={"time": isoformat} ) assert not result.errors assert result.data == {"time": isoformat} + + [email protected]( + reason="creating the error message fails when un-parsable object is not JSON serializable." 
+) +def test_bad_variables(): + def _test_bad_variables(type, input): + result = schema.execute( + """query Test($input: {}){{ {}(in: $input) }}""".format(type, type.lower()), + variables={"input": input}, + ) + assert len(result.errors) == 1 + # when `input` is not JSON serializable formatting the error message in + # `graphql.utils.is_valid_value` line 79 fails with a TypeError + assert isinstance(result.errors[0], GraphQLError) + print(result.errors[0]) + assert result.data is None + + not_a_date = dict() + not_a_date_str = "Some string that's not a date" + today = datetime.date.today() + now = datetime.datetime.now().replace(tzinfo=pytz.utc) + time = datetime.time(now.hour, now.minute, now.second, now.microsecond, now.tzinfo) + + bad_pairs = [ + ("DateTime", not_a_date), + ("DateTime", not_a_date_str), + ("DateTime", today), + ("DateTime", time), + ("Date", not_a_date), + ("Date", not_a_date_str), + ("Date", now), + ("Date", time), + ("Time", not_a_date), + ("Time", not_a_date_str), + ("Time", now), + ("Time", today), + ] + + for type, input in bad_pairs: + _test_bad_variables(type, input) diff --git a/graphene/types/tests/test_resolver.py b/graphene/types/tests/test_resolver.py index 2a15028..a03cf18 100644 --- a/graphene/types/tests/test_resolver.py +++ b/graphene/types/tests/test_resolver.py @@ -1,6 +1,7 @@ from ..resolver import ( attr_resolver, dict_resolver, + dict_or_attr_resolver, get_default_resolver, set_default_resolver, ) @@ -36,8 +37,16 @@ def test_dict_resolver_default_value(): assert resolved == "default" +def test_dict_or_attr_resolver(): + resolved = dict_or_attr_resolver("attr", None, demo_dict, info, **args) + assert resolved == "value" + + resolved = dict_or_attr_resolver("attr", None, demo_obj, info, **args) + assert resolved == "value" + + def test_get_default_resolver_is_attr_resolver(): - assert get_default_resolver() == attr_resolver + assert get_default_resolver() == dict_or_attr_resolver def test_set_default_resolver_workd():
[Enhancement] DateTime inputs should accept Python datetimes

`DateTime`, `Date`, and `Time` will not accept corresponding instances of `datetime.*` when used as variables; you have to format your variables as strings. The following code, which simply provides Python datetimes, should be supported in addition to string-formatted datetimes:

```python
from datetime import date, datetime, time

from graphene import Field, InputObjectType, ObjectType, Schema, String
from graphene.types.datetime import Date, DateTime, Time


class Test(ObjectType):
    date = Date()
    datetime = DateTime()
    time = Time()


class TestInput(InputObjectType):
    date = Date()
    datetime = DateTime()
    time = Time()


class Query(ObjectType):
    test = Field(Test, input=TestInput(required=True))

    def resolve_test(self, info, input):
        return Test(**input)


schema = Schema(query=Query)

the_query = """
query Test($input: TestInput!) {
    test(input: $input) {
        date
        datetime
        time
    }
}
"""

input = dict(date=date.today(), datetime=datetime.now(), time=time(12, 0))
# currently graphene insists on specifying the input like
# input = dict(date="2018-11-08", datetime="2018-11-08T12:00:00", time="12:00:00")

result = schema.execute(the_query, variable_values=dict(input=input))
print(result.data)
```

Pull request is on the way.
0.0
96d497c2b8f5495fe603ffb1a7675e95f342dd2c
[ "graphene/types/tests/test_datetime.py::test_datetime_query", "graphene/types/tests/test_datetime.py::test_date_query", "graphene/types/tests/test_datetime.py::test_time_query", "graphene/types/tests/test_datetime.py::test_bad_datetime_query", "graphene/types/tests/test_datetime.py::test_bad_date_query", "graphene/types/tests/test_datetime.py::test_bad_time_query", "graphene/types/tests/test_datetime.py::test_datetime_query_variable", "graphene/types/tests/test_datetime.py::test_date_query_variable", "graphene/types/tests/test_datetime.py::test_time_query_variable", "graphene/types/tests/test_resolver.py::test_attr_resolver", "graphene/types/tests/test_resolver.py::test_attr_resolver_default_value", "graphene/types/tests/test_resolver.py::test_dict_resolver", "graphene/types/tests/test_resolver.py::test_dict_resolver_default_value", "graphene/types/tests/test_resolver.py::test_dict_or_attr_resolver", "graphene/types/tests/test_resolver.py::test_get_default_resolver_is_attr_resolver", "graphene/types/tests/test_resolver.py::test_set_default_resolver_workd" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-11-09 07:32:34+00:00
mit
2,671
graphql-python__graphene-957
diff --git a/graphene/types/enum.py b/graphene/types/enum.py index 6e6bab8..7b8e71f 100644 --- a/graphene/types/enum.py +++ b/graphene/types/enum.py @@ -46,7 +46,12 @@ class EnumMeta(SubclassWithMeta_Meta): def __call__(cls, *args, **kwargs): # noqa: N805 if cls is Enum: description = kwargs.pop("description", None) - return cls.from_enum(PyEnum(*args, **kwargs), description=description) + deprecation_reason = kwargs.pop("deprecation_reason", None) + return cls.from_enum( + PyEnum(*args, **kwargs), + description=description, + deprecation_reason=deprecation_reason, + ) return super(EnumMeta, cls).__call__(*args, **kwargs) # return cls._meta.enum(*args, **kwargs)
graphql-python/graphene
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
diff --git a/graphene/tests/issues/test_956.py b/graphene/tests/issues/test_956.py new file mode 100644 index 0000000..72ff971 --- /dev/null +++ b/graphene/tests/issues/test_956.py @@ -0,0 +1,8 @@ +import graphene + + +def test_issue(): + options = {"description": "This my enum", "deprecation_reason": "For the funs"} + new_enum = graphene.Enum("MyEnum", [("some", "data")], **options) + assert new_enum._meta.description == options["description"] + assert new_enum._meta.deprecation_reason == options["deprecation_reason"]
Cannot create an enum with a deprecation reason supplied

## How to reproduce

```python
options = {
    'description': 'This my enum',
    'deprecation_reason': 'For the funs'}
graphene.Enum('MyEnum', [('some', 'data')], **options)
```

## What happened

```
  File "/Users/Development/saleor/saleor/graphql/core/enums.py", line 35, in to_enum
    return graphene.Enum(type_name, enum_data, **options)
  File "/Users/Development/saleor-venv/lib/python3.7/site-packages/graphene/types/enum.py", line 49, in __call__
    return cls.from_enum(PyEnum(*args, **kwargs), description=description)
TypeError: __call__() got an unexpected keyword argument 'deprecation_reason'
```
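After the patch, `EnumMeta.__call__` pops `deprecation_reason` alongside `description` and forwards both to `from_enum`. A check mirroring the regression test:

```python
import graphene

options = {"description": "This my enum", "deprecation_reason": "For the funs"}
new_enum = graphene.Enum("MyEnum", [("some", "data")], **options)

# Both keyword arguments now reach the Enum's meta:
assert new_enum._meta.description == options["description"]
assert new_enum._meta.deprecation_reason == options["deprecation_reason"]
```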
0.0
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
[ "graphene/tests/issues/test_956.py::test_issue" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-05-02 13:15:48+00:00
mit
2,672
graphql-python__graphene-django-1477
diff --git a/docs/settings.rst b/docs/settings.rst index 79c52e2..521e434 100644 --- a/docs/settings.rst +++ b/docs/settings.rst @@ -142,6 +142,15 @@ Default: ``False`` # ] +``DJANGO_CHOICE_FIELD_ENUM_CONVERT`` +-------------------------------------- + +When set to ``True`` Django choice fields are automatically converted into Enum types. + +Can be disabled globally by setting it to ``False``. + +Default: ``True`` + ``DJANGO_CHOICE_FIELD_ENUM_V2_NAMING`` -------------------------------------- diff --git a/graphene_django/converter.py b/graphene_django/converter.py index f4775e8..121c1de 100644 --- a/graphene_django/converter.py +++ b/graphene_django/converter.py @@ -133,13 +133,17 @@ def convert_choice_field_to_enum(field, name=None): def convert_django_field_with_choices( - field, registry=None, convert_choices_to_enum=True + field, registry=None, convert_choices_to_enum=None ): if registry is not None: converted = registry.get_converted_field(field) if converted: return converted choices = getattr(field, "choices", None) + if convert_choices_to_enum is None: + convert_choices_to_enum = bool( + graphene_settings.DJANGO_CHOICE_FIELD_ENUM_CONVERT + ) if choices and convert_choices_to_enum: EnumCls = convert_choice_field_to_enum(field) required = not (field.blank or field.null) diff --git a/graphene_django/settings.py b/graphene_django/settings.py index f7e3ee7..da33700 100644 --- a/graphene_django/settings.py +++ b/graphene_django/settings.py @@ -30,6 +30,8 @@ DEFAULTS = { # Max items returned in ConnectionFields / FilterConnectionFields "RELAY_CONNECTION_MAX_LIMIT": 100, "CAMELCASE_ERRORS": True, + # Automatically convert Choice fields of Django into Enum fields + "DJANGO_CHOICE_FIELD_ENUM_CONVERT": True, # Set to True to enable v2 naming convention for choice field Enum's "DJANGO_CHOICE_FIELD_ENUM_V2_NAMING": False, "DJANGO_CHOICE_FIELD_ENUM_CUSTOM_NAME": None, diff --git a/graphene_django/types.py b/graphene_django/types.py index 02b7693..e310fe4 100644 --- a/graphene_django/types.py +++ b/graphene_django/types.py @@ -23,7 +23,7 @@ ALL_FIELDS = "__all__" def construct_fields( - model, registry, only_fields, exclude_fields, convert_choices_to_enum + model, registry, only_fields, exclude_fields, convert_choices_to_enum=None ): _model_fields = get_model_fields(model) @@ -47,7 +47,7 @@ def construct_fields( continue _convert_choices_to_enum = convert_choices_to_enum - if not isinstance(_convert_choices_to_enum, bool): + if isinstance(_convert_choices_to_enum, list): # then `convert_choices_to_enum` is a list of field names to convert if name in _convert_choices_to_enum: _convert_choices_to_enum = True @@ -146,7 +146,7 @@ class DjangoObjectType(ObjectType): connection_class=None, use_connection=None, interfaces=(), - convert_choices_to_enum=True, + convert_choices_to_enum=None, _meta=None, **options, ):
graphql-python/graphene-django
feb7252b8a12ebdfd056a34cf42c489ec4d001ba
diff --git a/graphene_django/tests/test_types.py b/graphene_django/tests/test_types.py index 34828db..5c36bb9 100644 --- a/graphene_django/tests/test_types.py +++ b/graphene_django/tests/test_types.py @@ -661,6 +661,122 @@ class TestDjangoObjectType: }""" ) + def test_django_objecttype_convert_choices_global_false( + self, graphene_settings, PetModel + ): + graphene_settings.DJANGO_CHOICE_FIELD_ENUM_CONVERT = False + + class Pet(DjangoObjectType): + class Meta: + model = PetModel + fields = "__all__" + + class Query(ObjectType): + pet = Field(Pet) + + schema = Schema(query=Query) + + assert str(schema) == dedent( + """\ + type Query { + pet: Pet + } + + type Pet { + id: ID! + kind: String! + cuteness: Int! + }""" + ) + + def test_django_objecttype_convert_choices_true_global_false( + self, graphene_settings, PetModel + ): + graphene_settings.DJANGO_CHOICE_FIELD_ENUM_CONVERT = False + + class Pet(DjangoObjectType): + class Meta: + model = PetModel + fields = "__all__" + convert_choices_to_enum = True + + class Query(ObjectType): + pet = Field(Pet) + + schema = Schema(query=Query) + + assert str(schema) == dedent( + """\ + type Query { + pet: Pet + } + + type Pet { + id: ID! + kind: TestsPetModelKindChoices! + cuteness: TestsPetModelCutenessChoices! + } + + \"""An enumeration.\""" + enum TestsPetModelKindChoices { + \"""Cat\""" + CAT + + \"""Dog\""" + DOG + } + + \"""An enumeration.\""" + enum TestsPetModelCutenessChoices { + \"""Kind of cute\""" + A_1 + + \"""Pretty cute\""" + A_2 + + \"""OMG SO CUTE!!!\""" + A_3 + }""" + ) + + def test_django_objecttype_convert_choices_enum_list_global_false( + self, graphene_settings, PetModel + ): + graphene_settings.DJANGO_CHOICE_FIELD_ENUM_CONVERT = False + + class Pet(DjangoObjectType): + class Meta: + model = PetModel + convert_choices_to_enum = ["kind"] + fields = "__all__" + + class Query(ObjectType): + pet = Field(Pet) + + schema = Schema(query=Query) + + assert str(schema) == dedent( + """\ + type Query { + pet: Pet + } + + type Pet { + id: ID! + kind: TestsPetModelKindChoices! + cuteness: Int! + } + + \"""An enumeration.\""" + enum TestsPetModelKindChoices { + \"""Cat\""" + CAT + + \"""Dog\""" + DOG + }""" + ) + @with_local_registry def test_django_objecttype_name_connection_propagation():
Provide setting to enable/disable converting choices to enums globally

In our project we want to disable the auto-conversion of Django choice fields to Enums for every model. Specifying `convert_choices_to_enum = False` on every type is a bit cumbersome, so a global setting would help a lot. I've created a PR which showcases the improvement I have in mind: #1477
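With the patch, the new `DJANGO_CHOICE_FIELD_ENUM_CONVERT` key (default `True`) controls the behavior globally, and a per-type `convert_choices_to_enum` Meta option still overrides it. A sketch of the Django settings, where the schema path is a placeholder:

```python
# settings.py
GRAPHENE = {
    "SCHEMA": "myapp.schema.schema",  # hypothetical schema module path
    # Disable automatic conversion of Django choice fields to Enum types:
    "DJANGO_CHOICE_FIELD_ENUM_CONVERT": False,
}
```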
0.0
feb7252b8a12ebdfd056a34cf42c489ec4d001ba
[ "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_global_false" ]
[ "graphene_django/tests/test_types.py::test_django_objecttype_fields_empty", "graphene_django/tests/test_types.py::test_django_objecttype_fields_exclude_type_checking", "graphene_django/tests/test_types.py::test_django_objecttype_only_fields", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_enum_list", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_enum_list_global_false", "graphene_django/tests/test_types.py::test_schema_representation", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_enum_naming_collisions", "graphene_django/tests/test_types.py::test_django_objecttype_exclude_and_only", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_enum_false", "graphene_django/tests/test_types.py::test_django_objecttype_exclude_fields", "graphene_django/tests/test_types.py::test_django_objecttype_with_node_have_correct_fields", "graphene_django/tests/test_types.py::test_django_objecttype_all_fields", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_choices_custom_enum_name", "graphene_django/tests/test_types.py::test_django_objecttype_with_custom_meta", "graphene_django/tests/test_types.py::test_django_objecttype_map_correct_fields", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_enum_empty_list", "graphene_django/tests/test_types.py::test_django_objecttype_exclude", "graphene_django/tests/test_types.py::test_django_objecttype_fields", "graphene_django/tests/test_types.py::test_django_objecttype_name_connection_propagation", "graphene_django/tests/test_types.py::test_django_interface", "graphene_django/tests/test_types.py::test_django_get_node", "graphene_django/tests/test_types.py::test_django_objecttype_exclude_fields_and_exclude", "graphene_django/tests/test_types.py::test_django_objecttype_only_fields_and_fields", "graphene_django/tests/test_types.py::TestDjangoObjectType::test_django_objecttype_convert_choices_true_global_false" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-11-15 10:10:44+00:00
mit
2,673
grappa-py__grappa-39
diff --git a/grappa/operators/raises.py b/grappa/operators/raises.py index 48dcea9..763f2a5 100644 --- a/grappa/operators/raises.py +++ b/grappa/operators/raises.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -import inspect from ..operator import Operator @@ -57,7 +56,7 @@ class RaisesOperator(Operator): ) def match(self, fn, *errors): - if not any([inspect.isfunction(fn) or inspect.ismethod(fn)]): + if not callable(fn): return False, ['subject must be a function or method'] try:
grappa-py/grappa
c81d2eeaf63788dd130ba712645c82bb1af9b752
diff --git a/tests/operators/raises_test.py b/tests/operators/raises_test.py index 33f4277..a3acdfa 100644 --- a/tests/operators/raises_test.py +++ b/tests/operators/raises_test.py @@ -1,13 +1,21 @@ import pytest +from functools import partial def test_raises(should): def error(): raise AssertionError('foo') + def error_with_params(foo_param): + raise AssertionError(foo_param) + error | should.raise_error(AssertionError) error | should.do_not.raise_error(NotImplementedError) + partial(error_with_params, "Foobar") | should.raise_error(AssertionError) + partial(error_with_params, "Foobar") | should.to_not\ + .raise_error(NotImplementedError) + with pytest.raises(AssertionError): error | should.raise_error(NotImplementedError)
Assertion Errors

Love the library. It is super useful! I am trying to figure out how to check whether a function raises an exception when called with a certain set of parameters. Here is an example (a bit contrived, as I am trying to raise my own Exception based on valid inputs).

```python
t = [0, 1, 2]
t.index(3) | should.raise_error(ValueError)
```

Right now `should.raise_error` expects a function to be passed into it. E.g.

```python
t.index | should.raise_error(ValueError)
```

Right now I have to compose the test this way.

```python
error = None
try:
    t.index(3)
except Exception as err:
    error = err
error | should.be.an.instance.of(ValueError)
```

Am I missing something?
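The fix relaxes the subject check from `inspect.isfunction`/`ismethod` to plain `callable`, so arguments can be bound with `functools.partial` instead of calling the function eagerly. A sketch based on the updated tests:

```python
from functools import partial
from grappa import should

t = [0, 1, 2]

# Bind the argument, then pass the callable to the operator:
partial(t.index, 3) | should.raise_error(ValueError)
partial(t.index, 1) | should.do_not.raise_error(ValueError)
```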
0.0
c81d2eeaf63788dd130ba712645c82bb1af9b752
[ "tests/operators/raises_test.py::test_raises" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2018-01-23 15:13:23+00:00
mit
2,674
grappa-py__grappa-65
diff --git a/grappa/operators/raises.py b/grappa/operators/raises.py index 763f2a5..a60f91d 100644 --- a/grappa/operators/raises.py +++ b/grappa/operators/raises.py @@ -55,6 +55,14 @@ class RaisesOperator(Operator): 'an object of type "{type}" with reference "{value}"', ) + def after_success(self, obj, *keys): + message = getattr(self.value, 'message', None) + + if not message: + message = ' '.join([str(item) for item in self.value.args]) + + self.ctx.subject = message + def match(self, fn, *errors): if not callable(fn): return False, ['subject must be a function or method'] @@ -64,11 +72,6 @@ class RaisesOperator(Operator): except Exception as err: self.value = err - # If no errors present, just raise the exception - if not errors: - return True, [] - - # Otherwise match errors return isinstance(err, *errors), ['invalid raised exception'] else: return False, ['did not raise any exception']
grappa-py/grappa
b2c14fc9731abe01e9f0ca8093aa5ebc01846437
diff --git a/tests/operators/raises_test.py b/tests/operators/raises_test.py index a3acdfa..cbdf978 100644 --- a/tests/operators/raises_test.py +++ b/tests/operators/raises_test.py @@ -3,12 +3,16 @@ from functools import partial def test_raises(should): + def no_error(): + pass + def error(): raise AssertionError('foo') def error_with_params(foo_param): raise AssertionError(foo_param) + error | should.raise_error(Exception) error | should.raise_error(AssertionError) error | should.do_not.raise_error(NotImplementedError) @@ -24,3 +28,47 @@ def test_raises(should): with pytest.raises(AssertionError): None | should.raise_error(AssertionError) + + with pytest.raises(AssertionError): + no_error | should.raise_error(AssertionError) + + +def test_raises_with_message_redirection(should): + def error(): + raise AssertionError('foo') + + def env_error(): + raise EnvironmentError(3501, 'bar') + + error | should.raise_error(AssertionError) > should.equal('foo') + + error | should.raise_error(AssertionError) > should.contain('fo') + + error | should.do_not.raise_error(NotImplementedError) \ + > should.equal('foo') + + env_error | should.raise_error(EnvironmentError) > should.contain('bar') + + env_error | should.raise_error(EnvironmentError) > should.equal('3501 bar') + + with pytest.raises(AssertionError): + error | should.raise_error(AssertionError) > should.equal('fooe') + + with pytest.raises(AssertionError): + error | should.raise_error(NotImplementedError) > should.equal('foo') + + +def test_raises_custom_exception_message_redirection(should): + class CustomException(Exception): + message = 'foo' + + def __init__(self, *args): + super(CustomException, self).__init__(self.message, *args) + + def custom_error(): + raise CustomException('bar') + + custom_error | should.raise_error(CustomException) > should.equal('foo') + + custom_error | should.raise_error(CustomException) \ + > should.do_not.equal('foo bar')
Checking for error/exception messages Hello, I'm new to grappa. I'm trying to figure out how to test the contents of an Error/Exception. Typically, ```python import pytest def method(): raise RuntimeError('message') def test_method(): with pytest.raises(RuntimeError, match='message'): method() ``` How can that be achieved with Grappa? I can't find the missing step in the documentation. Thanks.
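The patch for this instance answers the question by redirecting the assertion subject to the exception's message once `raise_error` matches; based on the new tests it adds, usage looks like this sketch (reusing `method` from the issue):

```python
from grappa import should

def method():
    raise RuntimeError('message')

# once raise_error matches, the subject becomes the exception message,
# so further operators can be chained with `>`
method | should.raise_error(RuntimeError) > should.equal('message')
method | should.raise_error(RuntimeError) > should.contain('mess')
```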
0.0
b2c14fc9731abe01e9f0ca8093aa5ebc01846437
[ "tests/operators/raises_test.py::test_raises_with_message_redirection", "tests/operators/raises_test.py::test_raises_custom_exception_message_redirection" ]
[ "tests/operators/raises_test.py::test_raises" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-11-21 15:45:38+00:00
mit
2,675
graycarl__hbkit-22
diff --git a/hbkit/__init__.py b/hbkit/__init__.py index 78596de..4d9613b 100644 --- a/hbkit/__init__.py +++ b/hbkit/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import import click -from . import core, random, short, watch, git, backup, pi, time, config +from . import core, random, short, watch, git, backup, pi, time, config, ip __version__ = '0.6.0' @@ -33,3 +33,4 @@ cli.add_command(backup.cli, 'backup') cli.add_command(pi.cli, 'pi') cli.add_command(time.cli, 'time') cli.add_command(config.cli, 'config') +cli.add_command(ip.cli, 'ip') diff --git a/hbkit/ip.py b/hbkit/ip.py new file mode 100644 index 0000000..e3b846a --- /dev/null +++ b/hbkit/ip.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from builtins import * # noqa +import click +import requests + + +SERVICES = { + 'httpbin': { + 'url': 'https://httpbin.org/ip', + 'response': lambda data: data['origin'] + }, + 'ipify': { + 'url': 'https://api.ipify.org', + 'params': { + 'format': 'json' + }, + 'response': lambda data: data['ip'] + } +} + + +@click.group('ip') +def cli(): + """Tools about ip address.""" + + +@cli.command('get-public') +@click.option('--timeout', default=5.0, help='Timeout for network requests.') +def cli_get_public(timeout): + """Get current public IP.""" + for name in ('ipify', 'httpbin'): + service = SERVICES[name] + try: + response = requests.get(service['url'], + params=service.get('params'), + timeout=timeout) + response.raise_for_status() + ip = service['response'](response.json()) + break + except requests.exceptions.RequestException: + continue + else: + raise click.ClickException('Can not get public IP') + click.echo(ip) diff --git a/hbkit/lib.py b/hbkit/lib.py index bee5361..3327846 100644 --- a/hbkit/lib.py +++ b/hbkit/lib.py @@ -101,7 +101,9 @@ class ConfigManager(object): def save_to_file(self): try: configfile = open(self.path, 'w') - except FileNotFoundError: + # We can't use Python 3's FileNotFoundError here yet, because Python 2 + # doesn't define it and Python-Future doesn't provide a compatible shim for it. + except IOError: os.makedirs(os.path.dirname(self.path)) configfile = open(self.path, 'w') with configfile:
graycarl/hbkit
52e10591b3db82364ded1bfe3829b4293beedf60
diff --git a/tests/test_ip.py b/tests/test_ip.py new file mode 100644 index 0000000..8040ee2 --- /dev/null +++ b/tests/test_ip.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from builtins import * # noqa +import requests +from hbkit import ip + + +class MockGet(object): + + class FakeResponse(object): + def __init__(self, json): + self._json = json + + def raise_for_status(self): + pass + + def json(self): + return self._json + + def __init__(self, responses): + self.responses = responses + + def __call__(self, url, params, **kwargs): + resp = self.responses[url] + if isinstance(resp, Exception): + raise resp + return self.FakeResponse(resp) + + +def test_get_public(runner, monkeypatch): + mock_get = MockGet({ + 'https://httpbin.org/ip': { + 'origin': 'ip from httpbin', + }, + 'https://api.ipify.org': { + 'ip': 'ip from ipify', + } + }) + monkeypatch.setattr(requests, 'get', mock_get) + # normal case + result = runner.invoke(ip.cli_get_public).output.strip() + assert result == 'ip from ipify' + + # ipify failed case + mock_get.responses['https://api.ipify.org'] = requests.Timeout() + result = runner.invoke(ip.cli_get_public).output.strip() + assert result == 'ip from httpbin' + + # both failed case + mock_get.responses['https://httpbin.org/ip'] = requests.Timeout() + result = runner.invoke(ip.cli_get_public).output.strip() + assert 'Can not get public IP' in result
Get current public IP Sample: ``` $ hbkit ip get-public 202.111.111.111 ```
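The patch implements this by querying a list of public-IP services in order and falling back to the next one on any request error; a condensed standalone sketch of that pattern, using the same two services the patch registers:

```python
import requests

SERVICES = [
    ('https://api.ipify.org?format=json', lambda data: data['ip']),
    ('https://httpbin.org/ip', lambda data: data['origin']),
]

def get_public_ip(timeout=5.0):
    # try each service in order; fall back to the next on any request error
    for url, extract in SERVICES:
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()
            return extract(response.json())
        except requests.exceptions.RequestException:
            continue
    raise RuntimeError('Can not get public IP')
```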
0.0
52e10591b3db82364ded1bfe3829b4293beedf60
[ "tests/test_ip.py::test_get_public" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-08-06 06:40:06+00:00
mit
2,676
griffithlab__VAtools-41
diff --git a/docs/index.rst b/docs/index.rst index 7f26fc7..3c991e0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -24,7 +24,8 @@ annotate VCF files with data from other tools. field in the VCF INFO column. **vcf-genotype-annotator** - A tool to add a new sample to an existing VCF file. + A tool to add a new sample to an existing VCF file or fill in the GT field + for an existing sample in a VCF. **vep-annotation-reporter** A tool to create a tab-delimited (TSV) file of variants in a VCF and their diff --git a/docs/vcf_genotype_annotator.rst b/docs/vcf_genotype_annotator.rst index 4781da6..5948461 100644 --- a/docs/vcf_genotype_annotator.rst +++ b/docs/vcf_genotype_annotator.rst @@ -7,6 +7,9 @@ The sample's GT field is pre-populated with a default value given by the third positional argument. Options are ``0/1``, ``1/1``, ``0/0``, or ``.``. +It can also be used to add a GT field to an existing sample, e.g. for VCFs +created by Strelka which does not output a GT field for its calls. + By default the output VCF will be written to a ``.genotype.vcf`` file next to your input VCF file. You can set a different output file using the ``--output-vcf`` parameter. diff --git a/vatools/vcf_genotype_annotator.py b/vatools/vcf_genotype_annotator.py index 7df8942..65f7969 100644 --- a/vatools/vcf_genotype_annotator.py +++ b/vatools/vcf_genotype_annotator.py @@ -7,8 +7,9 @@ from collections import OrderedDict def create_vcf_reader(args): vcf_reader = vcfpy.Reader.from_path(args.input_vcf) if args.sample_name in vcf_reader.header.samples.names: - vcf_reader.close() - raise Exception("VCF already contains a sample column for sample {}.".format(args.sample_name)) + if 'GT' in vcf_reader.header.format_ids(): + vcf_reader.close() + raise Exception("VCF already contains a sample column for sample {} with a GT field.".format(args.sample_name)) return vcf_reader def create_vcf_writer(args, vcf_reader): @@ -18,14 +19,18 @@ def create_vcf_writer(args, vcf_reader): (head, sep, tail) = args.input_vcf.rpartition('.vcf') output_file = ('').join([head, '.genotype.vcf', tail]) sample_info = vcf_reader.header.samples - sample_info.names.append(args.sample_name) - sample_info.name_to_idx[args.sample_name] = len(sample_info.names)-1 + if args.sample_name in sample_info.names: + append_to_existing_sample = True + else: + append_to_existing_sample = False + sample_info.names.append(args.sample_name) + sample_info.name_to_idx[args.sample_name] = len(sample_info.names)-1 new_header = vcfpy.Header(samples = sample_info) for line in vcf_reader.header.lines: if not (line.key == 'FORMAT' and line.id == 'GT'): new_header.add_line(line) new_header.add_format_line(OrderedDict([('ID', 'GT'), ('Number', '1'), ('Type', 'String'), ('Description', 'Genotype')])) - return vcfpy.Writer.from_path(output_file, new_header) + return ( vcfpy.Writer.from_path(output_file, new_header), append_to_existing_sample ) def define_parser(): parser = argparse.ArgumentParser("vcf-genotype-annotator") @@ -56,20 +61,23 @@ def main(args_input = sys.argv[1:]): args = parser.parse_args(args_input) vcf_reader = create_vcf_reader(args) - vcf_writer = create_vcf_writer(args, vcf_reader) + (vcf_writer, append_to_existing_sample) = create_vcf_writer(args, vcf_reader) for entry in vcf_reader: - new_sample_call = vcfpy.Call(args.sample_name, data={'GT': args.genotype_value}) if "GT" not in entry.FORMAT: if isinstance(entry.FORMAT, tuple): entry.FORMAT = ["GT"] else: entry.FORMAT.insert(0, 'GT') + if append_to_existing_sample: + entry.call_for_sample[args.sample_name].data['GT'] = args.genotype_value else: - entry.calls.append(new_sample_call) + new_sample_call = vcfpy.Call(args.sample_name, data={'GT': args.genotype_value}) + if entry.calls: + entry.calls.append(new_sample_call) else: - entry.calls = [new_sample_call] + entry.calls = [new_sample_call] + entry.call_for_sample = {call.sample: call for call in entry.calls} vcf_writer.write_record(entry) vcf_reader.close()
griffithlab/VAtools
b876fe698e774e808e675ed61a722f9f1ff853b1
diff --git a/tests/test_vcf_genotype_annotator.py b/tests/test_vcf_genotype_annotator.py index 8343771..28a1322 100644 --- a/tests/test_vcf_genotype_annotator.py +++ b/tests/test_vcf_genotype_annotator.py @@ -16,7 +16,7 @@ class VcfExpressionEncoderTests(unittest.TestCase): def test_source_compiles(self): self.assertTrue(py_compile.compile(self.executable)) - def test_error_sample_name_already_exists(self): + def test_error_sample_name_already_exists_with_GT_field(self): with self.assertRaises(Exception) as context: command = [ os.path.join(self.test_data_dir, 'input.vcf'), @@ -24,7 +24,7 @@ class VcfExpressionEncoderTests(unittest.TestCase): '0/1', ] vcf_genotype_annotator.main(command) - self.assertTrue('VCF already contains a sample column for sample H_NJ-HCC1395-HCC1395.' in str(context.exception)) + self.assertTrue('VCF already contains a sample column for sample H_NJ-HCC1395-HCC1395 with a GT field.' in str(context.exception)) def test_no_sample_vcf(self): temp_path = tempfile.TemporaryDirectory() @@ -61,3 +61,15 @@ class VcfExpressionEncoderTests(unittest.TestCase): vcf_genotype_annotator.main(command) self.assertTrue(cmp(os.path.join(self.test_data_dir, 'no_gt_in_format.genotype.vcf'), os.path.join(temp_path.name, 'input.genotype.vcf'))) temp_path.cleanup() + + def test_adding_gt_in_existing_sample(self): + temp_path = tempfile.TemporaryDirectory() + os.symlink(os.path.join(self.test_data_dir, 'input.no_gt_in_format.vcf'), os.path.join(temp_path.name, 'input.vcf')) + command = [ + os.path.join(temp_path.name, 'input.vcf'), + 'H_NJ-HCC1395-HCC1395', + '0/1', + ] + vcf_genotype_annotator.main(command) + self.assertTrue(cmp(os.path.join(self.test_data_dir, 'existing_sample.genotype.vcf'), os.path.join(temp_path.name, 'input.genotype.vcf'))) + temp_path.cleanup()
Add feature to vcf-genotype-annotator to allow for adding GT field to existing samples Right now the `vcf-genotype-annotator` will throw a fatal error if the given sample already exists in the VCF. If the GT field does not yet exist in the VCF (no header and not a field in `FORMAT`), this new option would allow the tool to add the GT field.
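In the patch this boils down to writing the genotype into the existing sample's call data instead of appending a new sample column; a condensed sketch of that core step with vcfpy, where 'sample.vcf' and 'TUMOR' are placeholder names:

```python
import vcfpy

reader = vcfpy.Reader.from_path('sample.vcf')  # placeholder input path
for entry in reader:
    if 'GT' not in entry.FORMAT:
        entry.FORMAT.insert(0, 'GT')  # GT must be declared in FORMAT
    # fill the GT field of an already-existing sample column
    entry.call_for_sample['TUMOR'].data['GT'] = '0/1'  # 'TUMOR' is a placeholder
```

(The real tool also rewrites the header to declare the GT FORMAT line, as the patch shows.)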
0.0
b876fe698e774e808e675ed61a722f9f1ff853b1
[ "tests/test_vcf_genotype_annotator.py::VcfExpressionEncoderTests::test_error_sample_name_already_exists_with_GT_field" ]
[ "tests/test_vcf_genotype_annotator.py::VcfExpressionEncoderTests::test_no_sample_vcf", "tests/test_vcf_genotype_annotator.py::VcfExpressionEncoderTests::test_source_compiles" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-07-22 15:56:56+00:00
mit
2,677
griffithlab__VAtools-45
diff --git a/vatools/vcf_expression_annotator.py b/vatools/vcf_expression_annotator.py index a094bad..29d7b6f 100644 --- a/vatools/vcf_expression_annotator.py +++ b/vatools/vcf_expression_annotator.py @@ -225,7 +225,7 @@ def main(args_input = sys.argv[1:]): vcf_writer.close() if missing_expressions_count > 0: - logging.warning("{} of {} transcripts did not have an expression entry for their {} id.".format(missing_expressions_count, entry_count, args.mode)) + logging.warning("{} of {} {}s did not have an expression entry for their {} id.".format(missing_expressions_count, entry_count, args.mode, args.mode)) if __name__ == '__main__': main()
griffithlab/VAtools
89b018a36870b7bebfd0ecd97da7d12ce0ccf89c
diff --git a/tests/test_vcf_expression_annotator.py b/tests/test_vcf_expression_annotator.py index 21a037c..276ba10 100644 --- a/tests/test_vcf_expression_annotator.py +++ b/tests/test_vcf_expression_annotator.py @@ -165,7 +165,7 @@ class VcfExpressionEncoderTests(unittest.TestCase): ] vcf_expression_annotator.main(command) temp_path.cleanup() - l.check_present(('root', 'WARNING', "1 of 1 transcripts did not have an expression entry for their gene id.")) + l.check_present(('root', 'WARNING', "1 of 1 genes did not have an expression entry for their gene id.")) def test_multi_sample_vcf(self): temp_path = tempfile.TemporaryDirectory()
Inaccurate warning message when running vcf-expression-annotator Very minor, but if a gene is not found when running vcf-expression-annotator, the warning message still says: ```WARNING:root:12 of 382 transcripts did not have an expression entry for their gene id.``` from ```vcf-expression-annotator SCLC20_R_LN.vatools_anno_6.vcf /storage1/fs1/rgovindan/Active/rnaAnalysis/phase0/expression/SCLC20_R_LN.kallisto_gene_abundance.tsv kallisto gene -s TUMOR -o SCLC20_R_LN.vatools_anno_7.vcf```
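The one-line fix interpolates the run mode into the noun as well as the id label; a quick illustration of the corrected format string from the patch:

```python
mode = 'gene'
missing_expressions_count, entry_count = 12, 382
print("{} of {} {}s did not have an expression entry for their {} id.".format(
    missing_expressions_count, entry_count, mode, mode))
# -> 12 of 382 genes did not have an expression entry for their gene id.
```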
0.0
89b018a36870b7bebfd0ecd97da7d12ce0ccf89c
[ "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_warning_mutation_without_matching_expression_value" ]
[ "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_already_GX_annotated", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_already_TX_annotated", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_custom_format_expression_column_not_set", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_custom_format_id_column_not_set", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_expression_column_nonexistent_in_file", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_id_column_nonexistent_in_file", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_more_than_one_sample_with_wrong_sample_name", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_more_than_one_sample_without_sample_name", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_error_no_csq", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_skip_ENSR_transcript", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_skip_variant_without_gene_in_csq", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_source_compiles", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_warning_kallisto_with_transcript_version_in_expression_file", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_warning_kallisto_with_transcript_version_in_vcf", "tests/test_vcf_expression_annotator.py::VcfExpressionEncoderTests::test_warning_no_csq_for_variants" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-12-18 16:41:17+00:00
mit
2,678
gristlabs__asttokens-101
diff --git a/asttokens/astroid_compat.py b/asttokens/astroid_compat.py new file mode 100644 index 0000000..3ba5e8d --- /dev/null +++ b/asttokens/astroid_compat.py @@ -0,0 +1,14 @@ +try: + from astroid import nodes as astroid_node_classes + + # astroid_node_classes should be whichever module has the NodeNG class + from astroid.nodes import NodeNG +except Exception: + try: + from astroid import node_classes as astroid_node_classes + from astroid.node_classes import NodeNG + except Exception: # pragma: no cover + astroid_node_classes = None + NodeNG = None + +__all__ = ["astroid_node_classes", "NodeNG"] diff --git a/asttokens/mark_tokens.py b/asttokens/mark_tokens.py index 0f935c0..0aa497f 100644 --- a/asttokens/mark_tokens.py +++ b/asttokens/mark_tokens.py @@ -24,12 +24,7 @@ import six from . import util from .asttokens import ASTTokens from .util import AstConstant - -try: - import astroid.node_classes as nc -except Exception: - # This is only used for type checking, we don't need it if astroid isn't installed. - nc = None +from .astroid_compat import astroid_node_classes as nc if TYPE_CHECKING: from .util import AstNode @@ -88,6 +83,9 @@ class MarkTokens(object): first = token last = None for child in cast(Callable, self._iter_children)(node): + # astroid slices have especially wrong positions, we don't want them to corrupt their parents. + if util.is_empty_astroid_slice(child): + continue if not first or child.first_token.index < first.index: first = child.first_token if not last or child.last_token.index > last.index: diff --git a/asttokens/util.py b/asttokens/util.py index 96fa931..4abc83e 100644 --- a/asttokens/util.py +++ b/asttokens/util.py @@ -24,8 +24,9 @@ from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Un from six import iteritems + if TYPE_CHECKING: # pragma: no cover - from astroid.node_classes import NodeNG + from .astroid_compat import NodeNG # Type class used to expand out the definition of AST to include fields added by this library # It's not actually used for anything other than type checking though! @@ -218,6 +219,15 @@ def is_slice(node): ) +def is_empty_astroid_slice(node): + # type: (AstNode) -> bool + return ( + node.__class__.__name__ == "Slice" + and not isinstance(node, ast.AST) + and node.lower is node.upper is node.step is None + ) + + # Sentinel value used by visit_tree(). _PREVISIT = object() diff --git a/pyproject.toml b/pyproject.toml index ea6e65f..2543e24 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,5 +20,5 @@ disallow_untyped_calls=false ignore_missing_imports=true [[tool.mypy.overrides]] -module = ["astroid", "astroid.node_classes"] -ignore_missing_imports = true \ No newline at end of file +module = ["astroid", "astroid.node_classes", "astroid.nodes", "astroid.nodes.utils"] +ignore_missing_imports = true diff --git a/setup.cfg b/setup.cfg index a2bde84..f506500 100644 --- a/setup.cfg +++ b/setup.cfg @@ -39,7 +39,7 @@ install_requires = setup_requires = setuptools>=44; setuptools_scm[toml]>=3.4.3 [options.extras_require] -test = astroid<=2.5.3; pytest +test = astroid; pytest [options.package_data] asttokens = py.typed
gristlabs/asttokens
13ae11f408361316c82d1f09be118a3b1ce70828
diff --git a/tests/test_astroid.py b/tests/test_astroid.py index 1608359..a5cc6d7 100644 --- a/tests/test_astroid.py +++ b/tests/test_astroid.py @@ -2,9 +2,9 @@ from __future__ import unicode_literals, print_function import astroid -from astroid.node_classes import NodeNG from asttokens import ASTTokens +from asttokens.astroid_compat import astroid_node_classes from . import test_mark_tokens @@ -13,7 +13,7 @@ class TestAstroid(test_mark_tokens.TestMarkTokens): is_astroid_test = True module = astroid - nodes_classes = NodeNG + nodes_classes = astroid_node_classes.NodeNG context_classes = [ (astroid.Name, astroid.DelName, astroid.AssignName), (astroid.Attribute, astroid.DelAttr, astroid.AssignAttr), diff --git a/tests/test_mark_tokens.py b/tests/test_mark_tokens.py index cebb226..5aba077 100644 --- a/tests/test_mark_tokens.py +++ b/tests/test_mark_tokens.py @@ -19,6 +19,11 @@ from asttokens import util, ASTTokens from . import tools +try: + from astroid.nodes.utils import Position as AstroidPosition +except Exception: + AstroidPosition = () + class TestMarkTokens(unittest.TestCase): maxDiff = None @@ -230,7 +235,7 @@ b + # line3 def test_slices(self): # Make sure we don't fail on parsing slices of the form `foo[4:]`. - source = "(foo.Area_Code, str(foo.Phone)[:3], str(foo.Phone)[3:], foo[:], bar[::2, :], [a[:]][::-1])" + source = "(foo.Area_Code, str(foo.Phone)[:3], str(foo.Phone)[3:], foo[:], bar[::2, :], bar2[:, ::2], [a[:]][::-1])" m = self.create_mark_checker(source) self.assertIn("Tuple:" + source, m.view_nodes_at(1, 0)) self.assertEqual(m.view_nodes_at(1, 1), @@ -243,7 +248,7 @@ b + # line3 # important, so we skip them here. self.assertEqual({n for n in m.view_nodes_at(1, 56) if 'Slice:' not in n}, { "Subscript:foo[:]", "Name:foo" }) - self.assertEqual({n for n in m.view_nodes_at(1, 64) if 'Slice:' not in n}, + self.assertEqual({n for n in m.view_nodes_at(1, 64) if 'Slice:' not in n and 'Tuple:' not in n}, { "Subscript:bar[::2, :]", "Name:bar" }) def test_adjacent_strings(self): @@ -814,6 +819,10 @@ partial_sums = [total := total + v for v in values] else: self.assertEqual(type(t1), type(t2)) + if isinstance(t1, AstroidPosition): + # Ignore the lineno/col_offset etc. from astroid + return + if isinstance(t1, (list, tuple)): self.assertEqual(len(t1), len(t2)) for vc1, vc2 in zip(t1, t2):
Test failure with py3.8 and astroid-2.6+: tests/test_astroid.py::TestAstroid::test_slices - AssertionError: Items in the first set but not the second: I'm not sure if this is a problem in asttokens or astroid itself. If you believe it's the latter, please lemme know if I should file a bug there. When running the test suite under Python 3.8 (3.9+ is fine, astroid<2.6 is fine), I get the following failure: ```pytb $ tox -e py38 GLOB sdist-make: /tmp/asttokens/setup.py py38 create: /tmp/asttokens/.tox/py38 py38 installdeps: .[test] py38 inst: /tmp/asttokens/.tox/.tmp/package/1/asttokens-2.0.5.zip py38 installed: astroid==2.6.2,asttokens @ file:///tmp/asttokens/.tox/.tmp/package/1/asttokens-2.0.5.zip,attrs==21.2.0,iniconfig==1.1.1,lazy-object-proxy==1.6.0,packaging==21.0,pluggy==0.13.1,py==1.10.0,pyparsing==2.4.7,pytest==6.2.4,six==1.16.0,toml==0.10.2,wrapt==1.12.1 py38 run-test-pre: PYTHONHASHSEED='2657976898' py38 run-test: commands[0] | pytest ========================================================= test session starts ========================================================= platform linux -- Python 3.8.11, pytest-6.2.4, py-1.10.0, pluggy-0.13.1 cachedir: .tox/py38/.pytest_cache rootdir: /tmp/asttokens, configfile: setup.cfg collected 107 items tests/test_astroid.py .............s..........................F...... [ 43%] tests/test_asttokens.py ...... [ 49%] tests/test_line_numbers.py ... [ 52%] tests/test_mark_tokens.py ............................................... [ 96%] tests/test_util.py .... [100%] ============================================================== FAILURES =============================================================== _______________________________________________________ TestAstroid.test_slices _______________________________________________________ self = <tests.test_astroid.TestAstroid testMethod=test_slices> def test_slices(self): # Make sure we don't fail on parsing slices of the form `foo[4:]`. source = "(foo.Area_Code, str(foo.Phone)[:3], str(foo.Phone)[3:], foo[:], bar[::2, :], [a[:]][::-1])" m = self.create_mark_checker(source) self.assertIn("Tuple:" + source, m.view_nodes_at(1, 0)) self.assertEqual(m.view_nodes_at(1, 1), { "Attribute:foo.Area_Code", "Name:foo" }) self.assertEqual(m.view_nodes_at(1, 16), { "Subscript:str(foo.Phone)[:3]", "Call:str(foo.Phone)", "Name:str"}) self.assertEqual(m.view_nodes_at(1, 36), { "Subscript:str(foo.Phone)[3:]", "Call:str(foo.Phone)", "Name:str"}) # Slice and ExtSlice nodes are wrong, and in particular placed with parents. They are not very # important, so we skip them here. self.assertEqual({n for n in m.view_nodes_at(1, 56) if 'Slice:' not in n}, { "Subscript:foo[:]", "Name:foo" }) > self.assertEqual({n for n in m.view_nodes_at(1, 64) if 'Slice:' not in n}, { "Subscript:bar[::2, :]", "Name:bar" }) E AssertionError: Items in the first set but not the second: E 'Tuple:bar[::2, :]' tests/test_mark_tokens.py:242: AssertionError ======================================================= short test summary info ======================================================= FAILED tests/test_astroid.py::TestAstroid::test_slices - AssertionError: Items in the first set but not the second: ============================================== 1 failed, 105 passed, 1 skipped in 12.13s ============================================== ERROR: InvocationError for command /tmp/asttokens/.tox/py38/bin/pytest (exited with code 1) _______________________________________________________________ summary _______________________________________________________________ ERROR: py38: commands failed ```
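The heart of the eventual fix (the patch for this instance) is an import shim that tolerates astroid's 2.6 module reorganisation, plus skipping astroid's empty Slice nodes when computing parent token ranges; the shim pattern in isolation looks like this:

```python
# prefer the astroid >= 2.6 module layout, fall back to the older one
try:
    from astroid import nodes as astroid_node_classes
    from astroid.nodes import NodeNG
except Exception:
    try:
        from astroid import node_classes as astroid_node_classes
        from astroid.node_classes import NodeNG
    except Exception:
        astroid_node_classes = None
        NodeNG = None
```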
0.0
13ae11f408361316c82d1f09be118a3b1ce70828
[ "tests/test_astroid.py::TestAstroid::test_adjacent_joined_strings", "tests/test_astroid.py::TestAstroid::test_adjacent_strings", "tests/test_astroid.py::TestAstroid::test_assert_nodes_equal", "tests/test_astroid.py::TestAstroid::test_assignment_expressions", "tests/test_astroid.py::TestAstroid::test_async_def", "tests/test_astroid.py::TestAstroid::test_async_for_and_with", "tests/test_astroid.py::TestAstroid::test_await", "tests/test_astroid.py::TestAstroid::test_bad_tokenless_types", "tests/test_astroid.py::TestAstroid::test_calling_lambdas", "tests/test_astroid.py::TestAstroid::test_complex_numbers", "tests/test_astroid.py::TestAstroid::test_complex_slice_and_parens", "tests/test_astroid.py::TestAstroid::test_comprehensions", "tests/test_astroid.py::TestAstroid::test_conditional_expr", "tests/test_astroid.py::TestAstroid::test_decorators", "tests/test_astroid.py::TestAstroid::test_del_dict", "tests/test_astroid.py::TestAstroid::test_dict_merge", "tests/test_astroid.py::TestAstroid::test_dict_order", "tests/test_astroid.py::TestAstroid::test_fixture1", "tests/test_astroid.py::TestAstroid::test_fixture10", "tests/test_astroid.py::TestAstroid::test_fixture11", "tests/test_astroid.py::TestAstroid::test_fixture12", "tests/test_astroid.py::TestAstroid::test_fixture13", "tests/test_astroid.py::TestAstroid::test_fixture2", "tests/test_astroid.py::TestAstroid::test_fixture3", "tests/test_astroid.py::TestAstroid::test_fixture4", "tests/test_astroid.py::TestAstroid::test_fixture5", "tests/test_astroid.py::TestAstroid::test_fixture6", "tests/test_astroid.py::TestAstroid::test_fixture7", "tests/test_astroid.py::TestAstroid::test_fixture8", "tests/test_astroid.py::TestAstroid::test_fixture9", "tests/test_astroid.py::TestAstroid::test_fstrings", "tests/test_astroid.py::TestAstroid::test_keyword_arg_only", "tests/test_astroid.py::TestAstroid::test_mark_tokens_multiline", "tests/test_astroid.py::TestAstroid::test_mark_tokens_simple", "tests/test_astroid.py::TestAstroid::test_nonascii", "tests/test_astroid.py::TestAstroid::test_one_line_if_elif", "tests/test_astroid.py::TestAstroid::test_paren_attr", "tests/test_astroid.py::TestAstroid::test_parens_around_func", "tests/test_astroid.py::TestAstroid::test_print_function", "tests/test_astroid.py::TestAstroid::test_return_annotation", "tests/test_astroid.py::TestAstroid::test_slices", "tests/test_astroid.py::TestAstroid::test_splat", "tests/test_astroid.py::TestAstroid::test_statements_with_semicolons", "tests/test_astroid.py::TestAstroid::test_sys_modules", "tests/test_astroid.py::TestAstroid::test_trailing_commas", "tests/test_astroid.py::TestAstroid::test_tuples", "tests/test_astroid.py::TestAstroid::test_with", "tests/test_mark_tokens.py::TestMarkTokens::test_adjacent_joined_strings", "tests/test_mark_tokens.py::TestMarkTokens::test_adjacent_strings", "tests/test_mark_tokens.py::TestMarkTokens::test_assert_nodes_equal", "tests/test_mark_tokens.py::TestMarkTokens::test_assignment_expressions", "tests/test_mark_tokens.py::TestMarkTokens::test_async_def", "tests/test_mark_tokens.py::TestMarkTokens::test_async_for_and_with", "tests/test_mark_tokens.py::TestMarkTokens::test_await", "tests/test_mark_tokens.py::TestMarkTokens::test_bad_tokenless_types", "tests/test_mark_tokens.py::TestMarkTokens::test_calling_lambdas", "tests/test_mark_tokens.py::TestMarkTokens::test_complex_numbers", "tests/test_mark_tokens.py::TestMarkTokens::test_complex_slice_and_parens", "tests/test_mark_tokens.py::TestMarkTokens::test_comprehensions", 
"tests/test_mark_tokens.py::TestMarkTokens::test_conditional_expr", "tests/test_mark_tokens.py::TestMarkTokens::test_decorators", "tests/test_mark_tokens.py::TestMarkTokens::test_deep_recursion", "tests/test_mark_tokens.py::TestMarkTokens::test_del_dict", "tests/test_mark_tokens.py::TestMarkTokens::test_dict_merge", "tests/test_mark_tokens.py::TestMarkTokens::test_dict_order", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture1", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture10", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture11", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture12", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture13", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture2", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture3", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture4", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture5", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture6", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture7", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture8", "tests/test_mark_tokens.py::TestMarkTokens::test_fixture9", "tests/test_mark_tokens.py::TestMarkTokens::test_fstrings", "tests/test_mark_tokens.py::TestMarkTokens::test_keyword_arg_only", "tests/test_mark_tokens.py::TestMarkTokens::test_mark_tokens_multiline", "tests/test_mark_tokens.py::TestMarkTokens::test_mark_tokens_simple", "tests/test_mark_tokens.py::TestMarkTokens::test_nonascii", "tests/test_mark_tokens.py::TestMarkTokens::test_one_line_if_elif", "tests/test_mark_tokens.py::TestMarkTokens::test_paren_attr", "tests/test_mark_tokens.py::TestMarkTokens::test_parens_around_func", "tests/test_mark_tokens.py::TestMarkTokens::test_print_function", "tests/test_mark_tokens.py::TestMarkTokens::test_return_annotation", "tests/test_mark_tokens.py::TestMarkTokens::test_slices", "tests/test_mark_tokens.py::TestMarkTokens::test_splat", "tests/test_mark_tokens.py::TestMarkTokens::test_statements_with_semicolons", "tests/test_mark_tokens.py::TestMarkTokens::test_sys_modules", "tests/test_mark_tokens.py::TestMarkTokens::test_trailing_commas", "tests/test_mark_tokens.py::TestMarkTokens::test_tuples", "tests/test_mark_tokens.py::TestMarkTokens::test_with" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-10-30 16:23:11+00:00
apache-2.0
2,679
grktsh__falcon-oas-5
diff --git a/src/falcon_oas/oas/schema/validators.py b/src/falcon_oas/oas/schema/validators.py index e853bb4..a8b09fd 100644 --- a/src/falcon_oas/oas/schema/validators.py +++ b/src/falcon_oas/oas/schema/validators.py @@ -19,7 +19,20 @@ def _type_validator(validator, types, instance, schema): yield error -_Validator = validators.extend(Draft4Validator, {'type': _type_validator}) +_enum_draft4_validator = Draft4Validator.VALIDATORS['enum'] + + +def _enum_validator(validator, enums, instance, schema): + if instance is None and schema.get('nullable'): + return + + for error in _enum_draft4_validator(validator, enums, instance, schema): + yield error + + +_Validator = validators.extend( + Draft4Validator, {'type': _type_validator, 'enum': _enum_validator} +) class SchemaValidator(object):
grktsh/falcon-oas
0328d7f4c0c806bc2cdd9c45f5c720a1ec61cbc4
diff --git a/tests/oas/schema/test_unmarshalers.py b/tests/oas/schema/test_unmarshalers.py index 4783e5f..f94bde3 100644 --- a/tests/oas/schema/test_unmarshalers.py +++ b/tests/oas/schema/test_unmarshalers.py @@ -56,6 +56,13 @@ def test_unmarshal_primitive_without_formats(): assert unmarshaled == instance +def test_unmarshal_primitive_enum(): + schema = {'type': 'string', 'enum': ['a', 'b']} + instance = 'a' + unmarshaled = SchemaUnmarshaler().unmarshal(instance, schema) + assert unmarshaled == 'a' + + def test_unmarshal_array(): schema = {'type': 'array', 'items': {'type': 'string', 'format': 'date'}} instance = ['2018-01-02', '2018-02-03', '2018-03-04'] @@ -164,6 +171,7 @@ def test_unmarshal_one_of_or_any_of(schema_type): {'type': 'array', 'nullable': True}, {'type': 'object', 'nullable': True}, {'type': 'string', 'format': 'date', 'nullable': True}, + {'type': 'string', 'enum': ['a', 'b'], 'nullable': True}, ], ) def test_unmarshal_nullable(schema): diff --git a/tests/oas/schema/test_validators.py b/tests/oas/schema/test_validators.py index f0666d1..4a9bce8 100644 --- a/tests/oas/schema/test_validators.py +++ b/tests/oas/schema/test_validators.py @@ -37,6 +37,18 @@ def test_validate_error(validator): assert exc_info.value.errors[0].message == message +def test_validate_enum_error(validator): + schema = {'type': str('string'), 'enum': [str('a'), str('b')]} + instance = str('c') + message = "'c' is not one of ['a', 'b']" + + with pytest.raises(ValidationError) as exc_info: + validator.validate(instance, schema) + + assert len(exc_info.value.errors) == 1 + assert exc_info.value.errors[0].message == message + + def test_validate_format_string(validator): schema = {'type': 'string', 'format': 'date'} instance = '2018-01-02'
Support nullable enum Same as #3, just for `enum`.
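With the patched validator, a schema can combine `enum` with OpenAPI's `nullable` flag so that `None` passes while other non-members still fail; the schema below comes from the new unmarshaler test (the import path is assumed from the test layout):

```python
from falcon_oas.oas.schema.unmarshalers import SchemaUnmarshaler  # assumed import path

schema = {'type': 'string', 'enum': ['a', 'b'], 'nullable': True}
SchemaUnmarshaler().unmarshal(None, schema)  # accepted: nullable skips the enum check
SchemaUnmarshaler().unmarshal('c', schema)   # would raise: 'c' is not one of ['a', 'b']
```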
0.0
0328d7f4c0c806bc2cdd9c45f5c720a1ec61cbc4
[ "tests/oas/schema/test_unmarshalers.py::test_unmarshal_nullable[schema4]" ]
[ "tests/oas/schema/test_unmarshalers.py::test_unmarshal_validation_error", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema0-True]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema1-False]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema2-0.0]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema3-2.0]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema4-0]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema5-2]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive[schema6-foo]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive_format", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive_without_formats", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_primitive_enum", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_array", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[None-None-expected0]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[None-True-expected1]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[None-additional_properties2-expected2]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[None-additional_properties3-expected3]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[properties4-None-expected4]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[properties5-True-expected5]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[properties6-additional_properties6-expected6]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_object_properties_and_additional_properties[properties7-additional_properties7-expected7]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_all_of", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_all_of_required_only", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_one_of_or_any_of[oneOf]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_one_of_or_any_of[anyOf]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_nullable[schema0]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_nullable[schema1]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_nullable[schema2]", "tests/oas/schema/test_unmarshalers.py::test_unmarshal_nullable[schema3]", "tests/oas/schema/test_validators.py::test_validate_success", "tests/oas/schema/test_validators.py::test_validate_error", "tests/oas/schema/test_validators.py::test_validate_enum_error", "tests/oas/schema/test_validators.py::test_validate_format_string", "tests/oas/schema/test_validators.py::test_validate_format_string_error", "tests/oas/schema/test_validators.py::test_validate_format_integer[-2147483648]", "tests/oas/schema/test_validators.py::test_validate_format_integer[0]", "tests/oas/schema/test_validators.py::test_validate_format_integer[2147483647]", "tests/oas/schema/test_validators.py::test_validate_format_integer_error[-2147483649]", "tests/oas/schema/test_validators.py::test_validate_format_integer_error[2147483648]", "tests/oas/schema/test_validators.py::test_validate_format_type_error", 
"tests/oas/schema/test_validators.py::test_validate_format_error_without_format_checker", "tests/oas/schema/test_validators.py::test_validate_nullable_success[foo-False]", "tests/oas/schema/test_validators.py::test_validate_nullable_success[foo-True]", "tests/oas/schema/test_validators.py::test_validate_nullable_success[None-True]", "tests/oas/schema/test_validators.py::test_validate_nullable_error", "tests/oas/schema/test_validators.py::test_validate_nullable_with_format" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
2019-04-16 16:20:28+00:00
apache-2.0
2,680
gtsystem__python-remotezip-14
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index cd0191b..c56f2c7 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -12,7 +12,7 @@ on: jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: matrix: python-version: ["2.7", "3.6", "3.9", "3.10"] diff --git a/remotezip.py b/remotezip.py index cf82595..cfe6924 100644 --- a/remotezip.py +++ b/remotezip.py @@ -1,5 +1,6 @@ import io import zipfile +from itertools import tee import requests @@ -208,6 +209,13 @@ class RemoteFetcher: raise RemoteIOError(str(e)) +def pairwise(iterable): + # pairwise('ABCDEFG') --> AB BC CD DE EF FG + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + class RemoteZip(zipfile.ZipFile): def __init__(self, url, initial_buffer_size=64*1024, session=None, fetcher=RemoteFetcher, **kwargs): fetcher = fetcher(url, session, **kwargs) @@ -216,15 +224,12 @@ class RemoteZip(zipfile.ZipFile): rio.set_position_to_size(self._get_position_to_size()) def _get_position_to_size(self): - ilist = self.infolist() + ilist = [info.header_offset for info in self.infolist()] if len(ilist) == 0: return {} + ilist.sort() + ilist.append(self.start_dir) + return {a: b-a for a, b in pairwise(ilist)} - position_to_size = {ilist[-1].header_offset: self.start_dir - ilist[-1].header_offset} - for i in range(len(ilist) - 1): - m1, m2 = ilist[i: i+2] - position_to_size[m1.header_offset] = m2.header_offset - m1.header_offset - - return position_to_size diff --git a/setup.py b/setup.py index dea5194..1aea6e4 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ with open("README.md") as f: setup( name='remotezip', - version='0.11.0', + version='0.11.1', author='Giuseppe Tribulato', author_email='[email protected]', py_modules=['remotezip'],
gtsystem/python-remotezip
8d6634cc51e127afb7a9704c364f4fa136b5fd8d
diff --git a/test_remotezip.py b/test_remotezip.py index 317a57c..c45f41c 100644 --- a/test_remotezip.py +++ b/test_remotezip.py @@ -238,6 +238,32 @@ class TestRemoteZip(unittest.TestCase): self.assertIsNone(zfile.testzip()) + @staticmethod + def make_unordered_zip_file(fname): + with zipfile.ZipFile(fname, 'w') as zip: + zip.writestr("fileA", "A" * 300000 + 'Z') + zip.writestr("fileB", "B" * 10000 + 'Z') + zip.writestr("fileC", "C" * 100000 + 'Z') + info_list = zip.infolist() + info_list[0], info_list[1] = info_list[1], info_list[0] + + def test_unordered_fileinfo(self): + """Test that zip file with unordered fileinfo records works as well. Fix #13.""" + with TmpDir() as dire: + fname = os.path.join(dire, 'test.zip') + self.make_unordered_zip_file(fname) + + with rz.RemoteZip(fname, fetcher=LocalFetcher) as zfile: + names = zfile.namelist() + self.assertEqual(names, ['fileB', 'fileA', 'fileC']) + with zfile.open('fileB', 'r') as f: + self.assertEqual(f.read(), b"B" * 10000 + b'Z') + with zfile.open('fileA', 'r') as f: + self.assertEqual(f.read(), b"A" * 300000 + b'Z') + with zfile.open('fileC', 'r') as f: + self.assertEqual(f.read(), b"C" * 100000 + b'Z') + self.assertIsNone(zfile.testzip()) + def test_fetch_part(self): # fetch a range expected_headers = {'Range': 'bytes=10-20'}
Get 416 Client Error on some zip files remotezip raises `remotezip.RemoteIOError` on some files. For example, http://0x0.st/o5Pa.apk (it's unavailable for now, so I re-uploaded it to https://transfer.sh/get/rndzvr/test.apk) Also uploaded it to GitHub as a zip archive: [test.zip](https://github.com/gtsystem/python-remotezip/files/10267095/test.zip) ``` $ remotezip http://0x0.st/o5Pa.apk AndroidManifest.xml Extracting AndroidManifest.xml... Traceback (most recent call last): File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 193, in fetch_fun res, headers = self.request(self.url, range_header, kwargs, self.session) File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 184, in request res.raise_for_status() File "/.../venv/lib/python3.10/site-packages/requests/models.py", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 416 Client Error: Requested Range Not Satisfiable for url: http://0x0.st/o5Pa.apk During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/.../venv/bin/remotezip", line 43, in <module> extract_files(args.url, args.filename, args.dir) File "/.../venv/bin/remotezip", line 26, in extract_files zip.extract(fname, path=path) File "/usr/lib/python3.10/zipfile.py", line 1628, in extract return self._extract_member(member, path, pwd) File "/usr/lib/python3.10/zipfile.py", line 1698, in _extract_member with self.open(member, pwd=pwd) as source, \ File "/usr/lib/python3.10/zipfile.py", line 1530, in open fheader = zef_file.read(sizeFileHeader) File "/usr/lib/python3.10/zipfile.py", line 745, in read data = self._file.read(n) File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 112, in read self.buffer = self.fetch_fun((self.buffer.position, self.buffer.position + fetch_size -1), stream=stream) File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 196, in fetch_fun raise RemoteIOError(str(e)) remotezip.RemoteIOError: 416 Client Error: Requested Range Not Satisfiable for url: http://0x0.st/o5Pa.apk ``` I also tested this file with [PartialZipBrowser](https://github.com/tihmstar/partialZipBrowser), and it works fine: ``` $ pzb -g AndroidManifest.xml https://0x0.st/o5Pa.apk Version: 9bfdde2b2456181045f74631683fba491d8bf4f2 - 38 libfragmentzip version: 0.64-aaf6fae83a0aa6f7aae1c94721857076d04a14e8-RELEASE init pzb: https://0x0.st/o5Pa.apk init done getting: AndroidManifest.xml 100% [===================================================================================================>] download succeeded ``` It looks like a byte range calculation bug in remotezip. I also sniffed the traffic generated by remotezip and PartialZipBrowser while downloading this file in mitmproxy; this may help you. 1. remotezip ![image](https://user-images.githubusercontent.com/43933400/208459898-d62dd2ad-1fd3-40d8-85c5-df2416a06144.png) 2. PartialZipBrowser: ![image](https://user-images.githubusercontent.com/43933400/208460225-12c31661-f2ed-44c7-b412-de6ba7bc2aca.png)
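The fix (the patch above for this instance) sorts the members' header offsets before computing per-member sizes, so out-of-order central-directory entries can no longer produce invalid ranges; a tiny sketch of the corrected computation (the offset numbers are made up):

```python
from itertools import tee

def pairwise(iterable):
    # pairwise('ABCDEFG') --> AB BC CD DE EF FG
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

# header offsets from the central directory, possibly unordered (as in the bug)
offsets = [310000, 0, 100000]
offsets.sort()
offsets.append(420000)  # start of the central directory closes the last member
position_to_size = {start: end - start for start, end in pairwise(offsets)}
print(position_to_size)  # {0: 100000, 100000: 210000, 310000: 110000}
```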
0.0
8d6634cc51e127afb7a9704c364f4fa136b5fd8d
[ "test_remotezip.py::TestRemoteZip::test_unordered_fileinfo" ]
[ "test_remotezip.py::TestRemoteIO::test_simple", "test_remotezip.py::TestRemoteIO::test_file_access", "test_remotezip.py::TestPartialBuffer::test_static_seek", "test_remotezip.py::TestPartialBuffer::test_static", "test_remotezip.py::TestPartialBuffer::test_static_out_of_bound", "test_remotezip.py::TestPartialBuffer::test_static_read_no_size", "test_remotezip.py::TestPartialBuffer::test_stream", "test_remotezip.py::TestPartialBuffer::test_stream_forward_seek", "test_remotezip.py::TestRemoteZip::test_big_header", "test_remotezip.py::TestRemoteZip::test_range_not_supported", "test_remotezip.py::TestRemoteZip::test_zip64", "test_remotezip.py::TestRemoteZip::test_interface", "test_remotezip.py::TestRemoteZip::test_fetch_part", "test_remotezip.py::TestRemoteZip::test_fetch_ending", "test_remotezip.py::TestRemoteZip::test_custom_session", "test_remotezip.py::TestLocalFetcher::test_parse_range_header", "test_remotezip.py::TestLocalFetcher::test_build_range_header" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_git_commit_hash", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-12-21 22:01:59+00:00
mit
2,681
gtsystem__python-remotezip-16
diff --git a/README.md b/README.md index f359611..d570447 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,8 @@ To download the content, this library rely on the `requests` module. The constru * ... Please look at the [requests](http://docs.python-requests.org/en/master/user/quickstart/#make-a-request) documentation for futher usage details. * **initial\_buffer\_size**: How much data (in bytes) to fetch during the first connection to download the zip file central directory. If your zip file conteins a lot of files, would be a good idea to increase this parameter in order to avoid the need for further remote requests. *Default: 64kb*. * **session**: a custom session object to use for the request. +* **support_suffix_range**: You can set this attribute to `False` if the remote server doesn't support suffix range + (negative offset). Notice that this option will use one more HEAD request to fetch the content length. ### Class Interface diff --git a/remotezip.py b/remotezip.py index cfe6924..27366c2 100644 --- a/remotezip.py +++ b/remotezip.py @@ -162,10 +162,11 @@ class RemoteIO(io.IOBase): class RemoteFetcher: """Represent a remote file to be fetched in parts""" - def __init__(self, url, session=None, **kwargs): + def __init__(self, url, session=None, support_suffix_range=True, **kwargs): self._kwargs = kwargs self._url = url self._session = session + self._support_suffix_range = support_suffix_range @staticmethod def parse_range_header(content_range_header): @@ -191,15 +192,33 @@ class RemoteFetcher: raise RangeNotSupported("The server doesn't support range requests") return res.raw, res.headers['Content-Range'] - def prepare_request(self, data_range): - range_header = self.build_range_header(*data_range) + def prepare_request(self, data_range=None): kwargs = dict(self._kwargs) kwargs['headers'] = headers = dict(kwargs.get('headers', {})) - headers['Range'] = range_header + if data_range is not None: + headers['Range'] = self.build_range_header(*data_range) return kwargs + def get_file_size(self): + if self._session: + res = self._session.head(self._url, **self.prepare_request()) + else: + res = requests.head(self._url, **self.prepare_request()) + try: + res.raise_for_status() + return int(res.headers['Content-Length']) + except IOError as e: + raise RemoteIOError(str(e)) + except KeyError: + raise RemoteZipError("Cannot get file size: Content-Length header missing") + def fetch(self, data_range, stream=False): """Fetch a part of a remote file""" + # Handle the case suffix range request is not supported. Fixes #15 + if data_range[0] < 0 and data_range[1] is None and not self._support_suffix_range: + size = self.get_file_size() + data_range = (max(0, size + data_range[0]), size - 1) + kwargs = self.prepare_request(data_range) try: res, range_header = self._request(kwargs) @@ -217,8 +236,9 @@ def pairwise(iterable): class RemoteZip(zipfile.ZipFile): - def __init__(self, url, initial_buffer_size=64*1024, session=None, fetcher=RemoteFetcher, **kwargs): - fetcher = fetcher(url, session, **kwargs) + def __init__(self, url, initial_buffer_size=64*1024, session=None, fetcher=RemoteFetcher, support_suffix_range=True, + **kwargs): + fetcher = fetcher(url, session, support_suffix_range=support_suffix_range, **kwargs) rio = RemoteIO(fetcher.fetch, initial_buffer_size) super(RemoteZip, self).__init__(rio) rio.set_position_to_size(self._get_position_to_size()) diff --git a/setup.py b/setup.py index 1aea6e4..4abadc9 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ with open("README.md") as f: setup( name='remotezip', - version='0.11.1', + version='0.12.0', author='Giuseppe Tribulato', author_email='[email protected]', py_modules=['remotezip'],
gtsystem/python-remotezip
2afca9a2df6c302f61eb612683bd4e4ee74e9cb5
diff --git a/test_remotezip.py b/test_remotezip.py index c45f41c..03e006d 100644 --- a/test_remotezip.py +++ b/test_remotezip.py @@ -42,6 +42,7 @@ class ServerSimulator: context.headers['Content-Range'] = rz.RemoteFetcher.build_range_header(init_pos, init_pos + len(content)) return content + class LocalFetcher(rz.RemoteFetcher): def fetch(self, data_range, stream=False): with open(self._url, 'rb') as f: @@ -60,9 +61,9 @@ class LocalFetcher(rz.RemoteFetcher): f = io.BytesIO(f.read(range_max - range_min + 1)) buff = rz.PartialBuffer(f, range_min, range_max - range_min + 1, stream=stream) - #buff = self._make_buffer(f, content_range, stream=stream) return buff + class TestPartialBuffer(unittest.TestCase): def setUp(self): if not hasattr(self, 'assertRaisesRegex'): @@ -288,6 +289,19 @@ class TestRemoteZip(unittest.TestCase): self.assertEqual(buffer.tell(), 10) self.assertEqual(buffer.read(3), b"abc") + def test_fetch_ending_unsupported_suffix(self): + # fetch file ending + expected_headers = {'Range': 'bytes=900-999'} + headers = {'Content-Range': 'Bytes 900-999/1000'} + with requests_mock.Mocker() as m: + m.head("http://test.com/file.zip", status_code=200, headers={'Content-Length': '1000'}) + m.get("http://test.com/file.zip", content=b"abc", status_code=200, headers=headers, + request_headers=expected_headers) + fetcher = rz.RemoteFetcher("http://test.com/file.zip", support_suffix_range=False) + buffer = fetcher.fetch((-100, None), stream=True) + self.assertEqual(buffer.tell(), 900) + self.assertEqual(buffer.read(3), b"abc") + @staticmethod def make_zip_file(fname): with zipfile.ZipFile(fname, 'w', compression=zipfile.ZIP_DEFLATED) as zip:
Unhandled HTTPErrors When Negative Offset Not Supported When trying to use remotezip with PyPI, I discovered that while their server supports range requests, it does not support using a negative offset to get {content-length - bytes}-{content-length}. When raise_for_status is called, an HTTP 501 error is returned (it could theoretically also be a 405) and remotezip aborts. https://github.com/gtsystem/python-remotezip/blob/2afca9a2df6c302f61eb612683bd4e4ee74e9cb5/remotezip.py#L184-L189 Reproducer: ``` from remotezip import RemoteZip url = "https://files.pythonhosted.org/packages/71/6d/95777fd66507106d2f8f81d005255c237187951644f85a5bd0baeec8a88f/paramiko-2.12.0-py2.py3-none-any.whl" with RemoteZip(url) as wzip: wzip.extract('METADATA') ``` I was going to do a pull request, but not being super good with Python myself, I found it was not obviously fixable (to me at least) with a simple try/catch since it's going through constructors, etc. Checking for a 501 or 405 error before raise_for_status and falling back to getting self.__file_size by a separate http request for content-length should fix this. Something like: ``` def _request(self, kwargs): if self._session: res = self._session.get(self._url, stream=True, **kwargs) else: res = requests.get(self._url, stream=True, **kwargs) if res.status_code in (501, 405): (do whatever needs to be done so that self._file_size = requests.get(self._url, stream=True).headers['Content-Length'] ) res.raise_for_status() ``` Edit: I also submitted a bug/feature request to PyPI/warehouse about this on their server; I don't anticipate they will implement it quickly, but if they do, the given reproducer may not work.
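The released fix (the patch above) handles this inside the fetcher rather than `_request`: when suffix ranges are flagged as unsupported, it first asks the server for the file size with a HEAD request and then rewrites the negative offset as an absolute range. A minimal standalone sketch of that conversion:

```python
import requests

def resolve_suffix_range(url, data_range, support_suffix_range=True):
    # data_range like (-100, None) means "the last 100 bytes of the file"
    start, end = data_range
    if start < 0 and end is None and not support_suffix_range:
        # fall back to an extra HEAD request for Content-Length
        size = int(requests.head(url).headers['Content-Length'])
        start, end = max(0, size + start), size - 1
    return start, end
```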
0.0
2afca9a2df6c302f61eb612683bd4e4ee74e9cb5
[ "test_remotezip.py::TestRemoteZip::test_fetch_ending_unsupported_suffix" ]
[ "test_remotezip.py::TestRemoteZip::test_unordered_fileinfo", "test_remotezip.py::TestRemoteZip::test_custom_session", "test_remotezip.py::TestRemoteZip::test_zip64", "test_remotezip.py::TestRemoteZip::test_interface", "test_remotezip.py::TestRemoteZip::test_fetch_ending", "test_remotezip.py::TestRemoteZip::test_big_header", "test_remotezip.py::TestRemoteZip::test_range_not_supported", "test_remotezip.py::TestRemoteZip::test_fetch_part", "test_remotezip.py::TestRemoteIO::test_file_access", "test_remotezip.py::TestRemoteIO::test_simple", "test_remotezip.py::TestPartialBuffer::test_stream", "test_remotezip.py::TestPartialBuffer::test_static", "test_remotezip.py::TestPartialBuffer::test_stream_forward_seek", "test_remotezip.py::TestPartialBuffer::test_static_out_of_bound", "test_remotezip.py::TestPartialBuffer::test_static_seek", "test_remotezip.py::TestPartialBuffer::test_static_read_no_size", "test_remotezip.py::TestLocalFetcher::test_build_range_header", "test_remotezip.py::TestLocalFetcher::test_parse_range_header" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2023-01-15 13:59:01+00:00
mit
2,682
guykisel__inline-plz-228
diff --git a/inlineplz/linters/__init__.py b/inlineplz/linters/__init__.py index 7fede16..a0fd9a4 100644 --- a/inlineplz/linters/__init__.py +++ b/inlineplz/linters/__init__.py @@ -100,9 +100,9 @@ LINTERS = { 'install': [['npm', 'install', 'eslint']], 'help': [os.path.normpath('./node_modules/.bin/eslint'), '-h'], 'run': - [os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'json'], + [os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'unix'], 'rundefault': [ - os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'json', + os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'unix', '-c', '{config_dir}/.eslintrc.js', '--ignore-path', '{config_dir}/.eslintignore' ], 'dotfiles': [ diff --git a/inlineplz/linters/config/.eslintignore b/inlineplz/linters/config/.eslintignore index 6713aaf..ce2175e 100644 --- a/inlineplz/linters/config/.eslintignore +++ b/inlineplz/linters/config/.eslintignore @@ -1,10 +1,10 @@ -coverage/** -docs/** -jsdoc/** -templates/** -tmp/** -vendor/** -src/** -dist/** -node_modules/** +**/coverage/** +**/docs/** +**/jsdoc/** +**/templates/** +**/tmp/** +**/vendor/** +**/src/** +**/dist/** **/node_modules/** +**/.tox/** diff --git a/inlineplz/parsers/eslint.py b/inlineplz/parsers/eslint.py index 3d0e556..972ae1e 100644 --- a/inlineplz/parsers/eslint.py +++ b/inlineplz/parsers/eslint.py @@ -12,14 +12,14 @@ class ESLintParser(ParserBase): def parse(self, lint_data): messages = set() - for filedata in json.loads(lint_data): - if filedata.get('messages'): - for msgdata in filedata['messages']: - try: - path = filedata['filePath'] - line = msgdata['line'] - msgbody = msgdata['message'] - messages.add((path, line, msgbody)) - except (ValueError, KeyError): - print('Invalid message: {0}'.format(msgdata)) + for line in lint_data.split('\n'): + try: + parts = line.split(':') + if line.strip() and parts: + path = parts[0].strip() + line = int(parts[1].strip()) + msgbody = ':'.join(parts[3:]).strip() + messages.add((path, line, msgbody)) + except (ValueError, IndexError): + print('Invalid message: {0}'.format(line)) return messages
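For reference, eslint's `unix` formatter emits one `path:line:column: message` record per finding, which is what the rewritten parser splits on; a small sketch of that parsing step (the sample line is shortened from the updated test fixture):

```python
line = ("/var/lib/jenkins/workspace/eslint/fullOfProblems.js:1:10: "
        "'addOne' is defined but never used. [Error/no-unused-vars]")

parts = line.split(':')
path = parts[0].strip()
lineno = int(parts[1].strip())
# the column (parts[2]) is dropped; everything after it is the message
message = ':'.join(parts[3:]).strip()
print((path, lineno, message))
```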
guykisel/inline-plz
dc293c43edd1609683294660fb7c6a0840fb24ea
diff --git a/tests/parsers/test_eslint.py b/tests/parsers/test_eslint.py index 8255168..780af9f 100644 --- a/tests/parsers/test_eslint.py +++ b/tests/parsers/test_eslint.py @@ -18,6 +18,6 @@ eslint_path = os.path.join( def test_eslint(): with codecs.open(eslint_path, encoding='utf-8', errors='replace') as inputfile: messages = sorted(list(eslint.ESLintParser().parse(inputfile.read()))) - assert messages[0][2] == 'Parsing error: Illegal return statement' - assert messages[0][1] == 17 - assert messages[0][0] == 'C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\asi.js' + assert messages[0][2] == "'addOne' is defined but never used. [Error/no-unused-vars]" + assert messages[0][1] == 1 + assert messages[0][0] == '/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js' diff --git a/tests/testdata/parsers/eslint.txt b/tests/testdata/parsers/eslint.txt index 27a5040..04d345a 100644 --- a/tests/testdata/parsers/eslint.txt +++ b/tests/testdata/parsers/eslint.txt @@ -1,1 +1,9 @@ -[{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\data\\ascii-identifier-data.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\data\\non-ascii-identifier-part-only.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\data\\non-ascii-identifier-start.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\dist\\jshint-rhino.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\dist\\jshint.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\examples\\reporter.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\scripts\\build.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\scripts\\generate-identifier-data.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\cli.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\jshint.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\lex.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\messages.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\name-stack.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\options.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\platforms\\rhino.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reg.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\checkstyle.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\default.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\jslint_xml.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\non_error.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\unix.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\scope-manager.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\state.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\style.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\vars.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\browser.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\cli.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\browser\\fixture-fs.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\browser\\server.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\fixture.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\testhelper.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\backbone.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\codemirror3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\jquery-1.7.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\json2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\lodash.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\prototype-17.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\npm.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\thirdparty.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\core.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\envs.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\asi.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":17,"column":20}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\blocks.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected end of input","line":32,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\boss.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\browser.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\camelcase.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\caseExpressions.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\class-declaration.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected reserved word","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\comma.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected identifier","line":15,"column":7}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\const.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token const","line":16,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\curly.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":2,"column":12}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\curly2.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":2,"column":12}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\default-arguments.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token =","line":7,"column":28}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\destparam.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token =","line":4,"column":17}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\emptystmt.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ;","line":1,"column":5}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\enforceall.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\eqeqeq.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es5.funcexpr.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es5.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Object literal may not have data and accessor property with the same name","line":43,"column":19}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es5Reserved.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token default","line":6,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-export-star-from.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal export 
declaration","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-import-export.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal import declaration","line":3,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-template-literal-tagged.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":5,"column":18}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-template-literal.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":3,"column":15}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\exported.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\forin.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\function-declaration.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal export declaration","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\functionScopedOptions.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-2194.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-226.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-334.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-738-browser.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-738-node.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1227.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1632-1.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1632-2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1632-3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-1.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-4.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-5.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-6.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1802.js","messages":[],"errorCount":0,"warningCount":0},{"filePath
":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh247.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh431.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh56.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh618.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh668.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh826.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token <","line":24,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh870.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh878.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh988.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gruntComment.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\identifiers.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\ignore-w117.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\ignored.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\ignoreDelimiters.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token <","line":3,"column":4}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\immed.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\insideEval.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\jslintInverted.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\jslintOptions.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\jslintRenamed.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\lastsemic.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedef-esnext.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
let","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedef-inline.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedefundef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\laxbreak.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\laxcomma.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\leak.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token const","line":3,"column":4}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\loopfunc.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\mappingstart.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-cyclomatic-complexity-per-function.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-nested-block-depth-per-function.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-parameters-per-function.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token =","line":7,"column":13}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-statements-per-function.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\maxlen.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\multiline-global-declarations.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nativeobject.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nbsp.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nestedFunctions-locations.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nestedFunctions.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token [","line":37,"column":3}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\newcap.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\noarg.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\onevar.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\parsingCommas.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
,","line":2,"column":13}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\protoiterator.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes4.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":2,"column":14}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\redef-es6.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token let","line":2,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\redef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\regex_array.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":6,"column":8}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\removeglobals.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\reserved.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token let","line":5,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\return.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\safeasi.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
.","line":10,"column":9}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scope-cross-blocks.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scope-redef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scope.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scripturl.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\shadow-inline.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\shelljs.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_incorrect.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_newcap.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_this.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_this2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_violations.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strings.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":9,"column":22}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\supernew.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\switchDefaultFirst.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\switchFallThrough.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token :","line":40,"column":13}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\trycatch.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\typeofcomp.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\undef_func.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\undef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\undefstrict.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unignored.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unused-cross-blocks.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unused.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
const","line":34,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unusedglobals.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\with.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Strict mode code may not include a with statement","line":13,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\yield-expressions.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token *","line":1,"column":10}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\options.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\parser.js","messages":[],"errorCount":0,"warningCount":0}] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:1:10: 'addOne' is defined but never used. [Error/no-unused-vars] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:2:9: Use the isNaN function to compare with NaN. [Error/use-isnan] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:3:16: Unexpected space before unary operator '++'. [Error/space-unary-ops] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:3:20: Missing semicolon. [Warning/semi] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:4:12: Unnecessary 'else' after 'return'. [Warning/no-else-return] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:5:1: Expected indentation of 8 spaces but found 6. [Warning/indent] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:5:7: Function 'addOne' expected a return value. [Error/consistent-return] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:5:13: Missing semicolon. [Warning/semi] +/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:7:2: Unnecessary semicolon. [Error/no-extra-semi]
Switch eslint to a different formatter, since the json formatter breaks on long text: https://github.com/eslint/eslint/issues/5380 ``` b'Invalid string length\nRangeError: Invalid string length\n at JSON.stringify (<anonymous>)\n at module.exports (/home/travis/build/guykisel/inline-plz/node_modules/eslint/lib/formatters/json.js:12:17)\n at printResults (/home/travis/build/guykisel/inline-plz/node_modules/eslint/lib/cli.js:91:20)\n at Object.execute (/home/travis/build/guykisel/inline-plz/node_modules/eslint/lib/cli.js:201:17)\n at Object.<anonymous> (/home/travis/build/guykisel/inline-plz/node_modules/eslint/bin/eslint.js:74:28)\n at Module._compile (module.js:635:30)\n at Object.Module._extensions..js (module.js:646:10)\n at Module.load (module.js:554:32)\n at tryModuleLoad (module.js:497:12)\n at Function.Module._load (module.js:489:3)' Parsing of eslint took 0 seconds ```
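A hypothetical sketch of parsing the unix-style formatter output that the updated fixture uses (`path:line:col: message` lines), which sidesteps the json formatter's "Invalid string length" failure on large result sets; the regex, function name, and tuple layout here are illustrative assumptions, not the actual `ESLintParser` implementation:

```python
import re

# Illustrative pattern for unix-formatter lines: path:line:col: message
LINE_RE = re.compile(r'^(?P<path>.+?):(?P<line>\d+):(?P<col>\d+): (?P<message>.+)$')

def parse(output):
    # Yield (path, line, message) triples, mirroring the indices the test asserts
    for raw in output.splitlines():
        match = LINE_RE.match(raw)
        if match:
            yield match.group('path'), int(match.group('line')), match.group('message')

sample = ("/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/"
          "fullOfProblems.js:1:10: 'addOne' is defined but never used. "
          "[Error/no-unused-vars]")
path, line, message = next(parse(sample))
assert line == 1
assert message == "'addOne' is defined but never used. [Error/no-unused-vars]"
```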
0.0
dc293c43edd1609683294660fb7c6a0840fb24ea
[ "tests/parsers/test_eslint.py::test_eslint" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-05-01 23:55:15+00:00
isc
2,683
gvalkov__tornado-http-auth-8
diff --git a/tornado_http_auth.py b/tornado_http_auth.py index 2c6efbb..cbe74c8 100644 --- a/tornado_http_auth.py +++ b/tornado_http_auth.py @@ -209,8 +209,11 @@ class BasicAuthMixin(object): raise self.SendChallenge() auth_data = auth_header.split(None, 1)[-1] - auth_data = base64.b64decode(auth_data).decode('ascii') - username, password = auth_data.split(':', 1) + try: + auth_data = base64.b64decode(auth_data, validate=True).decode('ascii') + username, password = auth_data.split(':', 1) + except (UnicodeDecodeError, binascii.Error): + raise self.SendChallenge() challenge = check_credentials_func(username) if not challenge:
gvalkov/tornado-http-auth
9eb225c1740fad1e53320b55d8d4fc6ab4ba58b6
diff --git a/tests/test_functional.py b/tests/test_functional.py index d03680b..ca2a817 100644 --- a/tests/test_functional.py +++ b/tests/test_functional.py @@ -40,9 +40,14 @@ class AuthTest(AsyncHTTPTestCase): res = self.fetch('/basic') self.assertEqual(res.code, 401) + res = self.fetch('/basic', headers={'Authorization': 'Basic foo bar'}) + self.assertEqual(res.code, 401) + auth = '%s:%s' % ('user1', 'pass1') auth = b64encode(auth.encode('ascii')) - hdr = {'Authorization': 'Basic %s' % auth.decode('utf8')} - res = self.fetch('/basic', headers=hdr) - self.assertEqual(res.code, 200) + res = self.fetch('/basic', headers={'Authorization': 'Basic ___%s' % auth.decode('utf8')}) + self.assertEqual(res.code, 401) + + res = self.fetch('/basic', headers={'Authorization': 'Basic %s' % auth.decode('utf8')}) + self.assertEqual(res.code, 200)
authenticate_user method raises binascii.Error when provided auth is not base64 The `authenticate_user` method raises an uncontrolled exception when the provided Authorization header is not valid base64. To reproduce the issue: ``` curl -i \ -H 'Accept:application/json' \ -H 'Authorization:Basic not_a_base64_string' \ http://localhost:8000/protected ```
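A minimal sketch of the failure mode and the approach the fix takes, using only the standard library; `decode_basic_auth` is a hypothetical helper for illustration, not the library's API:

```python
import base64
import binascii

def decode_basic_auth(auth_data):
    # validate=True makes b64decode raise binascii.Error on non-alphabet
    # characters (such as '_'), so the caller can answer with a 401
    # challenge instead of crashing with an unhandled exception.
    try:
        decoded = base64.b64decode(auth_data, validate=True).decode('ascii')
        return decoded.split(':', 1)
    except (UnicodeDecodeError, binascii.Error):
        return None  # signal: re-send the authentication challenge

assert decode_basic_auth(base64.b64encode(b'user1:pass1')) == ['user1', 'pass1']
assert decode_basic_auth('not_a_base64_string') is None
```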
0.0
9eb225c1740fad1e53320b55d8d4fc6ab4ba58b6
[ "tests/test_functional.py::AuthTest::test_basic_auth" ]
[ "tests/test_functional.py::AuthTest::test_digest_auth" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2020-07-22 09:46:08+00:00
apache-2.0
2,684
h2non__pook-111
diff --git a/src/pook/helpers.py b/src/pook/helpers.py index 193c2c9..6af0685 100644 --- a/src/pook/helpers.py +++ b/src/pook/helpers.py @@ -1,8 +1,24 @@ +import re + from inspect import ismethod, isfunction from .exceptions import PookInvalidArgument -def trigger_methods(instance, args): +reply_response_re = re.compile("^(response|reply)_") + + +def _get_key(key_order): + def key(x): + raw = reply_response_re.sub("", x) + try: + return key_order.index(raw) + except KeyError: + raise PookInvalidArgument("Unsupported argument: {}".format(x)) + + return key + + +def trigger_methods(instance, args, key_order=None): """ Triggers specific class methods using a simple reflection mechanism based on the given input dictionary params. @@ -10,18 +26,25 @@ def trigger_methods(instance, args): Arguments: instance (object): target instance to dynamically trigger methods. args (iterable): input arguments to trigger objects to + key_order (None|iterable): optional order in which to process keys; falls back to `sorted`'s default behaviour if not present Returns: None """ # Start the magic - for name in sorted(args): + if key_order: + key = _get_key(key_order) + sorted_args = sorted(args, key=key) + else: + sorted_args = sorted(args) + + for name in sorted_args: value = args[name] target = instance # If response attibutes - if name.startswith("response_") or name.startswith("reply_"): - name = name.replace("response_", "").replace("reply_", "") + if reply_response_re.match(name): + name = reply_response_re.sub("", name) # If instance has response attribute, use it if hasattr(instance, "_response"): target = instance._response diff --git a/src/pook/mock.py b/src/pook/mock.py index 1c0316b..dd6fed6 100644 --- a/src/pook/mock.py +++ b/src/pook/mock.py @@ -96,6 +96,46 @@ class Mock(object): pook.Mock """ + _KEY_ORDER = ( + "add_matcher", + "body", + "callback", + "calls", + "content", + "delay", + "done", + "error", + "file", + "filter", + "header", + "header_present", + "headers", + "headers_present", + "isdone", + "ismatched", + "json", + "jsonschema", + "map", + "match", + "matched", + "matches", + "method", + "url", + "param", + "param_exists", + "params", + "path", + "persist", + "reply", + "response", + "status", + "times", + "total_matches", + "type", + "use", + "xml", + ) + def __init__(self, request=None, response=None, **kw): # Stores the number of times the mock should live self._times = 1 @@ -126,7 +166,7 @@ class Mock(object): self.callbacks = [] # Triggers instance methods based on argument names - trigger_methods(self, kw) + trigger_methods(self, kw, self._KEY_ORDER) # Trigger matchers based on predefined request object, if needed if request: diff --git a/src/pook/request.py b/src/pook/request.py index 27e57d9..abd8733 100644 --- a/src/pook/request.py +++ b/src/pook/request.py @@ -44,7 +44,7 @@ class Request(object): self._extra = kw.get("extra") self._headers = HTTPHeaderDict() - trigger_methods(self, kw) + trigger_methods(self, kw, self.keys) @property def method(self): diff --git a/src/pook/response.py b/src/pook/response.py index 6cceae9..9effd56 100644 --- a/src/pook/response.py +++ b/src/pook/response.py @@ -23,6 +23,20 @@ class Response(object): mock (pook.Mock): reference to mock instance. 
""" + _KEY_ORDER = ( + "body", + "content", + "file", + "header", + "headers", + "json", + "mock", + "set", + "status", + "type", + "xml", + ) + def __init__(self, **kw): self._status = 200 self._mock = None @@ -31,7 +45,7 @@ class Response(object): self._headers = HTTPHeaderDict() # Trigger response method based on input arguments - trigger_methods(self, kw) + trigger_methods(self, kw, self._KEY_ORDER) def status(self, code=200): """
h2non/pook
fac40e9f571152ba09bc16954548ce51d590ccea
diff --git a/tests/unit/mock_test.py b/tests/unit/mock_test.py index 4900d9c..4be7b79 100644 --- a/tests/unit/mock_test.py +++ b/tests/unit/mock_test.py @@ -1,6 +1,12 @@ import pytest +import json +import re + +import pook from pook.mock import Mock from pook.request import Request +from urllib.request import urlopen +from urllib.parse import urlencode @pytest.fixture @@ -17,6 +23,47 @@ def test_mock_url(mock): assert str(matcher(mock)) == "http://google.es" [email protected]( + ("param_kwargs", "query_string"), + ( + pytest.param({"params": {"x": "1"}}, "?x=1", id="params"), + pytest.param( + {"param": ("y", "pook")}, + "?y=pook", + marks=pytest.mark.xfail( + condition=True, + reason="Constructor does not correctly handle multi-argument methods from kwargs", + ), + id="param", + ), + pytest.param( + {"param_exists": "z"}, + # This complexity is needed until https://github.com/h2non/pook/issues/110 + # is resolved + f'?{urlencode({"z": re.compile("(.*)")})}', + id="param_exists", + ), + ), +) +def test_constructor(param_kwargs, query_string): + # Should not raise + mock = Mock( + url="https://httpbin.org/404", + reply_status=200, + response_json={"hello": "from pook"}, + **param_kwargs, + ) + + expected_url = f"https://httpbin.org/404{query_string}" + assert mock._request.rawurl == expected_url + + with pook.use(): + pook.engine().add_mock(mock) + res = urlopen(expected_url) + assert res.status == 200 + assert json.loads(res.read()) == {"hello": "from pook"} + + @pytest.mark.parametrize( "url, params, req, expected", [
Cannot pass `param`, `param_exists` or `params` kwarg to `Mock` constructor Passing any of the query parameter settings to the `Mock` constructor results in the following error: ``` > pook.get(url="https://example.com", params={"a": "b"}) src/pook/api.py:342: in get return mock(url, method="GET", **kw) src/pook/api.py:326: in mock return _engine.mock(url, **kw) src/pook/engine.py:150: in mock mock = Mock(url=url, **kw) src/pook/mock.py:130: in __init__ trigger_methods(self, kw) src/pook/helpers.py:41: in trigger_methods member(value) src/pook/mock.py:355: in params url = furl(self._request.rawurl) src/pook/request.py:81: in rawurl return self._url if isregex(self._url) else urlunparse(self._url) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ components = None def urlunparse(components): """Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? with an empty query (the draft states that these are equivalent).""" scheme, netloc, url, params, query, fragment, _coerce_result = ( > _coerce_args(*components)) E TypeError: urllib.parse._coerce_args() argument after * must be an iterable, not NoneType /usr/lib64/python3.12/urllib/parse.py:515: TypeError ``` This is because `params` is called on the `Mock` instance by `trigger_methods` without a `url` necessarily being present. The implementation of `Mock::params` is such that a `url` _must_ be set on the request object before. This happens even if `url` is passed to the constructor because this line sorts the keys before iteration, causing `params` to get handled before `url`: https://github.com/h2non/pook/blob/16ecba6cb2800c836e1bc399336dca1a49e66801/src/pook/helpers.py#L18 A better solution than sorting the list would be to keep a static list of the methods that should be executed in the order they're meant to be. For consistency, this should start with the alphabetical order to maintain the current behaviour, but include the fix to move `url` forward ahead of other methods that depend on it. Ideally, the mock would be lazy in these cases and wait to process the param inputs until the URL is set. This would be more flexible overall anyway, as it would allow for the creation of a mock factory function in testing code that sets up params without the URL, and wouldn't behave in such an unexpected way.
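A minimal sketch of the ordering issue and the key-order remedy the patch applies; `key_order` below is an illustrative subset of the real `_KEY_ORDER` tuple, and the prefix stripping is simplified from the patch's regex:

```python
kwargs = {"params": {"a": "b"}, "url": "https://example.com", "reply_status": 200}

# Plain alphabetical sorting handles 'params' before 'url', so the params
# handler runs while the mock's request URL is still unset.
assert sorted(kwargs) == ["params", "reply_status", "url"]

key_order = ("method", "url", "params", "reply", "status")

def order_key(name):
    # reply_/response_ prefixes are stripped before the lookup, as in the fix
    return key_order.index(name.replace("reply_", "").replace("response_", ""))

# With an explicit key order, 'url' is processed before methods that need it.
assert sorted(kwargs, key=order_key) == ["url", "params", "reply_status"]
```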
0.0
fac40e9f571152ba09bc16954548ce51d590ccea
[ "tests/unit/mock_test.py::test_constructor[params]", "tests/unit/mock_test.py::test_constructor[param_exists]" ]
[ "tests/unit/mock_test.py::test_mock_url", "tests/unit/mock_test.py::test_mock_params[http://google.es-params0-req0-expected0]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params1-req1-expected1]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params2-req2-expected2]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params3-req3-expected3]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params4-req4-expected4]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params5-req5-expected5]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params6-req6-expected6]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params7-req7-expected7]", "tests/unit/mock_test.py::test_new_response" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-12-31 23:30:54+00:00
mit
2,685
h2non__pook-129
diff --git a/pyproject.toml b/pyproject.toml index e1d39cc..ae086b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,9 +60,6 @@ extra-dependencies = [ "urllib3~=1.24", "httpx~=0.26.0", - # aiohttp depends on multidict, so we can't test aiohttp until - # https://github.com/aio-libs/multidict/issues/887 is resolved - # async-timeout is only used for testing aiohttp "aiohttp~=3.8", "async-timeout~=4.0.3", diff --git a/src/pook/engine.py b/src/pook/engine.py index b7dd1bc..81651bb 100644 --- a/src/pook/engine.py +++ b/src/pook/engine.py @@ -3,7 +3,7 @@ from inspect import isfunction from .mock import Mock from .regex import isregex from .mock_engine import MockEngine -from .exceptions import PookNoMatches, PookExpiredMock +from .exceptions import PookNoMatches class Engine(object): @@ -416,16 +416,12 @@ class Engine(object): # Try to match the request against registered mock definitions for mock in self.mocks[:]: - try: - # Return the first matched HTTP request mock - matches, errors = mock.match(request.copy()) - if len(errors): - match_errors += errors - if matches: - return mock - except PookExpiredMock: - # Remove the mock if already expired - self.mocks.remove(mock) + # Return the first matched HTTP request mock + matches, errors = mock.match(request.copy()) + if len(errors): + match_errors += errors + if matches: + return mock # Validate that we have a mock if not self.should_use_network(request): @@ -442,7 +438,11 @@ class Engine(object): msg += "\n\n=> Detailed matching errors:\n{}\n".format(err) # Raise no matches exception - raise PookNoMatches(msg) + self.no_matches(msg) # Register unmatched request self.unmatched_reqs.append(request) + + def no_matches(self, msg): + """Raise `PookNoMatches` and reduce pytest printed stacktrace noise""" + raise PookNoMatches(msg) diff --git a/src/pook/exceptions.py b/src/pook/exceptions.py index b5da0a6..931a3a4 100644 --- a/src/pook/exceptions.py +++ b/src/pook/exceptions.py @@ -1,3 +1,6 @@ +import warnings + + class PookInvalidBody(Exception): pass @@ -11,7 +14,13 @@ class PookNetworkFilterError(Exception): class PookExpiredMock(Exception): - pass + def __init__(self, *args, **kwargs): + warnings.warn( + "PookExpiredMock is deprecated and will be removed in a future version of Pook", + DeprecationWarning, + stacklevel=2, + ) + super().__init__(*args, **kwargs) class PookInvalidArgument(Exception): diff --git a/src/pook/matcher.py b/src/pook/matcher.py index b86333f..9c03c9f 100644 --- a/src/pook/matcher.py +++ b/src/pook/matcher.py @@ -29,7 +29,7 @@ class MatcherEngine(list): request (pook.Request): outgoing request to match. Returns: - tuple(bool, list[Exception]): ``True`` if all matcher tests + tuple(bool, list[str]): ``True`` if all matcher tests passes, otherwise ``False``. Also returns an optional list of error exceptions. """ diff --git a/src/pook/mock.py b/src/pook/mock.py index 246aeb3..ae606d0 100644 --- a/src/pook/mock.py +++ b/src/pook/mock.py @@ -7,7 +7,6 @@ from .constants import TYPES from .request import Request from .matcher import MatcherEngine from .helpers import trigger_methods -from .exceptions import PookExpiredMock from .matchers import init as matcher @@ -750,10 +749,6 @@ class Mock(object): the outgoing HTTP request, otherwise ``False``. Also returns an optional list of error exceptions. 
""" - # If mock already expired, fail it - if self._times <= 0: - raise PookExpiredMock("Mock expired") - # Trigger mock filters for test in self.filters: if not test(request, self): @@ -772,6 +767,9 @@ class Mock(object): if not matches: return False, errors + if self._times <= 0: + return False, [f"Mock matches request but is expired.\n{repr(self)}"] + # Register matched request for further inspecion and reference self._calls.append(request) diff --git a/src/pook/regex.py b/src/pook/regex.py index 1e8dd77..a76b6c1 100644 --- a/src/pook/regex.py +++ b/src/pook/regex.py @@ -1,10 +1,6 @@ import re -import sys -if sys.version_info < (3, 7): - Pattern = type(re.compile("")) -else: - Pattern = re.Pattern +Pattern = re.Pattern def isregex_expr(expr):
h2non/pook
ef5bb1ade60aed66aaccb776a0fc9eb16d58bb5a
diff --git a/tests/integration/examples_test.py b/tests/integration/examples_test.py index 463673d..e0d8e6f 100644 --- a/tests/integration/examples_test.py +++ b/tests/integration/examples_test.py @@ -1,4 +1,3 @@ -import sys import subprocess import pytest from pathlib import Path @@ -15,12 +14,6 @@ if platform.python_implementation() == "PyPy": examples.remove("mocket_example.py") -if sys.version_info >= (3, 12): - # See pyproject.toml note on aiohttp dependency - examples.remove("aiohttp_client.py") - examples.remove("decorator_activate_async.py") - - @pytest.mark.parametrize("example", examples) def test_examples(example): result = subprocess.run(["python", "examples/{}".format(example)]) diff --git a/tests/unit/exceptions_test.py b/tests/unit/exceptions_test.py index 0c64d2d..9245410 100644 --- a/tests/unit/exceptions_test.py +++ b/tests/unit/exceptions_test.py @@ -4,6 +4,5 @@ from pook import exceptions as ex def test_exceptions(): assert isinstance(ex.PookNoMatches(), Exception) assert isinstance(ex.PookInvalidBody(), Exception) - assert isinstance(ex.PookExpiredMock(), Exception) assert isinstance(ex.PookNetworkFilterError(), Exception) assert isinstance(ex.PookInvalidArgument(), Exception) diff --git a/tests/unit/mock_test.py b/tests/unit/mock_test.py index 35d342d..856a5e7 100644 --- a/tests/unit/mock_test.py +++ b/tests/unit/mock_test.py @@ -4,6 +4,7 @@ import json import pook from pook.mock import Mock from pook.request import Request +from pook.exceptions import PookNoMatches from urllib.request import urlopen @@ -123,3 +124,34 @@ def test_mock_params(url, params, req, expected, mock): def test_new_response(mock): assert mock.reply() != mock.reply(new_response=True, json={}) + + +def test_times(mock): + url = "https://example.com" + mock.url(url) + mock.times(2) + + req = Request(url=url) + + assert mock.match(req) == (True, []) + assert mock.match(req) == (True, []) + matches, errors = mock.match(req) + assert not matches + assert len(errors) == 1 + assert "Mock matches request but is expired." in errors[0] + assert repr(mock) in errors[0] + + [email protected] +def test_times_integrated(httpbin): + url = f"{httpbin.url}/status/404" + pook.get(url).times(2).reply(200).body("hello from pook") + + res = urlopen(url) + assert res.read() == "hello from pook" + + res = urlopen(url) + assert res.read() == "hello from pook" + + with pytest.raises(PookNoMatches, match="Mock matches request but is expired."): + urlopen(url)
Provide a more helpful error when `times` is exceeded on a mock I've run into the issue of repeated requests exceeding the default `times: 1` quite a few times, and it has confused me every time. Could pook give a better error message when a mock has matched but `times` is exceeded? For example: ```py mock = pook.get(url, reply=404) requests.get(url) # works requests.get(url) # fails with "pook error! Cannot match any mock for the following request" ``` Could the error instead say `Matched a mock, but it can only be called {n} times`?
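A rough sketch of the behaviour after the change, mirroring the accompanying unit test: an exhausted mock no longer raises during matching; it reports a descriptive error alongside the failed match:

```python
from pook.mock import Mock
from pook.request import Request

mock = Mock(url="https://example.com")
mock.times(2)

req = Request(url="https://example.com")
assert mock.match(req) == (True, [])
assert mock.match(req) == (True, [])

# Third call: the matchers still pass, but the mock is expired, so the
# engine can surface "Mock matches request but is expired." to the user.
matched, errors = mock.match(req)
assert not matched
assert "Mock matches request but is expired." in errors[0]
```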
0.0
ef5bb1ade60aed66aaccb776a0fc9eb16d58bb5a
[ "tests/unit/mock_test.py::test_times" ]
[ "tests/integration/examples_test.py::test_examples[http_client_native.py]", "tests/unit/exceptions_test.py::test_exceptions", "tests/unit/mock_test.py::test_mock_url", "tests/unit/mock_test.py::test_mock_constructor[params]", "tests/unit/mock_test.py::test_mock_constructor[param_exists_has_value]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params0-req0-expected0]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params1-req1-expected1]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params2-req2-expected2]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params3-req3-expected3]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params4-req4-expected4]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params5-req5-expected5]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params6-req6-expected6]", "tests/unit/mock_test.py::test_mock_params[http://google.es-params7-req7-expected7]", "tests/unit/mock_test.py::test_new_response" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2024-03-29 09:08:57+00:00
mit
2,686
h2non__pook-90
diff --git a/pook/headers.py b/pook/headers.py index ac20bd2..9119d77 100644 --- a/pook/headers.py +++ b/pook/headers.py @@ -3,6 +3,8 @@ try: except ImportError: from collections import Mapping, MutableMapping +from base64 import b64encode + class HTTPHeaderDict(MutableMapping): """ @@ -53,7 +55,7 @@ class HTTPHeaderDict(MutableMapping): def __getitem__(self, key): val = self._container[key.lower()] - return ', '.join(val[1:]) + return ', '.join([to_string_value(v) for v in val[1:]]) def __delitem__(self, key): del self._container[key.lower()] @@ -153,28 +155,23 @@ class HTTPHeaderDict(MutableMapping): if new_vals is not vals: self._container[key_lower] = [vals[0], vals[1], val] - def extend(self, *args, **kwargs): + def extend(self, mapping, **kwargs): """ Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__ """ - if len(args) > 1: - raise TypeError("extend() takes at most 1 positional " - "arguments ({0} given)".format(len(args))) - other = args[0] if len(args) >= 1 else () - - if isinstance(other, HTTPHeaderDict): - for key, val in other.iteritems(): + if isinstance(mapping, HTTPHeaderDict): + for key, val in mapping.iteritems(): self.add(key, val) - elif isinstance(other, Mapping): - for key in other: - self.add(key, other[key]) - elif hasattr(other, "keys"): - for key in other.keys(): - self.add(key, other[key]) + elif isinstance(mapping, Mapping): + for key in mapping: + self.add(key, mapping[key]) + elif hasattr(mapping, "keys"): + for key in mapping.keys(): + self.add(key, mapping[key]) else: - for key, value in other: + for key, value in mapping: self.add(key, value) for key, value in kwargs.items(): @@ -231,10 +228,36 @@ class HTTPHeaderDict(MutableMapping): """ for key in self: val = self._container[key.lower()] - yield val[0], ', '.join(val[1:]) + yield val[0], ', '.join([to_string_value(v) for v in val[1:]]) def items(self): return list(self.iteritems()) def to_dict(self): return {key: values for key, values in self.items()} + + +def to_string_value(value): + """ + Retrieve a string value for arbitrary header field value. + + HTTP header values are specified as ASCII strings. However, + the specificiation also states that non-ASCII bytes should be + treated as arbitrary data. In that case, we just rely on unicode + escaping to return a value that at least somewhat resembles the + inputs (at least moreso than other encodings that would significantly + obscure the input, like base 64). + + Arguments:: + value (str|bytes): + The value to cast to ``str``. + + Returns:: + str: + Unicode escaped ``value`` if it was ``bytes``; otherwise, + ``value`` is returned. + """ + if isinstance(value, str): + return value + + return value.decode('unicode_escape') diff --git a/pook/matchers/headers.py b/pook/matchers/headers.py index 7c3cbd0..fb76a61 100644 --- a/pook/matchers/headers.py +++ b/pook/matchers/headers.py @@ -1,4 +1,5 @@ from .base import BaseMatcher +from ..headers import to_string_value class HeadersMatcher(BaseMatcher): @@ -15,7 +16,9 @@ class HeadersMatcher(BaseMatcher): def match(self, req): for key in self.expectation: # Retrieve value to match - value = self.expectation[key] + # Cast it to a string that can be compared + # If it is already a string ``to_string_value`` is a noop + value = to_string_value(self.expectation[key]) # Retrieve header value by key header = req.headers.get(key)
h2non/pook
9bb2eaecf62d8dd048973690281c12da00e434ce
diff --git a/tests/unit/matchers/headers_test.py b/tests/unit/matchers/headers_test.py index e69de29..c079087 100644 --- a/tests/unit/matchers/headers_test.py +++ b/tests/unit/matchers/headers_test.py @@ -0,0 +1,97 @@ +import pytest + +import pook + + [email protected]( + ('expected', 'requested', 'should_match'), + ( + pytest.param( + {'Content-Type': b'application/pdf'}, + {'Content-Type': b'application/pdf'}, + True, + id='Matching binary headers' + ), + pytest.param( + { + 'Content-Type': b'application/pdf', + 'Authentication': 'Bearer 123abc', + }, + { + 'Content-Type': b'application/pdf', + 'Authentication': 'Bearer 123abc', + }, + True, + id='Matching mixed headers' + ), + pytest.param( + {'Authentication': 'Bearer 123abc'}, + {'Authentication': 'Bearer 123abc'}, + True, + id='Matching string headers' + ), + pytest.param( + {'Content-Type': b'application/pdf'}, + { + 'Content-Type': b'application/pdf', + 'Authentication': 'Bearer 123abc', + }, + True, + id='Non-matching asymetric mixed headers' + ), + pytest.param( + {'Content-Type': b'application/pdf'}, + {'Content-Type': 'application/pdf'}, + True, + id='Non-matching header types (matcher binary, request string)' + ), + pytest.param( + {'Content-Type': 'application/pdf'}, + {'Content-Type': b'application/pdf'}, + True, + id='Non-matching header types (matcher string, request binary)' + ), + pytest.param( + {'Content-Type': 'application/pdf'}, + {'Content-Type': 'application/xml'}, + False, + id='Non-matching values' + ), + pytest.param( + {'content-type': 'application/pdf'}, + {'Content-Type': 'application/pdf'}, + True, + id='Non-matching field name casing' + ), + pytest.param( + {}, + {'Content-Type': 'application/pdf'}, + True, + id='Missing matcher header' + ), + pytest.param( + {'Content-Type': 'application/pdf'}, + {}, + False, + id='Missing request header' + ), + pytest.param( + {'Content-Type': 'application/pdf'.encode('utf-16')}, + {'Content-Type': 'application/pdf'.encode('utf-16')}, + True, + id='Arbitrary field value encoding' + ), + ) +) +def test_headers_matcher(expected, requested, should_match): + mock = pook.get('https://example.com') + if expected: + mock.headers(expected) + + request = pook.Request() + request.url = 'https://example.com' + if requested: + request.headers = requested + + matched, explanation = mock.match(request) + assert matched == should_match, explanation
pook can't handle binary headers If a response header value is binary, for example `('Content-Type', b'application/pdf')`, pook expects a string when iterating over the headers and throws an error. The problem is in `HTTPHeaderDict.itermerged`.
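A small sketch of the casting rule the fix introduces: bytes values are unicode-escaped back to `str` before header values are merged, so mixed `str`/`bytes` headers no longer raise; `to_string_value` below reproduces the helper from the patch:

```python
def to_string_value(value):
    # str values pass through; bytes are decoded via unicode escaping so
    # non-ASCII bytes still yield a readable, comparable string.
    if isinstance(value, str):
        return value
    return value.decode('unicode_escape')

values = [b'application/pdf', 'application/json']
assert ', '.join(to_string_value(v) for v in values) == 'application/pdf, application/json'
```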
0.0
9bb2eaecf62d8dd048973690281c12da00e434ce
[ "tests/unit/matchers/headers_test.py::test_headers_matcher[Matching", "tests/unit/matchers/headers_test.py::test_headers_matcher[Non-matching", "tests/unit/matchers/headers_test.py::test_headers_matcher[Arbitrary" ]
[ "tests/unit/matchers/headers_test.py::test_headers_matcher[Missing" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-10-27 05:54:10+00:00
mit
2,687
h2non__pook-97
diff --git a/pook/assertion.py b/pook/assertion.py index 493b065..152f1e6 100644 --- a/pook/assertion.py +++ b/pook/assertion.py @@ -30,8 +30,6 @@ def equal(x, y): """ return test_case().assertEqual(x, y) or True - assert x == y - def matches(x, y, regex_expr=False): """ @@ -52,9 +50,6 @@ def matches(x, y, regex_expr=False): # Parse regex expression, if needed x = strip_regex(x) if regex_expr and isregex_expr(x) else x - # Run regex assertion - # Retrieve original regex pattern - x = x.pattern if isregex(x) else x # Assert regular expression via unittest matchers return test_case().assertRegex(y, x) or True diff --git a/pook/headers.py b/pook/headers.py index 26c9d38..0cb1213 100644 --- a/pook/headers.py +++ b/pook/headers.py @@ -237,7 +237,7 @@ class HTTPHeaderDict(MutableMapping): def to_string_value(value): """ - Retrieve a string value for arbitrary header field value. + Retrieve a string value for an arbitrary value. HTTP header values are specified as ASCII strings. However, the specificiation also states that non-ASCII bytes should be @@ -247,15 +247,15 @@ def to_string_value(value): obscure the input, like base 64). Arguments:: - value (str|bytes): + value (mixed): The value to cast to ``str``. Returns:: str: Unicode escaped ``value`` if it was ``bytes``; otherwise, - ``value`` is returned. + ``value`` is returned, cast through ``str``. """ - if isinstance(value, str): - return value + if hasattr(value, "decode"): + return value.decode("unicode_escape") - return value.decode('unicode_escape') + return str(value) diff --git a/pook/matchers/headers.py b/pook/matchers/headers.py index fb76a61..1d1b325 100644 --- a/pook/matchers/headers.py +++ b/pook/matchers/headers.py @@ -1,5 +1,6 @@ from .base import BaseMatcher from ..headers import to_string_value +from ..regex import Pattern class HeadersMatcher(BaseMatcher): @@ -15,16 +16,41 @@ class HeadersMatcher(BaseMatcher): @BaseMatcher.matcher def match(self, req): for key in self.expectation: - # Retrieve value to match - # Cast it to a string that can be compared - # If it is already a string ``to_string_value`` is a noop - value = to_string_value(self.expectation[key]) + assert key in req.headers, f"Header '{key}' not present" + + expected_value = self.to_comparable_value(self.expectation[key]) # Retrieve header value by key - header = req.headers.get(key) + actual_value = req.headers.get(key) + + assert not all([ + expected_value is not None, + actual_value is None, + ]), ( + f"Expected a value `{expected_value}` " + f"for '{key}' but found `None`" + ) # Compare header value - if not self.compare(value, header, regex_expr=True): + if not self.compare(expected_value, actual_value, regex_expr=True): return False return True + + def to_comparable_value(self, value): + """ + Return a comparable version of ``value``. + + Arguments: + value (mixed): the value to cast. 
+ + Returns: + str|re.Pattern|None + """ + if isinstance(value, (str, Pattern)): + return value + + if value is None: + return value + + return to_string_value(value) diff --git a/pook/regex.py b/pook/regex.py index 8ec84c3..2559ebd 100644 --- a/pook/regex.py +++ b/pook/regex.py @@ -1,7 +1,10 @@ import re +import sys -# Little hack to extra the regexp object type at runtime -retype = type(re.compile('')) +if sys.version_info < (3, 7): + Pattern = type(re.compile('')) +else: + Pattern = re.Pattern def isregex_expr(expr): @@ -38,7 +41,7 @@ def isregex(value): """ if not value: return False - return any((isregex_expr(value), isinstance(value, retype))) + return any((isregex_expr(value), isinstance(value, Pattern))) def strip_regex(expr): @@ -52,4 +55,4 @@ def strip_regex(expr): Returns: str """ - return expr.replace[3:-1] if isregex_expr(expr) else expr + return expr[3:-1] if isregex_expr(expr) else expr
h2non/pook
253ad95c518e6c6b3dcde66a25fbd6775b6378ba
diff --git a/tests/unit/matchers/headers_test.py b/tests/unit/matchers/headers_test.py index c079087..cb4cec3 100644 --- a/tests/unit/matchers/headers_test.py +++ b/tests/unit/matchers/headers_test.py @@ -1,15 +1,15 @@ import pytest +import re import pook @pytest.mark.parametrize( - ('expected', 'requested', 'should_match'), + ('expected', 'requested'), ( pytest.param( {'Content-Type': b'application/pdf'}, {'Content-Type': b'application/pdf'}, - True, id='Matching binary headers' ), pytest.param( @@ -21,13 +21,11 @@ import pook 'Content-Type': b'application/pdf', 'Authentication': 'Bearer 123abc', }, - True, id='Matching mixed headers' ), pytest.param( {'Authentication': 'Bearer 123abc'}, {'Authentication': 'Bearer 123abc'}, - True, id='Matching string headers' ), pytest.param( @@ -36,54 +34,130 @@ import pook 'Content-Type': b'application/pdf', 'Authentication': 'Bearer 123abc', }, - True, id='Non-matching asymetric mixed headers' ), pytest.param( {'Content-Type': b'application/pdf'}, {'Content-Type': 'application/pdf'}, - True, id='Non-matching header types (matcher binary, request string)' ), pytest.param( {'Content-Type': 'application/pdf'}, {'Content-Type': b'application/pdf'}, - True, id='Non-matching header types (matcher string, request binary)' ), - pytest.param( - {'Content-Type': 'application/pdf'}, - {'Content-Type': 'application/xml'}, - False, - id='Non-matching values' - ), pytest.param( {'content-type': 'application/pdf'}, {'Content-Type': 'application/pdf'}, - True, id='Non-matching field name casing' ), pytest.param( {}, {'Content-Type': 'application/pdf'}, - True, id='Missing matcher header' ), + pytest.param( + {'Content-Type': 'application/pdf'.encode('utf-16')}, + {'Content-Type': 'application/pdf'.encode('utf-16')}, + id='Arbitrary field value encoding' + ), + pytest.param( + {'Content-Type': 're/json/'}, + {'Content-Type': 'application/json'}, + id="Regex-format str expectation" + ), + pytest.param( + {'Content-Type': re.compile("json", re.I)}, + {'Content-Type': 'APPLICATION/JSON'}, + id="Regex pattern expectation", + ) + ) +) +def test_headers_matcher_matching(expected, requested): + mock = pook.get('https://example.com') + if expected: + mock.headers(expected) + + request = pook.Request() + request.url = 'https://example.com' + if requested: + request.headers = requested + + matched, explanation = mock.match(request) + assert matched, explanation + + [email protected]( + ("expected", "requested", "explanation"), + ( pytest.param( {'Content-Type': 'application/pdf'}, {}, - False, - id='Missing request header' + ["HeadersMatcher: Header 'Content-Type' not present"], + id='Missing request header str expectation', ), pytest.param( - {'Content-Type': 'application/pdf'.encode('utf-16')}, - {'Content-Type': 'application/pdf'.encode('utf-16')}, - True, - id='Arbitrary field value encoding' + {'Content-Type': b'application/pdf'}, + {}, + ["HeadersMatcher: Header 'Content-Type' not present"], + id='Missing request header bytes expectation', + ), + pytest.param( + {'Content-Type': 'application/pdf'}, + {'Content-Type': 'application/xml'}, + [ + ( + "HeadersMatcher: 'application/pdf' != 'application/xml'\n" + "- application/pdf\n" + "? ^^^\n" + "+ application/xml\n" + "? ^^^\n" + ) + ], + id='Non-matching values, matching types', ), + pytest.param( + {'Content-Type': 'application/pdf'}, + {'Content-Type': b'application/xml'}, + [ + ( + "HeadersMatcher: 'application/pdf' != 'application/xml'\n" + "- application/pdf\n" + "? ^^^\n" + "+ application/xml\n" + "? 
^^^\n" + ) + ], + id='Non-matching values, str expectation byte actual', + ), + pytest.param( + {'Content-Type': b'application/pdf'}, + {'Content-Type': 'application/xml'}, + [ + ( + "HeadersMatcher: 'application/pdf' != 'application/xml'\n" + "- application/pdf\n" + "? ^^^\n" + "+ application/xml\n" + "? ^^^\n" + ) + ], + id='Non-matching values, bytes expectation str actual', + ), + pytest.param( + {'Content-Type': "re/json/"}, + {'Content-Type': b"application/xml"}, + [ + ( + "HeadersMatcher: Regex didn't match: 'json' not found in " + "'application/xml'" + ) + ], + id='Non-matching values, re-format str expectation', + ) ) ) -def test_headers_matcher(expected, requested, should_match): +def test_headers_not_matching(expected, requested, explanation): mock = pook.get('https://example.com') if expected: mock.headers(expected) @@ -93,5 +167,77 @@ def test_headers_matcher(expected, requested, should_match): if requested: request.headers = requested + matched, actual_explanation = mock.match(request) + assert not matched + assert explanation == actual_explanation + + [email protected]( + ("required_headers", "requested_headers", "should_match"), + ( + pytest.param( + ["content-type", "Authorization"], + { + "Content-Type": "", + "authorization": "Bearer NOT A TOKEN", + }, + True, + id="case-insensitive-match-with-empty-value" + ), + pytest.param( + ["content-type", "Authorization"], + { + "Content-Type": "application/json", + "authorization": "Bearer NOT A TOKEN", + }, + True, + id="case-insensitive-match-with-non-empty-values" + ), + pytest.param( + ["x-requested-with"], + { + "content-type": "application/json", + }, + False, + id="x-header-missing-with-other-headers" + ), + pytest.param( + ["x-requested-with"], + {}, + False, + id="x-header-no-headers", + ), + pytest.param( + ["content-type"], + {}, + False, + id="no-headers", + ), + pytest.param( + ["x-requested-with"], + {"x-requested-with": "com.example.app"}, + True, + id="x-header-with-value" + ), + pytest.param( + ["x-requested-with"], + {"x-requested-with": ""}, + True, + id="x-header-with-empty-value" + ), + ) +) +def test_headers_present(required_headers, requested_headers, should_match): + mock = pook.get('https://example.com').headers_present(required_headers) + + request = pook.Request() + request.url = 'https://example.com' + request.headers = requested_headers + matched, explanation = mock.match(request) assert matched == should_match, explanation + + +def test_headers_present_empty_headers(): + with pytest.raises(ValueError): + pook.get('https://example.com').headers_present([])
`HeadersMatcher: 're.Pattern' object has no attribute 'decode'` with pook 1.2.0

Not sure if actionable, but after upgrading from 1.1.1 to 1.2.0, I see this new error on a test where pook can no longer match on the request headers. No additional details or stack trace, unfortunately.

```
pook error!

=> Cannot match any mock for the following request:

==================================================
Method: POST
URL: https://foo.bar
Headers: HTTPHeaderDict({})
Body: {}}
==================================================

=> Detailed matching errors:
HeadersMatcher: 're.Pattern' object has no attribute 'decode'
```

The test uses `headers` and `headers_present`, if that helps:

```py
pook.post(
    url,
    reply=200,
    headers={"content-type": "application/json"},
    headers_present=["foo"],
    response_json={},
)
```
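The error suggests the 1.2.0 matcher calls `.decode()` on every expected header value, which compiled regex patterns do not support. A minimal sketch — not pook's actual implementation — of comparing a header value against a str, bytes, or compiled-regex expectation without that assumption:

```python
import re


def header_matches(expected, actual: str) -> bool:
    """Compare one header value against an expectation that may be a
    str, bytes, or compiled regex, without calling .decode() blindly."""
    if isinstance(expected, re.Pattern):
        # A compiled pattern has no .decode(); search it directly.
        return expected.search(actual) is not None
    if isinstance(expected, bytes):
        # Only bytes expectations need decoding before comparison.
        expected = expected.decode("utf-8")
    return expected == actual


assert header_matches(re.compile("json", re.I), "APPLICATION/JSON")
assert header_matches(b"application/pdf", "application/pdf")
assert not header_matches("application/pdf", "application/xml")
```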
0.0
253ad95c518e6c6b3dcde66a25fbd6775b6378ba
[ "tests/unit/matchers/headers_test.py::test_headers_matcher_matching[Regex-format", "tests/unit/matchers/headers_test.py::test_headers_matcher_matching[Regex", "tests/unit/matchers/headers_test.py::test_headers_not_matching[Missing", "tests/unit/matchers/headers_test.py::test_headers_not_matching[Non-matching", "tests/unit/matchers/headers_test.py::test_headers_present[case-insensitive-match-with-empty-value]", "tests/unit/matchers/headers_test.py::test_headers_present[case-insensitive-match-with-non-empty-values]", "tests/unit/matchers/headers_test.py::test_headers_present[x-header-with-value]", "tests/unit/matchers/headers_test.py::test_headers_present[x-header-with-empty-value]" ]
[ "tests/unit/matchers/headers_test.py::test_headers_matcher_matching[Matching", "tests/unit/matchers/headers_test.py::test_headers_matcher_matching[Non-matching", "tests/unit/matchers/headers_test.py::test_headers_matcher_matching[Missing", "tests/unit/matchers/headers_test.py::test_headers_matcher_matching[Arbitrary", "tests/unit/matchers/headers_test.py::test_headers_present[x-header-missing-with-other-headers]", "tests/unit/matchers/headers_test.py::test_headers_present[x-header-no-headers]", "tests/unit/matchers/headers_test.py::test_headers_present[no-headers]", "tests/unit/matchers/headers_test.py::test_headers_present_empty_headers" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2023-12-23 01:22:02+00:00
mit
2,688
hCaptcha__hmt-basemodels-47
diff --git a/basemodels/manifest/data/preprocess.py b/basemodels/manifest/data/preprocess.py new file mode 100644 index 0000000..8ba5827 --- /dev/null +++ b/basemodels/manifest/data/preprocess.py @@ -0,0 +1,12 @@ +from schematics.models import Model +from schematics.types import StringType, DictType, UnionType, IntType, FloatType + +class Preprocess(Model): + pipeline = StringType(required=True,choices=["FaceBlurPipeline"]) + config = DictType(UnionType([FloatType, IntType, StringType])) + + def to_dict(self): + p = { "pipeline": self.pipeline } + if self.config is not None: + p["config"] = self.config + return p \ No newline at end of file diff --git a/basemodels/manifest/manifest.py b/basemodels/manifest/manifest.py index ba082d7..0da9ca2 100644 --- a/basemodels/manifest/manifest.py +++ b/basemodels/manifest/manifest.py @@ -9,6 +9,7 @@ from schematics.types import StringType, DecimalType, BooleanType, IntType, Dict from .data.groundtruth import validate_groundtruth_entry from .data.taskdata import validate_taskdata_entry +from .data.preprocess import Preprocess BASE_JOB_TYPES = [ "image_label_binary", diff --git a/basemodels/pydantic/__init__.py b/basemodels/pydantic/__init__.py index 66c6f10..fb3f851 100644 --- a/basemodels/pydantic/__init__.py +++ b/basemodels/pydantic/__init__.py @@ -1,3 +1,4 @@ from .manifest import validate_manifest_uris, Manifest, NestedManifest, RequestConfig, TaskData, Webhook from .manifest.data import validate_taskdata_entry, validate_groundtruth_entry from .via import ViaDataManifest +from .manifest.data.preprocess import Pipeline, Preprocess \ No newline at end of file diff --git a/basemodels/pydantic/manifest/data/preprocess.py b/basemodels/pydantic/manifest/data/preprocess.py new file mode 100644 index 0000000..32f8667 --- /dev/null +++ b/basemodels/pydantic/manifest/data/preprocess.py @@ -0,0 +1,16 @@ +import enum +import typing +import pydantic + +class Pipeline(str, enum.Enum): + FaceBlurPipeline = 'FaceBlurPipeline' + +class Preprocess(pydantic.BaseModel): + pipeline: Pipeline + config: typing.Optional[dict] + + def to_dict(self): + p = { "pipeline": self.pipeline.value } + if self.config is not None: + p["config"] = self.config + return p \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 1fbdcc4..8156772 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "hmt-basemodels" -version = "0.1.1" +version = "0.1.2" description = "" authors = ["Intuition Machines, Inc <[email protected]>"] packages = [ diff --git a/setup.py b/setup.py index 4ff40fd..264080f 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ import setuptools setuptools.setup( name="hmt-basemodels", - version="0.1.1", + version="0.1.2", author="HUMAN Protocol", description="Common data models shared by various components of the Human Protocol stack", url="https://github.com/hCaptcha/hmt-basemodels",
hCaptcha/hmt-basemodels
86c71b032a082fc86b14ff885989592aab015666
diff --git a/tests/test_preprocess.py b/tests/test_preprocess.py new file mode 100644 index 0000000..3f3776b --- /dev/null +++ b/tests/test_preprocess.py @@ -0,0 +1,41 @@ +import unittest + +from schematics.exceptions import DataError +from basemodels.manifest import Preprocess + +class PipelineTest(unittest.TestCase): + def test_preprocess(self): + config = {} + p = Preprocess({"pipeline": "FaceBlurPipeline", "config": config}) + + self.assertEqual(p.pipeline, "FaceBlurPipeline") + self.assertEqual(p.config, config) + + p = Preprocess({"pipeline": "FaceBlurPipeline"}) + + self.assertIsNone(p.config) + + + def test_preprocess_raise(self): + with self.assertRaises(DataError): + Preprocess().validate() + + with self.assertRaises(DataError): + Preprocess({"pipeline": ""}).validate() + + with self.assertRaises(DataError): + Preprocess({"pipeline": "FaceBlurPipeline", "config": 1}).validate() + + + def test_preprocess_to_dict(self): + config = { "radius": 3 } + p = Preprocess({"pipeline": "FaceBlurPipeline", "config": config}) + + self.assertEqual(p.to_dict(), { "pipeline": "FaceBlurPipeline", "config": config }) + + p = Preprocess({"pipeline": "FaceBlurPipeline"}) + + self.assertEqual(p.to_dict(), { "pipeline": "FaceBlurPipeline" }) + + + diff --git a/tests/test_pydantic_preprocess.py b/tests/test_pydantic_preprocess.py new file mode 100644 index 0000000..4731b35 --- /dev/null +++ b/tests/test_pydantic_preprocess.py @@ -0,0 +1,41 @@ +import unittest + +from pydantic.error_wrappers import ValidationError +from basemodels.pydantic import Preprocess, Pipeline + +class PipelineTest(unittest.TestCase): + def test_preprocess(self): + config = {} + p = Preprocess(pipeline=Pipeline.FaceBlurPipeline, config=config) + + self.assertEqual(p.pipeline, Pipeline.FaceBlurPipeline) + self.assertEqual(p.config, config) + + p = Preprocess(pipeline=Pipeline.FaceBlurPipeline) + + self.assertIsNone(p.config) + + + def test_preprocess_raise(self): + with self.assertRaises(ValidationError): + Preprocess() + + with self.assertRaises(ValidationError): + Preprocess(pipeline="") + + with self.assertRaises(ValidationError): + Preprocess(pipeline=Pipeline.FaceBlurPipeline, config=1) + + + def test_preprocess_to_dict(self): + config = { "radius": 3 } + p = Preprocess(pipeline=Pipeline.FaceBlurPipeline, config=config) + + self.assertEqual(p.to_dict(), { "pipeline": Pipeline.FaceBlurPipeline.value, "config": config }) + + p = Preprocess(pipeline=Pipeline.FaceBlurPipeline) + + self.assertEqual(p.to_dict(), { "pipeline": Pipeline.FaceBlurPipeline.value }) + + +
add preprocessing schema

Add a basic preprocessing schema for the labeling request's image preprocessing configuration.
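Based on the pydantic variant added in the patch above, usage of the new model might look like this (assuming pydantic v1, where `Optional` fields default to `None` and strings are coerced to the enum):

```python
import enum
import typing

import pydantic


class Pipeline(str, enum.Enum):
    FaceBlurPipeline = "FaceBlurPipeline"


class Preprocess(pydantic.BaseModel):
    pipeline: Pipeline
    config: typing.Optional[dict]

    def to_dict(self):
        # Serialize with the plain enum value; omit config when unset.
        p = {"pipeline": self.pipeline.value}
        if self.config is not None:
            p["config"] = self.config
        return p


p = Preprocess(pipeline=Pipeline.FaceBlurPipeline, config={"radius": 3})
assert p.to_dict() == {"pipeline": "FaceBlurPipeline", "config": {"radius": 3}}
assert Preprocess(pipeline="FaceBlurPipeline").config is None
```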
0.0
86c71b032a082fc86b14ff885989592aab015666
[ "tests/test_preprocess.py::PipelineTest::test_preprocess", "tests/test_preprocess.py::PipelineTest::test_preprocess_raise", "tests/test_preprocess.py::PipelineTest::test_preprocess_to_dict", "tests/test_pydantic_preprocess.py::PipelineTest::test_preprocess", "tests/test_pydantic_preprocess.py::PipelineTest::test_preprocess_raise", "tests/test_pydantic_preprocess.py::PipelineTest::test_preprocess_to_dict" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-03-29 16:27:22+00:00
mit
2,689
hackebrot__labels-10
diff --git a/src/labels/cli.py b/src/labels/cli.py index 96c9a85..acf2150 100644 --- a/src/labels/cli.py +++ b/src/labels/cli.py @@ -6,7 +6,7 @@ import typing import click from requests.auth import HTTPBasicAuth -from labels import __version__ +from labels import __version__, utils from labels.exceptions import LabelsException from labels.github import Client, Label from labels.io import write_labels, read_labels @@ -50,8 +50,8 @@ def labels(ctx, username: str, token: str, verbose: bool) -> None: @labels.command("fetch") @click.pass_obj [email protected]("-o", "--owner", help="GitHub owner name", type=str, required=True) [email protected]("-r", "--repo", help="GitHub repository name", type=str, required=True) [email protected]("-o", "--owner", help="GitHub owner name", type=str) [email protected]("-r", "--repo", help="GitHub repository name", type=str) @click.option( "-f", "--filename", @@ -60,13 +60,22 @@ def labels(ctx, username: str, token: str, verbose: bool) -> None: type=click.Path(), required=True, ) -def fetch_cmd(client: Client, owner: str, repo: str, filename: str) -> None: +def fetch_cmd( + client: Client, + owner: typing.Optional[str], + repo: typing.Optional[str], + filename: str +) -> None: """Fetch labels for a GitHub repository. This will write the labels information to disk to the specified filename. """ try: - labels = client.list_labels(owner, repo) + inferred_owner, inferred_repo = utils.get_owner_and_repo_from_cwd() + labels = client.list_labels( + owner or inferred_owner, + repo or inferred_repo + ) except LabelsException as exc: click.echo(str(exc)) sys.exit(1) @@ -79,8 +88,8 @@ def fetch_cmd(client: Client, owner: str, repo: str, filename: str) -> None: @labels.command("sync") @click.pass_obj [email protected]("-o", "--owner", help="GitHub owner name", type=str, required=True) [email protected]("-r", "--repo", help="GitHub repository name", type=str, required=True) [email protected]("-o", "--owner", help="GitHub owner name", type=str) [email protected]("-r", "--repo", help="GitHub repository name", type=str) @click.option("-n", "--dryrun", help="Do not modify remote labels", is_flag=True) @click.option( "-f", @@ -91,7 +100,11 @@ def fetch_cmd(client: Client, owner: str, repo: str, filename: str) -> None: required=True, ) def sync_cmd( - client: Client, owner: str, repo: str, filename: str, dryrun: bool + client: Client, + owner: typing.Optional[str], + repo: typing.Optional[str], + filename: str, + dryrun: bool ) -> None: """Sync labels with a GitHub repository. @@ -105,6 +118,10 @@ def sync_cmd( local_labels = read_labels(filename) + inferred_owner, inferred_repo = utils.get_owner_and_repo_from_cwd() + owner = owner or inferred_owner + repo = repo or inferred_repo + try: remote_labels = {l.name: l for l in client.list_labels(owner, repo)} except LabelsException as exc: diff --git a/src/labels/utils.py b/src/labels/utils.py new file mode 100644 index 0000000..2b82057 --- /dev/null +++ b/src/labels/utils.py @@ -0,0 +1,21 @@ +import re +import subprocess +import typing + + +def get_owner_and_repo_from_cwd() -> typing.Tuple[str, str]: + """Return the owner and name of the remote named origin in the cwd.""" + origin_url = ( + subprocess.check_output(["git", "remote", "get-url", "origin"]).decode().strip() + ) + return _extract_o_and_r(origin_url) + + +def _extract_o_and_r(url: str) -> typing.Tuple[str, str]: + """Return the owner and repo name of a remote given its SSH or HTTPS url. 
+ + HTTPS url format -> 'https://github.com/user/repo.git' + SSH url format -> '[email protected]:user/repo.git' + """ + parts = re.split(r"[@/:.]+", url) + return (parts[-3], parts[-2])
hackebrot/labels
bddf3632de5f26f955362398a3b2e996a4450726
diff --git a/tests/conftest.py b/tests/conftest.py index 7de740e..8e569a5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,4 +1,7 @@ +import subprocess +import shutil import typing +from pathlib import Path import pytest import responses @@ -48,6 +51,44 @@ def fixture_repo() -> str: return "cookiecutter" [email protected](name="tmp_local_repo") +def fixture_tmp_local_repo(tmpdir, owner: str, repo: str) -> None: + """Return a temporary local git repository. + + Mocks a repository cloned from + https://github.com/audreyr/cookiecutter.git + and within which a labels file for the sync test is created + ./tests/sync.toml + """ + subprocess.call( + [ + "git", + "-C", + str(tmpdir), + "init" + ] + ) + subprocess.call( + [ + "git", + "-C", + str(tmpdir), + "remote", + "add", + "origin", + f"https://github.com/{owner}/{repo}.git" + ] + ) + + # copy labels file for the sync test to the directory + tmp = Path(str(tmpdir), "tests") + tmp.mkdir(exist_ok=True) + perm = Path(__file__).parent.joinpath("sync.toml") + shutil.copy(perm, tmp) + + return tmpdir + + @pytest.fixture(name="response_get_bug") def fixture_response_get_bug(base_url: str, owner: str, repo: str) -> Response_Label: """Return a dict respresenting the GitHub API response body for the bug diff --git a/tests/test_cli.py b/tests/test_cli.py index ff87c0e..7eafe8b 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -37,6 +37,67 @@ def test_fetch( assert result.exit_code == 0 [email protected]("mock_list_labels") +def test_fetch_without_owner_option( + run_cli: typing.Callable, repo: str, labels_file_write: str, tmp_local_repo +) -> None: + """Test for the CLI fetch command without -o option supplied.""" + with tmp_local_repo.as_cwd(): + result = run_cli( + "-u", + "hackebrot", + "-t", + "1234", + "fetch", + "-r", + repo, + "-f", + labels_file_write, + ) + + assert result.exit_code == 0 + + [email protected]("mock_list_labels") +def test_fetch_without_repo_option( + run_cli: typing.Callable, owner: str, labels_file_write: str, tmp_local_repo +) -> None: + """Test for the CLI fetch command without -r option supplied.""" + with tmp_local_repo.as_cwd(): + result = run_cli( + "-u", + "hackebrot", + "-t", + "1234", + "fetch", + "-o", + owner, + "-f", + labels_file_write, + ) + + assert result.exit_code == 0 + + [email protected]("mock_list_labels") +def test_fetch_without_owner_and_repo_options( + run_cli: typing.Callable, labels_file_write: str, tmp_local_repo +) -> None: + """Test for the CLI fetch command without -o and -r options supplied.""" + with tmp_local_repo.as_cwd(): + result = run_cli( + "-u", + "hackebrot", + "-t", + "1234", + "fetch", + "-f", + labels_file_write, + ) + + assert result.exit_code == 0 + + @pytest.mark.usefixtures("mock_sync") def test_sync( run_cli: typing.Callable, owner: str, repo: str, labels_file_sync: str @@ -61,6 +122,70 @@ def test_sync( assert result.output == "" [email protected]("mock_sync") +def test_sync_without_owner_option( + run_cli: typing.Callable, repo: str, labels_file_sync: str, tmp_local_repo +) -> None: + """Test for the CLI sync command without the -o option supplied.""" + with tmp_local_repo.as_cwd(): + result = run_cli( + "-u", + "hackebrot", + "-t", + "1234", + "sync", + "-r", + repo, + "-f", + labels_file_sync, + ) + + assert result.exit_code == 0 + assert result.output == "" + + [email protected]("mock_sync") +def test_sync_without_repo_option( + run_cli: typing.Callable, owner: str, labels_file_sync: str, tmp_local_repo +) -> None: + """Test for the CLI sync command 
without the -r option supplied.""" + with tmp_local_repo.as_cwd(): + result = run_cli( + "-u", + "hackebrot", + "-t", + "1234", + "sync", + "-o", + owner, + "-f", + labels_file_sync, + ) + + assert result.exit_code == 0 + assert result.output == "" + + [email protected]("mock_sync") +def test_sync_without_owner_and_repo_options( + run_cli: typing.Callable, labels_file_sync: str, tmp_local_repo +) -> None: + """Test for the CLI sync command without the -o and -r options supplied.""" + with tmp_local_repo.as_cwd(): + result = run_cli( + "-u", + "hackebrot", + "-t", + "1234", + "sync", + "-f", + labels_file_sync, + ) + + assert result.exit_code == 0 + assert result.output == "" + + @pytest.mark.usefixtures("mock_list_labels") def test_sync_dryrun( run_cli: typing.Callable, owner: str, repo: str, labels_file_sync: str diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py new file mode 100644 index 0000000..0bcd24e --- /dev/null +++ b/tests/test_fixtures.py @@ -0,0 +1,39 @@ +import subprocess +from pathlib import Path + + +def test_fixture_tmp_local_repo(tmp_local_repo, owner: str, repo: str) -> None: + """Test that the tmp_local_repo fixture mocks a git repo cloned from + https://github.com/audreyr/cookiecutter.git + """ + _origin_url = subprocess.check_output( + ["git", "-C", str(tmp_local_repo), "remote", "get-url", "origin"] + ) + got_url = _origin_url.strip().decode() + + expected_url = f"https://github.com/{owner}/{repo}.git" + + assert expected_url == got_url + + +def test_fixture_tmp_local_repo_contains_sync_file( + tmp_local_repo, labels_file_sync: str +) -> None: + """Test that labels file for the sync test exists in the temp_local_repo fixture. + """ + sync_file = Path(str(tmp_local_repo), labels_file_sync) + + assert sync_file.exists() + assert sync_file.is_file() + + +def test_sync_file_in_tmp_local_repo_is_appropriately_populated( + tmp_local_repo, labels_file_sync: str +) -> None: + """Test that the labels file in the temporary directory is an + exact copy of the permanent labels file at labels/tests/sync.toml + """ + with Path(str(tmp_local_repo), labels_file_sync).open() as f_tmp: + with Path(__file__).parent.joinpath("sync.toml").open() as f_perm: + + assert f_tmp.readlines() == f_perm.readlines() diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..b5a53a1 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,33 @@ +from labels import utils + + +def test_get_owner_and_repo_from_cwd(tmp_local_repo, owner: str, repo: str) -> None: + """Test that repo owner and name can be inferred from the + local git repo in the current working directory. 
+ """ + with tmp_local_repo.as_cwd(): + assert utils.get_owner_and_repo_from_cwd() == (owner, repo) + + +def test_extract_o_and_r_from_remote_https_url() -> None: + """Test extraction of owner and repo names from HTTPS remote url string.""" + remote_url = "https://github.com/hackebrot/pytest-covfefe.git" + expected_owner = "hackebrot" + expected_repo = "pytest-covfefe" + + gotten_owner, gotten_repo = utils._extract_o_and_r(remote_url) + + assert gotten_owner == expected_owner + assert gotten_repo == expected_repo + + +def test_extract_o_and_r_from_remote_ssh_url() -> None: + """Test extraction of owner and repo names from SSH remote url string.""" + remote_url = "[email protected]:hackebrot/pytest-covfefe.git" + expected_owner = "hackebrot" + expected_repo = "pytest-covfefe" + + gotten_owner, gotten_repo = utils._extract_o_and_r(remote_url) + + assert gotten_owner == expected_owner + assert gotten_repo == expected_repo
Infer labels owner and repo from git in working directory

Currently fetching and syncing labels with a GitHub repository requires the ``--repo`` and ``--owner`` CLI options. I think it would be awesome if we could infer this information from the current working directory and the corresponding Git repository 😃

```text
labels fetch
```

```text
labels sync
```
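The merged patch implements this by parsing the `origin` remote URL of the repository in the current working directory; a self-contained sketch of the same idea:

```python
import re
import subprocess
import typing


def owner_and_repo_from_cwd() -> typing.Tuple[str, str]:
    """Read the 'origin' remote of the repository in the cwd and split
    its URL into (owner, repo)."""
    url = (
        subprocess.check_output(["git", "remote", "get-url", "origin"])
        .decode()
        .strip()
    )
    # Both 'https://github.com/user/repo.git' and
    # '[email protected]:user/repo.git' split into [..., user, repo, 'git'].
    parts = re.split(r"[@/:.]+", url)
    return parts[-3], parts[-2]
```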
0.0
bddf3632de5f26f955362398a3b2e996a4450726
[ "tests/test_fixtures.py::test_fixture_tmp_local_repo_contains_sync_file", "tests/test_fixtures.py::test_sync_file_in_tmp_local_repo_is_appropriately_populated", "tests/test_fixtures.py::test_fixture_tmp_local_repo", "tests/test_utils.py::test_extract_o_and_r_from_remote_ssh_url", "tests/test_utils.py::test_extract_o_and_r_from_remote_https_url", "tests/test_utils.py::test_get_owner_and_repo_from_cwd", "tests/test_cli.py::test_fetch_without_repo_option", "tests/test_cli.py::test_version_option[-V]", "tests/test_cli.py::test_fetch_without_owner_and_repo_options", "tests/test_cli.py::test_sync_without_owner_and_repo_options", "tests/test_cli.py::test_fetch_without_owner_option", "tests/test_cli.py::test_sync_without_repo_option", "tests/test_cli.py::test_sync", "tests/test_cli.py::test_sync_dryrun", "tests/test_cli.py::test_fetch", "tests/test_cli.py::test_sync_without_owner_option", "tests/test_cli.py::test_version_option[--version]" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-10-26 17:06:17+00:00
mit
2,690
haddocking__arctic3d-225
diff --git a/src/arctic3d/cli.py b/src/arctic3d/cli.py index b0e4333..ea1b0a4 100644 --- a/src/arctic3d/cli.py +++ b/src/arctic3d/cli.py @@ -264,7 +264,7 @@ def main( if pdb_f is None: log.error( "Could not retrieve a valid PDB for the target, please provide" - " one using the --pdb option" + " one as the main input argument." ) sys.exit() diff --git a/src/arctic3d/modules/interface_matrix.py b/src/arctic3d/modules/interface_matrix.py index 79204fe..fd64777 100644 --- a/src/arctic3d/modules/interface_matrix.py +++ b/src/arctic3d/modules/interface_matrix.py @@ -91,7 +91,7 @@ def get_coupling_matrix(mdu, int_resids): if u.positions.shape[0] != len(int_resids): raise Exception( "shape mismatch: positions do not match input residues" - " {int_resids}" + f" {int_resids}" ) distmap = cdist(u.positions, u.positions) exp_factor = 4 * SIGMA * SIGMA diff --git a/src/arctic3d/modules/pdb.py b/src/arctic3d/modules/pdb.py index 9bc154b..82b1dc7 100644 --- a/src/arctic3d/modules/pdb.py +++ b/src/arctic3d/modules/pdb.py @@ -9,6 +9,8 @@ from pdbecif.mmcif_io import MMCIF2Dict from pdbtools.pdb_selaltloc import select_by_occupancy from pdbtools.pdb_selchain import select_chain from pdbtools.pdb_tidy import tidy_pdbfile +from pdbtools.pdb_selmodel import select_model + from arctic3d.functions import make_request from arctic3d.modules.interface_matrix import filter_interfaces @@ -177,6 +179,36 @@ def renumber_pdb_from_cif(pdb_id, uniprot_id, chain_id, pdb_fname): return pdb_renum_fname, cif_fname +def fetch_pdb_files(pdb_to_fetch): + """ + Fetches the pdb files from PDBe database. + + Parameters + ---------- + pdb_to_fetch : list + list of pdb hits to fetch + + Returns + ------- + validated_pdbs : list + list of tuples (pdb_file, hit) + """ + validated_pdbs = [] + valid_pdb_set = set() # set of valid pdb IDs + for hit in pdb_to_fetch: + pdb_id = hit["pdb_id"] + pdb_fname = f"{pdb_id}.pdb" + if pdb_fname not in os.listdir(): + pdb_f = fetch_pdb(pdb_id) + else: + pdb_f = Path(pdb_fname) + if pdb_f is not None: + validated_pdbs.append((pdb_f, hit)) + if pdb_id not in valid_pdb_set: + valid_pdb_set.add(pdb_id) + return validated_pdbs + + def fetch_pdb(pdb_id): """ Fetches the pdb from PDBe database. @@ -227,7 +259,32 @@ def selchain_pdb(inp_pdb_f, chain): with open(out_pdb_fname, "w") as f: for line in select_chain(pdb_fh, chain): f.write(line) - f.write(line) + return out_pdb_fname + + +def selmodel_pdb(inp_pdb_f, model_id=1): + """ + Select model from PDB file. + + Parameters + ---------- + inp_pdb_f : Path + Path to PDB file. + model_id : int, optional + Model ID, by default 1 + + Returns + ------- + out_pdb_fname : Path + Path to PDB file. 
+ """ + # log.debug(f"Selecting model {model_id} from PDB file") + out_pdb_fname = Path(f"{inp_pdb_f.stem}-model{model_id}.pdb") + with open(inp_pdb_f, "r") as pdb_fh: + with open(out_pdb_fname, "w") as f: + line = "" + for line in select_model(pdb_fh, [model_id]): + f.write(line) return out_pdb_fname @@ -328,48 +385,38 @@ def validate_api_hit( validated_pdbs : list List of (pdb_f, hit) tuples """ - validated_pdbs = [] # list of good pdbs - valid_pdb_set = set() # set of valid pdb IDs - - for hit in fetch_list[:max_pdb_num]: - check_list = {} + pdbs_to_fetch = [] + for hit in fetch_list: + check_list = [] pdb_id = hit["pdb_id"] coverage = hit["coverage"] resolution = hit["resolution"] + exp_method = hit["experimental_method"] - pdb_fname = f"{pdb_id}.pdb" - if pdb_fname not in os.listdir(): - pdb_f = fetch_pdb(pdb_id) - else: - pdb_f = Path(pdb_fname) - - if pdb_f is not None: - check_list["pdb_f"] = True - else: - check_list["pdb_f"] = False - + # check coverage value if coverage > coverage_cutoff: - check_list["cov"] = True + check_list.append(True) else: - check_list["cov"] = False - + check_list.append(False) + # check resolution value if resolution is None: - check_list["res"] = False + if "NMR" in exp_method: + check_list.append(True) + else: + check_list.append(False) elif resolution < resolution_cutoff: - check_list["res"] = True + check_list.append(True) else: - check_list["res"] = False + check_list.append(False) - if all(check_list.values()): - validated_pdbs.append((pdb_f, hit)) - if pdb_id not in valid_pdb_set: - valid_pdb_set.add(pdb_id) + if all(check_list): + pdbs_to_fetch.append(hit) else: - log.debug(f"{pdb_id} failed validation ({check_list})") - # pdb_f could be None or the pdb (another chain) - # could be valid and the file should not be removed - if pdb_f is not None and pdb_id not in valid_pdb_set: - os.unlink(pdb_f) + log.debug(f"{pdb_id} failed validation") + log.info(f"Found {len(pdbs_to_fetch)} valid PDBs to fetch") + # downloading a list of good pdbs + validated_pdbs = fetch_pdb_files(pdbs_to_fetch[:max_pdb_num]) + log.info(f"Found {len(pdbs_to_fetch)} valid PDBs") return validated_pdbs @@ -389,7 +436,8 @@ def preprocess_pdb(pdb_fname, chain_id): tidy_pdb_f : Path preprocessed pdb file """ - atoms_pdb_f = keep_atoms(pdb_fname) + model_pdb_f = selmodel_pdb(pdb_fname) + atoms_pdb_f = keep_atoms(model_pdb_f) chained_pdb_f = selchain_pdb(atoms_pdb_f, chain_id) occ_pdb_f = occ_pdb(chained_pdb_f) tidy_pdb_f = tidy_pdb(occ_pdb_f) @@ -565,7 +613,6 @@ def get_best_pdb( return # if pdb_to_use is not None, already filter the list - if pdb_to_use: pdb_to_use = pdb_to_use.lower() if chain_to_use: @@ -591,8 +638,8 @@ def get_best_pdb( log.info( f"BestPDB hit for {uniprot_id}:" - f" {pdb_id}_{chain_id} {coverage:.2f} coverage" - f" {resolution:.2f} Angstrom / start {start} end {end}" + f" {pdb_id}_{chain_id} {coverage} coverage" + f" {resolution} Angstrom / start {start} end {end}" ) processed_pdb = pdb_f.rename(f"{uniprot_id}-{pdb_id}-{chain_id}.pdb")
haddocking/arctic3d
e7d631822f1853974bb241234685746b1c12964d
diff --git a/tests/test_pdb.py b/tests/test_pdb.py index 1f7e252..7851263 100644 --- a/tests/test_pdb.py +++ b/tests/test_pdb.py @@ -9,6 +9,7 @@ from arctic3d.modules.pdb import ( keep_atoms, occ_pdb, selchain_pdb, + selmodel_pdb, tidy_pdb, validate_api_hit, ) @@ -98,6 +99,15 @@ def good_hits(): return hits_list [email protected] +def example_interfaces(): + interfaces = { + "P01024": [103, 104, 105], + "P-dummy": [103, 104, 105, 1049, 1050], + } + return interfaces + + def test_selchain_pdb(inp_pdb): pdb = selchain_pdb(inp_pdb, "B") assert pdb.exists() @@ -122,10 +132,19 @@ def test_keep_atoms(inp_pdb): pdb.unlink() +def test_selmodel_pdb(inp_pdb): + pdb = selmodel_pdb(inp_pdb, "1") + assert pdb.exists() + pdb.unlink() + + def test_validate_api_hit(pdb_hit_no_resolution): + """Test validate_api_hit.""" validated_pdbs = validate_api_hit([pdb_hit_no_resolution]) - assert validated_pdbs == [] - + assert ( + validated_pdbs == [] + ) # this is empty because resolution is None and exp != NMR + # change resolution to 1.0 pdb_hit_no_resolution["resolution"] = 1.0 validated_pdbs = validate_api_hit([pdb_hit_no_resolution]) pdb, dict = validated_pdbs[0] @@ -133,12 +152,19 @@ def test_validate_api_hit(pdb_hit_no_resolution): assert dict == pdb_hit_no_resolution -def test_get_best_pdb(): - orig_interfaces = { - "P01024": [103, 104, 105], - "P-dummy": [103, 104, 105, 1049, 1050], - } - pdb, filtered_interfaces = get_best_pdb("P20023", orig_interfaces) +def test_validate_api_hit_nmr(pdb_hit_no_resolution): + """Test validate_api_hit with NMR data.""" + pdb_hit_no_resolution["experimental_method"] = "Solution NMR" + # NMR structures have no resolution but should be accepted + validated_pdbs = validate_api_hit([pdb_hit_no_resolution]) + pdb, dict = validated_pdbs[0] + assert pdb.name == "2gsx.pdb" + assert dict == pdb_hit_no_resolution + + +def test_get_best_pdb(example_interfaces): + """Test get_best_pdb.""" + pdb, filtered_interfaces = get_best_pdb("P20023", example_interfaces) exp_pdb = Path("P20023-1ghq-B.pdb") exp_interfaces = {"P01024": [103, 104, 105]} assert pdb == exp_pdb @@ -146,8 +172,8 @@ def test_get_best_pdb(): exp_pdb.unlink() -def test_get_maxint_pdb(): - """Test get_maxint_pdb.""" +def test_get_maxint_pdb_empty(): + """Test get_maxint_pdb with empty output.""" empty_validated_pdbs = [] pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( empty_validated_pdbs, {}, uniprot_id=None @@ -156,7 +182,17 @@ def test_get_maxint_pdb(): assert top_hit is None assert filtered_interfaces is None - # TODO: test the non-empty case as well + +def test_get_maxint_pdb(good_hits, example_interfaces): + """Test get_maxint_pdb.""" + validated_pdbs = validate_api_hit(good_hits) + pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( + validated_pdbs, example_interfaces, "P00760" + ) + assert pdb_f.name == "4xoj-model1-atoms-A-occ-tidy_renum.pdb" + assert top_hit["pdb_id"] == "4xoj" + assert top_hit["chain_id"] == "A" + assert filtered_interfaces == {"P01024": [103, 104, 105]} def test_filter_pdb_list(good_hits):
adjust error message when no PDB is found

The current error message when the program fails to retrieve the pdb is `Could not retrieve a valid PDB for the target, please provide one using the --pdb option`. Such an option does not exist, as the pdb can only be passed as the main argument.
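Besides the message fix, the patch above also relaxes the resolution check so that NMR entries (which report no resolution) are no longer discarded; a sketch of that check, using the hit fields seen in the patch:

```python
def passes_resolution_check(hit: dict, resolution_cutoff: float = 4.0) -> bool:
    """Keep hits below the resolution cutoff; NMR structures carry no
    resolution value but are still acceptable."""
    resolution = hit["resolution"]
    if resolution is None:
        return "NMR" in hit["experimental_method"]
    return resolution < resolution_cutoff


assert passes_resolution_check(
    {"resolution": None, "experimental_method": "Solution NMR"}
)
assert not passes_resolution_check(
    {"resolution": None, "experimental_method": "X-ray diffraction"}
)
```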
0.0
e7d631822f1853974bb241234685746b1c12964d
[ "tests/test_pdb.py::test_selchain_pdb", "tests/test_pdb.py::test_tidy_pdb", "tests/test_pdb.py::test_occ_pdb", "tests/test_pdb.py::test_keep_atoms", "tests/test_pdb.py::test_selmodel_pdb", "tests/test_pdb.py::test_validate_api_hit", "tests/test_pdb.py::test_validate_api_hit_nmr", "tests/test_pdb.py::test_get_maxint_pdb_empty", "tests/test_pdb.py::test_get_maxint_pdb", "tests/test_pdb.py::test_filter_pdb_list", "tests/test_pdb.py::test_pdb_data" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-04-03 08:07:27+00:00
apache-2.0
2,691
haddocking__arctic3d-234
diff --git a/src/arctic3d/__init__.py b/src/arctic3d/__init__.py index e69de29..332735d 100644 --- a/src/arctic3d/__init__.py +++ b/src/arctic3d/__init__.py @@ -0,0 +1,12 @@ +"""arctic3d""" +import logging +import sys + +log = logging.getLogger(__name__) +log.handlers.clear() +log.setLevel(logging.DEBUG) +handler = logging.StreamHandler(sys.stdout) +handler.setFormatter( + logging.Formatter("[%(asctime)s %(module)s %(levelname)s] %(message)s") +) +log.addHandler(handler) diff --git a/src/arctic3d/cli.py b/src/arctic3d/cli.py index ea1b0a4..caf5b7a 100644 --- a/src/arctic3d/cli.py +++ b/src/arctic3d/cli.py @@ -1,12 +1,10 @@ """Main CLI.""" import argparse -import logging -import shutil import sys import time -import os from pathlib import Path +from arctic3d import log from arctic3d.modules.blast import run_blast from arctic3d.modules.cluster_interfaces import cluster_interfaces from arctic3d.modules.input import Input @@ -14,21 +12,14 @@ from arctic3d.modules.interface import ( get_interface_residues, read_interface_residues, ) -from arctic3d.modules.output import make_output, setup_output_folder +from arctic3d.modules.output import ( + make_output, + create_output_folder, + setup_output_folder, +) from arctic3d.modules.pdb import get_best_pdb from arctic3d.modules.sequence import to_fasta - -# logging -LOGNAME = f"arctic3d_{os.getpid()}.log" -LOGNAME_FINAL = "arctic3d.log" -logging.basicConfig(filename=LOGNAME) -log = logging.getLogger(LOGNAME) -ch = logging.StreamHandler() -formatter = logging.Formatter( - " [%(asctime)s %(module)s:L%(lineno)d %(levelname)s] %(message)s" -) -ch.setFormatter(formatter) -log.addHandler(ch) +from arctic3d.modules.log import add_log_for_CLI argument_parser = argparse.ArgumentParser() @@ -175,11 +166,10 @@ def main( ligand, linkage_strategy, threshold, + log_level="DEBUG", ): """Main function.""" st_time = time.time() - log.setLevel("DEBUG") - inp = Input(input_arg) input_files = {} # retrieve uniprot information @@ -197,15 +187,23 @@ def main( input_files["interface_file"] = Path(interface_file) uniprot_id = None + # create output folder + run_dir_path = create_output_folder(run_dir, uniprot_id) + # configure logging + log_file = Path(run_dir_path, "arctic3d.log") + add_log_for_CLI(log, log_level, log_file) + + log.info(f"Target UNIPROTID: {uniprot_id}") + # save json files if interface_data: input_files["interface_data"] = Path(interface_data) if pdb_data: input_files["pdb_data"] = Path(pdb_data) - log.info(f"Target UNIPROTID: {uniprot_id}") - - input_files = setup_output_folder(uniprot_id, input_files, run_dir) + input_files = setup_output_folder( + run_dir=run_dir_path, input_files=input_files + ) # retrieve interfaces. if "interface_file" in input_files: @@ -299,11 +297,6 @@ def main( f"arctic3d run completed in {(time.time() - st_time):.2f} seconds." 
) - # move log file to output folder - exp_log_path = Path(f"../{LOGNAME}") - if exp_log_path.exists(): - shutil.move(exp_log_path, LOGNAME_FINAL) - if __name__ == "__main__": sys.exit(maincli()) diff --git a/src/arctic3d/cli_localise.py b/src/arctic3d/cli_localise.py index f89dc26..56b6066 100644 --- a/src/arctic3d/cli_localise.py +++ b/src/arctic3d/cli_localise.py @@ -31,34 +31,24 @@ and biological process (P):: --quickgo=F """ import argparse -import logging import os -import shutil import sys import time from pathlib import Path +from arctic3d import log from arctic3d.functions import make_request from arctic3d.modules.interface import parse_out_partner +from arctic3d.modules.log import add_log_for_CLI from arctic3d.modules.output import ( create_barplot, + create_output_folder, parse_clusters, setup_output_folder, write_dict, ) -LOGNAME = f"arctic3d_localise_{os.getpid()}.log" -LOGNAME_FINAL = "arctic3d_localise.log" -logging.basicConfig(filename=LOGNAME) -log = logging.getLogger(LOGNAME) -ch = logging.StreamHandler() -formatter = logging.Formatter( - " [%(asctime)s %(module)s:L%(lineno)d %(levelname)s] %(message)s" -) -ch.setFormatter(formatter) -log.addHandler(ch) - UNIPROT_API_URL = "https://www.ebi.ac.uk/proteins/api/proteins" argument_parser = argparse.ArgumentParser() @@ -302,10 +292,18 @@ def maincli(): cli(argument_parser, main) -def main(input_arg, run_dir, out_partner, quickgo, weight): +def main(input_arg, run_dir, out_partner, quickgo, weight, log_level="DEBUG"): """Main function.""" - log.setLevel("INFO") + log.setLevel(log_level) start_time = time.time() + + # create output folder + run_dir_path = create_output_folder(run_dir) + # logging + log_file = Path(run_dir_path, "arctic3d_localise.log") + add_log_for_CLI(log, log_level, log_file) + + # property name prop_name = "location" if quickgo: if quickgo == "F": @@ -326,7 +324,7 @@ def main(input_arg, run_dir, out_partner, quickgo, weight): if not input_files["cl_filename"].exists(): raise Exception("non existing input file") - input_files = setup_output_folder(None, input_files, run_dir) + input_files = setup_output_folder(run_dir_path, input_files) # parsing arctic3d clustering output clustering_dict = parse_clusters(input_files["cl_filename"]) @@ -425,12 +423,6 @@ def main(input_arg, run_dir, out_partner, quickgo, weight): elap_time = round((time.time() - start_time), 3) log.info(f"arctic3d_localise run took {elap_time} seconds") - # copying log file to the run folder (if possible) - try: - shutil.move(f"../{LOGNAME}", LOGNAME_FINAL) - except FileNotFoundError as e: - log.warning(f"Could not find log file: {e}") - if __name__ == "__main__": sys.exit(maincli()) diff --git a/src/arctic3d/cli_resclust.py b/src/arctic3d/cli_resclust.py index d7460f8..a994381 100644 --- a/src/arctic3d/cli_resclust.py +++ b/src/arctic3d/cli_resclust.py @@ -25,28 +25,18 @@ Input arguments: `criterion` : the criterion to extract the clusters. 
""" import argparse -import logging import sys import MDAnalysis as mda from scipy.spatial.distance import pdist +from arctic3d import log from arctic3d.modules.clustering import ( cluster_similarity_matrix, get_clustering_dict, ) from arctic3d.modules.input import Input -LOGNAME = "arctic3d_resclust.log" -logging.basicConfig(filename=LOGNAME, filemode="w") -log = logging.getLogger(LOGNAME) -ch = logging.StreamHandler() -formatter = logging.Formatter( - " [%(asctime)s %(module)s:L%(lineno)d %(levelname)s] %(message)s" -) -ch.setFormatter(formatter) -log.addHandler(ch) - argument_parser = argparse.ArgumentParser() argument_parser.add_argument( @@ -177,9 +167,7 @@ def main(input_arg, residue_list, chain, threshold, linkage, criterion): n_chains = u.n_segments if n_chains != 1: - log.error( - f"Number of consistent segments ({n_chains}) != 1. Aborting." - ) + log.error(f"Number of consistent segments ({n_chains}) != 1.Aborting.") sys.exit(1) # do the clustering diff --git a/src/arctic3d/cli_restraints.py b/src/arctic3d/cli_restraints.py index face33c..c2f2af6 100644 --- a/src/arctic3d/cli_restraints.py +++ b/src/arctic3d/cli_restraints.py @@ -33,26 +33,21 @@ This will consider only residues with a probability of being in the interface higher than 0.5 (for each cluster). """ import argparse -import logging import os -import shutil import sys import time from pathlib import Path import tarfile -from arctic3d.modules.output import read_residues_probs, setup_output_folder +from arctic3d import log -LOGNAME = f"arctic3d_restraints_{os.getpid()}.log" -LOGNAME_FINAL = "arctic3d_restraints.log" -logging.basicConfig(filename=LOGNAME) -log = logging.getLogger(LOGNAME) -ch = logging.StreamHandler() -formatter = logging.Formatter( - " [%(asctime)s %(module)s:L%(lineno)d %(levelname)s] %(message)s" +from arctic3d.modules.output import ( + read_residues_probs, + create_output_folder, + setup_output_folder, ) -ch.setFormatter(formatter) -log.addHandler(ch) +from arctic3d.modules.log import add_log_for_CLI + argument_parser = argparse.ArgumentParser() argument_parser.add_argument( @@ -234,12 +229,14 @@ def maincli(): cli(argument_parser, main) -def main(r1, r2, ch1, ch2, run_dir, prob_threshold=0.5): +def main(r1, r2, ch1, ch2, run_dir, prob_threshold=0.5, log_level="DEBUG"): """Main function.""" - log.setLevel("INFO") + log.setLevel(log_level) start_time = time.time() log.info("Starting arctic3d_restraints") - + run_dir_path = create_output_folder(run_dir) + log_file = Path(run_dir_path, "arctic3d_restraints.log") + add_log_for_CLI(log, log_level, log_file) # checking if r1 and r2 exists if not os.path.exists(r1): log.error(f"Could not find {r1}") @@ -261,7 +258,7 @@ def main(r1, r2, ch1, ch2, run_dir, prob_threshold=0.5): # Setting up output folder input_files = {"r1_res_fname": r1_res_fname, "r2_res_fname": r2_res_fname} log.info(f"Input files are {input_files}") - input_files = setup_output_folder(None, input_files, run_dir) + input_files = setup_output_folder(run_dir_path, input_files) # read and filter probabilities r1_residues_probs = read_residues_probs(input_files["r1_res_fname"]) @@ -282,7 +279,7 @@ def main(r1, r2, ch1, ch2, run_dir, prob_threshold=0.5): ambig_fnames.append(ambig_fname) log.info( f"Creating {ambig_fname} restraint file by" - "coupling {cl1} (r1) and {cl2} (r2)" + f" coupling {cl1} (r1) and {cl2} (r2)" ) generate_restraints(residues1, residues2, ch1, ch2, ambig_fname) n_ambig += 1 @@ -291,9 +288,3 @@ def main(r1, r2, ch1, ch2, run_dir, prob_threshold=0.5): elap_time = 
round((time.time() - start_time), 3) log.info(f"arctic3d_restraints run took {elap_time} seconds") - - # copying log file to the run folder (if possible) - try: - shutil.move(f"../{LOGNAME}", LOGNAME_FINAL) - except FileNotFoundError as e: - log.warning(f"Could not find log file: {e}") diff --git a/src/arctic3d/modules/log.py b/src/arctic3d/modules/log.py new file mode 100644 index 0000000..902aeca --- /dev/null +++ b/src/arctic3d/modules/log.py @@ -0,0 +1,70 @@ +"""Manage logging.""" +import logging +import sys +from functools import partial +from logging import FileHandler, StreamHandler + + +log_file_name = "log" + + +info_formatter = "[%(asctime)s %(module)s %(levelname)s] %(message)s" +debug_formatter = ( + "[%(asctime)s] " + "%(filename)s:%(name)s:%(funcName)s:%(lineno)d: " + "%(message)s" +) + +log_formatters = { + "DEBUG": debug_formatter, + "INFO": info_formatter, + "WARNING": info_formatter, + "ERROR": info_formatter, + "CRITICAL": info_formatter, +} + +log_levels = { + "DEBUG": logging.DEBUG, + "INFO": logging.INFO, + "WARNING": logging.WARNING, + "ERROR": logging.ERROR, + "CRITICAL": logging.CRITICAL, +} + + +def add_handler( + log_obj, + handler, + stream, + log_level="INFO", + formatter=info_formatter, +): + """Add a logging Handler to the log object.""" + ch = handler(stream) + ch.setLevel(log_levels[log_level.upper()]) + ch.setFormatter(logging.Formatter(formatter)) + log_obj.addHandler(ch) + return ch + + +def add_log_for_CLI(log, log_level, logfile): + """Configure log for command-line clients.""" + llu = log_level.upper() + + params = { + "log_level": llu, + "formatter": log_formatters[llu], + } + + log.handlers.clear() + add_sysout_handler(log, **params) + add_logfile_handler(log, stream=logfile, **params) + return + + +add_sysout_handler = partial( + add_handler, handler=StreamHandler, stream=sys.stdout +) # noqa: E501 +add_logfile_handler = partial( + add_handler, handler=FileHandler, stream=log_file_name +) # noqa: E501 diff --git a/src/arctic3d/modules/output.py b/src/arctic3d/modules/output.py index 1ea7821..06bb7b5 100644 --- a/src/arctic3d/modules/output.py +++ b/src/arctic3d/modules/output.py @@ -13,22 +13,20 @@ import plotly.graph_objects as go log = logging.getLogger("arctic3d.log") -def setup_output_folder(uniprot_id, input_files, output_dir): - """Sets up output folder. +def create_output_folder(output_dir, uniprot_id=None): + """Creates output folder. Parameters ---------- - uniprot_id : string or None - uniprot_id of the run - input_files : dict of Paths - dict of input files - output_dir : str or None + output_dir : str user-defined name of the run + uniport_id : str or None + uniprot id of the target Returns ------- - copied_input_files : dict of Paths - dict of copied input files + run_dir : Path + path to the run directory """ run_dir = output_dir if run_dir is None: @@ -41,10 +39,27 @@ def setup_output_folder(uniprot_id, input_files, output_dir): if os.path.exists(run_dir): raise Exception(f"{run_dir} already exists!") - - # setting up the directory - log.info(f"Setting up output_directory {run_dir}") + log.info(f"Creating output_directory {run_dir}") os.mkdir(run_dir) + return run_dir + + +def setup_output_folder(run_dir, input_files): + """Sets up output folder. 
+ + Parameters + ---------- + run_dir : str or Path + name of the run directory + input_files : dict of Paths + dict of input files + + Returns + ------- + copied_input_files : dict of Paths + dict of copied input files + """ + log.info(f"Setting up output folder {run_dir}") datadir = Path(run_dir, "input_data") os.mkdir(datadir)
haddocking/arctic3d
a3a5af6557f501c0b0695687e03b48ec6468ede0
diff --git a/tests/test_cli.py b/tests/test_cli.py index 5b143a0..996733c 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -27,5 +27,8 @@ def test_cli_empty(): os.chdir(start_cwd) exp_dir = Path(f"arctic3d-{target_uniprot}") assert exp_dir.exists() is True + # Check that the log file has been created + assert Path(exp_dir, "arctic3d.log").exists() + # remove folder if exp_dir.exists(): shutil.rmtree(exp_dir) diff --git a/tests/test_cli_localise.py b/tests/test_cli_localise.py index 8fd265b..a236a85 100644 --- a/tests/test_cli_localise.py +++ b/tests/test_cli_localise.py @@ -41,6 +41,7 @@ def example_uniprot_data(): def test_localise_cli_empty(empty_cluster_filepath): + """Test localise cli with empty cluster file.""" start_cwd = os.getcwd() run_dir = "arctic3d-localise" main( @@ -51,6 +52,10 @@ def test_localise_cli_empty(empty_cluster_filepath): None, ) os.chdir(start_cwd) + # Check that the output directory has been created + assert Path(run_dir).exists() + # check existence of log file + assert Path("arctic3d-localise", "arctic3d_localise.log").exists() shutil.rmtree(run_dir) diff --git a/tests/test_cli_restraints.py b/tests/test_cli_restraints.py index 413113c..4003a95 100644 --- a/tests/test_cli_restraints.py +++ b/tests/test_cli_restraints.py @@ -79,6 +79,8 @@ def test_main(): main(r1, r2, None, None, run_dir=run_dir, prob_threshold=0.7) # check if the zipped tbl files exist assert Path("ambig.tbl.tgz").exists() + # check if log file exists + assert Path("arctic3d_restraints.log").exists() # check the correct number of tbl files exist ls_tbl = len(glob.glob("ambig*tbl")) assert ls_tbl == 4 diff --git a/tests/test_output.py b/tests/test_output.py index cb18c6d..a4dfee9 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -4,6 +4,7 @@ from pathlib import Path import pytest from arctic3d.modules.output import ( + create_output_folder, output_pdb, read_residues_probs, setup_output_folder, @@ -154,16 +155,29 @@ def test_output_pdb(inp_pdb): os.unlink(output_files[0]) -def test_run_dir(): +def test_create_output_folder(): """Test if the expected run_dir is effectively created.""" - run_dir = "run_dir" uniprot_id = "fake_uniprot" + create_output_folder(output_dir=None, uniprot_id=uniprot_id) + exp_run_dir = Path(f"arctic3d-{uniprot_id}") + assert Path.exists(exp_run_dir) + os.rmdir(exp_run_dir) + + +def test_setup_output_folder(inp_pdb): + """Test the correct setup of the output folder.""" + run_dir = "dummy_output" start_cwd = os.getcwd() - setup_output_folder(uniprot_id, [], run_dir) + create_output_folder(run_dir) + input_files = {"pdb": inp_pdb} + setup_output_folder(run_dir, input_files) obs_cwd = Path(os.getcwd()) exp_cwd = Path(start_cwd, run_dir) assert exp_cwd == obs_cwd os.chdir(start_cwd) + assert Path.exists(Path(run_dir, "input_data")) + assert Path.exists(Path(run_dir, "input_data", inp_pdb.name)) + os.unlink(Path(run_dir, "input_data", inp_pdb.name)) os.rmdir(Path(run_dir, "input_data")) os.rmdir(run_dir)
fix logging

The current logging system can be improved. Will take inspiration from HADDOCK3's logging.
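A condensed sketch of the approach the patch takes: each CLI clears the handlers configured at import time and attaches a stdout handler plus a per-run file handler, instead of moving a pid-named log file into the run folder afterwards:

```python
import logging
import sys
from pathlib import Path


def add_log_for_cli(log: logging.Logger, log_level: str, logfile: Path) -> None:
    """Send the same records to stdout and to a per-run log file,
    replacing any handlers configured at import time."""
    formatter = logging.Formatter(
        "[%(asctime)s %(module)s %(levelname)s] %(message)s"
    )
    log.handlers.clear()
    for handler in (
        logging.StreamHandler(sys.stdout),
        logging.FileHandler(logfile),
    ):
        handler.setLevel(log_level.upper())  # setLevel accepts level names
        handler.setFormatter(formatter)
        log.addHandler(handler)


log = logging.getLogger("arctic3d")
log.setLevel(logging.DEBUG)
add_log_for_cli(log, "INFO", Path("arctic3d.log"))
log.info("logging to stdout and arctic3d.log")
```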
0.0
a3a5af6557f501c0b0695687e03b48ec6468ede0
[ "tests/test_cli.py::test_cli_empty", "tests/test_cli_localise.py::test_localise_cli_empty", "tests/test_cli_localise.py::test_get_quickgo_information", "tests/test_cli_localise.py::test_get_uniprot_subcellular_location", "tests/test_cli_restraints.py::test_filter_residues_probs", "tests/test_cli_restraints.py::test_generate_restraints", "tests/test_cli_restraints.py::test_compress_tbl_files", "tests/test_cli_restraints.py::test_main", "tests/test_output.py::test_write_clusters", "tests/test_output.py::test_write_residues", "tests/test_output.py::test_write_interfaes", "tests/test_output.py::test_write_res_probs", "tests/test_output.py::test_read_res_probs", "tests/test_output.py::test_output_pdb", "tests/test_output.py::test_create_output_folder", "tests/test_output.py::test_setup_output_folder", "tests/test_output.py::test_shorten_labels" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-04-04 11:15:54+00:00
apache-2.0
2,692
haddocking__arctic3d-243
diff --git a/src/arctic3d/cli.py b/src/arctic3d/cli.py index 450572a..e2da9aa 100644 --- a/src/arctic3d/cli.py +++ b/src/arctic3d/cli.py @@ -111,6 +111,14 @@ argument_parser.add_argument( default="average", ) +argument_parser.add_argument( + "--numbering", + help="what to renumber while extracting the best pdb files", + type=str, + default="pdb", + choices=["pdb", "resi"], +) + def load_args(arguments): """ @@ -166,6 +174,7 @@ def main( ligand, linkage_strategy, threshold, + numbering, log_level="DEBUG", ): """Main function.""" @@ -257,6 +266,7 @@ def main( pdb_to_use=pdb_to_use, chain_to_use=chain_to_use, pdb_data=pdb_data_path, + numbering=numbering, ) if pdb_f is None: diff --git a/src/arctic3d/modules/pdb.py b/src/arctic3d/modules/pdb.py index 513716f..83a0043 100644 --- a/src/arctic3d/modules/pdb.py +++ b/src/arctic3d/modules/pdb.py @@ -61,7 +61,7 @@ def get_cif_dict(cif_name): return cif_dict -def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id): +def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id, key="pdb"): """ gets the numbering correspondence between the pdb file and the uniprot sequence from the cif dict. @@ -76,12 +76,15 @@ def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id): uniprot ID to be used (many IDs may exist in the .cif file) chain_id : str chain ID to be used + key : str + key to use for the numbering dict, either "uniprot" or "pdb" Returns ------- numbering_dict : dict - pdb-resid : uniprot-resid dictionary - Example : {"GLY-A-16" : 20, "TYR-A-17" : 21, ... } + pdb-resid : key-value dictionary + Example (key=pdb) : {"GLY-A-16" : 20, "TYR-A-17" : 21, ... } + Example (key=uniprot) : {20 : "GLY-A-16", 21 : "TYR-A-17", ... } """ atomsite_dict = cif_dict[pdb_id.upper()]["_atom_site"] numbering_dict = {} @@ -102,12 +105,75 @@ def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id): ) unp_num = atomsite_dict["pdbx_sifts_xref_db_num"][resid] if residue_key != prev_residue_key: # not a duplicate entry - numbering_dict[residue_key] = unp_num + if key == "pdb": + numbering_dict[residue_key] = unp_num + elif key == "uniprot": + numbering_dict[unp_num] = residue_key + else: + raise ValueError(f"key {key} not recognized") prev_residue_key = residue_key # log.debug(f"numbering dict {numbering_dict}") return numbering_dict +def renumber_interfaces_from_cif( + pdb_id, uniprot_id, chain_id, interface_residues +): + """ + Renumbers a list of interfaces based on the information coming from the + corresponding updated cif file. 
+ + Parameters + ---------- + pdb_id : str + PDB ID + uniprot_id : str + uniprot ID to be used + chain_id : str + chain ID to be used + interfaces_residues : list + list of interfaces residues + """ + + cif_fname = Path(f"{pdb_id}_updated.cif") + if not cif_fname.is_file(): + fetch_updated_cif(pdb_id, cif_fname) + cif_dict = get_cif_dict(cif_fname) + + # retrieve mapping + numbering_dict = get_numbering_dict( + pdb_id, cif_dict, uniprot_id, chain_id, key="uniprot" + ) + # log.debug(f"numbering_dict {numbering_dict}") + if any(numbering_dict): + unique_resids = set( + value for values in interface_residues.values() for value in values + ) + renum_residues = {} # dictionary of renumbered residues + for residue in unique_resids: + str_res = str(residue) + if str_res in numbering_dict.keys(): + # log.debug(f"Residue {residue} not found in cif file") + int_residue = int(numbering_dict[str_res].split("-")[2]) + renum_residues[residue] = int_residue + else: + # log.debug(f"Residue {residue} not found in cif file") + renum_residues[residue] = None + # renumbering interfaces + renum_interfaces = {} + for interface, residues in interface_residues.items(): + renum_residues_list = [] + for residue in residues: + if residue is not None: + renum_residues_list.append(renum_residues[residue]) + renum_interfaces[interface] = renum_residues_list + else: + log.info(f"Renumbering failed for pdb {pdb_id}-{chain_id}") + renum_interfaces = None + # log.debug(f"renum_interfaces {renum_interfaces}") + return renum_interfaces, cif_fname + + def renumber_pdb_from_cif(pdb_id, uniprot_id, chain_id, pdb_fname): """ Renumbers a pdb file based on the information coming from the corresponding @@ -135,7 +201,9 @@ def renumber_pdb_from_cif(pdb_id, uniprot_id, chain_id, pdb_fname): cif_dict = get_cif_dict(cif_fname) # retrieve mapping - numbering_dict = get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id) + numbering_dict = get_numbering_dict( + pdb_id, cif_dict, uniprot_id, chain_id, key="pdb" + ) # we do not check if all residues in pdb_fname have # been correctly renumbered @@ -368,7 +436,7 @@ def validate_api_hit( fetch_list, resolution_cutoff=4.0, coverage_cutoff=0.0, - max_pdb_num=10, + max_pdb_num=20, ): """ Validate PDB fetch request file. @@ -422,7 +490,7 @@ def validate_api_hit( log.info(f"Found {len(pdbs_to_fetch)} valid PDBs to fetch") # downloading a list of good pdbs validated_pdbs = fetch_pdb_files(pdbs_to_fetch[:max_pdb_num]) - log.info(f"Found {len(pdbs_to_fetch)} valid PDBs") + log.info(f"Fetched {len(validated_pdbs)} valid PDBs") return validated_pdbs @@ -473,7 +541,9 @@ def unlink_files(suffix="pdb", to_exclude=None): fpath.unlink() -def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id): +def get_maxint_pdb( + validated_pdbs, interface_residues, uniprot_id, numbering="pdb" +): """ Get PDB ID that retains the most interfaces. @@ -485,15 +555,8 @@ def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id): Dictionary of all the interfaces (each one with its uniprot ID as key) uniprot_id : str Uniprot ID - - Returns - ------- - pdb_f : Path or None - Path to PDB file. - hit : dict or None - Interface API hit. - filtered_interfaces : dict or None - Dictionary of the retained and filtered interfaces. + numbering : str + what to renumber? 
'pdb' for pdb files, 'resi' for interface residues """ log.info("Selecting pdb retaining the most interfaces") cif_f, pdb_f, hit, filtered_interfaces = None, None, None, None @@ -502,31 +565,42 @@ def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id): for curr_pdb, curr_hit in validated_pdbs: chain_id = curr_hit["chain_id"] pdb_id = curr_hit["pdb_id"] + # refactor renumbering tidy_pdb_f = preprocess_pdb(curr_pdb, chain_id) - curr_renum_pdb_f, curr_cif_f = renumber_pdb_from_cif( - pdb_id, uniprot_id, chain_id, tidy_pdb_f - ) - tidy_pdb_f.unlink() - if curr_renum_pdb_f is None: - continue + if numbering == "pdb": # renumber the pdb files + curr_pdb_f, curr_cif_f = renumber_pdb_from_cif( + pdb_id, uniprot_id, chain_id, tidy_pdb_f + ) + curr_interface_residues = interface_residues + elif numbering == "resi": # renumber the interface residues + curr_pdb_f = tidy_pdb_f + ( + curr_interface_residues, + curr_cif_f, + ) = renumber_interfaces_from_cif( + pdb_id, uniprot_id, chain_id, interface_residues + ) + else: + raise ValueError(f"Unknown numbering option: {numbering}") # load pdb file. If there is an error, skip to the next one try: - mdu = mda.Universe(curr_renum_pdb_f) + mdu = mda.Universe(curr_pdb_f) except Exception as e: - log.error(f"Error loading {curr_renum_pdb_f}: {e}") + log.error(f"Error loading {curr_pdb_f}: {e}") continue + selection_string = f"name CA and chainID {chain_id}" pdb_resids = mdu.select_atoms(selection_string).resids tmp_filtered_interfaces = filter_interfaces( - interface_residues, pdb_resids + curr_interface_residues, pdb_resids ) curr_nint = len(tmp_filtered_interfaces) if curr_nint > max_nint: # update "best" hit max_nint = curr_nint filtered_interfaces = tmp_filtered_interfaces.copy() - pdb_f = curr_renum_pdb_f + pdb_f = curr_pdb_f cif_f = curr_cif_f hit = curr_hit # unlink pdb files @@ -536,7 +610,6 @@ def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id): if max_nint != 0: log.info(f"filtered_interfaces {filtered_interfaces}") log.info(f"pdb {pdb_f} retains the most interfaces ({max_nint})") - return pdb_f, hit, filtered_interfaces @@ -581,6 +654,7 @@ def get_best_pdb( pdb_to_use=None, chain_to_use=None, pdb_data=None, + numbering="pdb", ): """ Get best PDB ID. @@ -597,6 +671,8 @@ def get_best_pdb( Chain id to be used. pdb_data : Path or None pdb json file for offline mode. + numbering : str (default pdb) + what to renumber, either the pdb files or the interface residues Returns ------- @@ -632,7 +708,7 @@ def get_best_pdb( validated_pdbs = validate_api_hit(pdb_list) pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( - validated_pdbs, interface_residues, uniprot_id + validated_pdbs, interface_residues, uniprot_id, numbering=numbering ) if pdb_f is None:
haddocking/arctic3d
8cf368227787bb43f0936e36269925d908e31e72
diff --git a/tests/test_cli.py b/tests/test_cli.py index 996733c..988d26e 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -9,20 +9,21 @@ def test_cli_empty(): target_uniprot = "P23804" start_cwd = os.getcwd() main( - target_uniprot, - None, - None, - None, - None, - None, - None, - None, - None, - None, - None, - None, - None, - None, + input_arg=target_uniprot, + db=None, + interface_file=None, + out_partner=None, + out_pdb=None, + pdb_to_use=None, + chain_to_use=None, + run_dir=None, + interface_data=None, + pdb_data=None, + full=None, + ligand=None, + linkage_strategy=None, + threshold=None, + numbering=None, ) os.chdir(start_cwd) exp_dir = Path(f"arctic3d-{target_uniprot}") diff --git a/tests/test_pdb.py b/tests/test_pdb.py index 32d1f77..59c43a6 100644 --- a/tests/test_pdb.py +++ b/tests/test_pdb.py @@ -11,6 +11,7 @@ from arctic3d.modules.pdb import ( keep_atoms, occ_pdb, renumber_pdb_from_cif, + renumber_interfaces_from_cif, selchain_pdb, selmodel_pdb, tidy_pdb, @@ -197,7 +198,7 @@ def test_get_maxint_pdb_empty(): def test_get_maxint_pdb(good_hits, example_interfaces): - """Test get_maxint_pdb.""" + """Test get_maxint_pdb with implicit pdb numbering.""" validated_pdbs = validate_api_hit(good_hits) pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( validated_pdbs, example_interfaces, "P00760" @@ -208,6 +209,20 @@ def test_get_maxint_pdb(good_hits, example_interfaces): assert filtered_interfaces == {"P01024": [103, 104, 105]} +def test_get_maxint_pdb_resi(good_hits, example_interfaces): + """Test get_maxint_pdb with resi numbering.""" + validated_pdbs = validate_api_hit(good_hits) + pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( + validated_pdbs, example_interfaces, "P00760", numbering="resi" + ) + # here the pdb is not renumbered + assert pdb_f.name == "4xoj-model1-atoms-A-occ-tidy.pdb" + assert top_hit["pdb_id"] == "4xoj" + assert top_hit["chain_id"] == "A" + # here the interfaces are renumbered, so the residues change + assert filtered_interfaces == {"P01024": [95, 96, 97]} + + def test_filter_pdb_list(good_hits): """Test filter_pdb_list.""" observed_red_list = filter_pdb_list(good_hits, pdb_to_use="1abc") @@ -263,3 +278,20 @@ def test_renumber_pdb_from_cif(inp_pdb_3psg): assert lines[726][13:26] == "CA SER A 50" pdb_renum_fname.unlink() cif_fname.unlink() + + +def test_renumber_interfaces_from_cif(inp_pdb_3psg): + """Test renumber_interfaces_from_cif.""" + interfaces = {"P00441": [85, 137, 138]} + renum_interfaces, cif_fname = renumber_interfaces_from_cif( + pdb_id="3psg", + uniprot_id="P00791", + chain_id="A", + interface_residues=interfaces, + ) + assert renum_interfaces == {"P00441": [26, 78, 79]} + # NB : this result is wrong in this case, as the pdb contains two different + # records with equal chain-resid, with two different insertion codes. + # It's not possible to extract the correct residues in this case, but + # this should be a highly unlikely case. + cif_fname.unlink()
add option to renumber interfaces instead of pdb files There are some (~20) proteins whose sequences contain more than 10K amino acids. In this case, renumbering the pdb file according to the canonical numbering will be wrong, since the result will not be parsable. To circumvent this problem, it is necessary to make it possible to renumber the interface residues instead of the pdb files while looking for the best available pdb (the one maximizing the retained interfaces). This renumbering is mostly safe, except for the case in which the pdb has multiple atomname-chainid-resid records (for example when dealing with pdbs with insertion codes, see #238). In that case this residue-based renumbering is going to fail miserably. A sketch of how the new `numbering` option is exercised is shown below.
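A sketch based on the test diff above; `good_hits` and `example_interfaces` are the fixtures used in `tests/test_pdb.py`:

```python
from arctic3d.modules.pdb import get_maxint_pdb, validate_api_hit

validated_pdbs = validate_api_hit(good_hits)
# "resi": keep the pdb file as-is and renumber the interface residues instead
pdb_f, top_hit, filtered_interfaces = get_maxint_pdb(
    validated_pdbs, example_interfaces, "P00760", numbering="resi"
)
# the default numbering="pdb" renumbers the pdb file, as before
```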
0.0
8cf368227787bb43f0936e36269925d908e31e72
[ "tests/test_cli.py::test_cli_empty", "tests/test_pdb.py::test_selchain_pdb", "tests/test_pdb.py::test_tidy_pdb", "tests/test_pdb.py::test_occ_pdb", "tests/test_pdb.py::test_keep_atoms", "tests/test_pdb.py::test_selmodel_pdb", "tests/test_pdb.py::test_validate_api_hit", "tests/test_pdb.py::test_validate_api_hit_nmr", "tests/test_pdb.py::test_get_maxint_pdb_empty", "tests/test_pdb.py::test_get_maxint_pdb", "tests/test_pdb.py::test_get_maxint_pdb_resi", "tests/test_pdb.py::test_filter_pdb_list", "tests/test_pdb.py::test_pdb_data", "tests/test_pdb.py::test_get_numbering_dict", "tests/test_pdb.py::test_renumber_pdb_from_cif", "tests/test_pdb.py::test_renumber_interfaces_from_cif" ]
[]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-04-06 15:21:50+00:00
apache-2.0
2,693
haddocking__arctic3d-256
diff --git a/src/arctic3d/cli.py b/src/arctic3d/cli.py index cc265bf..7e3dfe2 100644 --- a/src/arctic3d/cli.py +++ b/src/arctic3d/cli.py @@ -260,7 +260,7 @@ def main( else: pdb_data_path = None # get best pdb - pdb_f, filtered_interfaces = get_best_pdb( + pdb_f, cif_f, filtered_interfaces = get_best_pdb( uniprot_id=uniprot_id, interface_residues=interface_residues, pdb_to_use=pdb_to_use, diff --git a/src/arctic3d/modules/pdb.py b/src/arctic3d/modules/pdb.py index 83a0043..fad58c0 100644 --- a/src/arctic3d/modules/pdb.py +++ b/src/arctic3d/modules/pdb.py @@ -610,7 +610,7 @@ def get_maxint_pdb( if max_nint != 0: log.info(f"filtered_interfaces {filtered_interfaces}") log.info(f"pdb {pdb_f} retains the most interfaces ({max_nint})") - return pdb_f, hit, filtered_interfaces + return pdb_f, cif_f, hit, filtered_interfaces def filter_pdb_list(fetch_list, pdb_to_use=None, chain_to_use=None): @@ -707,12 +707,12 @@ def get_best_pdb( validated_pdbs = validate_api_hit(pdb_list) - pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( + pdb_f, cif_f, top_hit, filtered_interfaces = get_maxint_pdb( validated_pdbs, interface_residues, uniprot_id, numbering=numbering ) - if pdb_f is None: - log.warning(f"Could not fetch PDB file for {uniprot_id}") + if pdb_f is None or cif_f is None: + log.warning(f"Could not fetch PDB/mmcif file for {uniprot_id}") return None, None pdb_id = top_hit["pdb_id"] @@ -730,4 +730,4 @@ def get_best_pdb( processed_pdb = pdb_f.rename(f"{uniprot_id}-{pdb_id}-{chain_id}.pdb") - return processed_pdb, filtered_interfaces + return processed_pdb, cif_f, filtered_interfaces
haddocking/arctic3d
9d82a31b52e0fa7a29a44ef0c6548ce14315fda9
diff --git a/tests/test_pdb.py b/tests/test_pdb.py index 59c43a6..ce06ae3 100644 --- a/tests/test_pdb.py +++ b/tests/test_pdb.py @@ -178,21 +178,25 @@ def test_validate_api_hit_nmr(pdb_hit_no_resolution): def test_get_best_pdb(example_interfaces): """Test get_best_pdb.""" - pdb, filtered_interfaces = get_best_pdb("P20023", example_interfaces) + pdb, cif, filtered_interfaces = get_best_pdb("P20023", example_interfaces) exp_pdb = Path("P20023-1ghq-B.pdb") + exp_cif = Path("1ghq_updated.cif") exp_interfaces = {"P01024": [103, 104, 105]} assert pdb == exp_pdb + assert cif == exp_cif assert filtered_interfaces == exp_interfaces exp_pdb.unlink() + exp_cif.unlink() def test_get_maxint_pdb_empty(): """Test get_maxint_pdb with empty output.""" empty_validated_pdbs = [] - pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( + pdb_f, cif_f, top_hit, filtered_interfaces = get_maxint_pdb( empty_validated_pdbs, {}, uniprot_id=None ) assert pdb_f is None + assert cif_f is None assert top_hit is None assert filtered_interfaces is None @@ -200,10 +204,11 @@ def test_get_maxint_pdb_empty(): def test_get_maxint_pdb(good_hits, example_interfaces): """Test get_maxint_pdb with implicit pdb numbering.""" validated_pdbs = validate_api_hit(good_hits) - pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( + pdb_f, cif_f, top_hit, filtered_interfaces = get_maxint_pdb( validated_pdbs, example_interfaces, "P00760" ) assert pdb_f.name == "4xoj-model1-atoms-A-occ-tidy_renum.pdb" + assert cif_f.name == "4xoj_updated.cif" assert top_hit["pdb_id"] == "4xoj" assert top_hit["chain_id"] == "A" assert filtered_interfaces == {"P01024": [103, 104, 105]} @@ -212,11 +217,12 @@ def test_get_maxint_pdb(good_hits, example_interfaces): def test_get_maxint_pdb_resi(good_hits, example_interfaces): """Test get_maxint_pdb with resi numbering.""" validated_pdbs = validate_api_hit(good_hits) - pdb_f, top_hit, filtered_interfaces = get_maxint_pdb( + pdb_f, cif_f, top_hit, filtered_interfaces = get_maxint_pdb( validated_pdbs, example_interfaces, "P00760", numbering="resi" ) # here the pdb is not renumbered assert pdb_f.name == "4xoj-model1-atoms-A-occ-tidy.pdb" + assert cif_f.name == "4xoj_updated.cif" assert top_hit["pdb_id"] == "4xoj" assert top_hit["chain_id"] == "A" # here the interfaces are renumbered, so the residues change @@ -242,12 +248,13 @@ def test_filter_pdb_list(good_hits): def test_pdb_data(inp_pdb_data): """Test pdb_data input json file.""" orig_interfaces = {"P00441": [85, 137, 138]} - pdb, filtered_interfaces = get_best_pdb( + pdb, cif, filtered_interfaces = get_best_pdb( "P40202", orig_interfaces, pdb_data=inp_pdb_data ) assert filtered_interfaces == orig_interfaces pdb.unlink() + cif.unlink() def test_get_numbering_dict(inp_cif_3psg): @@ -280,7 +287,7 @@ def test_renumber_pdb_from_cif(inp_pdb_3psg): cif_fname.unlink() -def test_renumber_interfaces_from_cif(inp_pdb_3psg): +def test_renumber_interfaces_from_cif(): """Test renumber_interfaces_from_cif.""" interfaces = {"P00441": [85, 137, 138]} renum_interfaces, cif_fname = renumber_interfaces_from_cif(
return cif_f in get_maxint_pdb The `get_maxint_pdb` function should return the updated cif file, so that we can a) check its presence in the tests and b) delete it in the tests.
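After this change, callers unpack a four-tuple; a minimal sketch of the updated call site, mirroring `get_best_pdb` in the patch above:

```python
pdb_f, cif_f, top_hit, filtered_interfaces = get_maxint_pdb(
    validated_pdbs, interface_residues, uniprot_id, numbering=numbering
)
# both files must be present for the run to continue
if pdb_f is None or cif_f is None:
    log.warning(f"Could not fetch PDB/mmcif file for {uniprot_id}")
```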
0.0
9d82a31b52e0fa7a29a44ef0c6548ce14315fda9
[ "tests/test_pdb.py::test_get_maxint_pdb_empty", "tests/test_pdb.py::test_get_maxint_pdb", "tests/test_pdb.py::test_get_maxint_pdb_resi", "tests/test_pdb.py::test_pdb_data" ]
[ "tests/test_pdb.py::test_selchain_pdb", "tests/test_pdb.py::test_tidy_pdb", "tests/test_pdb.py::test_occ_pdb", "tests/test_pdb.py::test_keep_atoms", "tests/test_pdb.py::test_selmodel_pdb", "tests/test_pdb.py::test_validate_api_hit", "tests/test_pdb.py::test_validate_api_hit_nmr", "tests/test_pdb.py::test_filter_pdb_list", "tests/test_pdb.py::test_get_numbering_dict", "tests/test_pdb.py::test_renumber_pdb_from_cif", "tests/test_pdb.py::test_renumber_interfaces_from_cif" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-04-17 16:01:24+00:00
apache-2.0
2,694
haddocking__arctic3d-302
diff --git a/src/arctic3d/cli.py b/src/arctic3d/cli.py index 8eb450c..6c8da6f 100644 --- a/src/arctic3d/cli.py +++ b/src/arctic3d/cli.py @@ -6,6 +6,7 @@ from pathlib import Path from arctic3d import log from arctic3d.modules.blast import run_blast +from arctic3d.modules.clustering import filter_clusters from arctic3d.modules.cluster_interfaces import cluster_interfaces from arctic3d.modules.input import Input from arctic3d.modules.interface import ( @@ -111,6 +112,14 @@ argument_parser.add_argument( default="average", ) +argument_parser.add_argument( + "--min_clust_size", + help="Minimum number of residues in clusters", + type=int, + required=False, + default=0, +) + def load_args(arguments): """ @@ -166,6 +175,7 @@ def main( ligand, linkage_strategy, threshold, + min_clust_size, log_level="DEBUG", ): """Main function.""" @@ -282,6 +292,13 @@ def main( log.info(f"Clustered interfaces {cl_dict}") log.info(f"Clustered interface residues: {cl_residues}") + if min_clust_size > 0: + log.info( + f"Excluding clusters with less than {min_clust_size} residues" + ) + cl_dict, cl_residues, cl_residues_probs = filter_clusters( + cl_dict, cl_residues, cl_residues_probs, min_clust_size + ) make_output( interface_residues=interface_residues, diff --git a/src/arctic3d/modules/clustering.py b/src/arctic3d/modules/clustering.py index 344869b..849c968 100644 --- a/src/arctic3d/modules/clustering.py +++ b/src/arctic3d/modules/clustering.py @@ -221,3 +221,50 @@ def interface_clustering( log.info(f"Clustering performed in {elap_time} seconds") log.info(f"Clustering produced {len(cl_dict)} clusters") return cl_dict, cl_residues, cl_residues_probs + + +def filter_clusters(cl_dict, cl_residues, cl_residues_probs, min_clust_size): + """ + Filter clusters based on size. + + Parameters + ---------- + cl_dict : dict + dictionary of clustered interfaces + cl_residues : dict + dictionary of clustered residues + cl_residues_probs : dict of dicts + dictionary of probabilities for clustered residues + min_clust_size : int + minimum cluster size + + Returns + ------- + flt_cl_dict : dict + dictionary of clustered interfaces + flt_cl_residues : dict + dictionary of clustered residues + flt_cl_residues_probs : dict of dicts + dictionary of probabilities for clustered residues + """ + # gather clusters not respecting the minimum size + excl_clusts = [] + for cl in cl_residues: + if len(cl_residues[cl]) < min_clust_size: + log.info(f"Cluster {cl} has less than {min_clust_size} residues.") + excl_clusts.append(cl) + # remove clusters not respecting the minimum size + for cl in excl_clusts: + log.info(f"Removing cluster {cl}") + del cl_dict[cl] + del cl_residues[cl] + del cl_residues_probs[cl] + # renumber clusters + flt_cl_dict = {} + flt_cl_residues = {} + flt_cl_residues_probs = {} + for idx, cl in enumerate(cl_dict.keys()): + flt_cl_dict[idx + 1] = cl_dict[cl] + flt_cl_residues[idx + 1] = cl_residues[cl] + flt_cl_residues_probs[idx + 1] = cl_residues_probs[cl] + return flt_cl_dict, flt_cl_residues, flt_cl_residues_probs
haddocking/arctic3d
d915f6f939fa3715cd611a48217c892660f5e1be
diff --git a/tests/test_cli.py b/tests/test_cli.py index 5d0f747..6cf8c76 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -23,6 +23,7 @@ def test_cli_empty(): ligand=None, linkage_strategy=None, threshold=None, + min_clust_size=None, ) os.chdir(start_cwd) exp_dir = Path(f"arctic3d-{target_uniprot}") diff --git a/tests/test_clustering.py b/tests/test_clustering.py index 7f6498a..f2ccb71 100644 --- a/tests/test_clustering.py +++ b/tests/test_clustering.py @@ -3,6 +3,7 @@ import pytest from arctic3d.modules.clustering import ( cluster_similarity_matrix, + filter_clusters, get_clustering_dict, get_residue_dict, ) @@ -66,3 +67,27 @@ def test_get_res_dict(): ) assert expected_res_dict == observed_res_dict assert expected_res_probs == observed_res_probs + + +def test_filter_clusters(): + """Test correct filtering of clusters.""" + example_cl_dict = { + 1: ["int_1", "int_2"], + 2: ["int_3"], + } + example_res_dict = {1: [1, 2, 3, 4, 5], 2: [27, 28, 29]} + example_res_probs = { + 1: {1: 0.5, 2: 0.5, 3: 1.0, 4: 0.5, 5: 0.5}, + 2: {27: 1.0, 28: 1.0, 29: 1.0}, + } + obs_cl_dict, obs_res_dict, obs_res_probs = filter_clusters( + example_cl_dict, example_res_dict, example_res_probs, 4 + ) + exp_cl_dict = {1: ["int_1", "int_2"]} + exp_res_dict = {1: [1, 2, 3, 4, 5]} + exp_res_probs = { + 1: {1: 0.5, 2: 0.5, 3: 1.0, 4: 0.5, 5: 0.5}, + } + assert exp_cl_dict == obs_cl_dict + assert exp_res_dict == obs_res_dict + assert exp_res_probs == obs_res_probs diff --git a/tests/test_pdb.py b/tests/test_pdb.py index aac2e7b..b265871 100644 --- a/tests/test_pdb.py +++ b/tests/test_pdb.py @@ -269,7 +269,6 @@ def test_pdb_data(inp_pdb_data): def test_convert_cif_to_pdbs(inp_cif_3psg): """Test convert_cif_to_pdbs.""" obs_out_pdb_fnames = convert_cif_to_pdbs(inp_cif_3psg, "3psg", "P00791") - print(f"obs_out_pdb_fnames {obs_out_pdb_fnames}") exp_out_pdb_fnames = [Path("3psg-A.pdb")] assert exp_out_pdb_fnames == obs_out_pdb_fnames # inspect the pdb file
add min_cluster_size parameter this (optional) parameter must make it possible to filter out clusters that contain fewer than `min_cluster_size` residues
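A self-contained sketch of the new `filter_clusters` helper, using the data from the test diff above:

```python
from arctic3d.modules.clustering import filter_clusters

cl_dict = {1: ["int_1", "int_2"], 2: ["int_3"]}
cl_residues = {1: [1, 2, 3, 4, 5], 2: [27, 28, 29]}
cl_residues_probs = {
    1: {1: 0.5, 2: 0.5, 3: 1.0, 4: 0.5, 5: 0.5},
    2: {27: 1.0, 28: 1.0, 29: 1.0},
}
# cluster 2 has only 3 residues, so it is dropped; survivors are renumbered from 1
flt_dict, flt_res, flt_probs = filter_clusters(
    cl_dict, cl_residues, cl_residues_probs, 4
)
assert flt_dict == {1: ["int_1", "int_2"]}
```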
0.0
d915f6f939fa3715cd611a48217c892660f5e1be
[ "tests/test_cli.py::test_cli_empty", "tests/test_clustering.py::test_cluster_similarity_matrix", "tests/test_clustering.py::test_complete_strategy_clustering", "tests/test_clustering.py::test_get_cl_dict", "tests/test_clustering.py::test_get_res_dict", "tests/test_clustering.py::test_filter_clusters", "tests/test_pdb.py::test_selchain_pdb", "tests/test_pdb.py::test_tidy_pdb", "tests/test_pdb.py::test_occ_pdb", "tests/test_pdb.py::test_keep_atoms", "tests/test_pdb.py::test_selmodel_pdb", "tests/test_pdb.py::test_validate_api_hit", "tests/test_pdb.py::test_validate_api_hit_nmr", "tests/test_pdb.py::test_get_maxint_pdb_empty", "tests/test_pdb.py::test_get_maxint_pdb", "tests/test_pdb.py::test_filter_pdb_list", "tests/test_pdb.py::test_pdb_data", "tests/test_pdb.py::test_convert_cif_to_pdbs" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-11 15:33:08+00:00
apache-2.0
2,695
haddocking__arctic3d-304
diff --git a/src/arctic3d/modules/pdb.py b/src/arctic3d/modules/pdb.py index 096f98b..3dfad7e 100644 --- a/src/arctic3d/modules/pdb.py +++ b/src/arctic3d/modules/pdb.py @@ -401,14 +401,15 @@ def fetch_pdb_files(pdb_to_fetch, uniprot_id): for hit in pdb_to_fetch: pdb_id = hit["pdb_id"] chain_id = hit["chain_id"] - pdb_fname = f"{pdb_id}-{chain_id}.pdb" cif_fname = f"{pdb_id}_updated.cif" + # if the cif file has not been downloaded yet, download it if cif_fname not in os.listdir(): cif_f = fetch_updated_cif(pdb_id, cif_fname) pdb_files = convert_cif_to_pdbs(cif_f, pdb_id, uniprot_id) log.info(f"converted cif to pdb files: {pdb_files}") else: cif_f = Path(cif_fname) + pdb_fname = f"{pdb_id}-{chain_id}.pdb" pdb_f = Path(pdb_fname) if pdb_f.exists(): validated_pdb_and_cifs.append((pdb_f, cif_f, hit)) @@ -569,6 +570,7 @@ def keep_atoms(inp_pdb_f): def validate_api_hit( fetch_list, uniprot_id, + check_pdb=True, resolution_cutoff=4.0, coverage_cutoff=0.0, max_pdb_num=20, @@ -582,6 +584,8 @@ def validate_api_hit( List containing dictionaries of hits. uniprot_id : str Uniprot ID. + check_pdb : bool + Check PDB resolution and coverage. resolution_cutoff : float Resolution cutoff. coverage_cutoff : float @@ -602,33 +606,35 @@ def validate_api_hit( coverage = hit["coverage"] resolution = hit["resolution"] exp_method = hit["experimental_method"] - - # check coverage value - if coverage > coverage_cutoff: - check_list.append(True) - else: - check_list.append(False) - reason = "coverage" - # check resolution value - if resolution is None: - if "NMR" in exp_method: + if check_pdb: + # check coverage value + if coverage > coverage_cutoff: check_list.append(True) else: check_list.append(False) - reason = "None resolution" - elif resolution < resolution_cutoff: - check_list.append(True) - else: - check_list.append(False) - reason = "resolution" + reason = "coverage" + # check resolution value + if resolution is None: + if "NMR" in exp_method: + check_list.append(True) + else: + check_list.append(False) + reason = "None resolution" + elif resolution < resolution_cutoff: + check_list.append(True) + else: + check_list.append(False) + reason = "resolution" # check chain ID not longer than 1 character + # this check holds also if check_pdb is False if len(chain_id) == 1: check_list.append(True) else: check_list.append(False) reason = "chain ID too big" + # append pdb to fetch list if all checks passed if all(check_list): pdbs_to_fetch.append(hit) else: @@ -826,13 +832,15 @@ def get_best_pdb( return # if pdb_to_use is not None, already filter the list + check_pdb = True if pdb_to_use: pdb_to_use = pdb_to_use.lower() + check_pdb = False if chain_to_use: chain_to_use = chain_to_use.upper() pdb_list = filter_pdb_list(pdb_dict[uniprot_id], pdb_to_use, chain_to_use) - validated_pdbs_and_cifs = validate_api_hit(pdb_list, uniprot_id) + validated_pdbs_and_cifs = validate_api_hit(pdb_list, uniprot_id, check_pdb) pdb_f, cif_f, top_hit, filtered_interfaces = get_maxint_pdb( validated_pdbs_and_cifs,
haddocking/arctic3d
f64dd5f2c627e5a4ce2249f73cb416103fec2e3e
diff --git a/tests/test_pdb.py b/tests/test_pdb.py index b265871..594f342 100644 --- a/tests/test_pdb.py +++ b/tests/test_pdb.py @@ -200,6 +200,17 @@ def test_validate_api_hit_nmr(pdb_hit_no_resolution): assert dict == pdb_hit_no_resolution +def test_validate_api_hit_check_pdb(pdb_hit_no_resolution): + """Test validate_api_hit with check_pdb == False.""" + validated_pdbs = validate_api_hit( + [pdb_hit_no_resolution], "P20023", check_pdb=False + ) + pdb, cif, dict = validated_pdbs[0] + assert pdb.name == "2gsx-A.pdb" + assert cif.name == "2gsx_updated.cif" + assert dict == pdb_hit_no_resolution + + def test_get_best_pdb(example_interfaces): """Test get_best_pdb.""" pdb, cif, filtered_interfaces = get_best_pdb("P20023", example_interfaces)
allow for bad pdb structures Users should have the freedom to include pdb files with poor resolution in the search. We could say that a pdb file provided in the `pdb_to_use` field does not have to meet any of the resolution-coverage criteria. An example of this is uniprot ID F1PJP5, for which no PDB meets the resolution thresholds.
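A sketch of the resulting behaviour, taken from the test diff above; note that `validate_api_hit` also fetches the files, so this is assumed to run against the live PDBe services:

```python
# with check_pdb=False the resolution/coverage cutoffs are skipped;
# only the chain-id sanity check is kept
validated_pdbs = validate_api_hit(
    [pdb_hit_no_resolution], "P20023", check_pdb=False
)
pdb, cif, hit = validated_pdbs[0]
```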
0.0
f64dd5f2c627e5a4ce2249f73cb416103fec2e3e
[ "tests/test_pdb.py::test_validate_api_hit_check_pdb" ]
[ "tests/test_pdb.py::test_selchain_pdb", "tests/test_pdb.py::test_tidy_pdb", "tests/test_pdb.py::test_occ_pdb", "tests/test_pdb.py::test_keep_atoms", "tests/test_pdb.py::test_selmodel_pdb", "tests/test_pdb.py::test_validate_api_hit", "tests/test_pdb.py::test_validate_api_hit_nmr", "tests/test_pdb.py::test_get_maxint_pdb_empty", "tests/test_pdb.py::test_get_maxint_pdb", "tests/test_pdb.py::test_filter_pdb_list", "tests/test_pdb.py::test_pdb_data", "tests/test_pdb.py::test_convert_cif_to_pdbs" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-12 08:47:04+00:00
apache-2.0
2,696
haddocking__arctic3d-313
diff --git a/src/arctic3d/modules/output.py b/src/arctic3d/modules/output.py index 7f75c08..1695c82 100644 --- a/src/arctic3d/modules/output.py +++ b/src/arctic3d/modules/output.py @@ -503,13 +503,54 @@ def create_barplot(cluster, sorted_dict, max_labels=70): return +def remove_duplicate_labels(labels, values): + """ + Remove duplicate labels. + + Parameters + ---------- + labels : list + list of labels + values : list + list of values + + Returns + ------- + new_labels : list + list of labels without duplicates + new_values : list + list of values without duplicates + """ + new_labels, new_values = [], [] + for n in range(len(labels)): + if labels[n] not in new_labels: + new_labels.append(labels[n]) + new_values.append(values[n]) + else: + log.info(f"Detected duplicate label {labels[n]}.") + return new_labels, new_values + + def create_barplotly(cluster, sorted_dict, format, scale, max_labels=25): """ Create horizontal barplot using plotly. + Parameters + ---------- + cluster : int or str + cluster ID + sorted_dict : dict + dictionary of sorted entries + format : str + format of the output figure + scale : float + scale of the output figure + max_labels : int + maximum number of labels to include """ - labels = shorten_labels(list(sorted_dict.keys())[-max_labels:]) - values = list(sorted_dict.values())[-max_labels:] + tmp_labels = shorten_labels(list(sorted_dict.keys())[-max_labels:]) + tmp_values = list(sorted_dict.values())[-max_labels:] + labels, values = remove_duplicate_labels(tmp_labels, tmp_values) fig = go.Figure(go.Bar(x=values, y=labels, orientation="h")) fig_fname = f"cluster_{cluster}.html" fig.write_html(fig_fname)
haddocking/arctic3d
54f82a8122b749c52e4b49349d60366fc5ac6caf
diff --git a/tests/test_output.py b/tests/test_output.py index 497f843..9d637d9 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -7,6 +7,7 @@ from arctic3d.modules.output import ( create_output_folder, output_pdb, read_residues_probs, + remove_duplicate_labels, setup_output_folder, shorten_labels, write_dict, @@ -192,3 +193,14 @@ def test_shorten_labels(example_B_labels): "positive regulation of transcription by RNA polymerase...", ] assert exp_shortened_labels == obs_shortened_labels + + +def test_remove_duplicate_labels(): + """Test remove_duplicate_labels.""" + tmp_labels = ["Polymerase...", "Polymerase...", "Polymerase..."] + tmp_values = [2, 3, 1] + exp_labels = ["Polymerase..."] + exp_values = [2] + obs_labels, obs_values = remove_duplicate_labels(tmp_labels, tmp_values) + assert exp_labels == obs_labels + assert exp_values == obs_values
handle duplicate labels in arctic3d-localise plots When we shorten labels, we may end up with duplicate labels that should be removed from the plot.
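A minimal sketch of the new helper, matching the test diff above:

```python
from arctic3d.modules.output import remove_duplicate_labels

labels = ["Polymerase...", "Polymerase...", "Polymerase..."]
values = [2, 3, 1]
# only the first occurrence of each (shortened) label is kept
new_labels, new_values = remove_duplicate_labels(labels, values)
assert (new_labels, new_values) == (["Polymerase..."], [2])
```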
0.0
54f82a8122b749c52e4b49349d60366fc5ac6caf
[ "tests/test_output.py::test_write_clusters", "tests/test_output.py::test_write_residues", "tests/test_output.py::test_write_interfaes", "tests/test_output.py::test_write_res_probs", "tests/test_output.py::test_read_res_probs", "tests/test_output.py::test_output_pdb", "tests/test_output.py::test_create_output_folder", "tests/test_output.py::test_setup_output_folder", "tests/test_output.py::test_shorten_labels", "tests/test_output.py::test_remove_duplicate_labels" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2023-07-27 14:31:30+00:00
apache-2.0
2,697
haddocking__arctic3d-78
diff --git a/src/arctic3d/modules/interface.py b/src/arctic3d/modules/interface.py index f9f6049..8fd15cc 100644 --- a/src/arctic3d/modules/interface.py +++ b/src/arctic3d/modules/interface.py @@ -84,6 +84,46 @@ def parse_out_pdb(out_pdb_string): return set(out_pdb_list) +def parse_interface_line(int_line, ln_num): + """ + Parses the input interface line according to the following format: + + int_name 1,2,3,6,7 + + Parameters + ---------- + int_line : str + interface_line + ln_num : int + line number + + Returns + ------- + int_name : str + name of the interface + residue_list : list + list of residues + """ + splt_ln = int_line.strip().split() + int_name = splt_ln[0] + # checking malformed interface + if len(splt_ln) != 2: + raise Exception( + f"Found uncompatible interface at line {ln_num} in interface_file." + ) + residues_str_list = splt_ln[1].split(",") + residues_int_list = [] + # checking they are all integers + for resid_string in residues_str_list: + if resid_string.isdigit(): + residues_int_list.append(int(resid_string)) + else: + raise Exception( + f"Malformed residue {resid_string} at line {ln_num} in interface_file." + ) + return int_name, residues_int_list + + def read_interface_residues(interface_file): """ Parameters @@ -105,14 +145,12 @@ def read_interface_residues(interface_file): interface_dict = {} if os.path.exists(interface_file): with open(interface_file, "r") as ifile: + ln_num = 0 # keep track of line number for ln in ifile: + ln_num += 1 if ln != os.linesep: - splt_ln = ln.split() - try: - residues = [int(resid) for resid in splt_ln[1].split(",")] - interface_dict[splt_ln[0]] = residues - except Exception as e: - log.exception(e) + int_name, residue_list = parse_interface_line(ln, ln_num) + interface_dict[int_name] = residue_list else: raise Exception(f"interface_file {interface_file} does not exist") return interface_dict
haddocking/arctic3d
46762136d6b57abf6f1fedce1e660e390e73a5fa
diff --git a/tests/test_interface.py b/tests/test_interface.py index ca503cb..a980505 100644 --- a/tests/test_interface.py +++ b/tests/test_interface.py @@ -3,6 +3,7 @@ from pathlib import Path import pytest from arctic3d.modules.interface import ( + parse_interface_line, parse_out_pdb, parse_out_uniprot, read_interface_residues, @@ -32,6 +33,20 @@ def test_read_int_file(): assert obs_interface_dict == exp_interface_dict +def test_parse_interface_line(): + """Test parse_interface_line function.""" + interface_lines = ["P00767 1,2,3", "P00767", "P00767 1-3,4"] + # first string is correct + exp_interface = "P00767", [1, 2, 3] + obs_interface = parse_interface_line(interface_lines[0], 0) + assert exp_interface == obs_interface + # the other two should throw an exception + with pytest.raises(Exception): + parse_interface_line(interface_lines[1], 1) + with pytest.raises(Exception): + parse_interface_line(interface_lines[2], 2) + + def test_parse_out_uniprot(): uniprot_strings = [None, "P00760", "P00760,P00974"] expected_uniprot_strings = [set([]), set(["P00760"]), set(["P00760", "P00974"])]
improve filtering on input interface_file the program should handle the following situations while reading the `interface_file`: 1. an empty line in the interface file -> skip it 2. only a single value (possibly the name of the interface) -> abort the execution and raise an exception 3. invalid residue numbers (e.g. 19A,28-38) -> abort the execution and raise an exception
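The behaviour of the new `parse_interface_line` helper on these cases, based on the patch and test diff above:

```python
from arctic3d.modules.interface import parse_interface_line

parse_interface_line("P00767 1,2,3", 0)  # -> ("P00767", [1, 2, 3])
# a single value with no residue list aborts the execution:
# parse_interface_line("P00767", 1)        # raises Exception
# a non-integer residue such as "1-3" aborts the execution:
# parse_interface_line("P00767 1-3,4", 2)  # raises Exception
```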
0.0
46762136d6b57abf6f1fedce1e660e390e73a5fa
[ "tests/test_interface.py::test_read_int_file_nonexisting", "tests/test_interface.py::test_read_int_file", "tests/test_interface.py::test_parse_interface_line", "tests/test_interface.py::test_parse_out_uniprot", "tests/test_interface.py::test_error_parse_out_uniprot", "tests/test_interface.py::test_parse_out_pdb", "tests/test_interface.py::test_error_parse_out_pdb" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-09-19 07:29:02+00:00
apache-2.0
2,698
haddocking__arctic3d-84
diff --git a/src/arctic3d/modules/clustering.py b/src/arctic3d/modules/clustering.py index d83223c..3aeb007 100644 --- a/src/arctic3d/modules/clustering.py +++ b/src/arctic3d/modules/clustering.py @@ -5,10 +5,10 @@ import os import time import matplotlib.pyplot as plt -import numpy as np -import pandas as pd from scipy.cluster.hierarchy import dendrogram, fcluster, linkage +from arctic3d.modules.interface_matrix import read_int_matrix + LINKAGE = "average" # THRESHOLD = 0.7071 # np.sqrt(2)/2 THRESHOLD = 0.8660 # np.sqrt(3)/2 @@ -16,40 +16,6 @@ THRESHOLD = 0.8660 # np.sqrt(3)/2 log = logging.getLogger("arctic3dlog") -def read_int_matrix(filename): - """ - Read the interface matrix. - - Parameters - ---------- - filename : str or Path - interface matrix filename - Returns - ------- - int_matrix : np.array - interface matrix - """ - if os.path.exists(filename): - int_matrix = pd.read_csv(filename, header=None, sep=" ") - int_matrix.columns = ["lig1", "lig2", "D"] - # first check: it must be a 1D condensed distance matrix - nligands = 0.5 + np.sqrt(0.25 + 2 * int_matrix.shape[0]) - int_nligands = int(nligands) - if abs(nligands - int_nligands) > 0.00001: - raise Exception( - f"npairs {int_matrix.shape[0]}: interface matrix should be a 1D condensed distance matrix" - ) - # extracting ligands' names - ligand_names = [int_matrix.iloc[0, 0]] - for lig in int_matrix.iloc[:, 1]: - if lig not in ligand_names: - ligand_names.append(lig) - log.debug(f"Ligand names {ligand_names}") - return int_matrix.iloc[:, 2], ligand_names - else: - raise Exception(f"input path {filename} does not exist!") - - def cluster_distance_matrix(int_matrix, entries, plot=False): """ Does the clustering. diff --git a/src/arctic3d/modules/interface_matrix.py b/src/arctic3d/modules/interface_matrix.py index 19e080a..718b91f 100644 --- a/src/arctic3d/modules/interface_matrix.py +++ b/src/arctic3d/modules/interface_matrix.py @@ -4,6 +4,7 @@ import time import MDAnalysis as mda import numpy as np +import pandas as pd from scipy.spatial.distance import cdist SIGMA = 1.9 @@ -61,12 +62,7 @@ def compute_scalar_product(interface_one, interface_two, Jij_mat): scalar product between the two interfaces """ # log.debug(f"computing scal_prod between {interface_one} and {interface_two}") - len_one = len(interface_one) - len_two = len(interface_two) - scalar_product = 0.0 - for res_one in range(len_one): - for res_two in range(len_two): - scalar_product += Jij_mat[interface_one[res_one], interface_two[res_two]] + scalar_product = Jij_mat[np.ix_(interface_one, interface_two)].sum() return scalar_product @@ -284,3 +280,37 @@ def interface_matrix(interface_dict, pdb_path): log.warning("Too few interfaces, interface matrix was not calculated.") out_fl = None return retained_interfaces, out_fl + + +def read_int_matrix(filename): + """ + Read the interface matrix. 
+ + Parameters + ---------- + filename : str or Path + interface matrix filename + Returns + ------- + int_matrix : np.array + interface matrix + """ + if os.path.exists(filename): + int_matrix = pd.read_csv(filename, header=None, sep=" ") + int_matrix.columns = ["lig1", "lig2", "D"] + # first check: it must be a 1D condensed distance matrix + nligands = 0.5 + np.sqrt(0.25 + 2 * int_matrix.shape[0]) + int_nligands = int(nligands) + if abs(nligands - int_nligands) > 0.00001: + raise Exception( + f"npairs {int_matrix.shape[0]}: interface matrix should be a 1D condensed distance matrix" + ) + # extracting ligands' names + ligand_names = [int_matrix.iloc[0, 0]] + for lig in int_matrix.iloc[:, 1]: + if lig not in ligand_names: + ligand_names.append(lig) + log.debug(f"Ligand names {ligand_names}") + return int_matrix.iloc[:, 2], ligand_names + else: + raise Exception(f"input path {filename} does not exist!")
haddocking/arctic3d
da41c9d75b793682fb7ae040c3cb7b381afa1b38
diff --git a/tests/test_clustering.py b/tests/test_clustering.py index 07104e8..7c8ff17 100644 --- a/tests/test_clustering.py +++ b/tests/test_clustering.py @@ -1,33 +1,9 @@ -from pathlib import Path - import numpy as np -import pytest from arctic3d.modules.clustering import ( # write_clusters,; write_residues, cluster_distance_matrix, - read_int_matrix, ) -from . import golden_data - - -def test_read_int_matrix_nonexisting(): - """Test error on non-existing path.""" - non_ex_path = "../dummy" - with pytest.raises(Exception): - read_int_matrix(non_ex_path) - - -def test_read_int_matrix(): - """Test correct reading of interface matrix.""" - matrix_path = Path(golden_data, "interface_matrix.txt") - expected_int_matrix = np.array([0.9972, 0.3742, 0.9736, 0.9996, 0.8841, 0.9991]) - expected_ligands = ["int_1", "int_2", "int_3", "int_4"] - observed_int_matrix, observed_ligands = read_int_matrix(matrix_path) - assert expected_ligands == observed_ligands - # now checking the matrix - np.testing.assert_allclose(expected_int_matrix, observed_int_matrix, atol=0.0001) - def test_cluster_distance_matrix(): """Test correct clustering""" diff --git a/tests/test_interface_matrix.py b/tests/test_interface_matrix.py index d39ac4b..576e89e 100644 --- a/tests/test_interface_matrix.py +++ b/tests/test_interface_matrix.py @@ -9,16 +9,23 @@ from arctic3d.modules.interface_matrix import ( compute_scalar_product, filter_interfaces, get_coupling_matrix, + interface_matrix, + read_int_matrix, ) from . import golden_data @pytest.fixture -def example_mdu(): +def example_pdbpath(): + """Example pdb path.""" + return Path(golden_data, "1rypB_r_b.pdb") + + [email protected] +def example_mdu(example_pdbpath): """Example mdanalysis universe.""" - pdb_path = Path(golden_data, "1rypB_r_b.pdb") - return mda.Universe(pdb_path) + return mda.Universe(example_pdbpath) @pytest.fixture @@ -28,6 +35,18 @@ def example_interface_dict(): return interface_dict [email protected] +def reference_jij(): + jij = np.array( + [ + [1.0, 0.358133, 0.031553], + [0.358133, 1.0, 0.366509], + [0.031553, 0.366509, 1.0], + ] + ) + return jij + + def test_check_residues_coverage(): """Test check_residues_coverage.""" interface_one = [1, 2, 3] @@ -38,7 +57,6 @@ def test_check_residues_coverage(): assert filtered_int_one == interface_one cov_two, filtered_int_two = check_residues_coverage(interface_two, pdb_resids) - assert cov_two == 0.75 expected_filtered_int_two = [2, 3, 4] assert expected_filtered_int_two == filtered_int_two @@ -52,35 +70,23 @@ def test_get_coupling_matrix_empty(example_mdu): assert observed_jij == expected_jij -def test_get_coupling_matrix(example_mdu): +def test_get_coupling_matrix(example_mdu, reference_jij): """Test get_coupling_matrix with a set of residues""" int_resids = [1, 2, 3] - expected_jij = np.array( - [ - [1.0, 0.358133, 0.031553], - [0.358133, 1.0, 0.366509], - [0.031553, 0.366509, 1.0], - ] - ) observed_jij = get_coupling_matrix(example_mdu, int_resids) - np.testing.assert_allclose(expected_jij, observed_jij, atol=0.00001) + np.testing.assert_allclose(reference_jij, observed_jij, atol=0.00001) -def test_compute_scalar_product(): +def test_compute_scalar_product(reference_jij): """Test compute_scalar_product.""" - jij = np.array( - [ - [1.0, 0.358133, 0.031553], - [0.358133, 1.0, 0.366509], - [0.031553, 0.366509, 1.0], - ] - ) interface_one = [0, 1, 2] - observed_norm = compute_scalar_product(interface_one, interface_one, jij) + observed_norm = compute_scalar_product(interface_one, interface_one, reference_jij) 
expected_norm = 4.51239 np.testing.assert_allclose(expected_norm, observed_norm, atol=0.00001) interface_two = [0, 1] - observed_scal_prod = compute_scalar_product(interface_one, interface_two, jij) + observed_scal_prod = compute_scalar_product( + interface_one, interface_two, reference_jij + ) expected_scal_prod = 3.11433 np.testing.assert_allclose(expected_scal_prod, observed_scal_prod, atol=0.00001) @@ -91,3 +97,42 @@ def test_filter_interfaces(example_mdu, example_interface_dict): pdb_resids = example_mdu.select_atoms("name CA").resids observed_filter_dict = filter_interfaces(example_interface_dict, pdb_resids) assert expected_filter_dict == observed_filter_dict + + +def test_interface_matrix(example_interface_dict, example_pdbpath): + """Test interface_matrix""" + # defining expected quantities + expected_interface_matrix = np.array([0.2515]) + expected_int_filename = "interface_matrix.txt" + expected_filter_dict = {"int_1": [1, 2], "int_2": [1, 2, 4]} + expected_interface_names = ["int_1", "int_2"] + # calculate interface matrix + observed_filter_ints, observed_int_filename = interface_matrix( + example_interface_dict, example_pdbpath + ) + assert expected_filter_dict == observed_filter_ints + assert expected_int_filename == observed_int_filename + # read interface matrix + observed_int_matrix, obs_int_names = read_int_matrix(observed_int_filename) + np.testing.assert_allclose( + expected_interface_matrix, observed_int_matrix, atol=0.00001 + ) + assert expected_interface_names == obs_int_names + + +def test_read_int_matrix_nonexisting(): + """Test error on non-existing path.""" + non_ex_path = "../dummy" + with pytest.raises(Exception): + read_int_matrix(non_ex_path) + + +def test_read_int_matrix(): + """Test correct reading of interface matrix.""" + matrix_path = Path(golden_data, "interface_matrix.txt") + expected_int_matrix = np.array([0.9972, 0.3742, 0.9736, 0.9996, 0.8841, 0.9991]) + expected_ligands = ["int_1", "int_2", "int_3", "int_4"] + observed_int_matrix, observed_ligands = read_int_matrix(matrix_path) + assert expected_ligands == observed_ligands + # now checking the matrix + np.testing.assert_allclose(expected_int_matrix, observed_int_matrix, atol=0.0001)
refactor interface matrix calculation - [x] using numpy built-in functions it should be possible to substantially accelerate some calculations - [x] add some tests as well - [x] move `read_int_matrix` from the `clustering` module to the `interface_matrix` module
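The core of the speed-up is replacing the explicit double loop in `compute_scalar_product` with a single vectorised sub-matrix sum; a self-contained illustration with the coupling matrix from the tests:

```python
import numpy as np

Jij = np.array(
    [
        [1.0, 0.358133, 0.031553],
        [0.358133, 1.0, 0.366509],
        [0.031553, 0.366509, 1.0],
    ]
)
interface_one, interface_two = [0, 1, 2], [0, 1]
# sums Jij[i, j] over all (i, j) pairs in one numpy call
# instead of two nested python loops
scalar_product = Jij[np.ix_(interface_one, interface_two)].sum()  # ~3.11433
```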
0.0
da41c9d75b793682fb7ae040c3cb7b381afa1b38
[ "tests/test_clustering.py::test_cluster_distance_matrix", "tests/test_clustering.py::test_write_clusters", "tests/test_clustering.py::test_write_residues", "tests/test_interface_matrix.py::test_check_residues_coverage", "tests/test_interface_matrix.py::test_get_coupling_matrix_empty", "tests/test_interface_matrix.py::test_get_coupling_matrix", "tests/test_interface_matrix.py::test_compute_scalar_product", "tests/test_interface_matrix.py::test_filter_interfaces", "tests/test_interface_matrix.py::test_interface_matrix", "tests/test_interface_matrix.py::test_read_int_matrix_nonexisting", "tests/test_interface_matrix.py::test_read_int_matrix" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-09-19 14:32:29+00:00
apache-2.0
2,699
haddocking__haddock3-88
diff --git a/src/haddock/clis/cli.py b/src/haddock/clis/cli.py index 36932a86..bcb48ac3 100755 --- a/src/haddock/clis/cli.py +++ b/src/haddock/clis/cli.py @@ -6,7 +6,8 @@ from argparse import ArgumentTypeError from functools import partial from haddock.version import CURRENT_VERSION -from haddock.libs.libutil import file_exists, non_negative_int +from haddock.libs.libutil import file_exists +from haddock.gear.restart_run import add_restart_arg # Command line interface parser @@ -22,17 +23,7 @@ ap.add_argument( help="The input recipe file path", ) -_arg_pos_int = partial( - non_negative_int, - exception=ArgumentTypeError, - emsg="Minimum value is 0, {!r} given.", - ) -ap.add_argument( - "--restart", - type=_arg_pos_int, - default=0, - help="Restart the recipe from this course", - ) +add_restart_arg(ap) ap.add_argument( "--setup", @@ -75,7 +66,7 @@ def maincli(): def main( recipe, - restart=0, + restart=None, setup_only=False, log_level="INFO", ): @@ -112,7 +103,7 @@ def main( logging.info(get_initial_greeting()) try: - params, other_params = setup_run(recipe) + params, other_params = setup_run(recipe, restart_from=restart) except ConfigurationError as err: logging.error(err) diff --git a/src/haddock/gear/prepare_run.py b/src/haddock/gear/prepare_run.py index 2f6ffbb5..5e2e5bf6 100644 --- a/src/haddock/gear/prepare_run.py +++ b/src/haddock/gear/prepare_run.py @@ -11,6 +11,7 @@ from haddock import haddock3_source_path from haddock.modules import modules_category from haddock.error import ConfigurationError from haddock.gear.parameters import config_mandatory_general_parameters +from haddock.gear.restart_run import remove_folders_after_number from haddock.libs.libutil import ( copy_files_to_dir, make_list_if_string, @@ -41,7 +42,7 @@ def with_config_error(func): return wrapper -def setup_run(workflow_path): +def setup_run(workflow_path, restart_from=None): """ Setup HADDOCK3 run. @@ -55,6 +56,15 @@ def setup_run(workflow_path): #6 : create the needed folders/files to start the run #7 : copy additional files to run folder + Parameters + ---------- + workflow_path : str or pathlib.Path + The path to the configuration file. + + erase_previous : bool + Whether to erase the previous run folder and reprare from + scratch. Defaults to `True`. + Returns ------- tuple of two dicts @@ -77,12 +87,16 @@ def setup_run(workflow_path): ) validate_modules_params(modules_params) - # prepares the run folders - remove_folder(params['run_dir']) - begin_dir, _ = create_begin_files(params) + if restart_from is None: + # prepares the run folders + remove_folder(params['run_dir']) + begin_dir, _ = create_begin_files(params) + + # prepare other files + copy_ambig_files(modules_params, begin_dir) - # prepare other files - copy_ambig_files(modules_params, begin_dir) + else: + remove_folders_after_number(params['run_dir'], restart_from) # return the modules' parameters and other parameters that may serve # the workflow, the "other parameters" can be expanded in the future diff --git a/src/haddock/gear/restart_run.py b/src/haddock/gear/restart_run.py new file mode 100644 index 00000000..8028ff85 --- /dev/null +++ b/src/haddock/gear/restart_run.py @@ -0,0 +1,59 @@ +"""Features to allow run restart from a given step.""" +from argparse import ArgumentTypeError +from functools import partial + +from haddock.libs.libutil import non_negative_int, remove_folder + + +_help_cli = """Restart the run from a given step. 
Previous folders from +the selected step onward will be deleted.""" + + +_arg_non_neg_int = partial( + non_negative_int, + exception=ArgumentTypeError, + emsg="Minimum value is 0, {!r} given.", + ) + + +def add_restart_arg(parser): + """Adds `--restart` option to argument parser.""" + parser.add_argument( + "--restart", + type=_arg_non_neg_int, + default=None, + help=_help_cli, + ) + + +def remove_folders_after_number(run_dir, num): + """ + Remove calculation folder after (included) a given number. + + Example + ------- + If the following step folders exist: + + 00_topoaa + 01_rigidbody + 02_mdref + 03_flexref + + and the number `2` is given, folders `02_` and `03_` will be + deleted. + + Parameters + ---------- + run_dir : pathlib.Path + The run directory. + + num : int + The number of the folder from which to delete calculation step + folders. `num` must be non-negative integer, or equivalent + representation. + """ + num = _arg_non_neg_int(num) + previous = sorted(list(run_dir.resolve().glob('[0-9][0-9]*/'))) + for folder in previous[num:]: + remove_folder(folder) + return diff --git a/src/haddock/libs/libutil.py b/src/haddock/libs/libutil.py index dcd42376..cac05d66 100644 --- a/src/haddock/libs/libutil.py +++ b/src/haddock/libs/libutil.py @@ -111,7 +111,7 @@ def parse_ncores(n=None, njobs=None, max_cpus=None): raise SetupError(_msg) from err if n < 1: - _msg = "`n` is not positive, this is not possible." + _msg = f"`n` is not positive, this is not possible: {n!r}" raise SetupError(_msg) if njobs: diff --git a/src/haddock/modules/__init__.py b/src/haddock/modules/__init__.py index 5a5d2529..e7e1ffcf 100644 --- a/src/haddock/modules/__init__.py +++ b/src/haddock/modules/__init__.py @@ -102,7 +102,7 @@ class BaseHaddockModule(ABC): def previous_path(self): previous = sorted(list(self.path.resolve().parent.glob('[0-9][0-9]*/'))) try: - return previous[-2] + return previous[self.order - 1] except IndexError: return self.path
haddocking/haddock3
10c1f6a7b91a594406b60428dcf47eb6762be798
diff --git a/tests/test_cli.py b/tests/test_cli.py index 85070308..fb4a6aba 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -31,26 +31,6 @@ def test_ap_setup_false(): assert cmd.setup_only == False [email protected]( - 'n', - (0, 1, 10, 1230, 50000), - ) -def test_ap_restart(n): - cmd = ap.parse_args(f'{recipe} --restart {n}'.split()) - assert cmd.restart == n - - [email protected]( - 'n', - (-1, -10, -1230, -50000), - ) -def test_ap_restart_error(n): - with pytest.raises(SystemExit) as exit: - cmd = ap.parse_args(f'{recipe} --restart {n}'.split()) - assert exit.type == SystemExit - assert exit.value.code == 2 - - def test_ap_version(): with pytest.raises(SystemExit) as exit: ap.parse_args('-v'.split()) diff --git a/tests/test_gear_restart.py b/tests/test_gear_restart.py new file mode 100644 index 00000000..99462fe0 --- /dev/null +++ b/tests/test_gear_restart.py @@ -0,0 +1,65 @@ +"""Test gear.restart_run.""" +import argparse + +import pytest + +from haddock.gear import restart_run + + +def test_has_help(): + """Assert module has _help_cli variable.""" + assert restart_run._help_cli + + [email protected]( + 'i,expected', + [ + ('0', 0), + ('1', 1), + ('57', 57), + (100, 100), + ] + ) +def test_non_neg_arg(i, expected): + """Test non negative arg type.""" + r = restart_run._arg_non_neg_int(i) + assert r == expected + + [email protected]( + 'i,expected', + [ + ('0', 0), + ('1', 1), + ('57', 57), + (100, 100), + ] + ) +def test_restart_cli(i, expected): + """Test non negative arg type.""" + ap = argparse.ArgumentParser() + restart_run.add_restart_arg(ap) + cmd = ap.parse_args(f'--restart {i}'.split()) + assert cmd.restart == expected + + [email protected]( + 'n', + (-1, -10, '-1230', -50000), + ) +def test_arg_non_neg_error(n): + with pytest.raises(argparse.ArgumentTypeError) as exit: + restart_run._arg_non_neg_int(n) + + [email protected]( + 'n', + (-1, -10, '-1230', -50000), + ) +def test_restart_cli_error(n): + ap = argparse.ArgumentParser() + restart_run.add_restart_arg(ap) + with pytest.raises(SystemExit) as exit: + cmd = ap.parse_args(f'--restart {n}'.split()) + assert exit.type == SystemExit + assert exit.value.code == 2
the parameter `--restart` is likely not functioning properly - investigate
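A sketch of the helper the fix introduces, following the docstring in `haddock.gear.restart_run` above; `"run1"` is a hypothetical run directory:

```python
from pathlib import Path
from haddock.gear.restart_run import remove_folders_after_number

# with 00_topoaa, 01_rigidbody, 02_mdref and 03_flexref in the run directory,
# restarting from step 2 deletes 02_mdref and 03_flexref
remove_folders_after_number(Path("run1"), 2)  # "run1" is a hypothetical path
```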
0.0
10c1f6a7b91a594406b60428dcf47eb6762be798
[ "tests/test_cli.py::test_ap_recipe_does_not_exist", "tests/test_cli.py::test_ap_recipe_exists", "tests/test_cli.py::test_ap_setup_true", "tests/test_cli.py::test_ap_setup_false", "tests/test_cli.py::test_ap_version", "tests/test_cli.py::test_ap_log_level[DEBUG]", "tests/test_cli.py::test_ap_log_level[INFO]", "tests/test_cli.py::test_ap_log_level[WARNING]", "tests/test_cli.py::test_ap_log_level[ERROR]", "tests/test_cli.py::test_ap_log_level[CRITICAL]", "tests/test_cli.py::test_ap_log_level_error", "tests/test_gear_restart.py::test_has_help", "tests/test_gear_restart.py::test_non_neg_arg[0-0]", "tests/test_gear_restart.py::test_non_neg_arg[1-1]", "tests/test_gear_restart.py::test_non_neg_arg[57-57]", "tests/test_gear_restart.py::test_non_neg_arg[100-100]", "tests/test_gear_restart.py::test_restart_cli[0-0]", "tests/test_gear_restart.py::test_restart_cli[1-1]", "tests/test_gear_restart.py::test_restart_cli[57-57]", "tests/test_gear_restart.py::test_restart_cli[100-100]", "tests/test_gear_restart.py::test_arg_non_neg_error[-1]", "tests/test_gear_restart.py::test_arg_non_neg_error[-10]", "tests/test_gear_restart.py::test_arg_non_neg_error[-1230]", "tests/test_gear_restart.py::test_arg_non_neg_error[-50000]", "tests/test_gear_restart.py::test_restart_cli_error[-1]", "tests/test_gear_restart.py::test_restart_cli_error[-10]", "tests/test_gear_restart.py::test_restart_cli_error[-1230]", "tests/test_gear_restart.py::test_restart_cli_error[-50000]" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-09-06 18:14:17+00:00
apache-2.0
2,700
hadpro24__nimba-framework-39
diff --git a/docs/tutorial/index.md b/docs/tutorial/index.md index 13d8137..0541bb4 100644 --- a/docs/tutorial/index.md +++ b/docs/tutorial/index.md @@ -55,7 +55,7 @@ from nimba.http import router @router('/about') def about(request): - return "<h1> Hello, <h2> Welcom to my app page" + return "<h1> Hello, World </h1>" ``` Each life is decorated by a road indicating a path diff --git a/nimba/core/exceptions.py b/nimba/core/exceptions.py index 8cbe421..a77b14b 100644 --- a/nimba/core/exceptions.py +++ b/nimba/core/exceptions.py @@ -13,4 +13,8 @@ class AppNameIncorrect(Exception): pass class CommandError(Exception): - pass \ No newline at end of file + pass + +class NoReverseFound(Exception): + pass + \ No newline at end of file diff --git a/nimba/http/__init__.py b/nimba/http/__init__.py index 417220d..12cc753 100644 --- a/nimba/http/__init__.py +++ b/nimba/http/__init__.py @@ -1,8 +1,9 @@ -from .utils import router, render +from .utils import router, render, path_reverse from .utils import ROUTES all = [ 'router', 'render' + 'path_reverse', 'ROUTES', ] \ No newline at end of file diff --git a/nimba/http/resolver.py b/nimba/http/resolver.py index 824f89e..4113ca4 100644 --- a/nimba/http/resolver.py +++ b/nimba/http/resolver.py @@ -97,7 +97,4 @@ def is_valid_method(methods:list) -> None: if not isinstance(methods, list) or len(methods) > 2 or len(methods) < 0: raise ImproperlyMethodsConfig('ErrorConfig : methods must be list and use the valid element GET or POST.') - -def reverse(name_path:str) -> str: - pass \ No newline at end of file diff --git a/nimba/http/utils.py b/nimba/http/utils.py index 6cdd0fa..eb06e85 100644 --- a/nimba/http/utils.py +++ b/nimba/http/utils.py @@ -4,6 +4,7 @@ import re import http.client from wsgiref.headers import Headers import pathlib +import urllib.parse import traceback import mimetypes @@ -29,10 +30,44 @@ from nimba.http.errors import ( error_404, error_500 ) +from nimba.core.exceptions import ( + NoReverseFound +) ROUTES = {} +REVERSE_ROUTE_INFO = {} PROJECT_MASK = 'PROJECT_MASK_PATH' +def path_reverse(name_path:str, args=None, kwargs=None) -> str: + if not isinstance(name_path, str) or not re.match(r"^[^\d\W][\w-]*\Z", name_path): + raise ValueError("Name path must but a valid identifier name.") + args = args or {} + kwargs = kwargs or {} + if args and kwargs: + raise ValueError(("Don't use *args and **kwargs." + "*args is for get and **kwargs for post method.")) + path = REVERSE_ROUTE_INFO.get(name_path) + if not path: + raise NoReverseFound(f"Reverse for {name_path} not found.") + + if args: + path = path +'?'+ urllib.parse.urlencode(args) + else: + regex = r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>' + url = re.compile(regex, 0) + helper_path = path + for match in url.finditer(path): + value = kwargs.get(match['parameter']) + if not value: + raise NoReverseFound((f"Reverse for {name_path} not found. " + "Keyword arguments '%s' not found." 
% match['parameter'])) + path = re.sub( + helper_path[match.start():match.end()], + str(value), + path + ) + return path + def load_static(value): return os.path.join('/staticfiles/', value) @@ -58,6 +93,7 @@ def render(template, contexts=None, status=200, charset='utf-8', content_type='t ) #load env jinja2 contexts['load_static'] = load_static + contexts['path_reverse'] = path_reverse with open(path, 'r') as content_file: content = content_file.read() html_render = env.from_string(content) @@ -75,7 +111,7 @@ def render(template, contexts=None, status=200, charset='utf-8', content_type='t return response -def router(path, methods=['GET']): +def router(path, methods=['GET'], name=None): """ Routing app """ @@ -87,6 +123,9 @@ def router(path, methods=['GET']): #format url value url new_path, converters = resolve_pattern(path, callback) ROUTES[new_path] = (callback, converters, path, methods) + + # if: pass + REVERSE_ROUTE_INFO[name or callback.__name__] = path def application(environ, start_response): request = Request(environ) #authorized diff --git a/setup.cfg b/setup.cfg index be7f926..6debe5b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = nimba -version = 0.0.7 +version = 0.0.8 description = Nimba is a modern, fast coding, web framework with Python. long_description = file: README.rst keywords = python, python3, framework, nimba, nimba-solution, web
hadpro24/nimba-framework
4ddf24a8c9605eda9cca04c3a7e541d2a3c50355
diff --git a/tests/test_router.py b/tests/test_router.py index 54baf49..d795bee 100644 --- a/tests/test_router.py +++ b/tests/test_router.py @@ -9,8 +9,9 @@ import pytest import shutil from unittest.mock import patch -from nimba.http import router, render +from nimba.http import router, render, path_reverse from nimba.core.server import Application +from nimba.core.exceptions import NoReverseFound from nimba.test.client import TestCase @@ -22,14 +23,19 @@ def about(request): return 'yes' return TEST -@router('/articles') +@router('/articles', name='articles') def articles(request): return TEST -@router('/article/<int:id>') +@router('/article/<int:id>', name='article') def article(request, id): return str(id) +@router('/info', name='info') +def info(request): + name = request.GET.get('name', '') + return name + @router('/me') def me(request): return render('awesome_app/me.html') @@ -79,6 +85,53 @@ class TestRouterRender(TestCase): self.assertEqual(200, response['status_code']) self.assertIn("hello, world", response['text']) + def test_path_reverse_with_function_name(self): + url = path_reverse('about') + response = self.get(url) + self.assertEqual(200, response['status_code']) + self.assertEqual(TEST, response['text']) + + def test_path_reverse_with_name(self): + url = path_reverse('articles') + response = self.get(url) + self.assertEqual(200, response['status_code']) + self.assertEqual(TEST, response['text']) + + def test_path_reverse_with_name_and_kwargs(self): + #error type name path + with self.assertRaises(ValueError) as error: + path_reverse(57885) + self.assertEqual(str(error.exception), "Name path must but a valid identifier name.") + #bad name give + invalid_path = 'invalid-article' + with self.assertRaises(NoReverseFound) as error: + path_reverse(invalid_path) + self.assertEqual(str(error.exception), f"Reverse for {invalid_path} not found.") + #give kwargs and args + with self.assertRaises(ValueError) as error: + path_reverse('article', kwargs={'id': 5}, args={'name': 'test'}) + self.assertEqual(str(error.exception), ("Don't use *args and **kwargs." + "*args is for get and **kwargs for post method.")) + #invalid parmas name + invalid_params = 'id_wrong' + with self.assertRaises(NoReverseFound) as error: + path_reverse('article', kwargs={invalid_params: 5}) + self.assertEqual(str(error.exception), ("Reverse for article not found. " + "Keyword arguments 'id' not found.")) + #valid + _id = 5 + url = path_reverse('article', kwargs={'id': _id}) + response = self.get(url) + self.assertEqual(200, response['status_code']) + self.assertEqual(str(_id), response['text']) + + def test_path_reverse_with_args(self): + name = 'Harouna Diallo' + url = path_reverse('info', args={'name': name}) + response = self.get(url) + self.assertEqual(200, response['status_code']) + self.assertEqual(name, response['text']) + def tearDown(self): try: shutil.rmtree('tests/templates')
[DM] named_url_path - by default, use the function name - allow setting it explicitly with the `name` parameter
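A sketch of the resulting API, based on the patch and test diff above:

```python
from nimba.http import router, path_reverse

@router('/article/<int:id>', name='article')
def article(request, id):
    return str(id)

@router('/info')  # no name given: falls back to the function name
def info(request):
    return request.GET.get('name', '')

path_reverse('article', kwargs={'id': 5})       # -> '/article/5'
path_reverse('info', args={'name': 'Harouna'})  # -> '/info?name=Harouna'
```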
0.0
4ddf24a8c9605eda9cca04c3a7e541d2a3c50355
[ "tests/test_router.py::TestRouterRender::test_path_reverse_with_args", "tests/test_router.py::TestRouterRender::test_path_reverse_with_function_name", "tests/test_router.py::TestRouterRender::test_path_reverse_with_name", "tests/test_router.py::TestRouterRender::test_path_reverse_with_name_and_kwargs", "tests/test_router.py::TestRouterRender::test_route_404", "tests/test_router.py::TestRouterRender::test_route_about", "tests/test_router.py::TestRouterRender::test_route_about_with_query", "tests/test_router.py::TestRouterRender::test_route_article_with_id", "tests/test_router.py::TestRouterRender::test_route_with_template" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-07-22 06:17:39+00:00
mit
2,701
haginara__ssh_config-25
diff --git a/ssh_config/client.py b/ssh_config/client.py index 5084e87..0032a17 100644 --- a/ssh_config/client.py +++ b/ssh_config/client.py @@ -114,9 +114,10 @@ class Host: self.set_name(name) self.__attrs = {} attrs = {key.upper(): value for key, value in attrs.items()} - for attr, attr_type in Keywords: - if attrs.get(attr.upper()): - self.__attrs[attr] = attr_type(attrs.get(attr.upper())) + for keyword in Keywords: + if attrs.get(keyword.key.upper()): + self.__attrs[keyword.key] = keyword.type_converter( + attrs.get(keyword.key.upper())) def set_name(self, name): """Set Host name @@ -146,7 +147,7 @@ class Host: """Get attributes Args: exclude (List or None): Attributes to exclude - include (List or None): Atrributes to include + include (List or None): Attributes to include """ if exclude and include: raise Exception("exclude and include cannot be together") @@ -158,6 +159,14 @@ class Host: return {key: self.__attrs[key] for key in self.__attrs if key in include} return self.__attrs + def persist_attributes(self): + converted_attributes = {} + for keyword in Keywords: + if keyword.key in self.attributes(): + converted_attributes[keyword.key] = keyword.persist_converter( + self.get(keyword.key)) + return converted_attributes + @property def name(self): """Return name""" @@ -329,7 +338,7 @@ class SSHConfig: def write(self, filename=None): """Write the current ssh_config to self.config_path or given filename - It chagnes the self.config_path, if the filename is given. + It changes the self.config_path, if the filename is given. Args: filename (str): target filename to be written. """ @@ -339,8 +348,8 @@ class SSHConfig: with open(self.config_path, "w") as f: for host in self.hosts: f.write(f"Host {host.name}\n") - for attr in host.attributes(): - f.write(f"{' '*4}{attr} {host.get(attr)}\n") + for attr, value in host.persist_attributes().items(): + f.write(f"{' '*4}{attr} {value}\n") def asdict(self): """Return dict from list of hosts diff --git a/ssh_config/keywords.py b/ssh_config/keywords.py index 3d0d07e..6a2414f 100644 --- a/ssh_config/keywords.py +++ b/ssh_config/keywords.py @@ -1,4 +1,3 @@ - def yes_or_no(value: str) -> bool: """Convert 'yes' or 'no' to True or False Args: @@ -12,111 +11,129 @@ def yes_or_no(value: str) -> bool: raise TypeError("Yes or No is required") convert = { "yes": True, - "no": False, - True: "yes", - False: "no", + "no": False } - return convert[value] + return convert[value.lower()] + + +def yes_or_no_str(value: bool) -> str: + """Convert True or False to 'yes' or 'no' + Args: + value (bool): True/False + Returns: + str: 'yes' if value is True, 'no' if value is False/None + """ + if value is None: + return "no" + return "yes" if value else "no" + + +class Keyword: + def __init__(self, key: str, type_converter: type, + persist_converter: type = None) -> None: + self.key = key + self.type_converter = type_converter + self.persist_converter = persist_converter if persist_converter else type_converter Keywords = [ - ("HostName", str), - ("User", str), - ("Port", int), - ("IdentityFile", str), - ("AddressFamily", str), # any, inet, inet6 - ("BatchMode", str), - ("BindAddress", str), - ("ChallengeResponseAuthentication", str), # yes, no - ("CheckHostIP", str), # yes, no - ("Cipher", str), - ("Ciphers", str), - ("ClearAllForwardings", str), # yes, no - ("Compression", str), # yes, no - ("CompressionLevel", int), # 1 to 9 - ("ConnectionAttempts", int), # default: 1 - ("ConnectTimeout", int), - ("ControlMaster", str), # yes, no - ("ControlPath", str), - 
("DynamicForward", str), # [bind_address:]port, [bind_adderss/]port - ("EnableSSHKeysign", str), # yes, no - ("EscapeChar", str), # default: '~' - ("ExitOnForwardFailure", str), # yes, no - ("ForwardAgent", str), # yes, no - ("ForwardX11", str), # yes, no - ("ForwardX11Trusted", str), # yes, no - ("GatewayPorts", str), # yes, no - ("GlobalKnownHostsFile", str), # yes, no - ("GSSAPIAuthentication", str), # yes, no - ("LocalCommand", str), - ("LocalForward", str), - ("LogLevel", str), - ("ProxyCommand", str), - ("ProxyJump", str), - ("Match", str), - ("AddKeysToAgent", str), - ("BindInterface", str), - ("CanonicalizeHostname", str), # yes, no - ("CanonicalizeMaxDots", int), - ("CanonicalDomains", str), - ("CanonicalizePermittedCNAMEs", str), - ("CanonicalizeFallbackLocal", str), - ("IdentityAgent", str), - ("PreferredAuthentications", str), - ("ServerAliveInterval", int), - ("ServerAliveCountMax", int), - ("UsePrivilegedPort", str), # yes, no - ("TCPKeepAlive", str), # yes, no - ("Include", str), - ("IPQoS", str), - ("GlobalKnownHostsFile", str), - ("UserKnownHostsFile", str), - ("GSSAPIDelegateCredentials", str), - ("PKCS11Provider", str), - ("XAuthLocation", str), - ("PasswordAuthentication", yes_or_no), # default: yes - ("KbdInteractiveAuthentication", str), - ("KbdInteractiveDevices", str), - ("PubkeyAuthentication", str), - ("HostbasedAuthentication", str), - ("IdentitiesOnly", yes_or_no), # default: no - ("CertificateFile", str), - ("HostKeyAlias", str), - ("MACs", str), - ("RemoteForward", str), - ("PermitRemoteOpen", str), - ("StrictHostKeyChecking", yes_or_no), - ("NumberOfPasswordPrompts", str), - ("SyslogFacility", str), - ("LogVerbose", str), - ("HostKeyAlgorithms", str), - ("CASignatureAlgorithms", str), - ("VerifyHostKeyDNS", str), - ("NoHostAuthenticationForLocalhost", str), - ("RekeyLimit", str), - ("SendEnv", str), - ("SetEnv", str), - ("ControlPersist", str), - ("HashKnownHosts", str), - ("Tunnel", str), - ("TunnelDevice", str), - ("PermitLocalCommand", str), - ("RemoteCommand", str), - ("VisualHostKey", str), - ("KexAlgorithms", str), - ("RequestTTY", str), - ("SessionType", str), - ("StdinNull", str), - ("ForkAfterAuthentication", str), - ("ProxyUseFdpass", str), - ("StreamLocalBindMask", str), - ("StreamLocalBindUnlink", str), - ("RevokedHostKeys", str), - ("FingerprintHash", str), # md5 or sha256 - ("UpdateHostKeys", str), - ("HostbasedAcceptedAlgorithms", str), - ("PubkeyAcceptedAlgorithms", str), - ("IgnoreUnknown", str), - ("SecurityKeyProvider", str), - ("KnownHostsCommand", str), + Keyword("HostName", str), + Keyword("User", str), + Keyword("Port", int), + Keyword("IdentityFile", str), + Keyword("AddressFamily", str), # any, inet, inet6 + Keyword("BatchMode", str), + Keyword("BindAddress", str), + Keyword("ChallengeResponseAuthentication", str), # yes, no + Keyword("CheckHostIP", str), # yes, no + Keyword("Cipher", str), + Keyword("Ciphers", str), + Keyword("ClearAllForwardings", str), # yes, no + Keyword("Compression", str), # yes, no + Keyword("CompressionLevel", int), # 1 to 9 + Keyword("ConnectionAttempts", int), # default: 1 + Keyword("ConnectTimeout", int), + Keyword("ControlMaster", str), # yes, no + Keyword("ControlPath", str), + Keyword("DynamicForward", str), # [bind_address:]port, [bind_adderss/]port + Keyword("EnableSSHKeysign", str), # yes, no + Keyword("EscapeChar", str), # default: '~' + Keyword("ExitOnForwardFailure", str), # yes, no + Keyword("ForwardAgent", str), # yes, no + Keyword("ForwardX11", str), # yes, no + Keyword("ForwardX11Trusted", 
str), # yes, no + Keyword("GatewayPorts", str), # yes, no + Keyword("GlobalKnownHostsFile", str), # yes, no + Keyword("GSSAPIAuthentication", str), # yes, no + Keyword("LocalCommand", str), + Keyword("LocalForward", str), + Keyword("LogLevel", str), + Keyword("ProxyCommand", str), + Keyword("ProxyJump", str), + Keyword("Match", str), + Keyword("AddKeysToAgent", str), + Keyword("BindInterface", str), + Keyword("CanonicalizeHostname", str), # yes, no + Keyword("CanonicalizeMaxDots", int), + Keyword("CanonicalDomains", str), + Keyword("CanonicalizePermittedCNAMEs", str), + Keyword("CanonicalizeFallbackLocal", str), + Keyword("IdentityAgent", str), + Keyword("PreferredAuthentications", str), + Keyword("ServerAliveInterval", int), + Keyword("ServerAliveCountMax", int), + Keyword("UsePrivilegedPort", str), # yes, no + Keyword("TCPKeepAlive", str), # yes, no + Keyword("Include", str), + Keyword("IPQoS", str), + Keyword("GlobalKnownHostsFile", str), + Keyword("UserKnownHostsFile", str), + Keyword("GSSAPIDelegateCredentials", str), + Keyword("PKCS11Provider", str), + Keyword("XAuthLocation", str), + Keyword("PasswordAuthentication", yes_or_no, yes_or_no_str), # default: yes + Keyword("KbdInteractiveAuthentication", str), + Keyword("KbdInteractiveDevices", str), + Keyword("PubkeyAuthentication", str), + Keyword("HostbasedAuthentication", str), + Keyword("IdentitiesOnly", yes_or_no, yes_or_no_str), # default: no + Keyword("CertificateFile", str), + Keyword("HostKeyAlias", str), + Keyword("MACs", str), + Keyword("RemoteForward", str), + Keyword("PermitRemoteOpen", str), + Keyword("StrictHostKeyChecking", yes_or_no, yes_or_no_str), + Keyword("NumberOfPasswordPrompts", str), + Keyword("SyslogFacility", str), + Keyword("LogVerbose", str), + Keyword("HostKeyAlgorithms", str), + Keyword("CASignatureAlgorithms", str), + Keyword("VerifyHostKeyDNS", str), + Keyword("NoHostAuthenticationForLocalhost", str), + Keyword("RekeyLimit", str), + Keyword("SendEnv", str), + Keyword("SetEnv", str), + Keyword("ControlPersist", str), + Keyword("HashKnownHosts", str), + Keyword("Tunnel", str), + Keyword("TunnelDevice", str), + Keyword("PermitLocalCommand", str), + Keyword("RemoteCommand", str), + Keyword("VisualHostKey", str), + Keyword("KexAlgorithms", str), + Keyword("RequestTTY", str), + Keyword("SessionType", str), + Keyword("StdinNull", str), + Keyword("ForkAfterAuthentication", str), + Keyword("ProxyUseFdpass", str), + Keyword("StreamLocalBindMask", str), + Keyword("StreamLocalBindUnlink", str), + Keyword("RevokedHostKeys", str), + Keyword("FingerprintHash", str), # md5 or sha256 + Keyword("UpdateHostKeys", str), + Keyword("HostbasedAcceptedAlgorithms", str), + Keyword("PubkeyAcceptedAlgorithms", str), + Keyword("IgnoreUnknown", str), + Keyword("SecurityKeyProvider", str), + Keyword("KnownHostsCommand", str), ]
haginara/ssh_config
be893bc61387f75d56467e0cb50cd38aa7478d6f
diff --git a/tests/test_client.py b/tests/test_client.py index ba579d1..aa0e09f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -16,11 +16,12 @@ from ssh_config.errors import EmptySSHConfig, WrongSSHConfig, HostExistsError logging.basicConfig(level=logging.INFO) sample = os.path.join(os.path.dirname(__file__), "sample") -new_host = Host("server2", {"ServerAliveInterval": 200, "HostName": "203.0.113.77"}) +new_host = Host("server2", {"ServerAliveInterval": 200, "HostName": "203.0.113.77", "StrictHostKeyChecking": "no"}) new_data = """Host server2 HostName 203.0.113.77 ServerAliveInterval 200 + StrictHostKeyChecking no """
Fix "KeyError: 'No'" for "No" values It is better to change the line at https://github.com/haginara/ssh_config/blob/be893bc61387f75d56467e0cb50cd38aa7478d6f/ssh_config/keywords.py#L19 to the following. Otherwise, "No" values would run into `KeyError: 'No'` errors. ```python convert[value.lower()] ``` I suggest the code to look like ```python def yes_or_no(value: str) -> bool: """Convert 'yes' or 'no' to True or False Args: value (str): The string containing 'yes' or 'no' Returns: bool: True if value is 'yes', False if value is 'no' """ if value is None: return if value.lower() not in ('yes', 'no', 'true', 'false'): raise TypeError(f"Yes or No is required: {value}") convert = { "yes": True, "no": False, "true": True, "false": False, True: "yes", False: "no", } return convert[value.lower()] ``` Otherwise, I would run into errors when reading back the updated config file.
0.0
be893bc61387f75d56467e0cb50cd38aa7478d6f
[ "tests/test_client.py::TestSSHConfig::test_write" ]
[ "tests/test_client.py::TestSSHConfig::test_asdict", "tests/test_client.py::TestSSHConfig::test_get_host", "tests/test_client.py::TestSSHConfig::test_host_command", "tests/test_client.py::TestSSHConfig::test_load", "tests/test_client.py::TestSSHConfig::test_other", "tests/test_client.py::TestSSHConfig::test_remove", "tests/test_client.py::TestSSHConfig::test_set", "tests/test_client.py::TestSSHConfig::test_set_host", "tests/test_client.py::TestSSHConfig::test_update" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-08-22 10:28:56+00:00
mit
2,702
hamcrest__PyHamcrest-129
diff --git a/src/hamcrest/core/core/raises.py b/src/hamcrest/core/core/raises.py index efe9e6c..67ec46c 100644 --- a/src/hamcrest/core/core/raises.py +++ b/src/hamcrest/core/core/raises.py @@ -13,8 +13,11 @@ __license__ = "BSD, see License.txt" class Raises(BaseMatcher[Callable[..., Any]]): - def __init__(self, expected: Exception, pattern: Optional[str] = None) -> None: + def __init__( + self, expected: Exception, pattern: Optional[str] = None, matching: Optional[Matcher] = None + ) -> None: self.pattern = pattern + self.matcher = matching self.expected = expected self.actual = None # type: Optional[BaseException] self.function = None # type: Optional[Callable[..., Any]] @@ -35,7 +38,11 @@ class Raises(BaseMatcher[Callable[..., Any]]): if isinstance(self.actual, cast(type, self.expected)): if self.pattern is not None: - return re.search(self.pattern, str(self.actual)) is not None + if re.search(self.pattern, str(self.actual)) is None: + return False + if self.matcher is not None: + if not self.matcher.matches(self.actual): + return False return True return False @@ -55,12 +62,17 @@ class Raises(BaseMatcher[Callable[..., Any]]): if self.actual is None: description.append_text("No exception raised.") - elif isinstance(self.actual, cast(type, self.expected)) and self.pattern is not None: - description.append_text( - 'Correct assertion type raised, but the expected pattern ("%s") not found.' - % self.pattern - ) - description.append_text('\n message was: "%s"' % str(self.actual)) + elif isinstance(self.actual, cast(type, self.expected)): + if self.pattern is not None or self.matcher is not None: + description.append_text("Correct assertion type raised, but ") + if self.pattern is not None: + description.append_text('the expected pattern ("%s") ' % self.pattern) + if self.pattern is not None and self.matcher is not None: + description.append_text("and ") + if self.matcher is not None: + description.append_description_of(self.matcher) + description.append_text(" ") + description.append_text('not found. Exception message was: "%s"' % str(self.actual)) else: description.append_text( "%r of type %s was raised instead" % (self.actual, type(self.actual)) @@ -73,11 +85,12 @@ class Raises(BaseMatcher[Callable[..., Any]]): ) -def raises(exception: Exception, pattern=None) -> Matcher[Callable[..., Any]]: +def raises(exception: Exception, pattern=None, matching=None) -> Matcher[Callable[..., Any]]: """Matches if the called function raised the expected exception. :param exception: The class of the expected exception :param pattern: Optional regular expression to match exception message. + :param matching: Optional Hamcrest matchers to apply to the exception. Expects the actual to be wrapped by using :py:func:`~hamcrest.core.core.raises.calling`, or a callable taking no arguments. @@ -88,8 +101,12 @@ def raises(exception: Exception, pattern=None) -> Matcher[Callable[..., Any]]: assert_that(calling(int).with_args('q'), raises(TypeError)) assert_that(calling(parse, broken_input), raises(ValueError)) + assert_that( + calling(valid_user, bad_json), + raises(HTTPError, matching=has_properties(status_code=500) + ) """ - return Raises(exception, pattern) + return Raises(exception, pattern, matching) class DeferredCallable(object):
hamcrest/PyHamcrest
eb746246a13c4abd6141c6ab45724ec1c4a2efbe
diff --git a/tests/hamcrest_unit_test/core/raises_test.py b/tests/hamcrest_unit_test/core/raises_test.py index e4fee61..519b7a2 100644 --- a/tests/hamcrest_unit_test/core/raises_test.py +++ b/tests/hamcrest_unit_test/core/raises_test.py @@ -2,7 +2,7 @@ import sys import unittest import pytest -from hamcrest import not_ +from hamcrest import has_properties, not_ from hamcrest.core.core.raises import calling, raises from hamcrest_unit_test.matcher_test import MatcherTest, assert_mismatch_description @@ -28,6 +28,13 @@ def raise_baseException(*args, **kwargs): raise SystemExit(str(args) + str(kwargs)) +def raise_exception_with_properties(**kwargs): + err = AssertionError("boom") + for k, v in kwargs.items(): + setattr(err, k, v) + raise err + + class RaisesTest(MatcherTest): def testMatchesIfFunctionRaisesTheExactExceptionExpected(self): self.assert_matches("Right exception", raises(AssertionError), calling(raise_exception)) @@ -72,6 +79,11 @@ class RaisesTest(MatcherTest): self.assert_does_not_match( "Bad regex", raises(AssertionError, "Phrase not found"), calling(raise_exception) ) + self.assert_mismatch_description( + '''Correct assertion type raised, but the expected pattern ("Phrase not found") not found. Exception message was: "(){}"''', + raises(AssertionError, "Phrase not found"), + calling(raise_exception), + ) def testMatchesRegularExpressionToStringifiedException(self): self.assert_matches( @@ -86,6 +98,37 @@ class RaisesTest(MatcherTest): calling(raise_exception).with_args(3, 1, 4), ) + def testMachesIfRaisedExceptionMatchesAdditionalMatchers(self): + self.assert_matches( + "Properties", + raises(AssertionError, matching=has_properties(prip="prop")), + calling(raise_exception_with_properties).with_args(prip="prop"), + ) + + def testDoesNotMatchIfAdditionalMatchersDoesNotMatch(self): + self.assert_does_not_match( + "Bad properties", + raises(AssertionError, matching=has_properties(prop="prip")), + calling(raise_exception_with_properties).with_args(prip="prop"), + ) + self.assert_mismatch_description( + '''Correct assertion type raised, but an object with a property 'prop' matching 'prip' not found. Exception message was: "boom"''', + raises(AssertionError, matching=has_properties(prop="prip")), + calling(raise_exception_with_properties).with_args(prip="prop"), + ) + + def testDoesNotMatchIfNeitherPatternOrMatcherMatch(self): + self.assert_does_not_match( + "Bad pattern and properties", + raises(AssertionError, pattern="asdf", matching=has_properties(prop="prip")), + calling(raise_exception_with_properties).with_args(prip="prop"), + ) + self.assert_mismatch_description( + '''Correct assertion type raised, but the expected pattern ("asdf") and an object with a property 'prop' matching 'prip' not found. Exception message was: "boom"''', + raises(AssertionError, pattern="asdf", matching=has_properties(prop="prip")), + calling(raise_exception_with_properties).with_args(prip="prop"), + ) + def testDescribeMismatchWillCallItemIfNotTheOriginalMatch(self): function = Callable() matcher = raises(AssertionError)
How to match exception properties? I have a webapp with helper methods that raises `bottle.HTTPError` and I want to ensure that a raised exception has a `status_code` property, but it seems the calling/raises pair cannot directly assert this? E.g. something like ``` assert_that( calling(helper).with_args(broken_input), raises(HTTPError, has_properties(status_code=500)) ) ``` Am I missing something obvious or is there no way to match structured details of an exception short of manually catching the exception first? If this is indeed the case, would you be amenable to a PR implementing `raises(HTTPError, matching=has_properties(status_code=500))` or perhaps `raises(HTTPError).matching(has_properties(status_code=500))`?
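With the `matching=` keyword that the patch above adds to `raises`, the use case from this issue can be written as below. This is a self-contained sketch: `HTTPError` and `helper` are stand-ins for the issue's bottle types, not part of PyHamcrest.

```python
from hamcrest import assert_that, calling, raises, has_properties

class HTTPError(Exception):
    def __init__(self, status_code):
        super().__init__("HTTP error")
        self.status_code = status_code

def helper(payload):
    # Stand-in for the issue's webapp helper: always fails with a 500.
    raise HTTPError(status_code=500)

# matching= applies a Hamcrest matcher to the raised exception object
# itself, not just to its message, so structured details are assertable.
assert_that(
    calling(helper).with_args("broken input"),
    raises(HTTPError, matching=has_properties(status_code=500)),
)
```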
0.0
eb746246a13c4abd6141c6ab45724ec1c4a2efbe
[ "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDoesNotMatchExceptionIfRegularExpressionDoesNotMatch", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDoesNotMatchIfAdditionalMatchersDoesNotMatch", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDoesNotMatchIfNeitherPatternOrMatcherMatch", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testMachesIfRaisedExceptionMatchesAdditionalMatchers" ]
[ "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDescribeMismatchWillCallItemIfNotTheOriginalMatch", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDoesNotMatchIfFunctionDoesNotRaiseException", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDoesNotMatchIfTheWrongExceptionTypeIsRaisedPy37", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testDoesNotMatchTypeErrorIfActualIsNotCallable", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testMatchesIfFunctionRaisesASubclassOfTheExpectedBaseException", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testMatchesIfFunctionRaisesASubclassOfTheExpectedException", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testMatchesIfFunctionRaisesTheExactExceptionExpected", "tests/hamcrest_unit_test/core/raises_test.py::RaisesTest::testMatchesRegularExpressionToStringifiedException", "tests/hamcrest_unit_test/core/raises_test.py::test_gives_correct_message_when_wrapped_with_is_not[but", "tests/hamcrest_unit_test/core/raises_test.py::CallingTest::testCallingDoesNotImmediatelyExecuteFunction", "tests/hamcrest_unit_test/core/raises_test.py::CallingTest::testCallingObjectCallsProvidedFunction", "tests/hamcrest_unit_test/core/raises_test.py::CallingTest::testCallingWithFunctionReturnsObject", "tests/hamcrest_unit_test/core/raises_test.py::CallingTest::testCallingWithFunctionSetsArgumentList" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-01-06 15:42:02+00:00
bsd-3-clause
2,703
haney__python-ansel-10
diff --git a/HISTORY.rst b/HISTORY.rst index 2b2cd2e..2bc7672 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -6,6 +6,9 @@ History ----------- * Improve encoding (~50%) and decoding (~25%) performance. +* Fix handling of combining characters that occur at the end of a file or before + a control character. In those cases an implicit space (`U+0020`) is + introduced. 0.1.1 (2018-12-31) diff --git a/ansel/codec.py b/ansel/codec.py index 1b8aa24..5bcad57 100644 --- a/ansel/codec.py +++ b/ansel/codec.py @@ -8,6 +8,7 @@ class Codec(codecs.Codec): encode_char_map = {} encode_modifier_map = {} decode_char_map = {} + decode_control_map = {} decode_modifier_map = {} def encode(self, input, errors="strict"): @@ -21,5 +22,6 @@ class Codec(codecs.Codec): decoder = IncrementalDecoder(errors) decoder.name = self.name decoder.decode_char_map = self.decode_char_map + decoder.decode_control_map = self.decode_control_map decoder.decode_modifier_map = self.decode_modifier_map return decoder.decode(input, final=True), len(input) diff --git a/ansel/encodings/ansel.py b/ansel/encodings/ansel.py index a3a59d4..f93e212 100644 --- a/ansel/encodings/ansel.py +++ b/ansel/encodings/ansel.py @@ -2,7 +2,7 @@ import codecs from .. import codec, incremental -ANSEL_TO_UNICODE = { +ANSEL_TO_UNICODE_CONTROL = { 0x00: "\u0000", # NULL CHARACTER 0x01: "\u0001", # START OF HEADING 0x02: "\u0002", # START OF TEXT @@ -35,6 +35,9 @@ ANSEL_TO_UNICODE = { 0x1D: "\u001D", # GROUP SEPARATOR 0x1E: "\u001E", # RECORD SEPARATOR 0x1F: "\u001F", # UNIT SEPARATOR +} + +ANSEL_TO_UNICODE = { 0x20: "\u0020", # SPACE 0x21: "\u0021", # EXCLAMATION MARK 0x22: "\u0022", # QUOTATION MARK @@ -823,12 +826,14 @@ class Codec(codec.Codec): encode_char_map = UNICODE_TO_ANSEL encode_modifier_map = UNICODE_TO_ANSEL_MODIFIERS decode_char_map = ANSEL_TO_UNICODE + decode_control_map = ANSEL_TO_UNICODE_CONTROL decode_modifier_map = ANSEL_TO_UNICODE_MODIFIERS class IncrementalDecoder(incremental.IncrementalDecoder): name = "ansel" decode_char_map = ANSEL_TO_UNICODE + decode_control_map = ANSEL_TO_UNICODE_CONTROL decode_modifier_map = ANSEL_TO_UNICODE_MODIFIERS diff --git a/ansel/encodings/gedcom.py b/ansel/encodings/gedcom.py index 1efe93c..b2c779c 100644 --- a/ansel/encodings/gedcom.py +++ b/ansel/encodings/gedcom.py @@ -3,6 +3,8 @@ import codecs from . import ansel from .. 
import codec, incremental +GEDCOM_TO_UNICODE_CONTROL = ansel.ANSEL_TO_UNICODE_CONTROL + GEDCOM_TO_UNICODE = ansel.ANSEL_TO_UNICODE.copy() GEDCOM_TO_UNICODE.update( { @@ -40,12 +42,14 @@ class Codec(codec.Codec): encode_char_map = UNICODE_TO_GEDCOM encode_modifier_map = UNICODE_TO_GEDCOM_MODIFIERS decode_char_map = GEDCOM_TO_UNICODE + decode_control_map = GEDCOM_TO_UNICODE_CONTROL decode_modifier_map = GEDCOM_TO_UNICODE_MODIFIERS class IncrementalDecoder(incremental.IncrementalDecoder): name = "gedcom" decode_char_map = GEDCOM_TO_UNICODE + decode_control_map = GEDCOM_TO_UNICODE_CONTROL decode_modifier_map = GEDCOM_TO_UNICODE_MODIFIERS diff --git a/ansel/incremental.py b/ansel/incremental.py index ec5aea0..05a38dc 100644 --- a/ansel/incremental.py +++ b/ansel/incremental.py @@ -4,6 +4,7 @@ import codecs class IncrementalDecoder(codecs.IncrementalDecoder): name = None decode_char_map = {} + decode_control_map = {} decode_modifier_map = {} def __init__(self, errors="strict"): @@ -32,6 +33,7 @@ class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): decode_char_map = self.decode_char_map + decode_control_map = self.decode_control_map decode_modifier_map = self.decode_modifier_map decoded_modifiers = self.decoded_modifiers error_handler = codecs.lookup_error(self.errors) @@ -46,24 +48,33 @@ class IncrementalDecoder(codecs.IncrementalDecoder): decoded_modifiers = [] except KeyError: try: - decoded_item = decode_modifier_map[item] - decoded_modifiers.insert(0, decoded_item) - except KeyError: - decoded_item, _ = error_handler( - UnicodeDecodeError( - self.name, - input, - index, - index + 1, - "character maps to <undefined>", - ) - ) - decoded_chars.append(decoded_item) + decoded_item = decode_control_map[item] if decoded_modifiers: + decoded_chars.append(" ") decoded_chars += decoded_modifiers decoded_modifiers = [] + decoded_chars.append(decoded_item) + except KeyError: + try: + decoded_item = decode_modifier_map[item] + decoded_modifiers.insert(0, decoded_item) + except KeyError: + decoded_item, _ = error_handler( + UnicodeDecodeError( + self.name, + input, + index, + index + 1, + "character maps to <undefined>", + ) + ) + decoded_chars.append(decoded_item) + if decoded_modifiers: + decoded_chars += decoded_modifiers + decoded_modifiers = [] if final and decoded_modifiers: + decoded_chars.append(" ") decoded_chars += decoded_modifiers decoded_modifiers = []
haney/python-ansel
490d325e2be91ece1a0433a64f31898777412f5e
diff --git a/tests/test_codec.py b/tests/test_codec.py index e313bc6..c0de0e4 100644 --- a/tests/test_codec.py +++ b/tests/test_codec.py @@ -11,6 +11,7 @@ class Codec(ansel.codec.Codec): decode_char_map = {ord(b"a"): "1", ord(b"b"): "23"} encode_modifier_map = {"n": b"5", "o": b"67"} decode_modifier_map = {ord(b"n"): "5", ord(b"o"): "67"} + decode_control_map = {ord(b"\n"): "8", ord(b"\t"): "9A"} @pytest.mark.parametrize( @@ -93,9 +94,9 @@ def test_decode_valid(input, expected, expected_len): @pytest.mark.parametrize( "input, expected, expected_len", [ - (b"n", "5", 1), - (b"an", "15", 2), - (b"bn", "235", 2), + (b"n", " 5", 1), + (b"an", "1 5", 2), + (b"bn", "23 5", 2), (b"na", "15", 2), (b"naa", "151", 3), (b"noa", "1675", 3), @@ -109,6 +110,36 @@ def test_decode_valid_with_modifiers(input, expected, expected_len): assert expected_len == output_len [email protected]( + "input, expected, expected_len", + [(b"\n", "8", 1), (b"\t", "9A", 1), (b"\n\t", "89A", 2)], +) +def test_decode_valid_with_control(input, expected, expected_len): + codec = Codec() + output, output_len = codec.decode(input) + assert expected == output + assert expected_len == output_len + + [email protected]( + "input, expected, expected_len", + [ + (b"n", " 5", 1), + (b"\nn", "8 5", 2), + (b"\tn", "9A 5", 2), + (b"n\n", " 58", 2), + (b"n\n\n", " 588", 3), + (b"no\n", " 6758", 3), + (b"on\t", " 5679A", 3), + ], +) +def test_decode_valid_with_control_and_modifiers(input, expected, expected_len): + codec = Codec() + output, output_len = codec.decode(input) + assert expected == output + assert expected_len == output_len + + @pytest.mark.parametrize( "input, start, end, reason", [ diff --git a/tests/test_incremental.py b/tests/test_incremental.py index ff45362..2129582 100644 --- a/tests/test_incremental.py +++ b/tests/test_incremental.py @@ -11,6 +11,7 @@ class IncrementalDecoder(ansel.incremental.IncrementalDecoder): decode_char_map = {ord(b"a"): "1", ord(b"b"): "23"} encode_modifier_map = {"n": b"5", "o": b"67"} decode_modifier_map = {ord(b"n"): "5", ord(b"o"): "67"} + decode_control_map = {ord(b"\n"): "8", ord(b"\t"): "9A"} class IncrementalEncoder(ansel.incremental.IncrementalEncoder): @@ -201,6 +202,8 @@ def test_decode_valid(input, expected, expected_len): [b"an", b"nb"], [b"a", b"n", b"n", b"b"], [b"n", b"o"], + [b"a", b"n", b"\n"], + [b"b", b"o", b"\t", b"a"], ], ) def test_decode_incremental(partials): @@ -214,9 +217,9 @@ def test_decode_incremental(partials): @pytest.mark.parametrize( "input, expected, expected_len", [ - (b"n", "5", 1), - (b"an", "15", 2), - (b"bn", "235", 2), + (b"n", " 5", 1), + (b"an", "1 5", 2), + (b"bn", "23 5", 2), (b"na", "15", 2), (b"naa", "151", 3), (b"noa", "1675", 3), @@ -230,6 +233,36 @@ def test_decode_valid_with_modifiers(input, expected, expected_len): assert (b"", 0) == decoder.getstate() [email protected]( + "input, expected, expected_len", + [(b"\n", "8", 1), (b"\t", "9A", 1), (b"\n\t", "89A", 2)], +) +def test_decode_valid_with_control(input, expected, expected_len): + decoder = IncrementalDecoder() + output = decoder.decode(input) + assert expected == output + assert (b"", 0) == decoder.getstate() + + [email protected]( + "input, expected, expected_len", + [ + (b"n", " 5", 1), + (b"\nn", "8 5", 2), + (b"\tn", "9A 5", 2), + (b"n\n", " 58", 2), + (b"n\n\n", " 588", 3), + (b"no\n", " 6758", 3), + (b"on\t", " 5679A", 3), + ], +) +def test_decode_valid_with_control_modifiers(input, expected, expected_len): + decoder = IncrementalDecoder() + output = decoder.decode(input, final=True) 
+ assert expected == output + assert (b"", 0) == decoder.getstate() + + @pytest.mark.parametrize( "input, start, end, reason", [
Combining Character at end of line problem. * ANSEL Codecs version: 0.1.1 * Python version: 3.9.5 * Operating System: Mac OS 11.3.1 ### Description I am reading in a GEDCOM 5.5 file, with the ANSEL character set. Some of the lines end with accent characters that become ANSEL combining characters. In particular, I have data that ends in the acute accent character (U+00B4), which is converted to ANSEL 0xE2, which in turn maps to the Unicode combining acute accent U+0301. Because Unicode puts combining characters after the base character while ANSEL puts them before, the codec moves the combining accent to after the end-of-line character, so it ends up on the next line. (Actually, I think the file is using \r\n, as the accent ends up on a line of its own.) It would seem better if, before the codec swapped the characters, it first checked that the character was of the right class for this to make sense, or at least checked for control characters.
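A sketch of the decode behavior after the fix in the patch above. It assumes the package's codec registration hook (`ansel.register()`) and the standard ANSEL table, where 0xE2 is the combining acute accent; the byte strings are illustrative:

```python
import ansel

ansel.register()  # assumption: installs the "ansel"/"gedcom" codecs

# In ANSEL the combining mark precedes its base character, so a mark at
# end of input (or before a control char) has no base to attach to. The
# fix emits an implicit space (U+0020) before the mark instead of
# pushing it past the end-of-line bytes onto the next line.
b"Caf\xe2e".decode("ansel")     # 'Cafe\u0301' - normal case, mark applied to 'e'
b"abc\xe2\r\n".decode("ansel")  # 'abc \u0301\r\n' - mark stays on this line
```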
0.0
490d325e2be91ece1a0433a64f31898777412f5e
[ "tests/test_codec.py::test_decode_valid_with_modifiers[n-", "tests/test_codec.py::test_decode_valid_with_modifiers[an-1", "tests/test_codec.py::test_decode_valid_with_modifiers[bn-23", "tests/test_codec.py::test_decode_valid_with_control[\\n-8-1]", "tests/test_codec.py::test_decode_valid_with_control[\\t-9A-1]", "tests/test_codec.py::test_decode_valid_with_control[\\n\\t-89A-2]", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[n-", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[\\nn-8", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[\\tn-9A", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[n\\n-", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[n\\n\\n-", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[no\\n-", "tests/test_codec.py::test_decode_valid_with_control_and_modifiers[on\\t-", "tests/test_incremental.py::test_decode_incremental[partials7]", "tests/test_incremental.py::test_decode_incremental[partials8]", "tests/test_incremental.py::test_decode_valid_with_modifiers[n-", "tests/test_incremental.py::test_decode_valid_with_modifiers[an-1", "tests/test_incremental.py::test_decode_valid_with_modifiers[bn-23", "tests/test_incremental.py::test_decode_valid_with_control[\\n-8-1]", "tests/test_incremental.py::test_decode_valid_with_control[\\t-9A-1]", "tests/test_incremental.py::test_decode_valid_with_control[\\n\\t-89A-2]", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[n-", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[\\nn-8", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[\\tn-9A", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[n\\n-", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[n\\n\\n-", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[no\\n-", "tests/test_incremental.py::test_decode_valid_with_control_modifiers[on\\t-" ]
[ "tests/test_codec.py::test_encode_valid[--0]", "tests/test_codec.py::test_encode_valid[a-1-1]", "tests/test_codec.py::test_encode_valid[b-23-1]", "tests/test_codec.py::test_encode_valid[ab-123-2]", "tests/test_codec.py::test_encode_valid_with_modifiers[n-5-1]", "tests/test_codec.py::test_encode_valid_with_modifiers[na-51-2]", "tests/test_codec.py::test_encode_valid_with_modifiers[nb-523-2]", "tests/test_codec.py::test_encode_valid_with_modifiers[an-51-2]", "tests/test_codec.py::test_encode_valid_with_modifiers[aan-151-3]", "tests/test_codec.py::test_encode_valid_with_modifiers[ano-6751-3]", "tests/test_codec.py::test_encode_valid_with_modifiers[bon-56723-3]", "tests/test_codec.py::test_encode_invalid[+-0-1-character", "tests/test_codec.py::test_encode_invalid[ab+-2-3-character", "tests/test_codec.py::test_encode_invalid_raising_error_handler[+]", "tests/test_codec.py::test_encode_invalid_with_replacement[+-?-1]", "tests/test_codec.py::test_encode_invalid_with_replacement[a+b-1?23-3]", "tests/test_codec.py::test_encode_invalid_with_replacement[a+n-15?-3]", "tests/test_codec.py::test_decode_valid[--0]", "tests/test_codec.py::test_decode_valid[a-1-1]", "tests/test_codec.py::test_decode_valid[b-23-1]", "tests/test_codec.py::test_decode_valid[ab-123-2]", "tests/test_codec.py::test_decode_valid_with_modifiers[na-15-2]", "tests/test_codec.py::test_decode_valid_with_modifiers[naa-151-3]", "tests/test_codec.py::test_decode_valid_with_modifiers[noa-1675-3]", "tests/test_codec.py::test_decode_valid_with_modifiers[onb-23567-3]", "tests/test_codec.py::test_decode_invalid[+-0-1-character", "tests/test_codec.py::test_decode_invalid[ab+-2-3-character", "tests/test_codec.py::test_decode_invalid_raising_error_handler[+]", "tests/test_codec.py::test_decode_invalid_with_replacement[+-\\ufffd-1]", "tests/test_codec.py::test_decode_invalid_with_replacement[a+b-1\\ufffd23-3]", "tests/test_codec.py::test_decode_invalid_with_replacement[an+-1\\ufffd5-3]", "tests/test_incremental.py::test_encode_getstate[-0]", "tests/test_incremental.py::test_encode_getstate[a-305]", "tests/test_incremental.py::test_encode_getstate[ab-78387]", "tests/test_incremental.py::test_encode_getstate[n-309]", "tests/test_incremental.py::test_encode_getstate[ao-20330289]", "tests/test_incremental.py::test_encode_getstate[annn-5187646769]", "tests/test_incremental.py::test_encode_getstate[annnb-78387]", "tests/test_incremental.py::test_encode_setstate[0-a-1]", "tests/test_incremental.py::test_encode_setstate[305-b-123]", "tests/test_incremental.py::test_encode_setstate[305-nb-5123]", "tests/test_incremental.py::test_encode_setstate[20264241-nb-555123]", "tests/test_incremental.py::test_encode_valid[--0]", "tests/test_incremental.py::test_encode_valid[a-1-1]", "tests/test_incremental.py::test_encode_valid[b-23-1]", "tests/test_incremental.py::test_encode_valid[ab-123-2]", "tests/test_incremental.py::test_encode_incremental[partials0]", "tests/test_incremental.py::test_encode_incremental[partials1]", "tests/test_incremental.py::test_encode_incremental[partials2]", "tests/test_incremental.py::test_encode_incremental[partials3]", "tests/test_incremental.py::test_encode_incremental[partials4]", "tests/test_incremental.py::test_encode_incremental[partials5]", "tests/test_incremental.py::test_encode_incremental[partials6]", "tests/test_incremental.py::test_encode_valid_with_modifiers[n-5-1]", "tests/test_incremental.py::test_encode_valid_with_modifiers[na-51-2]", "tests/test_incremental.py::test_encode_valid_with_modifiers[nb-523-2]", 
"tests/test_incremental.py::test_encode_valid_with_modifiers[an-51-2]", "tests/test_incremental.py::test_encode_valid_with_modifiers[aan-151-3]", "tests/test_incremental.py::test_encode_valid_with_modifiers[ano-6751-3]", "tests/test_incremental.py::test_encode_valid_with_modifiers[bon-56723-3]", "tests/test_incremental.py::test_encode_invalid[+-0-1-character", "tests/test_incremental.py::test_encode_invalid[ab+-2-3-character", "tests/test_incremental.py::test_encode_invalid_raising_error_handler[+]", "tests/test_incremental.py::test_encode_invalid_with_replacement[+-?-1]", "tests/test_incremental.py::test_encode_invalid_with_replacement[a+b-1?23-3]", "tests/test_incremental.py::test_encode_invalid_with_replacement[a+n-15?-3]", "tests/test_incremental.py::test_decode_getstate[-state0]", "tests/test_incremental.py::test_decode_getstate[a-state1]", "tests/test_incremental.py::test_decode_getstate[n-state2]", "tests/test_incremental.py::test_decode_getstate[no-state3]", "tests/test_incremental.py::test_decode_getstate[nob-state4]", "tests/test_incremental.py::test_decode_getstate[ano-state5]", "tests/test_incremental.py::test_decode_setstate[state0--]", "tests/test_incremental.py::test_decode_setstate[state1-a-15]", "tests/test_incremental.py::test_decode_setstate[state2-na-155]", "tests/test_incremental.py::test_decode_setstate[state3-na-1567]", "tests/test_incremental.py::test_decode_valid[--0]", "tests/test_incremental.py::test_decode_valid[a-1-1]", "tests/test_incremental.py::test_decode_valid[b-23-1]", "tests/test_incremental.py::test_decode_valid[ab-123-2]", "tests/test_incremental.py::test_decode_incremental[partials0]", "tests/test_incremental.py::test_decode_incremental[partials1]", "tests/test_incremental.py::test_decode_incremental[partials2]", "tests/test_incremental.py::test_decode_incremental[partials3]", "tests/test_incremental.py::test_decode_incremental[partials4]", "tests/test_incremental.py::test_decode_incremental[partials5]", "tests/test_incremental.py::test_decode_incremental[partials6]", "tests/test_incremental.py::test_decode_valid_with_modifiers[na-15-2]", "tests/test_incremental.py::test_decode_valid_with_modifiers[naa-151-3]", "tests/test_incremental.py::test_decode_valid_with_modifiers[noa-1675-3]", "tests/test_incremental.py::test_decode_valid_with_modifiers[onb-23567-3]", "tests/test_incremental.py::test_decode_invalid[+-0-1-character", "tests/test_incremental.py::test_decode_invalid[ab+-2-3-character", "tests/test_incremental.py::test_decode_invalid_raising_error_handler[+]", "tests/test_incremental.py::test_decode_invalid_with_replacement[+-\\ufffd-1]", "tests/test_incremental.py::test_decode_invalid_with_replacement[a+b-1\\ufffd23-3]", "tests/test_incremental.py::test_decode_invalid_with_replacement[an+-1\\ufffd5-3]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-05-19 00:29:32+00:00
mit
2,704
happyleavesaoc__python-snapcast-55
diff --git a/snapcast/control/group.py b/snapcast/control/group.py index 47999ea..1000a69 100644 --- a/snapcast/control/group.py +++ b/snapcast/control/group.py @@ -105,7 +105,8 @@ class Snapgroup(): @property def friendly_name(self): """Get friendly name.""" - return self.name if self.name != '' else self.stream + return self.name if self.name != '' else "+".join( + sorted([self._server.client(c).friendly_name for c in self.clients])) @property def clients(self):
happyleavesaoc/python-snapcast
f362e4abfe6ab71bb87ca427d687d87ead1f44b2
diff --git a/tests/test_group.py b/tests/test_group.py index 3139dca..5c3563b 100644 --- a/tests/test_group.py +++ b/tests/test_group.py @@ -27,6 +27,7 @@ class TestSnapgroup(unittest.TestCase): client.volume = 50 client.callback = MagicMock() client.update_volume = MagicMock() + client.friendly_name = 'A' server.streams = [stream] server.stream = MagicMock(return_value=stream) server.client = MagicMock(return_value=client) @@ -35,7 +36,7 @@ class TestSnapgroup(unittest.TestCase): def test_init(self): self.assertEqual(self.group.identifier, 'test') self.assertEqual(self.group.name, '') - self.assertEqual(self.group.friendly_name, 'test stream') + self.assertEqual(self.group.friendly_name, 'A+A') self.assertEqual(self.group.stream, 'test stream') self.assertEqual(self.group.muted, False) self.assertEqual(self.group.volume, 50)
[discussion] Group friendly name At the moment the friendly name of a group is either the name (if manually set) or the stream name: `return self.name if self.name != '' else self.stream` By default there are no names set and multiple groups could have the same stream, thus showing the same friendly name. In my opinion this makes sense only if someone is using exactly one group per stream and in that case one could set the group name manually. So I would propose to change the fall-back to either the group id or a combination of client names.
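The one-line patch above adopts the client-name fallback proposed here. A standalone sketch of the resulting behavior (names are illustrative; the real property reads the client names off the server object):

```python
def friendly_name(name, client_names):
    """Sketch of the patched fallback: an explicit group name wins,
    otherwise the sorted client names are joined with '+'."""
    return name if name != '' else "+".join(sorted(client_names))

friendly_name("", ["Livingroom", "Kitchen"])  # 'Kitchen+Livingroom'
friendly_name("Downstairs", ["Kitchen"])      # 'Downstairs'
```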
0.0
f362e4abfe6ab71bb87ca427d687d87ead1f44b2
[ "tests/test_group.py::TestSnapgroup::test_init" ]
[ "tests/test_group.py::TestSnapgroup::test_add_client", "tests/test_group.py::TestSnapgroup::test_remove_client", "tests/test_group.py::TestSnapgroup::test_set_callback", "tests/test_group.py::TestSnapgroup::test_set_muted", "tests/test_group.py::TestSnapgroup::test_set_name", "tests/test_group.py::TestSnapgroup::test_set_stream", "tests/test_group.py::TestSnapgroup::test_set_volume", "tests/test_group.py::TestSnapgroup::test_snapshot_restore", "tests/test_group.py::TestSnapgroup::test_streams_by_name", "tests/test_group.py::TestSnapgroup::test_update", "tests/test_group.py::TestSnapgroup::test_update_mute", "tests/test_group.py::TestSnapgroup::test_update_stream" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-05-12 19:52:58+00:00
mit
2,705
happyleavesaoc__python-snapcast-64
diff --git a/setup.py b/setup.py index 6c934d0..e2fa579 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup setup( name='snapcast', - version='2.3.3', + version='2.3.4', description='Control Snapcast.', url='https://github.com/happyleavesaoc/python-snapcast/', license='MIT', diff --git a/snapcast/control/group.py b/snapcast/control/group.py index 7935b2f..3b9a8be 100644 --- a/snapcast/control/group.py +++ b/snapcast/control/group.py @@ -105,8 +105,10 @@ class Snapgroup(): @property def friendly_name(self): """Get friendly name.""" - return self.name if self.name != '' else "+".join( - sorted([self._server.client(c).friendly_name for c in self.clients])) + fname = self.name if self.name != '' else "+".join( + sorted([self._server.client(c).friendly_name for c in self.clients + if c in [client.identifier for client in self._server.clients]])) + return fname if fname != '' else self.identifier @property def clients(self): diff --git a/snapcast/control/server.py b/snapcast/control/server.py index e93f5b1..afff4b3 100644 --- a/snapcast/control/server.py +++ b/snapcast/control/server.py @@ -284,7 +284,6 @@ class Snapserver(): new_groups[group.get('id')].update(group) else: new_groups[group.get('id')] = Snapgroup(self, group) - _LOGGER.debug('group found: %s', new_groups[group.get('id')]) for client in group.get('clients'): if client.get('id') in self._clients: new_clients[client.get('id')] = self._clients[client.get('id')] @@ -292,6 +291,7 @@ class Snapserver(): else: new_clients[client.get('id')] = Snapclient(self, client) _LOGGER.debug('client found: %s', new_clients[client.get('id')]) + _LOGGER.debug('group found: %s', new_groups[group.get('id')]) self._groups = new_groups self._clients = new_clients self._streams = new_streams @@ -402,14 +402,21 @@ class Snapserver(): def _on_stream_update(self, data): """Handle stream update.""" - self._streams[data.get('id')].update(data.get('stream')) - _LOGGER.debug('stream %s updated', self._streams[data.get('id')].friendly_name) - self._streams[data.get("id")].callback() - for group in self._groups.values(): - if group.stream == data.get('id'): - group.callback() - for clientID in group.clients: - self._clients.get(clientID).callback() + if data.get('id') in self._streams: + self._streams[data.get('id')].update(data.get('stream')) + _LOGGER.debug('stream %s updated', self._streams[data.get('id')].friendly_name) + self._streams[data.get("id")].callback() + for group in self._groups.values(): + if group.stream == data.get('id'): + group.callback() + for clientID in group.clients: + self._clients.get(clientID).callback() + else: + if data.get('stream', {}).get('uri', {}).get('query', {}).get('codec') == 'null': + _LOGGER.debug('stream %s is input-only, ignore', data.get('id')) + else: + _LOGGER.info('stream %s not found, synchronize', data.get('id')) + self.synchronize(self.status()) def set_on_update_callback(self, func): """Set on update callback function."""
happyleavesaoc/python-snapcast
9c8f97cea23015ab2414e9eee43926eca5878634
diff --git a/tests/test_group.py b/tests/test_group.py index 5c3563b..bd99ec2 100644 --- a/tests/test_group.py +++ b/tests/test_group.py @@ -28,21 +28,26 @@ class TestSnapgroup(unittest.TestCase): client.callback = MagicMock() client.update_volume = MagicMock() client.friendly_name = 'A' + client.identifier = 'a' server.streams = [stream] server.stream = MagicMock(return_value=stream) server.client = MagicMock(return_value=client) + server.clients = [client] self.group = Snapgroup(server, data) def test_init(self): self.assertEqual(self.group.identifier, 'test') self.assertEqual(self.group.name, '') - self.assertEqual(self.group.friendly_name, 'A+A') + self.assertEqual(self.group.friendly_name, 'A') self.assertEqual(self.group.stream, 'test stream') self.assertEqual(self.group.muted, False) self.assertEqual(self.group.volume, 50) self.assertEqual(self.group.clients, ['a', 'b']) self.assertEqual(self.group.stream_status, 'playing') + def test_repr(self): + self.assertEqual(self.group.__repr__(), 'Snapgroup (A, test)') + def test_update(self): self.group.update({ 'stream_id': 'other stream'
Exceptions thrown when debug logging is enabled When logging is configured for DEBUG the following exceptions are thrown upon connecting to my Snapcast server. ``` DEBUG:snapcast.control.server:connected to snapserver on wyseguy:1705 DEBUG:snapcast.control.server:stream found: Snapstream (UPnP) DEBUG:snapcast.control.server:stream found: Snapstream (Airplay) DEBUG:snapcast.control.server:stream found: Snapstream (Spotify) DEBUG:snapcast.control.server:stream found: Snapstream (All Streams) --- Logging error --- Traceback (most recent call last): File "/usr/lib/python3.11/logging/__init__.py", line 1110, in emit msg = self.format(record) ^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.11/logging/__init__.py", line 953, in format return fmt.format(record) ^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.11/logging/__init__.py", line 687, in format record.message = record.getMessage() ^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.11/logging/__init__.py", line 377, in getMessage msg = msg % self.args ~~~~^~~~~~~~~~~ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 193, in __repr__ return f'Snapgroup ({self.friendly_name}, {self.identifier})' ^^^^^^^^^^^^^^^^^^ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in friendly_name sorted([self._server.client(c).friendly_name for c in self.clients])) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in <listcomp> sorted([self._server.client(c).friendly_name for c in self.clients])) ^^^^^^^^^^^^^^^^^^^^^^ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 251, in client return self._clients[client_identifier] ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^ KeyError: 'b8:27:eb:e3:17:de' Call stack: File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 18, in <module> asyncio.run(main()) File "/usr/lib/python3.11/asyncio/runners.py", line 190, in run return runner.run(main) File "/usr/lib/python3.11/asyncio/runners.py", line 118, in run return self._loop.run_until_complete(task) File "/usr/lib/python3.11/asyncio/base_events.py", line 640, in run_until_complete self.run_forever() File "/usr/lib/python3.11/asyncio/base_events.py", line 607, in run_forever self._run_once() File "/usr/lib/python3.11/asyncio/base_events.py", line 1922, in _run_once handle._run() File "/usr/lib/python3.11/asyncio/events.py", line 80, in _run self._context.run(self._callback, *self._args) File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 14, in main await snapcast.control.create_server(loop, "wyseguy") File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/__init__.py", line 9, in create_server await server.start() File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 115, in start self.synchronize(status) File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 287, in synchronize _LOGGER.debug('group found: %s', new_groups[group.get('id')]) Unable to print the message and arguments - possible formatting error. Use the traceback above to help find the error. 
DEBUG:snapcast.control.server:client found: Snapclient 0.27.0 (StereoBerry, b8:27:eb:e3:17:de) --- Logging error --- Traceback (most recent call last): File "/usr/lib/python3.11/logging/__init__.py", line 1110, in emit msg = self.format(record) ^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.11/logging/__init__.py", line 953, in format return fmt.format(record) ^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.11/logging/__init__.py", line 687, in format record.message = record.getMessage() ^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.11/logging/__init__.py", line 377, in getMessage msg = msg % self.args ~~~~^~~~~~~~~~~ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 193, in __repr__ return f'Snapgroup ({self.friendly_name}, {self.identifier})' ^^^^^^^^^^^^^^^^^^ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in friendly_name sorted([self._server.client(c).friendly_name for c in self.clients])) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in <listcomp> sorted([self._server.client(c).friendly_name for c in self.clients])) ^^^^^^^^^^^^^^^^^^^^^^ File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 251, in client return self._clients[client_identifier] ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^ KeyError: 'b8:27:eb:43:7e:9f' Call stack: File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 18, in <module> asyncio.run(main()) File "/usr/lib/python3.11/asyncio/runners.py", line 190, in run return runner.run(main) File "/usr/lib/python3.11/asyncio/runners.py", line 118, in run return self._loop.run_until_complete(task) File "/usr/lib/python3.11/asyncio/base_events.py", line 640, in run_until_complete self.run_forever() File "/usr/lib/python3.11/asyncio/base_events.py", line 607, in run_forever self._run_once() File "/usr/lib/python3.11/asyncio/base_events.py", line 1922, in _run_once handle._run() File "/usr/lib/python3.11/asyncio/events.py", line 80, in _run self._context.run(self._callback, *self._args) File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 14, in main await snapcast.control.create_server(loop, "wyseguy") File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/__init__.py", line 9, in create_server await server.start() File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 115, in start self.synchronize(status) File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 287, in synchronize _LOGGER.debug('group found: %s', new_groups[group.get('id')]) Unable to print the message and arguments - possible formatting error. Use the traceback above to help find the error. 
DEBUG:snapcast.control.server:client found: Snapclient 0.27.0 (NoteBerry, b8:27:eb:43:7e:9f) DEBUG:snapcast.control.server:Server connected ``` Script to recreate ```python3 #!/usr/bin/env python3 import asyncio import logging import snapcast.control async def main(): logging.basicConfig(level=logging.DEBUG) # Connect to the Snapcast server loop = asyncio.get_running_loop() await snapcast.control.create_server(loop, "wyseguy") try: asyncio.run(main()) except KeyboardInterrupt: pass ```
0.0
9c8f97cea23015ab2414e9eee43926eca5878634
[ "tests/test_group.py::TestSnapgroup::test_init", "tests/test_group.py::TestSnapgroup::test_repr" ]
[ "tests/test_group.py::TestSnapgroup::test_add_client", "tests/test_group.py::TestSnapgroup::test_remove_client", "tests/test_group.py::TestSnapgroup::test_set_callback", "tests/test_group.py::TestSnapgroup::test_set_muted", "tests/test_group.py::TestSnapgroup::test_set_name", "tests/test_group.py::TestSnapgroup::test_set_stream", "tests/test_group.py::TestSnapgroup::test_set_volume", "tests/test_group.py::TestSnapgroup::test_snapshot_restore", "tests/test_group.py::TestSnapgroup::test_streams_by_name", "tests/test_group.py::TestSnapgroup::test_update", "tests/test_group.py::TestSnapgroup::test_update_mute", "tests/test_group.py::TestSnapgroup::test_update_stream" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-02-17 17:53:55+00:00
mit
2,706
hazelcast__hazelcast-python-client-115
diff --git a/hazelcast/client.py b/hazelcast/client.py
index 1a3a525..e566ce2 100644
--- a/hazelcast/client.py
+++ b/hazelcast/client.py
@@ -1,6 +1,6 @@
 import logging
 from hazelcast.cluster import ClusterService, RandomLoadBalancer
-from hazelcast.config import ClientConfig
+from hazelcast.config import ClientConfig, ClientProperties
 from hazelcast.connection import ConnectionManager, Heartbeat
 from hazelcast.invocation import InvocationService, ListenerService
 from hazelcast.lifecycle import LifecycleService, LIFECYCLE_STATE_SHUTTING_DOWN, LIFECYCLE_STATE_SHUTDOWN
@@ -25,6 +25,7 @@ class HazelcastClient(object):
 
     def __init__(self, config=None):
         self.config = config or ClientConfig()
+        self.properties = ClientProperties(self.config.get_properties())
         self.lifecycle = LifecycleService(self.config)
         self.reactor = AsyncoreReactor()
         self.connection_manager = ConnectionManager(self, self.reactor.new_connection)
diff --git a/hazelcast/config.py b/hazelcast/config.py
index ea6b9bd..1b560af 100644
--- a/hazelcast/config.py
+++ b/hazelcast/config.py
@@ -2,9 +2,10 @@
 Hazelcast Client Configuration module contains configuration classes and various
 constants required to create a ClientConfig.
 """
+import os
 
 from hazelcast.serialization.api import StreamSerializer
-from hazelcast.util import validate_type, validate_serializer, enum
+from hazelcast.util import validate_type, validate_serializer, enum, TimeUnit
 
 DEFAULT_GROUP_NAME = "dev"
 """
@@ -16,18 +17,6 @@
 Default password of connected Hazelcast cluster
 """
 
-PROPERTY_HEARTBEAT_INTERVAL = "hazelcast.client.heartbeat.interval"
-"""
-Configuration property for heartbeat interval in milliseconds. Client will send heartbeat to server by this value of interval
-unless other requests send to server
-"""
-
-PROPERTY_HEARTBEAT_TIMEOUT = "hazelcast.client.heartbeat.timeout"
-"""
-Configuration property for heartbeat timeout in milliseconds. If client cannot see any activity on a connection for this timeout
-value it will shutdown the connection
-"""
-
 INTEGER_TYPE = enum(VAR=0, BYTE=1, SHORT=2, INT=3, LONG=4, BIG_INT=5)
 """
 Integer type options that can be used by serialization service.
@@ -76,6 +65,7 @@ class ClientConfig(object):
 
     def __init__(self):
         self._properties = {}
+        """Config properties"""
 
         self.group_config = GroupConfig()
         """The group configuration"""
@@ -124,6 +114,16 @@
         self.lifecycle_listeners.append(lifecycle_state_changed)
         return self
 
+    def add_near_cache_config(self, near_cache_config):
+        """
+        Helper method to add a new NearCacheConfig.
+
+        :param near_cache_config: (NearCacheConfig), the near_cache config to add.
+        :return: `self` for cascading configuration.
+        """
+        self.near_cache_configs[near_cache_config.name] = near_cache_config
+        return self
+
     def get_property_or_default(self, key, default):
         """
         Client property accessor with fallback to default value.
@@ -137,14 +137,21 @@
         except KeyError:
             return default
 
-    def add_near_cache_config(self, near_cache_config):
+    def get_properties(self):
         """
-        Helper method to add a new NearCacheConfig.
+        Gets the configuration properties.
 
+        :return: (dict), Client configuration properties.
+        """
+        return self._properties
+
-        :param near_cache_config: (NearCacheConfig), the near_cache config to add.
+    def set_property(self, key, value):
+        """
+        Sets the value of a named property.
+
+        :param key: Property name
+        :param value: Value of the property
         :return: `self` for cascading configuration.
         """
-        self.near_cache_configs[near_cache_config.name] = near_cache_config
+        self._properties[key] = value
         return self
@@ -217,6 +224,7 @@ class SocketOption(object):
     A Socket option represent the unix socket option, that will be passed to python socket.setoption(level,`option, value)`
     See the Unix manual for level and option.
     """
+
     def __init__(self, level, option, value):
         self.level = level
         """Option level. See the Unix manual for detail."""
@@ -230,6 +238,7 @@ class SerializationConfig(object):
     """
     Hazelcast Serialization Service configuration options can be set from this class.
     """
+
     def __init__(self):
         self.portable_version = 0
         """
@@ -439,3 +448,82 @@ class NearCacheConfig(object):
         if eviction_sampling_pool_size < 1:
             raise ValueError("'eviction_sampling_pool_size' cannot be less than 1")
         self._eviction_sampling_pool_size = eviction_sampling_pool_size
+
+
+class ClientProperty(object):
+    """
+    Client property holds the name, default value and time unit of Hazelcast client properties.
+    Client properties can be set by
+    * Programmatic Configuration (config.set_property)
+    * Environment variables
+    """
+
+    def __init__(self, name, default_value=None, time_unit=None):
+        self.name = name
+        self.default_value = default_value
+        self.time_unit = time_unit
+
+
+class ClientProperties(object):
+    HEARTBEAT_INTERVAL = ClientProperty("hazelcast.client.heartbeat.interval", 5000, TimeUnit.MILLISECOND)
+    """
+    Time interval between the heartbeats sent by the client to the nodes.
+    """
+
+    HEARTBEAT_TIMEOUT = ClientProperty("hazelcast.client.heartbeat.timeout", 60000, TimeUnit.MILLISECOND)
+    """
+    Client sends heartbeat messages to the members and this is the timeout for this sending operations.
+    If there is not any message passing between the client and member within the given time via this property
+    in milliseconds, the connection will be closed.
+    """
+
+    INVOCATION_TIMEOUT_SECONDS = ClientProperty("hazelcast.client.invocation.timeout.seconds", 120, TimeUnit.SECOND)
+    """
+    When an invocation gets an exception because :
+    * Member throws an exception.
+    * Connection between the client and member is closed.
+    * Client's heartbeat requests are timed out.
+    Time passed since invocation started is compared with this property.
+    If the time is already passed, then the exception is delegated to the user. If not, the invocation is retried.
+    Note that, if invocation gets no exception and it is a long running one, then it will not get any exception,
+    no matter how small this timeout is set.
+    """
+
+    INVOCATION_RETRY_PAUSE_MILLIS = ClientProperty("hazelcast.client.invocation.retry.pause.millis", 1000,
+                                                   TimeUnit.MILLISECOND)
+    """
+    Pause time between each retry cycle of an invocation in milliseconds.
+    """
+
+    def __init__(self, properties):
+        self._properties = properties
+
+    def get(self, property):
+        """
+        Gets the value of the given property. First checks client config properties, then environment variables
+        and lastly fall backs to the default value of the property.
+        :param property: (:class:`~hazelcast.config.ClientProperty`), Property to get value from
+        :return: Returns the value of the given property
+        """
+        return self._properties.get(property.name) or os.getenv(property.name) or property.default_value
+
+    def get_seconds(self, property):
+        """
+        Gets the value of the given property in seconds. If the value of the given property is not a number,
+        throws TypeError.
+        :param property: (:class:`~hazelcast.config.ClientProperty`), Property to get seconds from
+        :return: (float), Value of the given property in seconds
+        """
+        return TimeUnit.to_seconds(self.get(property), property.time_unit)
+
+    def get_seconds_positive_or_default(self, property):
+        """
+        Gets the value of the given property in seconds. If the value of the given property is not a number,
+        throws TypeError. If the value of the given property in seconds is not positive, tries to
+        return the default value in seconds.
+        :param property: (:class:`~hazelcast.config.ClientProperty`), Property to get seconds from
+        :return: (float), Value of the given property in seconds if it is positive.
+        Else, value of the default value of given property in seconds.
+        """
+        seconds = self.get_seconds(property)
+        return seconds if seconds > 0 else TimeUnit.to_seconds(property.default_value, property.time_unit)
diff --git a/hazelcast/connection.py b/hazelcast/connection.py
index 9c580e8..e37f7bb 100644
--- a/hazelcast/connection.py
+++ b/hazelcast/connection.py
@@ -6,7 +6,6 @@ import sys
 import threading
 import time
 
-from hazelcast.config import PROPERTY_HEARTBEAT_INTERVAL, PROPERTY_HEARTBEAT_TIMEOUT
 from hazelcast.core import CLIENT_TYPE
 from hazelcast.exception import AuthenticationError
 from hazelcast.future import ImmediateFuture, ImmediateExceptionFuture
@@ -19,9 +18,6 @@ from hazelcast import six
 BUFFER_SIZE = 8192
 PROTOCOL_VERSION = 1
 
-DEFAULT_HEARTBEAT_INTERVAL = 5000
-DEFAULT_HEARTBEAT_TIMEOUT = 60000
-
 
 class ConnectionManager(object):
     """
@@ -188,10 +184,8 @@ class Heartbeat(object):
         self._client = client
         self._listeners = []
 
-        self._heartbeat_timeout = client.config.get_property_or_default(PROPERTY_HEARTBEAT_TIMEOUT,
-                                                                        DEFAULT_HEARTBEAT_TIMEOUT) // 1000
-        self._heartbeat_interval = client.config.get_property_or_default(PROPERTY_HEARTBEAT_INTERVAL,
-                                                                         DEFAULT_HEARTBEAT_INTERVAL) // 1000
+        self._heartbeat_timeout = client.properties.get_seconds_positive_or_default(client.properties.HEARTBEAT_TIMEOUT)
+        self._heartbeat_interval = client.properties.get_seconds_positive_or_default(client.properties.HEARTBEAT_INTERVAL)
 
     def start(self):
         """
diff --git a/hazelcast/util.py b/hazelcast/util.py
index 052afe5..db59e56 100644
--- a/hazelcast/util.py
+++ b/hazelcast/util.py
@@ -219,3 +219,27 @@ def get_portable_version(portable, default_version):
     except AttributeError:
         version = default_version
     return version
+
+
+class TimeUnit(object):
+    """
+    Represents the time durations at given units in seconds.
+    """
+    NANOSECOND = 1e-9
+    MICROSECOND = 1e-6
+    MILLISECOND = 1e-3
+    SECOND = 1.0
+    MINUTE = 60.0
+    HOUR = 3600.0
+
+    @staticmethod
+    def to_seconds(value, time_unit):
+        """
+        :param value: (Number), value to be translated to seconds
+        :param time_unit: Time duration in seconds
+        :return: Value of the value in seconds
+        """
+        if isinstance(value, bool):
+            # bool is a subclass of int. Don't let bool and float multiplication.
+            raise TypeError
+        return float(value) * time_unit
hazelcast/hazelcast-python-client
cf69cd386dc1c6b4c0b4fdeb04bbee6c48e3ff2d
diff --git a/tests/property_tests.py b/tests/property_tests.py
new file mode 100644
index 0000000..b8b61b3
--- /dev/null
+++ b/tests/property_tests.py
@@ -0,0 +1,126 @@
+import os
+
+from hazelcast.util import TimeUnit
+from hazelcast.config import ClientProperty, ClientProperties, ClientConfig
+from unittest import TestCase
+
+
+class PropertyTest(TestCase):
+    def test_client_property_defaults(self):
+        prop = ClientProperty("name")
+        self.assertEqual("name", prop.name)
+        self.assertIsNone(prop.default_value)
+        self.assertIsNone(prop.time_unit)
+
+    def test_client_property(self):
+        prop = ClientProperty("name", 0, TimeUnit.SECOND)
+        self.assertEqual("name", prop.name)
+        self.assertEqual(0, prop.default_value)
+        self.assertEqual(TimeUnit.SECOND, prop.time_unit)
+
+    def test_client_properties_with_config(self):
+        config = ClientConfig()
+        prop = ClientProperty("key")
+        config.set_property(prop.name, "value")
+
+        props = ClientProperties(config.get_properties())
+        self.assertEqual("value", props.get(prop))
+
+    def test_client_properties_with_default_value(self):
+        config = ClientConfig()
+        prop = ClientProperty("key", "def-value")
+
+        props = ClientProperties(config.get_properties())
+        self.assertEqual("def-value", props.get(prop))
+
+    def test_client_properties_with_config_and_default_value(self):
+        config = ClientConfig()
+        prop = ClientProperty("key", "def-value")
+        config.set_property(prop.name, "value")
+
+        props = ClientProperties(config.get_properties())
+        self.assertEqual("value", props.get(prop))
+
+    def test_client_properties_with_environment_variable(self):
+        environ = os.environ
+        environ[ClientProperties.HEARTBEAT_INTERVAL.name] = "3000"
+
+        props = ClientProperties(dict())
+        self.assertEqual("3000", props.get(ClientProperties.HEARTBEAT_INTERVAL))
+        os.unsetenv(ClientProperties.HEARTBEAT_INTERVAL.name)
+
+    def test_client_properties_with_config_default_value_and_environment_variable(self):
+        environ = os.environ
+        prop = ClientProperties.HEARTBEAT_INTERVAL
+        environ[prop.name] = "1000"
+
+        config = ClientConfig()
+        config.set_property(prop.name, 2000)
+
+        props = ClientProperties(config.get_properties())
+        self.assertEqual(2, props.get_seconds(prop))
+        os.unsetenv(prop.name)
+
+    def test_client_properties_get_second(self):
+        config = ClientConfig()
+        prop = ClientProperty("test", time_unit=TimeUnit.MILLISECOND)
+        config.set_property(prop.name, 1000)
+
+        props = ClientProperties(config.get_properties())
+        self.assertEqual(1, props.get_seconds(prop))
+
+    def test_client_properties_get_second_unsupported_type(self):
+        config = ClientConfig()
+        prop = ClientProperty("test", "value", TimeUnit.SECOND)
+        config.set_property(prop.name, None)
+
+        props = ClientProperties(config.get_properties())
+        with self.assertRaises(ValueError):
+            props.get_seconds(prop)
+
+    def test_client_properties_get_second_positive(self):
+        config = ClientConfig()
+        prop = ClientProperty("test", 1000, TimeUnit.MILLISECOND)
+        config.set_property(prop.name, -1000)
+
+        props = ClientProperties(config.get_properties())
+        self.assertEqual(1, props.get_seconds_positive_or_default(prop))
+
+    def test_client_properties_get_second_positive_unsupported_type(self):
+        config = ClientConfig()
+        prop = ClientProperty("test", "value", TimeUnit.MILLISECOND)
+        config.set_property(prop.name, None)
+
+        props = ClientProperties(config.get_properties())
+        with self.assertRaises(ValueError):
+            props.get_seconds_positive_or_default(prop)
+
+
+class TimeUnitTest(TestCase):
+    def test_nano_to_second(self):
+        self.assertEqual(0.1, TimeUnit.to_seconds(0.1e9, TimeUnit.NANOSECOND))
+
+    def test_micro_to_second(self):
+        self.assertEqual(2, TimeUnit.to_seconds(2e6, TimeUnit.MICROSECOND))
+
+    def test_milli_to_second(self):
+        self.assertEqual(3, TimeUnit.to_seconds(3e3, TimeUnit.MILLISECOND))
+
+    def test_second_to_second(self):
+        self.assertEqual(5.5, TimeUnit.to_seconds(5.5, TimeUnit.SECOND))
+
+    def test_minute_to_second(self):
+        self.assertEqual(60, TimeUnit.to_seconds(1, TimeUnit.MINUTE))
+
+    def test_hour_to_second(self):
+        self.assertEqual(1800, TimeUnit.to_seconds(0.5, TimeUnit.HOUR))
+
+    def test_numeric_string_to_second(self):
+        self.assertEqual(1, TimeUnit.to_seconds("1000", TimeUnit.MILLISECOND))
+
+    def test_unsupported_types_to_second(self):
+        types = ["str", True, None, list(), set(), dict()]
+        for type in types:
+            with self.assertRaises((TypeError, ValueError)):
+                TimeUnit.to_seconds(type, TimeUnit.SECOND)
+
implement client properties

Right now, configuration properties are set with constant strings that are defined at the top of the module in which they are used. This causes properties to scatter all over the code and makes it hard for the user to import them one by one. There should be a common class that holds all the available configuration properties to deal with that problem.
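A minimal, self-contained sketch of the pattern the patch above introduces: a `ClientProperty` value object plus a `ClientProperties` holder that resolves explicit config first, then an environment variable, then the default (names taken from the patch; time-unit handling omitted, and the asserts assume the environment variable is unset):

```python
import os


class ClientProperty(object):
    def __init__(self, name, default_value=None, time_unit=None):
        self.name = name
        self.default_value = default_value
        self.time_unit = time_unit


class ClientProperties(object):
    HEARTBEAT_INTERVAL = ClientProperty("hazelcast.client.heartbeat.interval", 5000)

    def __init__(self, properties):
        self._properties = properties

    def get(self, prop):
        # Explicit config wins, then an environment variable, then the default.
        return self._properties.get(prop.name) or os.getenv(prop.name) or prop.default_value


props = ClientProperties({"hazelcast.client.heartbeat.interval": 3000})
assert props.get(ClientProperties.HEARTBEAT_INTERVAL) == 3000
assert ClientProperties({}).get(ClientProperties.HEARTBEAT_INTERVAL) == 5000
```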
0.0
cf69cd386dc1c6b4c0b4fdeb04bbee6c48e3ff2d
[ "tests/property_tests.py::PropertyTest::test_client_properties_get_second", "tests/property_tests.py::PropertyTest::test_client_properties_get_second_positive", "tests/property_tests.py::PropertyTest::test_client_properties_get_second_positive_unsupported_type", "tests/property_tests.py::PropertyTest::test_client_properties_get_second_unsupported_type", "tests/property_tests.py::PropertyTest::test_client_properties_with_config", "tests/property_tests.py::PropertyTest::test_client_properties_with_config_and_default_value", "tests/property_tests.py::PropertyTest::test_client_properties_with_config_default_value_and_environment_variable", "tests/property_tests.py::PropertyTest::test_client_properties_with_default_value", "tests/property_tests.py::PropertyTest::test_client_properties_with_environment_variable", "tests/property_tests.py::PropertyTest::test_client_property", "tests/property_tests.py::PropertyTest::test_client_property_defaults", "tests/property_tests.py::TimeUnitTest::test_hour_to_second", "tests/property_tests.py::TimeUnitTest::test_micro_to_second", "tests/property_tests.py::TimeUnitTest::test_milli_to_second", "tests/property_tests.py::TimeUnitTest::test_minute_to_second", "tests/property_tests.py::TimeUnitTest::test_nano_to_second", "tests/property_tests.py::TimeUnitTest::test_numeric_string_to_second", "tests/property_tests.py::TimeUnitTest::test_second_to_second", "tests/property_tests.py::TimeUnitTest::test_unsupported_types_to_second" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2018-09-19 09:14:26+00:00
apache-2.0
2,707
hazelcast__hazelcast-python-client-257
diff --git a/hazelcast/serialization/input.py b/hazelcast/serialization/input.py
index dcc68ca..cc6c387 100644
--- a/hazelcast/serialization/input.py
+++ b/hazelcast/serialization/input.py
@@ -36,7 +36,14 @@ class _ObjectDataInput(ObjectDataInput):
         self._pos += _len
 
     def skip_bytes(self, count):
-        raise NotImplementedError("skip_bytes not implemented!!!")
+        if count <= 0:
+            return 0
+
+        if self._pos + count > self._size:
+            count = self._size - self._pos
+
+        self._pos += count
+        return count
 
     def read_boolean(self, position=None):
         return self.read_byte(position) != 0
hazelcast/hazelcast-python-client
766c61257c44094cd61efe057697fa45e8c64487
diff --git a/tests/serialization/input_test.py b/tests/serialization/input_test.py
index 71ae9a6..c17da70 100644
--- a/tests/serialization/input_test.py
+++ b/tests/serialization/input_test.py
@@ -51,6 +51,21 @@ class InputTestCase(unittest.TestCase):
         self.assertEqual(0, initial_pos)
         self.assertEqual(six.unichr(0x00e7), char)
 
+    def test_skip_bytes(self):
+        inp = _ObjectDataInput(bytearray(10))
+        self.assertEqual(0, inp.position())
+        self.assertEqual(4, inp.skip_bytes(4))
+        self.assertEqual(4, inp.position())
 
-if __name__ == '__main__':
-    unittest.main()
+    def test_skip_bytes_when_count_greater_than_remaining(self):
+        inp = _ObjectDataInput(bytearray(10))
+        inp.set_position(8)
+        self.assertEqual(2, inp.skip_bytes(4))
+        self.assertEqual(10, inp.position())
+
+    def test_skip_bytes_when_count_is_not_positive(self):
+        inp = _ObjectDataInput(bytearray(10))
+        self.assertEqual(0, inp.skip_bytes(0))
+        self.assertEqual(0, inp.position())
+        self.assertEqual(0, inp.skip_bytes(-1))
+        self.assertEqual(0, inp.position())
Implement ObjectDataInput#skip_bytes

It seems that we didn't implement the `skip_bytes` method yet. We should do that similarly to the Java client:

https://github.com/hazelcast/hazelcast/blob/master/hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/ByteArrayObjectDataInput.java#L598
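The clamping semantics the patch adopts (mirroring the Java `ByteArrayObjectDataInput`): skip nothing for a non-positive count, never move past the end of the buffer, and return the number of bytes actually skipped. A standalone sketch of just that arithmetic:

```python
def skip_bytes(pos, size, count):
    """Return (new_pos, skipped) for a buffer of `size` bytes at position `pos`."""
    if count <= 0:
        return pos, 0
    # Clamp the skip to the bytes remaining in the buffer.
    skipped = min(count, size - pos)
    return pos + skipped, skipped


assert skip_bytes(0, 10, 4) == (4, 4)    # plain skip
assert skip_bytes(8, 10, 4) == (10, 2)   # clamped at the end of the buffer
assert skip_bytes(0, 10, -1) == (0, 0)   # non-positive count skips nothing
```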
0.0
766c61257c44094cd61efe057697fa45e8c64487
[ "tests/serialization/input_test.py::InputTestCase::test_skip_bytes", "tests/serialization/input_test.py::InputTestCase::test_skip_bytes_when_count_greater_than_remaining", "tests/serialization/input_test.py::InputTestCase::test_skip_bytes_when_count_is_not_positive" ]
[ "tests/serialization/input_test.py::InputTestCase::test_bool_array", "tests/serialization/input_test.py::InputTestCase::test_char_be", "tests/serialization/input_test.py::InputTestCase::test_char_le", "tests/serialization/input_test.py::InputTestCase::test_int_array", "tests/serialization/input_test.py::InputTestCase::test_short_array" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2020-12-02 09:48:01+00:00
apache-2.0
2,708
hdmf-dev__hdmf-194
diff --git a/src/hdmf/build/builders.py b/src/hdmf/build/builders.py
index e92e481..8bc6631 100644
--- a/src/hdmf/build/builders.py
+++ b/src/hdmf/build/builders.py
@@ -272,9 +272,9 @@ class GroupBuilder(BaseBuilder):
             returns='the DatasetBuilder object for the dataset', rtype='DatasetBuilder')
     def add_dataset(self, **kwargs):
         ''' Create a dataset and add it to this group '''
+        kwargs['parent'] = self
+        kwargs['source'] = self.source
         pargs, pkwargs = fmt_docval_args(DatasetBuilder.__init__, kwargs)
-        pkwargs['parent'] = self
-        pkwargs['source'] = self.source
         builder = DatasetBuilder(*pargs, **pkwargs)
         self.set_dataset(builder)
         return builder
diff --git a/src/hdmf/build/map.py b/src/hdmf/build/map.py
index a5104af..2fb4e74 100644
--- a/src/hdmf/build/map.py
+++ b/src/hdmf/build/map.py
@@ -7,7 +7,7 @@ from copy import copy, deepcopy
 from datetime import datetime
 from six import with_metaclass, raise_from, text_type, binary_type, integer_types
 
-from ..utils import docval, getargs, ExtenderMeta, get_docval, fmt_docval_args, call_docval_func
+from ..utils import docval, getargs, ExtenderMeta, get_docval, call_docval_func, fmt_docval_args
 from ..container import AbstractContainer, Container, Data, DataRegion
 from ..spec import Spec, AttributeSpec, DatasetSpec, GroupSpec, LinkSpec, NAME_WILDCARD, NamespaceCatalog, RefSpec,\
     SpecReader
@@ -1448,15 +1448,17 @@ class TypeMap(object):
                 fields.append({'name': f, 'child': True})
             else:
                 fields.append(f)
-        if name is not None:
+
+        if name is not None:  # fixed name is specified in spec, remove it from docval args
            docval_args = filter(lambda x: x['name'] != 'name', docval_args)
 
         @docval(*docval_args)
         def __init__(self, **kwargs):
-            pargs, pkwargs = fmt_docval_args(base.__init__, kwargs)
             if name is not None:
-                pkwargs.update(name=name)
-            base.__init__(self, *pargs, **pkwargs)
+                kwargs.update(name=name)
+            pargs, pkwargs = fmt_docval_args(base.__init__, kwargs)
+            base.__init__(self, *pargs, **pkwargs)  # special case: need to pass self to __init__
+
             for f in new_args:
                 arg_val = kwargs.get(f, None)
                 if arg_val is not None:
diff --git a/src/hdmf/container.py b/src/hdmf/container.py
index 856e1e5..2831aa8 100644
--- a/src/hdmf/container.py
+++ b/src/hdmf/container.py
@@ -226,7 +226,7 @@ class AbstractContainer(with_metaclass(ExtenderMeta, object)):
                 parent_container.__children.append(self)
                 parent_container.set_modified()
             else:
-                self.__parent.add_candidate(parent_container, self)
+                self.__parent.add_candidate(parent_container)
         else:
             self.__parent = parent_container
             if isinstance(parent_container, Container):
diff --git a/src/hdmf/monitor.py b/src/hdmf/monitor.py
index 6fa9b95..0c49a42 100644
--- a/src/hdmf/monitor.py
+++ b/src/hdmf/monitor.py
@@ -1,7 +1,7 @@
 from abc import ABCMeta, abstractmethod
 import six
 
-from .utils import docval, getargs, fmt_docval_args
+from .utils import docval, getargs, call_docval_func
 from .data_utils import AbstractDataChunkIterator, DataChunkIterator, DataChunk
 
 
@@ -62,8 +62,7 @@ class DataChunkProcessor(AbstractDataChunkIterator):
 
 class NumSampleCounter(DataChunkProcessor):
     def __init__(self, **kwargs):
-        args, kwargs = fmt_docval_args(DataChunkProcessor.__init__, kwargs)
-        super(NumSampleCounter, self).__init__(*args, **kwargs)
+        call_docval_func(super(NumSampleCounter, self).__init__, kwargs)
         self.__sample_count = 0
 
     @docval({'name': 'data_chunk', 'type': DataChunk, 'doc': 'a chunk to process'})
diff --git a/src/hdmf/utils.py b/src/hdmf/utils.py
index c4f7516..289c182 100644
--- a/src/hdmf/utils.py
+++ b/src/hdmf/utils.py
@@ -128,6 +128,8 @@ def __parse_args(validator, args, kwargs, enforce_type=True, enforce_shape=True,
     :param enforce_type: Boolean indicating whether the type of arguments should be enforced
     :param enforce_shape: Boolean indicating whether the dimensions of array arguments should be enforced if
                           possible.
+    :param allow_extra: Boolean indicating whether extra keyword arguments are allowed (if False and extra keyword
+                        arguments are specified, then an error is raised).
 
     :return: Dict with:
         * 'args' : Dict all arguments where keys are the names and values are the values of the arguments.
@@ -145,22 +147,36 @@
     if duplicated:
         raise ValueError('The following names are duplicated: {}'.format(duplicated))
    try:
+        if allow_extra:  # extra keyword arguments are allowed so do not consider them when checking number of args
+            # verify only that the number of positional args is <= number of docval specified args
+            if len(args) > len(validator):
+                raise TypeError('Expected at most %s arguments, got %s' % (len(validator), len(args)))
+        else:  # verify that the number of positional args + keyword args is <= number of docval specified args
+            if (len(args) + len(kwargs)) > len(validator):
+                raise TypeError('Expected at most %s arguments, got %s' % (len(validator), len(args) + len(kwargs)))
+
+        # iterate through the docval specification and find a matching value in args / kwargs
         it = iter(validator)
         arg = next(it)
+        # catch unsupported keys
         allowable_terms = ('name', 'doc', 'type', 'shape', 'default', 'help')
         unsupported_terms = set(arg.keys()) - set(allowable_terms)
         if unsupported_terms:
             raise ValueError('docval for {}: {} are not supported by docval'.format(arg['name'],
                                                                                     list(unsupported_terms)))
-        # process positional arguments
+        # process positional arguments of the docval specification (no default value)
         while True:
-            # if 'default' in arg: break
             argname = arg['name']
             argval_set = False
             if argname in kwargs:
+                # if this positional arg is specified by a keyword arg and there are remaining positional args that
+                # have not yet been matched, then it is undetermined what those positional args match to. thus, raise
+                # an error
+                if argsi < len(args):
+                    type_errors.append("got multiple values for argument '%s'" % argname)
                 argval = kwargs.get(argname)
                 extras.pop(argname, None)
                 argval_set = True
@@ -171,36 +187,35 @@
             if not argval_set:
                 type_errors.append("missing argument '%s'" % argname)
             else:
-                if argname in ret:
-                    type_errors.append("'got multiple arguments for '%s" % argname)
-                else:
-                    if enforce_type:
-                        if not __type_okay(argval, arg['type']):
-                            if argval is None:
-                                fmt_val = (argname, __format_type(arg['type']))
-                                type_errors.append("None is not allowed for '%s' (expected '%s', not None)" % fmt_val)
-                            else:
-                                fmt_val = (argname, type(argval).__name__, __format_type(arg['type']))
-                                type_errors.append("incorrect type for '%s' (got '%s', expected '%s')" % fmt_val)
-                    if enforce_shape and 'shape' in arg:
+                if enforce_type:
+                    if not __type_okay(argval, arg['type']):
+                        if argval is None:
+                            fmt_val = (argname, __format_type(arg['type']))
+                            type_errors.append("None is not allowed for '%s' (expected '%s', not None)" % fmt_val)
+                        else:
+                            fmt_val = (argname, type(argval).__name__, __format_type(arg['type']))
+                            type_errors.append("incorrect type for '%s' (got '%s', expected '%s')" % fmt_val)
+                if enforce_shape and 'shape' in arg:
+                    valshape = get_data_shape(argval)
+                    while valshape is None:
+                        if argval is None:
+                            break
+                        if not hasattr(argval, argname):
+                            fmt_val = (argval, argname, arg['shape'])
+                            value_errors.append("cannot check shape of object '%s' for argument '%s' "
+                                                "(expected shape '%s')" % fmt_val)
+                            break
+                        # unpack, e.g. if TimeSeries is passed for arg 'data', then TimeSeries.data is checked
+                        argval = getattr(argval, argname)
                         valshape = get_data_shape(argval)
-                        while valshape is None:
-                            if argval is None:
-                                break
-                            if not hasattr(argval, argname):
-                                fmt_val = (argval, argname, arg['shape'])
-                                value_errors.append("cannot check shape of object '%s' for argument '%s' "
-                                                    "(expected shape '%s')" % fmt_val)
-                                break
-                            # unpack, e.g. if TimeSeries is passed for arg 'data', then TimeSeries.data is checked
-                            argval = getattr(argval, argname)
-                            valshape = get_data_shape(argval)
-                            if valshape is not None and not __shape_okay_multi(argval, arg['shape']):
-                                fmt_val = (argname, valshape, arg['shape'])
-                                value_errors.append("incorrect shape for '%s' (got '%s', expected '%s')" % fmt_val)
-                    ret[argname] = argval
+                    if valshape is not None and not __shape_okay_multi(argval, arg['shape']):
+                        fmt_val = (argname, valshape, arg['shape'])
+                        value_errors.append("incorrect shape for '%s' (got '%s', expected '%s')" % fmt_val)
+                ret[argname] = argval
             argsi += 1
             arg = next(it)
+
+        # process arguments of the docval specification with a default value
         while True:
             argname = arg['name']
             if argname in kwargs:
hdmf-dev/hdmf
9f9edf89ebae063fd27e0c8e2da42b0e46adcc2a
diff --git a/tests/unit/test_io_hdf5_h5tools.py b/tests/unit/test_io_hdf5_h5tools.py
index cd52436..ad37fbb 100644
--- a/tests/unit/test_io_hdf5_h5tools.py
+++ b/tests/unit/test_io_hdf5_h5tools.py
@@ -1034,7 +1034,7 @@ class HDF5IOReadData(unittest.TestCase):
         self.path = get_temp_filepath()
         foo1 = Foo('foo1', [0, 1, 2, 3, 4], "I am foo1", 17, 3.14)
         bucket1 = FooBucket('test_bucket1', [foo1])
-        self.foofile1 = FooFile('test_foofile1', buckets=[bucket1])
+        self.foofile1 = FooFile(buckets=[bucket1])
 
         with HDF5IO(self.path, manager=_get_manager(), mode='w') as temp_io:
             temp_io.write(self.foofile1)
@@ -1069,7 +1069,7 @@ class HDF5IOWriteNoFile(unittest.TestCase):
     def setUp(self):
         foo1 = Foo('foo1', [0, 1, 2, 3, 4], "I am foo1", 17, 3.14)
         bucket1 = FooBucket('test_bucket1', [foo1])
-        self.foofile1 = FooFile('test_foofile1', buckets=[bucket1])
+        self.foofile1 = FooFile(buckets=[bucket1])
         self.path = 'test_write_nofile.h5'
 
     def tearDown(self):
diff --git a/tests/unit/utils_test/test_docval.py b/tests/unit/utils_test/test_docval.py
index 62d19fa..ebe23d1 100644
--- a/tests/unit/utils_test/test_docval.py
+++ b/tests/unit/utils_test/test_docval.py
@@ -31,6 +31,13 @@ class MyTestClass(object):
     def basic_only_kw(self, **kwargs):
         return kwargs
 
+    @docval({'name': 'arg1', 'type': str, 'doc': 'argument1 is a str'},
+            {'name': 'arg2', 'type': 'int', 'doc': 'argument2 is a int'},
+            {'name': 'arg3', 'type': bool, 'doc': 'argument3 is a bool. it defaults to False', 'default': False},
+            allow_extra=True)
+    def basic_add2_kw_allow_extra(self, **kwargs):
+        return kwargs
+
 
 class MyTestSubclass(MyTestClass):
 
@@ -350,6 +357,57 @@ class TestDocValidator(unittest.TestCase):
         with self.assertRaises(TypeError):
             self.test_obj.basic_add2_kw('a string', 100, bar=1000)
 
+    def test_extra_args_pos_only(self):
+        """Test that docval raises an error if too many positional
+           arguments are specified
+        """
+        with self.assertRaisesRegex(TypeError, r'Expected at most 3 arguments, got 4'):
+            self.test_obj.basic_add2_kw('a string', 100, True, 'extra')
+
+    def test_extra_args_pos_kw(self):
+        """Test that docval raises an error if too many positional
+           arguments are specified and a keyword arg is specified
+        """
+        with self.assertRaisesRegex(TypeError, r'Expected at most 3 arguments, got 4'):
+            self.test_obj.basic_add2_kw('a string', 'extra', 100, arg3=True)
+
+    def test_extra_kwargs_pos_kw(self):
+        """Test that docval raises an error if extra keyword
+           arguments are specified
+        """
+        with self.assertRaisesRegex(TypeError, r'Expected at most 3 arguments, got 4'):
+            self.test_obj.basic_add2_kw('a string', 100, extra='extra', arg3=True)
+
+    def test_extra_args_pos_only_ok(self):
+        """Test that docval raises an error if too many positional
+           arguments are specified even if allow_extra is True
+        """
+        with self.assertRaisesRegex(TypeError, r'Expected at most 3 arguments, got 4'):
+            self.test_obj.basic_add2_kw_allow_extra('a string', 100, True, 'extra', extra='extra')
+
+    def test_extra_args_pos_kw_ok(self):
+        """Test that docval does not raise an error if too many
+           keyword arguments are specified and allow_extra is True
+        """
+        kwargs = self.test_obj.basic_add2_kw_allow_extra('a string', 100, True, extra='extra')
+        self.assertDictEqual(kwargs, {'arg1': 'a string', 'arg2': 100, 'arg3': True, 'extra': 'extra'})
+
+    def test_dup_kw(self):
+        """Test that docval raises an error if a keyword argument
+           captures a positional argument before all positional
+           arguments have been resolved
+        """
+        with self.assertRaisesRegex(TypeError, r"got multiple values for argument 'arg1'"):
+            self.test_obj.basic_add2_kw('a string', 100, arg1='extra')
+
+    def test_extra_args_dup_kw(self):
+        """Test that docval raises an error if a keyword argument
+           captures a positional argument before all positional
+           arguments have been resolved and allow_extra is True
+        """
+        with self.assertRaisesRegex(TypeError, r"got multiple values for argument 'arg1'"):
+            self.test_obj.basic_add2_kw_allow_extra('a string', 100, True, arg1='extra')
+
     def test_unsupported_docval_term(self):
         """Test that docval does not allow setting of arguments
            marked as unsupported
Extra/duplicate args allowed in docval

1. If docval specifies three arguments and the user provides four positional arguments, docval should throw an error.
2. In Python, the code below throws an error:

```python
>>> def fn(a, b):
...     print(a, b)
...
>>> fn(1, 2, a=3)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: fn() got multiple values for argument 'a'
```

Here, a passed keyword argument captures a positional argument, making the other values uninterpretable. docval should throw a similar error but doesn't: the analogous docval definition of the above function would set a = 3, b = 2, and silently ignore the first argument (1). This lack of errors matters when the function headers of an API change and users are unaware that their calls no longer work as intended.
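Distilled from the patch above, these are the two up-front counts that close both holes; `allow_extra` relaxes only the keyword check, never the positional one. A standalone sketch outside of docval itself:

```python
def check_arg_counts(n_spec, args, kwargs, allow_extra=False):
    # Too many positional arguments is always an error.
    if len(args) > n_spec:
        raise TypeError('Expected at most %s arguments, got %s' % (n_spec, len(args)))
    # Extra keyword arguments are only tolerated when allow_extra is set.
    if not allow_extra and len(args) + len(kwargs) > n_spec:
        raise TypeError('Expected at most %s arguments, got %s' % (n_spec, len(args) + len(kwargs)))


check_arg_counts(3, ('a', 100, True), {})                         # fine
check_arg_counts(3, ('a', 100), {'extra': 1}, allow_extra=True)   # fine
# check_arg_counts(3, ('a', 100, True, 'x'), {})  # raises TypeError
```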
0.0
9f9edf89ebae063fd27e0c8e2da42b0e46adcc2a
[ "tests/unit/utils_test/test_docval.py::TestDocValidator::test_dup_kw", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_args_dup_kw", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_args_pos_kw", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_args_pos_only", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_args_pos_only_ok", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_kwargs_pos_kw" ]
[ "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test__chunked_iter_fill", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_copy_h5py_dataset_h5dataio_input", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_copy_h5py_dataset_input", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_dci_h5dataset", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_dci_h5dataset_scalar", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_dci_h5dataset_sparse_matched", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_dci_h5dataset_sparse_unmatched", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_error_on_unsupported_compression_filter", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_h5dataio_array_conversion_datachunkiterator", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_h5dataio_array_conversion_list", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_h5dataio_array_conversion_numpy", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_link_h5py_dataset_h5dataio_input", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_link_h5py_dataset_input", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_list_fill_empty", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_list_fill_empty_no_dtype", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_pass_through_of_recommended_chunks", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_value_error_on_incompatible_compression_opts", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_warning_on_linking_of_regular_array", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_warning_on_non_gzip_compression", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_warning_on_setting_io_options_on_h5dataset_input", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_data_chunk_iterator", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_data_chunk_iterator_with_compression", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_iterable", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_iterable_multidimensional_array", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_iterable_multidimensional_array_compression", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_chunked", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_compress_gzip", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_compress_lzf", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_compress_szip", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_disable_default_compress", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_enable_default_compress", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_fillvalue", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_scalar", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_multi_dci_conc", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_multi_dci_oaat", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_table", "tests/unit/test_io_hdf5_h5tools.py::H5IOTest::test_write_table_nested", "tests/unit/test_io_hdf5_h5tools.py::TestRoundTrip::test_roundtrip_basic", "tests/unit/test_io_hdf5_h5tools.py::TestRoundTrip::test_roundtrip_empty_dataset", "tests/unit/test_io_hdf5_h5tools.py::TestRoundTrip::test_roundtrip_empty_group", "tests/unit/test_io_hdf5_h5tools.py::TestHDF5IO::test_constructor", 
"tests/unit/test_io_hdf5_h5tools.py::TestHDF5IO::test_set_file_mismatch", "tests/unit/test_io_hdf5_h5tools.py::TestCacheSpec::test_cache_spec", "tests/unit/test_io_hdf5_h5tools.py::TestCacheSpec::test_double_cache_spec", "tests/unit/test_io_hdf5_h5tools.py::TestNoCacheSpec::test_no_cache_spec", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOMultiFileTest::test_copy_file_with_external_links", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOInitNoFileTest::test_init_no_file_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOInitNoFileTest::test_init_no_file_r", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOInitNoFileTest::test_init_no_file_rplus", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOInitFileExistsTest::test_init_file_exists_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOInitFileExistsTest::test_init_wminus_file_exists", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOInitFileExistsTest::test_init_x_file_exists", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOReadNoDataTest::test_read_no_data_a", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOReadNoDataTest::test_read_no_data_r", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOReadNoDataTest::test_read_no_data_rplus", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOReadData::test_read_file_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOReadData::test_read_file_w", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOWriteNoFile::test_write_no_file_a_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOWriteNoFile::test_write_no_file_w_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOWriteNoFile::test_write_no_file_wminus_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOWriteNoFile::test_write_no_file_x_ok", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOWriteFileExists::test_write_r", "tests/unit/test_io_hdf5_h5tools.py::HDF5IOWriteFileExists::test_write_w", "tests/unit/test_io_hdf5_h5tools.py::H5DataIOValid::test_read_valid", "tests/unit/test_io_hdf5_h5tools.py::H5DataIOValid::test_valid", "tests/unit/test_io_hdf5_h5tools.py::TestReadLink::test_link_to_link", "tests/unit/test_io_hdf5_h5tools.py::TestReadLink::test_set_link_loc", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_bad_shape", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_bad_type", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_catch_duplicate_names", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_all_kw_syntax", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_default", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_default_sub", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_default_sub_missing_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_kw_syntax", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_kwsyntax_sub", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_kwsyntax_sub_missing_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_kwsyntax_sub_nonetype_arg", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_pos_syntax", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_kw_pos_syntax_missing_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_pos_as_kw", 
"tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_text_type_w_str", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add2_text_type_w_unicode", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add_kw", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add_missing_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_docval_add_sub", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_args_pos_kw_ok", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_extra_kwarg", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_fmt_docval_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_all", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_missing_arg", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_missing_arg_of_many_ok", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_missing_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_none", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_none_arg", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_one_arg", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_get_docval_two_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_multi_shape", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_only_kw_arg1_arg2", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_only_kw_arg1_arg2_pos", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_only_kw_arg1_no_arg2", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_only_kw_arg1_pos_no_arg2", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_only_kw_arg2_no_arg1", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_only_kw_no_args", "tests/unit/utils_test/test_docval.py::TestDocValidator::test_unsupported_docval_term", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_invalid_unpack", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_invalid_unpack_default", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_none_unpack", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_none_unpack_default", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_other_unpack", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_other_unpack_default", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_valid_unpack", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_shape_valid_unpack_default", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_type_arg", "tests/unit/utils_test/test_docval.py::TestDocValidatorChain::test_type_arg_wrong_type" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-11-07 22:34:28+00:00
bsd-3-clause
2,709
heavenshell__py-doq-52
diff --git a/doq/outputter.py b/doq/outputter.py
index 4099b06..8fe6de7 100644
--- a/doq/outputter.py
+++ b/doq/outputter.py
@@ -14,13 +14,20 @@ class StringOutptter(BaseOutputter):
             return start
         else:
             for i, line in enumerate(lines[start:end]):
-                # Found end of signature without type
                 if line.endswith('):'):
+                    # Found end of signature without type
                     return start + i + 1
                 elif re.search(r'\):', line):
                     return start + i + 1
-                # Found end of signature with type
+                elif re.search(r'\]:', line):
+                    # Found end of signature type
+                    # def foo(a, b) -> Tuple[
+                    #     int,
+                    # ]:
+                    #     pass
+                    return start + i + 1
                 elif re.search(r'->(.*):', line):
+                    # Found end of signature with type
                     return start + i + 1
 
         return start
diff --git a/doq/parser.py b/doq/parser.py
index 34805a7..70755bc 100644
--- a/doq/parser.py
+++ b/doq/parser.py
@@ -88,14 +88,9 @@ def parse_defs(module, omissions=None, ignore_exception=False, ignore_yield=Fals
 
         params.append(arguments)
 
-        # parso does not have return type. So parse from signature.
-        next_node = d.get_suite().get_first_leaf().get_next_sibling()
-        stmt_start_lineno = next_node.start_pos[0] if next_node else 2
-        return_type = parse_return_type(
-            code=code,
-            start_lineno=start_lineno,
-            end_lineno=stmt_start_lineno - 1,
-        )
+        return_type = None
+        if d.children[3].value == '->':
+            return_type = d.children[4].get_code().strip()
 
         yields = []
         if ignore_yield is False:
heavenshell/py-doq
4cc96bd961129a08dc25e99ab9c27de1af82393b
diff --git a/tests/test_outputter.py b/tests/test_outputter.py
index 6229985..5368858 100644
--- a/tests/test_outputter.py
+++ b/tests/test_outputter.py
@@ -107,6 +107,40 @@ class StringOutptterTestCase(TestCase):
         ])
         self.assertEqual(expected, output)
 
+    def test_multi_return_type(self):
+        lines = [
+            'def foo(arg1) -> List[',
+            '    int,',
+            '    int,',
+            ']:',
+            '    pass',
+        ]
+
+        docstrings = [{
+            'docstring': '"""foo.\n\n:param arg1:\n:rtype List[\n    int,\n    int,\n]:\n"""',
+            'start_lineno': 1,
+            'start_col': 0,
+            'end_lineno': 7,
+            'end_col': 0,
+            'is_doc_exists': False,
+        }]
+        output = StringOutptter().format(lines=lines, docstrings=docstrings, indent=4)
+        expected = '\n'.join([
+            'def foo(arg1) -> List[',
+            '    int,',
+            '    int,',
+            ']:',
+            '    """foo.',
+            '',
+            '    :param arg1:',
+            '    :rtype List[',
+            '        int,',
+            '        int,',
+            '    ]:',
+            '    """',
+            '    pass',
+        ])
+        self.assertEqual(expected, output)
+
 
 class JSONOutptterTestCase(TestCase):
     def test_same_lines(self):
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 404112a..261b8e4 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -484,6 +484,37 @@ class ParseTestCase(TestCase):
             actual,
         )
 
+    def test_with_return_multi_line_type(self):
+        line = '\n'.join([
+            'def foo(arg1) -> Tuple[',
+            '    int,',
+            '    int,',
+            ']:',
+            '    pass',
+        ])
+        actual = parse(line)[0]
+        self.assertDictEqual(
+            {
+                'name': 'foo',
+                'params': [
+                    {
+                        'argument': 'arg1',
+                        'annotation': None,
+                        'default': None,
+                    },
+                ],
+                'return_type': 'Tuple[\n    int,\n    int,\n]',
+                'start_lineno': 1,
+                'start_col': 0,
+                'end_lineno': 5,
+                'end_col': 8,
+                'is_doc_exists': False,
+                'exceptions': [],
+                'yields': [],
+            },
+            actual,
+        )
+
     def test_with_defs(self):
         line = '\n'.join([
             'def bar(arg1) -> List[str]:',
Multi-line return annotations

Hi,

Thanks for the super useful tool! Just wanted to report one minor/non-urgent issue that I've been running into recently -- specifically, running `doq` on:

```python
from typing import Tuple


def x(a, b, c) -> Tuple[
    int,
    int,
    int,
]:
    pass
```

produces:

```python
from typing import Tuple


def x(a, b, c) -> Tuple[
    """x.

    :param a:
    :param b:
    :param c:
    """
    int,
    int,
    int,
]:
    pass
```

Seems like two issues:

- `detect_insert_point()` breaks in outputter.py
- `parse_return_type()` breaks in parser.py; note that there are no return annotations above

For the latter: I think `parse_return_type()` could be replaced by checking the parso tree, for example in `parse_defs()`:

```python
if d.children[3].value == "->":
    # Get return-type annotation
    return_type = d.children[4].get_code().strip()
else:
    # No annotation
    return_type = None
```

(child indices are from [here](https://github.com/davidhalter/parso/blob/034a9e89443261e7f434fcb1fbb807b14991b766/parso/python/tree.py#L536))

Some thought may also need to be put into how indents, etc. are handled? @heavenshell if you don't have time to fix, I might spontaneously submit a PR sometime next month. :slightly_smiling_face:
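The suggested child indices can be checked directly against parso's tree; a small sketch, assuming parso is installed and that an annotated `funcdef` lays out its children as in the linked `tree.py`:

```python
import parso

tree = parso.parse(
    'def x(a) -> Tuple[\n'
    '    int,\n'
    ']:\n'
    '    pass\n'
)
funcdef = next(tree.iter_funcdefs())
# children: 'def', name, parameters, '->', annotation, ':', suite
assert funcdef.children[3].value == '->'
print(funcdef.children[4].get_code().strip())  # the full multi-line annotation
```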
0.0
4cc96bd961129a08dc25e99ab9c27de1af82393b
[ "tests/test_outputter.py::StringOutptterTestCase::test_multi_return_type", "tests/test_parser.py::ParseTestCase::test_with_return_multi_line_type" ]
[ "tests/test_outputter.py::StringOutptterTestCase::test_multi_lines", "tests/test_outputter.py::StringOutptterTestCase::test_multi_lines_with_return_type", "tests/test_outputter.py::StringOutptterTestCase::test_same_lines", "tests/test_outputter.py::JSONOutptterTestCase::test_same_lines", "tests/test_parser.py::ParseTestCase::test_async_keyword", "tests/test_parser.py::ParseTestCase::test_async_keyword_with_class", "tests/test_parser.py::ParseTestCase::test_doc_exists", "tests/test_parser.py::ParseTestCase::test_with_args", "tests/test_parser.py::ParseTestCase::test_with_args_and_kwargs", "tests/test_parser.py::ParseTestCase::test_with_class", "tests/test_parser.py::ParseTestCase::test_with_classes", "tests/test_parser.py::ParseTestCase::test_with_defs", "tests/test_parser.py::ParseTestCase::test_with_kwargs", "tests/test_parser.py::ParseTestCase::test_with_one_argument", "tests/test_parser.py::ParseTestCase::test_with_one_argument_and_annotaion", "tests/test_parser.py::ParseTestCase::test_with_one_argument_and_default", "tests/test_parser.py::ParseTestCase::test_with_one_argument_annotaion_and_default", "tests/test_parser.py::ParseTestCase::test_with_return_type", "tests/test_parser.py::ParseTestCase::test_with_two_arguments", "tests/test_parser.py::ParseTestCase::test_with_two_arguments_with_annotation", "tests/test_parser.py::ParseTestCase::test_with_two_arguments_with_annotation_and_default", "tests/test_parser.py::ParseTestCase::test_with_two_arguments_with_asterisk", "tests/test_parser.py::ParseTestCase::test_with_two_arguments_with_default", "tests/test_parser.py::ParseTestCase::test_without_argument", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_0_def_foo_arg_str_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_1_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_2_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_3_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_4_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_5_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_6_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_7_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_8_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_get_return_type_9_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_0_def_foo_arg_str_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_1_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_2_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_3_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_4_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_5_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_6_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_7_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_8_def_foo_arg_str_int_", "tests/test_parser.py::ReturnTypeTestCase::test_parse_return_type_9_def_foo_arg_str_int_" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-11-21 13:29:32+00:00
bsd-3-clause
2,710
heavenshell__py-doq-70
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index dbe033a..fb1bfcc 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: [3.6, 3.7, 3.8, 3.9]
 
     steps:
       - uses: actions/checkout@v2
diff --git a/doq/cli.py b/doq/cli.py
index 287e6c5..1d7b7ff 100644
--- a/doq/cli.py
+++ b/doq/cli.py
@@ -226,10 +226,10 @@ def run(args):
 
         if args.write and target['path'] != '<stdin>':
             with open(target['path'], 'w') as f:
-                f.write(output)
+                f.write(output + '\n')
         else:
-            sys.stdout.write(output)
+            sys.stdout.write(output + '\n')
 
     return True
heavenshell/py-doq
269e22ff16fbe4f7e8137631c83d3dd2c4fc8450
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 16dffd9..9a3ae16 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -59,7 +59,7 @@ class CliTestCase(TestCase):
             run(args)
             actual = p.getvalue().split('\n')
             with open(os.path.join(expected_path, file)) as f:
-                expected = f.read().rstrip().split('\n')
+                expected = f.read().split('\n')
 
             self.assertSequenceEqual(expected, actual)
 
@@ -108,7 +108,7 @@
             run(args)
             actual = p.getvalue().split('\n')
             with open(os.path.join(expected_path, file)) as f:
-                expected = f.read().rstrip().split('\n')
+                expected = f.read().split('\n')
 
             self.assertSequenceEqual(expected, actual)
 
@@ -157,7 +157,7 @@
             run(args)
             actual = p.getvalue().split('\n')
             with open(os.path.join(expected_path, file)) as f:
-                expected = f.read().rstrip().split('\n')
+                expected = f.read().split('\n')
 
             self.assertSequenceEqual(expected, actual)
EOF newline is stripped off

Take the following file as an example:

```
# file.py
def f(x):
    pass

```

Notice the last empty line, which is *required* by git to diff files properly. Now, passing this file to doq with the command `doq -w -f file.py` produces the following:

```
# file.py
def f(x):
    """f.

    :param x:
    """
    pass
```

Notice that the last line has been stripped.
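The one-line fix in the patch above: `doq` joins its output lines with `'\n'`, which leaves the final line unterminated, so the write path appends a trailing newline. A sketch of that write path:

```python
def write_result(path, output):
    # `output` is '\n'.join(lines), so the last line has no terminator;
    # append one to preserve the POSIX end-of-file newline that git expects.
    with open(path, 'w') as f:
        f.write(output + '\n')
```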
0.0
269e22ff16fbe4f7e8137631c83d3dd2c4fc8450
[ "tests/test_cli.py::CliTestCase::test_output_with_google_style", "tests/test_cli.py::CliTestCase::test_output_with_numpy_style", "tests/test_cli.py::CliTestCase::test_output_with_sphinx_style" ]
[ "tests/test_cli.py::CliTestCase::test_find_files", "tests/test_cli.py::CliTestCase::test_get_defalt_template_path_0_sphinx", "tests/test_cli.py::CliTestCase::test_get_defalt_template_path_1_google", "tests/test_cli.py::CliTestCase::test_get_defalt_template_path_2_numpy", "tests/test_cli.py::CliTestCase::test_get_lines", "tests/test_cli.py::CliTestCase::test_get_target", "tests/test_cli.py::CliTestCase::test_get_template_path", "tests/test_cli.py::CliTestCase::test_is_doc_exists", "tests/test_cli.py::CliTestCase::test_no_files", "tests/test_cli.py::CliTestCase::test_no_output_with_google_style", "tests/test_cli.py::CliTestCase::test_no_output_with_numpy_style", "tests/test_cli.py::CliTestCase::test_no_output_with_sphinx_style", "tests/test_cli.py::CliTestCase::test_not_ignore_exception", "tests/test_cli.py::CliTestCase::test_not_ignore_yield", "tests/test_cli.py::CliTestCase::test_omit_one", "tests/test_cli.py::CliTestCase::test_omit_two", "tests/test_cli.py::CliTestCase::test_run_with_classes", "tests/test_cli.py::CliTestCase::test_run_with_defs" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-03-25 15:40:07+00:00
bsd-3-clause
2,711
heavenshell__py-doq-84
diff --git a/doq/outputter.py b/doq/outputter.py
index 8fe6de7..a676c42 100644
--- a/doq/outputter.py
+++ b/doq/outputter.py
@@ -17,9 +17,9 @@ class StringOutptter(BaseOutputter):
                 if line.endswith('):'):
                     # Found end of signature without type
                     return start + i + 1
-                elif re.search(r'\):', line):
+                elif re.search(r'\):|\)\s*:', line):
                     return start + i + 1
-                elif re.search(r'\]:', line):
+                elif re.search(r'\]:|\]\s*:', line):
                     # Found end of signature type
                     # def foo(a, b) -> Tuple[
                     #     int,
heavenshell/py-doq
30c9cd9837b9a3564fdd2793c8caff6131704b75
diff --git a/tests/test_outputter.py b/tests/test_outputter.py
index 5368858..4cb5d69 100644
--- a/tests/test_outputter.py
+++ b/tests/test_outputter.py
@@ -71,6 +71,77 @@ class StringOutptterTestCase(TestCase):
         ])
         self.assertEqual(expected, output)
 
+    def test_multi_lines_with_space(self):
+        lines = [
+            'def foo(',
+            '    arg1,',
+            '    arg2,',
+            '    arg3,',
+            ') :',
+            '    pass',
+        ]
+
+        docstrings = [{
+            'docstring': '"""foo.\n\n:param arg1:\n:param arg2:\n:param arg3:\n"""',
+            'start_lineno': 1,
+            'start_col': 0,
+            'end_lineno': 8,
+            'end_col': 0,
+            'is_doc_exists': False,
+        }]
+        output = StringOutptter().format(lines=lines, docstrings=docstrings, indent=4)
+        expected = '\n'.join([
+            'def foo(',
+            '    arg1,',
+            '    arg2,',
+            '    arg3,',
+            ') :',
+            '    """foo.',
+            '',
+            '    :param arg1:',
+            '    :param arg2:',
+            '    :param arg3:',
+            '    """',
+            '    pass',
+        ])
+        self.assertEqual(expected, output)
+
+    def test_multi_lines_with_space_and_return_type(self):
+        lines = [
+            'def foo(',
+            '    arg1,',
+            '    arg2,',
+            '    arg3,',
+            ') -> int :',
+            '    pass',
+        ]
+
+        docstrings = [{
+            'docstring': '"""foo.\n\n:param arg1:\n:param arg2:\n:param arg3:\n:rtype: int\n"""',
+            'start_lineno': 1,
+            'start_col': 0,
+            'end_lineno': 8,
+            'end_col': 0,
+            'is_doc_exists': False,
+        }]
+        output = StringOutptter().format(lines=lines, docstrings=docstrings, indent=4)
+        expected = '\n'.join([
+            'def foo(',
+            '    arg1,',
+            '    arg2,',
+            '    arg3,',
+            ') -> int :',
+            '    """foo.',
+            '',
+            '    :param arg1:',
+            '    :param arg2:',
+            '    :param arg3:',
+            '    :rtype: int',
+            '    """',
+            '    pass',
+        ])
+        self.assertEqual(expected, output)
+
     def test_multi_lines_with_return_type(self):
         lines = [
             'def foo(',
methods with multi-line arguments

Similar to #51, but here the multi-line arguments cannot be processed.

![image](https://user-images.githubusercontent.com/9496702/130314277-8ec308a0-3ac7-46d7-8a43-b4d3cfeb95b9.png)
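The fix widens the end-of-signature regexes in `outputter.py` so a space before the closing colon still matches. A quick check of the before/after patterns taken from the patch:

```python
import re

old_pattern = re.compile(r'\):')         # misses 'def foo(\n    ...\n) :'
new_pattern = re.compile(r'\):|\)\s*:')  # tolerates whitespace before ':'

assert old_pattern.search(') :') is None
assert new_pattern.search(') :') is not None
assert new_pattern.search('):') is not None
```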
0.0
30c9cd9837b9a3564fdd2793c8caff6131704b75
[ "tests/test_outputter.py::StringOutptterTestCase::test_multi_lines_with_space" ]
[ "tests/test_outputter.py::StringOutptterTestCase::test_multi_lines", "tests/test_outputter.py::StringOutptterTestCase::test_multi_lines_with_return_type", "tests/test_outputter.py::StringOutptterTestCase::test_multi_lines_with_space_and_return_type", "tests/test_outputter.py::StringOutptterTestCase::test_multi_return_type", "tests/test_outputter.py::StringOutptterTestCase::test_same_lines", "tests/test_outputter.py::JSONOutptterTestCase::test_same_lines" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_issue_reference", "has_media" ], "has_test_patch": true, "is_lite": false }
2021-08-21 14:23:25+00:00
bsd-3-clause
2,712
heavenshell__py-doq-85
diff --git a/README.rst b/README.rst index ac2a2c5..19963e8 100644 --- a/README.rst +++ b/README.rst @@ -89,6 +89,7 @@ Usage usage: doq [-h] [-f FILE] [--start START] [--end END] [-t TEMPLATE_PATH] [-s STYLE] [--formatter FORMATTER] [--indent INDENT] [--omit OMIT] [-r] [-d DIRECTORY] [-w] [-v] [--ignore_exception] [--ignore_yield] + [--ignore_init] Docstring generator. @@ -112,6 +113,7 @@ Usage -v, --version Output the version number --ignore_exception Ignore exception statements --ignore_yield Ignore yield statements + --ignore_init Ignore genereate docstring to __init__ method Customize template ================== @@ -157,4 +159,5 @@ See `examples <https://github.com/heavenshell/py-doq/tree/master/examples>`_ LICENSE ======= + NEW BSD LICENSE. diff --git a/doq/cli.py b/doq/cli.py index 1d7b7ff..bf0e9b1 100644 --- a/doq/cli.py +++ b/doq/cli.py @@ -54,7 +54,7 @@ def get_template_path(template_path, formatter): return os.path.abspath(template_path) -def generate_def_docstrings(signature, template, is_exception=False, is_yield=False): +def generate_def_docstrings(signature, template, is_exception=False, is_yield=False, ignore_init=False): docstrings = [] for d in signature['defs']: if d['is_doc_exists'] is False: @@ -67,18 +67,24 @@ def generate_def_docstrings(signature, template, is_exception=False, is_yield=Fa elif 'defs' in d: filename = 'class.txt' - docstring = template.load(params=d, filename=filename) - docstrings.append( - { - 'docstring': docstring, - 'start_lineno': d['start_lineno'], - 'start_col': d['start_col'], - 'end_lineno': d['end_lineno'], - 'end_col': d['start_col'], - }, - ) + if ignore_init and d['name'] == '__init__': + # numpy style guide says constructor's docstring should + # documented at class docstring. + # https://numpydoc.readthedocs.io/en/latest/format.html#class-docstring + pass + else: + docstring = template.load(params=d, filename=filename) + docstrings.append( + { + 'docstring': docstring, + 'start_lineno': d['start_lineno'], + 'start_col': d['start_col'], + 'end_lineno': d['end_lineno'], + 'end_col': d['start_col'], + }, + ) if 'defs' in d: - results = generate_def_docstrings(d, template, is_exception, is_yield) + results = generate_def_docstrings(d, template, is_exception, is_yield, ignore_init=ignore_init) if len(results): docstrings += results @@ -101,6 +107,7 @@ def generate_docstrings( omissions=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ): template = Template(paths=[path]) signatures = parse( @@ -108,6 +115,7 @@ def generate_docstrings( omissions=omissions, ignore_exception=ignore_exception, ignore_yield=ignore_yield, + ignore_init=ignore_init, ) is_exception = False if ignore_exception else is_exception_enabled(os.path.join(path, 'def.txt')) is_yield = False if ignore_yield else is_yield_enabled(os.path.join(path, 'def.txt')) @@ -129,7 +137,7 @@ def generate_docstrings( ) # Method docstring - docstrings += generate_def_docstrings(signature, template, is_exception, is_yield) + docstrings += generate_def_docstrings(signature, template, is_exception, is_yield, ignore_init) else: if signature['is_doc_exists'] is False: filename = 'noarg.txt' @@ -209,6 +217,7 @@ def run(args): omissions=omissions, ignore_exception=args.ignore_exception, ignore_yield=args.ignore_yield, + ignore_init=args.ignore_init, ) if len(docstrings) == 0: continue @@ -326,6 +335,11 @@ def parse_options(): action='store_true', help='Ignore yield statements', ) + parser.add_argument( + '--ignore_init', + action='store_true', + help='Ignore genereate 
docstring to __init__ method', + ) args = parser.parse_args() diff --git a/doq/parser.py b/doq/parser.py index 70755bc..a376bd0 100644 --- a/doq/parser.py +++ b/doq/parser.py @@ -52,7 +52,7 @@ def parse_return_type(code, start_lineno, end_lineno): return None -def parse_defs(module, omissions=None, ignore_exception=False, ignore_yield=False): # noqa C901 +def parse_defs(module, omissions=None, ignore_exception=False, ignore_yield=False, ignore_init=False): # noqa C901 if omissions is None: omissions = [] @@ -117,7 +117,12 @@ def parse_defs(module, omissions=None, ignore_exception=False, ignore_yield=Fals }, ) - nested = parse_defs(d, ignore_exception=ignore_exception, ignore_yield=ignore_yield) + nested = parse_defs( + d, + ignore_exception=ignore_exception, + ignore_yield=ignore_yield, + ignore_init=ignore_init, + ) if len(nested): results += nested @@ -128,7 +133,7 @@ def parse_defs(module, omissions=None, ignore_exception=False, ignore_yield=Fals return results -def parse_classdefs(module, ignore_exception=False, ignore_yield=False): +def parse_classdefs(module, ignore_exception=False, ignore_yield=False, ignore_init=False): results = [] for c in module.iter_classdefs(): @@ -138,7 +143,13 @@ def parse_classdefs(module, ignore_exception=False, ignore_yield=False): (end_lineno, end_col) = c.end_pos name = c.name.value - defs = parse_defs(c, omissions=['self'], ignore_exception=ignore_exception, ignore_yield=ignore_yield) + defs = parse_defs( + c, + omissions=['self'], + ignore_exception=ignore_exception, + ignore_yield=ignore_yield, + ignore_init=ignore_init, + ) results.append( { 'name': name, @@ -160,17 +171,23 @@ def parse_classdefs(module, ignore_exception=False, ignore_yield=False): return results -def parse(code, omissions=None, ignore_exception=False, ignore_yield=False): +def parse(code, omissions=None, ignore_exception=False, ignore_yield=False, ignore_init=False): m = parso.parse(code) results = [] if 'class' in code: - results = parse_classdefs(m, ignore_exception=ignore_exception, ignore_yield=ignore_yield) + results = parse_classdefs( + m, + ignore_exception=ignore_exception, + ignore_yield=ignore_yield, + ignore_init=ignore_init, + ) results += parse_defs( m, omissions=omissions, ignore_exception=ignore_exception, ignore_yield=ignore_yield, + ignore_init=ignore_init, ) return results diff --git a/tox.ini b/tox.ini index 414c6de..1416cc3 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py36,py37,py38,flake8 +envlist = py36,py37,py38,py39,flake8 [testenv] commands=python setup.py test
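As an illustration of the new flag, the sketch below mirrors the test case added in the test patch further down; the positional (code lines, template path) call signature and the sphinx template layout are read off that test and should be treated as assumptions, not documented API.

```python
import os

from doq.cli import generate_docstrings

# Template directory as laid out in the repository checkout used by the
# test suite (an assumption; adjust to your installation).
template_path = os.path.join("doq", "templates", "sphinx")

code = [
    "class Foo:",
    "    def __init__(self, arg1):",
    "        pass",
]

# With ignore_init=True the class itself still receives a docstring,
# but no docstring entry is generated for __init__.
results = generate_docstrings(
    code,
    template_path,
    omissions=["self"],
    ignore_exception=False,
    ignore_yield=False,
    ignore_init=True,
)
for entry in results:
    print(entry["docstring"])
```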
heavenshell/py-doq
af4340101df64d1e8254782103c5d846b0c94e89
diff --git a/tests/test_cli.py b/tests/test_cli.py index 9a3ae16..af0ac13 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -54,6 +54,7 @@ class CliTestCase(TestCase): omit=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ) with patch('doq.cli.sys.stdout', new_callable=StringIO) as p: run(args) @@ -79,6 +80,7 @@ class CliTestCase(TestCase): omit=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ) with patch('doq.cli.sys.stdout', new_callable=StringIO) as p: run(args) @@ -103,6 +105,7 @@ class CliTestCase(TestCase): omit=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ) with patch('doq.cli.sys.stdout', new_callable=StringIO) as p: run(args) @@ -128,6 +131,7 @@ class CliTestCase(TestCase): omit=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ) with patch('doq.cli.sys.stdout', new_callable=StringIO) as p: run(args) @@ -152,6 +156,7 @@ class CliTestCase(TestCase): omit=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ) with patch('doq.cli.sys.stdout', new_callable=StringIO) as p: run(args) @@ -177,6 +182,7 @@ class CliTestCase(TestCase): omit=None, ignore_exception=False, ignore_yield=False, + ignore_init=False, ) with patch('doq.cli.sys.stdout', new_callable=StringIO) as p: run(args) @@ -559,3 +565,73 @@ class CliTestCase(TestCase): self.assertEqual(0, results[0]['end_col']) self.assertEqual(1, results[0]['start_lineno']) self.assertEqual(3, results[0]['end_lineno']) + + def test_ignore_init(self): + docstrings = [ + 'class Foo:', + ' def __init__(self, arg1):', + ' pass', + '', + '', + 'class Bar:', + ' def bar(self, arg1, arg2):', + ' pass', + ] + + template_path = os.path.join( + self.basepath, + 'doq', + 'templates', + 'sphinx', + ) + results = generate_docstrings( + docstrings, + template_path, + omissions=['self'], + ignore_exception=False, + ignore_yield=False, + ignore_init=True, + ) + expected_docstrings = [ + [ + '"""Foo."""', + '', + ], + [ + '"""Bar."""', + '', + ], + [ + '"""bar.', + '', + ':param arg1:', + ':param arg2:', + '"""', + ], + ] + self.assertEqual( + '\n'.join(expected_docstrings[0]), + results[0]['docstring'], + ) + self.assertEqual(0, results[0]['start_col']) + self.assertEqual(0, results[0]['end_col']) + self.assertEqual(1, results[0]['start_lineno']) + self.assertEqual(4, results[0]['end_lineno']) + + self.assertEqual( + '\n'.join(expected_docstrings[1]), + results[1]['docstring'], + ) + self.assertEqual(0, results[1]['start_col']) + self.assertEqual(10, results[1]['end_col']) + self.assertEqual(6, results[1]['start_lineno']) + self.assertEqual(8, results[1]['end_lineno']) + + self.assertEqual( + '\n'.join(expected_docstrings[2]), + results[2]['docstring'], + ) + self.assertEqual(3, results[2]['start_col']) + self.assertEqual(3, results[2]['end_col']) + self.assertEqual(7, results[2]['start_lineno']) + self.assertEqual(8, results[2]['end_lineno'])
constructor (__init__) in a class The docstring is added into `__init__`. But per the [numpydoc style guide](https://numpydoc.readthedocs.io/en/latest/format.html), > Use the same sections as outlined above (all except Returns are applicable). The constructor (__init__) should also be documented here, the Parameters section of the docstring details the constructor’s parameters. The constructor's parameters should therefore be documented in the class docstring instead. ![image](https://user-images.githubusercontent.com/9496702/130314317-06c571f1-06fd-4e29-bcdc-96071b942891.png)
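For reference, a minimal sketch of the numpydoc convention the report points to; the class and parameter names here are purely illustrative:

```python
import random


class Sampler:
    """Draw items from a fixed population.

    Following the numpydoc class-docstring convention, the constructor's
    parameters are documented here rather than in ``__init__`` itself.

    Parameters
    ----------
    population : list
        Items to sample from.
    seed : int, optional
        Seed for the internal random number generator.
    """

    def __init__(self, population, seed=0):
        self.population = population
        self.rng = random.Random(seed)
```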
0.0
af4340101df64d1e8254782103c5d846b0c94e89
[ "tests/test_cli.py::CliTestCase::test_ignore_init" ]
[ "tests/test_cli.py::CliTestCase::test_find_files", "tests/test_cli.py::CliTestCase::test_get_defalt_template_path_0_sphinx", "tests/test_cli.py::CliTestCase::test_get_defalt_template_path_1_google", "tests/test_cli.py::CliTestCase::test_get_defalt_template_path_2_numpy", "tests/test_cli.py::CliTestCase::test_get_lines", "tests/test_cli.py::CliTestCase::test_get_target", "tests/test_cli.py::CliTestCase::test_get_template_path", "tests/test_cli.py::CliTestCase::test_is_doc_exists", "tests/test_cli.py::CliTestCase::test_no_files", "tests/test_cli.py::CliTestCase::test_no_output_with_google_style", "tests/test_cli.py::CliTestCase::test_no_output_with_numpy_style", "tests/test_cli.py::CliTestCase::test_no_output_with_sphinx_style", "tests/test_cli.py::CliTestCase::test_not_ignore_exception", "tests/test_cli.py::CliTestCase::test_not_ignore_yield", "tests/test_cli.py::CliTestCase::test_omit_one", "tests/test_cli.py::CliTestCase::test_omit_two", "tests/test_cli.py::CliTestCase::test_output_with_google_style", "tests/test_cli.py::CliTestCase::test_output_with_numpy_style", "tests/test_cli.py::CliTestCase::test_output_with_sphinx_style", "tests/test_cli.py::CliTestCase::test_run_with_classes", "tests/test_cli.py::CliTestCase::test_run_with_defs" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-08-22 02:34:49+00:00
bsd-3-clause
2,713
hetznercloud__hcloud-python-276
diff --git a/hcloud/servers/client.py b/hcloud/servers/client.py index ea72851..b6da0d3 100644 --- a/hcloud/servers/client.py +++ b/hcloud/servers/client.py @@ -1,5 +1,6 @@ from __future__ import annotations +import warnings from typing import TYPE_CHECKING, Any, NamedTuple from ..actions import ActionsPageResult, BoundAction, ResourceActionsClient @@ -21,6 +22,7 @@ from .domain import ( PrivateNet, PublicNetwork, PublicNetworkFirewall, + RebuildResponse, RequestConsoleResponse, ResetPasswordResponse, Server, @@ -299,13 +301,18 @@ class BoundServer(BoundModelBase, Server): """ return self._client.create_image(self, description, type, labels) - def rebuild(self, image: Image | BoundImage) -> BoundAction: + def rebuild( + self, + image: Image | BoundImage, + *, + return_response: bool = False, + ) -> RebuildResponse | BoundAction: """Rebuilds a server overwriting its disk with the content of an image, thereby destroying all data on the target server. - :param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.servers.domain.Image>` - :return: :class:`BoundAction <hcloud.actions.client.BoundAction>` + :param image: Image to use for the rebuilt server + :param return_response: Whether to return the full response or only the action. """ - return self._client.rebuild(self, image) + return self._client.rebuild(self, image, return_response=return_response) def change_type( self, @@ -930,12 +937,14 @@ class ServersClient(ClientEntityBase): self, server: Server | BoundServer, image: Image | BoundImage, - ) -> BoundAction: + *, + return_response: bool = False, + ) -> RebuildResponse | BoundAction: """Rebuilds a server overwriting its disk with the content of an image, thereby destroying all data on the target server. - :param server: :class:`BoundServer <hcloud.servers.client.BoundServer>` or :class:`Server <hcloud.servers.domain.Server>` - :param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.servers.domain.Image>` - :return: :class:`BoundAction <hcloud.actions.client.BoundAction>` + :param server: Server to rebuild + :param image: Image to use for the rebuilt server + :param return_response: Whether to return the full response or only the action. """ data: dict[str, Any] = {"image": image.id_or_name} response = self._client.request( @@ -943,7 +952,22 @@ class ServersClient(ClientEntityBase): method="POST", json=data, ) - return BoundAction(self._client.actions, response["action"]) + + rebuild_response = RebuildResponse( + action=BoundAction(self._client.actions, response["action"]), + root_password=response.get("root_password"), + ) + + if not return_response: + warnings.warn( + "Returning only the 'action' is deprecated, please set the " + "'return_response' keyword argument to 'True' to return the full " + "rebuild response and update your code accordingly.", + DeprecationWarning, + stacklevel=2, + ) + return rebuild_response.action + return rebuild_response def enable_backup(self, server: Server | BoundServer) -> BoundAction: """Enables and configures the automatic daily backup option for the server. Enabling automatic backups will increase the price of the server by 20%. 
diff --git a/hcloud/servers/domain.py b/hcloud/servers/domain.py index 2d55fd3..3802020 100644 --- a/hcloud/servers/domain.py +++ b/hcloud/servers/domain.py @@ -244,6 +244,24 @@ class RequestConsoleResponse(BaseDomain): self.password = password +class RebuildResponse(BaseDomain): + """Rebuild Response Domain + + :param action: Shows the progress of the server rebuild action + :param root_password: The root password of the server when not using SSH keys + """ + + __slots__ = ("action", "root_password") + + def __init__( + self, + action: BoundAction, + root_password: str | None, + ): + self.action = action + self.root_password = root_password + + class PublicNetwork(BaseDomain): """Public Network Domain
hetznercloud/hcloud-python
213b661d897cdd327f478b52aeb79844826694d8
diff --git a/tests/unit/servers/test_client.py b/tests/unit/servers/test_client.py index 2490ecf..a8ba355 100644 --- a/tests/unit/servers/test_client.py +++ b/tests/unit/servers/test_client.py @@ -307,15 +307,19 @@ class TestBoundServer: def test_rebuild(self, hetzner_client, bound_server, generic_action): hetzner_client.request.return_value = generic_action - action = bound_server.rebuild(Image(name="ubuntu-20.04")) + response = bound_server.rebuild( + Image(name="ubuntu-20.04"), + return_response=True, + ) hetzner_client.request.assert_called_with( url="/servers/14/actions/rebuild", method="POST", json={"image": "ubuntu-20.04"}, ) - assert action.id == 1 - assert action.progress == 0 + assert response.action.id == 1 + assert response.action.progress == 0 + assert response.root_password is None or isinstance(response.root_password, str) def test_enable_backup(self, hetzner_client, bound_server, generic_action): hetzner_client.request.return_value = generic_action @@ -1040,15 +1044,25 @@ class TestServersClient: ) def test_rebuild(self, servers_client, server, generic_action): servers_client._client.request.return_value = generic_action - action = servers_client.rebuild(server, Image(name="ubuntu-20.04")) + response = servers_client.rebuild( + server, + Image(name="ubuntu-20.04"), + return_response=True, + ) servers_client._client.request.assert_called_with( url="/servers/1/actions/rebuild", method="POST", json={"image": "ubuntu-20.04"}, ) - assert action.id == 1 - assert action.progress == 0 + assert response.action.id == 1 + assert response.action.progress == 0 + assert response.root_password is None or isinstance(response.root_password, str) + + def test_rebuild_return_response_deprecation(self, servers_client, generic_action): + servers_client._client.request.return_value = generic_action + with pytest.deprecated_call(): + servers_client.rebuild(Server(id=1), Image(name="ubuntu-20.04")) @pytest.mark.parametrize( "server", [Server(id=1), BoundServer(mock.MagicMock(), dict(id=1))]
Rebuild does not return root_password ## Bug Report **Current Behavior** 'Rebuild' returns the BoundAction but does not return the "root_password" **Input Code** ``` server = self.client.servers.get_by_name(name='server') res = server.rebuild(Image(id='image_id')) print(res) ``` **Expected behavior/code** The 'rebuild' should return the root_password along with the BoundAction. The response from the Hetzner API is below (see possible solution). **Environment** - Python Version: 3.11.2 - Hcloud-Python Version: 1.18.2 **Possible Solution** ``` { "action": { "command": "rebuild_server", "error": { "code": "action_failed", "message": "Action failed" }, "finished": null, "id": 13, "progress": 0, "resources": [ { "id": 42, "type": "server" } ], "started": "2016-01-30T23:50:00+00:00", "status": "running" }, "root_password": null } ```
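With the patch above, the full response can be requested explicitly; a minimal sketch, assuming a valid API token and an existing server (the token, server name, and image name are placeholders, and the import path for Image follows the package layout seen in the patch):

```python
from hcloud import Client
from hcloud.images.domain import Image

client = Client(token="API_TOKEN")  # placeholder token
server = client.servers.get_by_name(name="server")

# return_response=True yields a RebuildResponse instead of a bare
# BoundAction; root_password is None when the server uses SSH keys.
response = server.rebuild(Image(name="ubuntu-20.04"), return_response=True)
print(response.action.status, response.root_password)
```

Calling rebuild() without return_response=True still returns only the action, but now emits a DeprecationWarning, as exercised by the test patch above.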
0.0
213b661d897cdd327f478b52aeb79844826694d8
[ "tests/unit/servers/test_client.py::TestBoundServer::test_rebuild", "tests/unit/servers/test_client.py::TestServersClient::test_rebuild[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_rebuild[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_rebuild_return_response_deprecation" ]
[ "tests/unit/servers/test_client.py::TestBoundServer::test_bound_server_init", "tests/unit/servers/test_client.py::TestBoundServer::test_get_actions_list[params0]", "tests/unit/servers/test_client.py::TestBoundServer::test_get_actions_list[params1]", "tests/unit/servers/test_client.py::TestBoundServer::test_get_actions[params0]", "tests/unit/servers/test_client.py::TestBoundServer::test_get_actions[params1]", "tests/unit/servers/test_client.py::TestBoundServer::test_update", "tests/unit/servers/test_client.py::TestBoundServer::test_delete", "tests/unit/servers/test_client.py::TestBoundServer::test_power_off", "tests/unit/servers/test_client.py::TestBoundServer::test_power_on", "tests/unit/servers/test_client.py::TestBoundServer::test_reboot", "tests/unit/servers/test_client.py::TestBoundServer::test_reset", "tests/unit/servers/test_client.py::TestBoundServer::test_shutdown", "tests/unit/servers/test_client.py::TestBoundServer::test_reset_password", "tests/unit/servers/test_client.py::TestBoundServer::test_change_type", "tests/unit/servers/test_client.py::TestBoundServer::test_enable_rescue", "tests/unit/servers/test_client.py::TestBoundServer::test_disable_rescue", "tests/unit/servers/test_client.py::TestBoundServer::test_create_image", "tests/unit/servers/test_client.py::TestBoundServer::test_enable_backup", "tests/unit/servers/test_client.py::TestBoundServer::test_disable_backup", "tests/unit/servers/test_client.py::TestBoundServer::test_attach_iso", "tests/unit/servers/test_client.py::TestBoundServer::test_detach_iso", "tests/unit/servers/test_client.py::TestBoundServer::test_change_dns_ptr", "tests/unit/servers/test_client.py::TestBoundServer::test_change_protection", "tests/unit/servers/test_client.py::TestBoundServer::test_request_console", "tests/unit/servers/test_client.py::TestBoundServer::test_attach_to_network[network0]", "tests/unit/servers/test_client.py::TestBoundServer::test_attach_to_network[network1]", "tests/unit/servers/test_client.py::TestBoundServer::test_detach_from_network[network0]", "tests/unit/servers/test_client.py::TestBoundServer::test_detach_from_network[network1]", "tests/unit/servers/test_client.py::TestBoundServer::test_change_alias_ips[network0]", "tests/unit/servers/test_client.py::TestBoundServer::test_change_alias_ips[network1]", "tests/unit/servers/test_client.py::TestBoundServer::test_add_to_placement_group[placement_group0]", "tests/unit/servers/test_client.py::TestBoundServer::test_add_to_placement_group[placement_group1]", "tests/unit/servers/test_client.py::TestBoundServer::test_remove_from_placement_group", "tests/unit/servers/test_client.py::TestServersClient::test_get_by_id", "tests/unit/servers/test_client.py::TestServersClient::test_get_list[params0]", "tests/unit/servers/test_client.py::TestServersClient::test_get_list[params1]", "tests/unit/servers/test_client.py::TestServersClient::test_get_list[params2]", "tests/unit/servers/test_client.py::TestServersClient::test_get_all[params0]", "tests/unit/servers/test_client.py::TestServersClient::test_get_all[params1]", "tests/unit/servers/test_client.py::TestServersClient::test_get_by_name", "tests/unit/servers/test_client.py::TestServersClient::test_create_with_datacenter", "tests/unit/servers/test_client.py::TestServersClient::test_create_with_location", "tests/unit/servers/test_client.py::TestServersClient::test_create_with_volumes", "tests/unit/servers/test_client.py::TestServersClient::test_create_with_networks", 
"tests/unit/servers/test_client.py::TestServersClient::test_create_with_firewalls", "tests/unit/servers/test_client.py::TestServersClient::test_create_with_placement_group", "tests/unit/servers/test_client.py::TestServersClient::test_get_actions_list[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_get_actions_list[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_update[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_update[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_delete[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_delete[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_power_off[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_power_off[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_power_on[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_power_on[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_reboot[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_reboot[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_reset[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_reset[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_shutdown[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_shutdown[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_reset_password[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_reset_password[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_type_with_server_type_name[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_change_type_with_server_type_name[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_type_with_server_type_id[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_change_type_with_server_type_id[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_type_with_blank_server_type[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_change_type_with_blank_server_type[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_enable_rescue[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_enable_rescue[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_disable_rescue[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_disable_rescue[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_create_image[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_create_image[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_enable_backup[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_enable_backup[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_disable_backup[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_disable_backup[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_attach_iso[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_attach_iso[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_detach_iso[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_detach_iso[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_dns_ptr[server0]", 
"tests/unit/servers/test_client.py::TestServersClient::test_change_dns_ptr[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_protection[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_change_protection[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_request_console[server0]", "tests/unit/servers/test_client.py::TestServersClient::test_request_console[server1]", "tests/unit/servers/test_client.py::TestServersClient::test_attach_to_network[network0-server0]", "tests/unit/servers/test_client.py::TestServersClient::test_attach_to_network[network0-server1]", "tests/unit/servers/test_client.py::TestServersClient::test_attach_to_network[network1-server0]", "tests/unit/servers/test_client.py::TestServersClient::test_attach_to_network[network1-server1]", "tests/unit/servers/test_client.py::TestServersClient::test_detach_from_network[network0-server0]", "tests/unit/servers/test_client.py::TestServersClient::test_detach_from_network[network0-server1]", "tests/unit/servers/test_client.py::TestServersClient::test_detach_from_network[network1-server0]", "tests/unit/servers/test_client.py::TestServersClient::test_detach_from_network[network1-server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_alias_ips[network0-server0]", "tests/unit/servers/test_client.py::TestServersClient::test_change_alias_ips[network0-server1]", "tests/unit/servers/test_client.py::TestServersClient::test_change_alias_ips[network1-server0]", "tests/unit/servers/test_client.py::TestServersClient::test_change_alias_ips[network1-server1]", "tests/unit/servers/test_client.py::TestServersClient::test_actions_get_by_id", "tests/unit/servers/test_client.py::TestServersClient::test_actions_get_list", "tests/unit/servers/test_client.py::TestServersClient::test_actions_get_all" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-08-07 11:01:29+00:00
mit
2,714
hgrecco__pint-426
diff --git a/pint/default_en.txt b/pint/default_en.txt index 807d4ad..391febe 100644 --- a/pint/default_en.txt +++ b/pint/default_en.txt @@ -108,7 +108,7 @@ btu = 1.05505585262e3 * joule = Btu = BTU = british_thermal_unit electron_volt = 1.60217653e-19 * J = eV quadrillion_btu = 10**15 * btu = quad thm = 100000 * BTU = therm = EC_therm -cal = 4.184 * joule = calorie = thermochemical_calorie +calorie = 4.184 * joule = cal = thermochemical_calorie international_steam_table_calorie = 4.1868 * joule ton_TNT = 4.184e9 * joule = tTNT US_therm = 1.054804e8 * joule diff --git a/pint/quantity.py b/pint/quantity.py index 980ff10..16b1dea 100644 --- a/pint/quantity.py +++ b/pint/quantity.py @@ -403,9 +403,9 @@ class _Quantity(SharedRegistryObject): unit_power = list(q_base._units.items())[0][1] if unit_power > 0: - power = int(math.floor(math.log10(magnitude) / unit_power / 3)) * 3 + power = int(math.floor(math.log10(abs(magnitude)) / unit_power / 3)) * 3 else: - power = int(math.ceil(math.log10(magnitude) / unit_power / 3)) * 3 + power = int(math.ceil(math.log10(abs(magnitude)) / unit_power / 3)) * 3 prefix = SI_bases[bisect.bisect_left(SI_powers, power)] diff --git a/pint/systems.py b/pint/systems.py index e2d5564..e74b6de 100644 --- a/pint/systems.py +++ b/pint/systems.py @@ -434,7 +434,7 @@ class Lister(object): self.d = d def __dir__(self): - return frozenset(self.d.keys()) + return list(self.d.keys()) def __getattr__(self, item): return self.d[item]
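Besides the __dir__ fix, the abs() change in quantity.py makes to_compact work for negative magnitudes; a minimal sketch mirroring the new test case:

```python
from pint import UnitRegistry

ureg = UnitRegistry()

# Before the abs() fix, math.log10 received the negative magnitude and
# raised "ValueError: math domain error"; negative quantities now
# compact like their positive counterparts.
q = (-1000.0 * ureg("meters")).to_compact()
print(q)  # -> -1.0 kilometer
```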
hgrecco/pint
199905ad630cdbc453afd958379e32d4f55ddb3b
diff --git a/pint/testsuite/test_quantity.py b/pint/testsuite/test_quantity.py index fd0a6b1..0a4c18b 100644 --- a/pint/testsuite/test_quantity.py +++ b/pint/testsuite/test_quantity.py @@ -123,8 +123,14 @@ class TestQuantity(QuantityTestCase): self.assertEqual(q2.magnitude, q2b.magnitude) self.assertEqual(q2.units, q2b.units) + q3 = (-1000.0 * self.ureg('meters')).to_compact() + q3b = self.Q_(-1., 'kilometer') + self.assertEqual(q3.magnitude, q3b.magnitude) + self.assertEqual(q3.units, q3b.units) + self.assertEqual('{0:#.1f}'.format(q1), '{0}'.format(q1b)) self.assertEqual('{0:#.1f}'.format(q2), '{0}'.format(q2b)) + self.assertEqual('{0:#.1f}'.format(q3), '{0}'.format(q3b)) def test_default_formatting(self):
System lister error When trying to check the available unit systems (as shown in the [documentation](https://pint.readthedocs.io/en/0.7.2/systems.html)): `>>> dir(ureg.sys)` An error occurs: > TypeError: __dir__() must return a list, not frozenset **Python:** 2.7.12 (3.5.2 works fine) **Pint:** 0.7.2
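A minimal reproduction of the Python 2 constraint, independent of the rest of pint; the Lister class mirrors the one patched in pint/systems.py:

```python
class Lister(object):
    def __init__(self, d):
        self.d = d

    def __dir__(self):
        # On Python 2, dir() insists on a list here; returning a
        # frozenset raises "TypeError: __dir__() must return a list,
        # not frozenset". Converting to a list, as the patch does,
        # works on both Python 2 and Python 3.
        return list(self.d.keys())

    def __getattr__(self, item):
        return self.d[item]


print(dir(Lister({"mks": 1, "imperial": 2})))
```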
0.0
199905ad630cdbc453afd958379e32d4f55ddb3b
[ "pint/testsuite/test_quantity.py::TestQuantity::test_format_compact" ]
[ "pint/testsuite/test_quantity.py::TestQuantity::test_both_symbol", "pint/testsuite/test_quantity.py::TestQuantity::test_context_attr", "pint/testsuite/test_quantity.py::TestQuantity::test_default_formatting", "pint/testsuite/test_quantity.py::TestQuantity::test_dimensionless_units", "pint/testsuite/test_quantity.py::TestQuantity::test_offset", "pint/testsuite/test_quantity.py::TestQuantity::test_offset_delta", "pint/testsuite/test_quantity.py::TestQuantity::test_pickle", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_bool", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison_convert", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_creation", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_format", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_repr", "pint/testsuite/test_quantity.py::TestQuantity::test_to_base_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_derived_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_dimensionally_simple_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_fractional_exponent_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_square_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_limits_magnitudes", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_power_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_unit_parameter", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_float", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_fraction", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_abs_round", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_float_complex", "pint/testsuite/test_quantity.py::TestDimensions::test_dimensionality", "pint/testsuite/test_quantity.py::TestDimensions::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_dimensionality", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_dimensionality", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00014", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00013", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00006", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00019", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00027", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00036" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2016-08-19 08:22:12+00:00
bsd-3-clause
2,715
hgrecco__pint-527
diff --git a/.gitignore b/.gitignore index e39c2e8..545148c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,6 @@ MANIFEST # WebDAV file system cache files .DAV/ + +# tags files (from ctags) +tags diff --git a/pint/default_en.txt b/pint/default_en.txt index 4035883..fba87d2 100644 --- a/pint/default_en.txt +++ b/pint/default_en.txt @@ -72,6 +72,10 @@ cmil = 5.067075e-10 * m ** 2 = circular_mils darcy = 9.869233e-13 * m ** 2 hectare = 100 * are = ha +# Concentration +[concentration] = [substance] / [volume] +molar = mol / (1e-3 * m ** 3) = M + # EM esu = 1 * erg**0.5 * centimeter**0.5 = statcoulombs = statC = franklin = Fr esu_per_second = 1 * esu / second = statampere
hgrecco/pint
240349f2eefc6acc3f7b2784e6064dca98a45f60
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index 3b06fd6..b5c494b 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -39,7 +39,6 @@ class TestIssues(QuantityTestCase): def test_issue29(self): ureg = UnitRegistry() - ureg.define('molar = mole / liter = M') t = 4 * ureg('mM') self.assertEqual(t.magnitude, 4) self.assertEqual(t._units, UnitsContainer(millimolar=1)) @@ -561,4 +560,4 @@ class TestIssuesNP(QuantityTestCase): a = np.asarray([1, 2, 3]) q = [1, 2, 3] * ureg.dimensionless p = (q ** q).m - np.testing.assert_array_equal(p, a ** a) \ No newline at end of file + np.testing.assert_array_equal(p, a ** a)
Concentration Units Hi, Is there any reason concentration units (in particular molarity, mol / L) are not included as default units in Pint? They are very easy to define by hand, but they seem widely used enough to warrant inclusion in the defaults list. If there are no objections to adding them, I'm happy to submit a PR doing so.
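With the definition added to default_en.txt above, prefixed molar units parse out of the box; a minimal sketch:

```python
from pint import UnitRegistry

ureg = UnitRegistry()

# molar is now a default unit, so no prior
# ureg.define('molar = mole / liter = M') is needed.
c = 4 * ureg("mM")
print(c)                     # -> 4 millimolar
print(c.to("mol / m ** 3"))  # -> 4.0 mole / meter ** 3
```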
0.0
240349f2eefc6acc3f7b2784e6064dca98a45f60
[ "pint/testsuite/test_issues.py::TestIssues::test_issue29" ]
[ "pint/testsuite/test_issues.py::TestIssues::test_alternative_angstrom_definition", "pint/testsuite/test_issues.py::TestIssues::test_angstrom_creation", "pint/testsuite/test_issues.py::TestIssues::test_issue104", "pint/testsuite/test_issues.py::TestIssues::test_issue105", "pint/testsuite/test_issues.py::TestIssues::test_issue121", "pint/testsuite/test_issues.py::TestIssues::test_issue170", "pint/testsuite/test_issues.py::TestIssues::test_issue52", "pint/testsuite/test_issues.py::TestIssues::test_issue523", "pint/testsuite/test_issues.py::TestIssues::test_issue54", "pint/testsuite/test_issues.py::TestIssues::test_issue54_related", "pint/testsuite/test_issues.py::TestIssues::test_issue61", "pint/testsuite/test_issues.py::TestIssues::test_issue61_notNP", "pint/testsuite/test_issues.py::TestIssues::test_issue66", "pint/testsuite/test_issues.py::TestIssues::test_issue66b", "pint/testsuite/test_issues.py::TestIssues::test_issue69", "pint/testsuite/test_issues.py::TestIssues::test_issue85", "pint/testsuite/test_issues.py::TestIssues::test_issue86", "pint/testsuite/test_issues.py::TestIssues::test_issue93", "pint/testsuite/test_issues.py::TestIssues::test_issues86b", "pint/testsuite/test_issues.py::TestIssues::test_micro_creation" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2017-06-09 02:38:23+00:00
bsd-3-clause
2,716
hgrecco__pint-630
diff --git a/.travis.yml b/.travis.yml index 800263e..fd6781b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: python env: - - UNCERTAINTIES="N" PYTHON="2.7" NUMPY_VERSION=1.11.2 - UNCERTAINTIES="N" PYTHON="3.3" NUMPY_VERSION=1.9.2 - UNCERTAINTIES="N" PYTHON="3.4" NUMPY_VERSION=1.11.2 - UNCERTAINTIES="N" PYTHON="3.5" NUMPY_VERSION=1.11.2 @@ -9,6 +8,12 @@ env: - UNCERTAINTIES="N" PYTHON="3.6" NUMPY_VERSION=1.11.2 - UNCERTAINTIES="N" PYTHON="2.7" NUMPY_VERSION=0 - UNCERTAINTIES="N" PYTHON="3.5" NUMPY_VERSION=0 + # Test with the latest numpy version + - UNCERTAINTIES="N" PYTHON="2.7" NUMPY_VERSION=1.14 + - UNCERTAINTIES="N" PYTHON="3.4" NUMPY_VERSION=1.14 + - UNCERTAINTIES="N" PYTHON="3.5" NUMPY_VERSION=1.14 + - UNCERTAINTIES="Y" PYTHON="3.5" NUMPY_VERSION=1.14 + - UNCERTAINTIES="N" PYTHON="3.6" NUMPY_VERSION=1.14 before_install: - sudo apt-get update diff --git a/pint/quantity.py b/pint/quantity.py index 8a6599b..88bfdac 100644 --- a/pint/quantity.py +++ b/pint/quantity.py @@ -1003,8 +1003,8 @@ class _Quantity(PrettyIPython, SharedRegistryObject): raise OffsetUnitCalculusError(self._units) if getattr(other, 'dimensionless', False): - other = other.to_base_units() - self._units **= other.magnitude + other = other.to_base_units().magnitude + self._units **= other elif not getattr(other, 'dimensionless', True): raise DimensionalityError(self._units, 'dimensionless') else: @@ -1090,6 +1090,20 @@ class _Quantity(PrettyIPython, SharedRegistryObject): # We compare to the base class of Quantity because # each Quantity class is unique. if not isinstance(other, _Quantity): + if _eq(other, 0, True): + # Handle the special case in which we compare to zero + # (or an array of zeros) + if self._is_multiplicative: + # compare magnitude + return _eq(self._magnitude, other, False) + else: + # compare the magnitude after converting the + # non-multiplicative quantity to base units + if self._REGISTRY.autoconvert_offset_to_baseunit: + return _eq(self.to_base_units()._magnitude, other, False) + else: + raise OffsetUnitCalculusError(self._units) + return (self.dimensionless and _eq(self._convert_magnitude(UnitsContainer()), other, False)) @@ -1115,6 +1129,19 @@ class _Quantity(PrettyIPython, SharedRegistryObject): if not isinstance(other, self.__class__): if self.dimensionless: return op(self._convert_magnitude_not_inplace(UnitsContainer()), other) + elif _eq(other, 0, True): + # Handle the special case in which we compare to zero + # (or an array of zeros) + if self._is_multiplicative: + # compare magnitude + return op(self._magnitude, other) + else: + # compare the magnitude after converting the + # non-multiplicative quantity to base units + if self._REGISTRY.autoconvert_offset_to_baseunit: + return op(self.to_base_units()._magnitude, other) + else: + raise OffsetUnitCalculusError(self._units) else: raise ValueError('Cannot compare Quantity and {}'.format(type(other)))
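The __ipow__ change can be exercised directly; a sketch mirroring the failing parameterised test, assuming force_ndarray=True so the in-place branch (rather than the plain __pow__ fallback) is taken:

```python
import operator

from pint import UnitRegistry

ureg = UnitRegistry(force_ndarray=True)

q = ureg.Quantity(10, "kelvin")
exponent = ureg.Quantity(2, "")  # dimensionless Quantity

# Previously the Quantity itself leaked into
# "self._magnitude **= _to_magnitude(other, ...)", which numpy >= 1.13
# rejects; the patch extracts the bare magnitude from the dimensionless
# exponent first.
result = operator.ipow(q, exponent)
print(result)  # -> 100 kelvin ** 2
```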
hgrecco/pint
90174a33c00a1364fcf2af9c59adf7859b0706b2
diff --git a/pint/testsuite/test_quantity.py b/pint/testsuite/test_quantity.py index 54d460a..5c0e22b 100644 --- a/pint/testsuite/test_quantity.py +++ b/pint/testsuite/test_quantity.py @@ -1296,3 +1296,107 @@ class TestTimedelta(QuantityTestCase): after = 3 * self.ureg.second with self.assertRaises(DimensionalityError): after -= d + + +class TestCompareZero(QuantityTestCase): + """This test case checks the special treatment that the zero value + receives in the comparisons: pint>=0.9 supports comparisons against zero + even for non-dimensionless quantities + """ + + def test_equal_zero(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = False + self.assertTrue(ureg.Quantity(0, ureg.J) == 0) + self.assertFalse(ureg.Quantity(0, ureg.J) == ureg.Quantity(0, '')) + self.assertFalse(ureg.Quantity(5, ureg.J) == 0) + + @helpers.requires_numpy() + def test_equal_zero_NP(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = False + aeq = np.testing.assert_array_equal + aeq(ureg.Quantity(0, ureg.J) == np.zeros(3), + np.asarray([True, True, True])) + aeq(ureg.Quantity(5, ureg.J) == np.zeros(3), + np.asarray([False, False, False])) + aeq(ureg.Quantity(np.arange(3), ureg.J) == np.zeros(3), + np.asarray([True, False, False])) + self.assertFalse(ureg.Quantity(np.arange(4), ureg.J) == np.zeros(3)) + + def test_offset_equal_zero(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = False + q0 = ureg.Quantity(-273.15, 'degC') + q1 = ureg.Quantity(0, 'degC') + q2 = ureg.Quantity(5, 'degC') + self.assertRaises(OffsetUnitCalculusError, q0.__eq__, 0) + self.assertRaises(OffsetUnitCalculusError, q1.__eq__, 0) + self.assertRaises(OffsetUnitCalculusError, q2.__eq__, 0) + self.assertFalse(q0 == ureg.Quantity(0, '')) + + def test_offset_autoconvert_equal_zero(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = True + q0 = ureg.Quantity(-273.15, 'degC') + q1 = ureg.Quantity(0, 'degC') + q2 = ureg.Quantity(5, 'degC') + self.assertTrue(q0 == 0) + self.assertFalse(q1 == 0) + self.assertFalse(q2 == 0) + self.assertFalse(q0 == ureg.Quantity(0, '')) + + def test_gt_zero(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = False + q0 = ureg.Quantity(0, 'J') + q0m = ureg.Quantity(0, 'm') + q0less = ureg.Quantity(0, '') + qpos = ureg.Quantity(5, 'J') + qneg = ureg.Quantity(-5, 'J') + self.assertTrue(qpos > q0) + self.assertTrue(qpos > 0) + self.assertFalse(qneg > 0) + self.assertRaises(DimensionalityError, qpos.__gt__, q0less) + self.assertRaises(DimensionalityError, qpos.__gt__, q0m) + + @helpers.requires_numpy() + def test_gt_zero_NP(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = False + qpos = ureg.Quantity(5, 'J') + qneg = ureg.Quantity(-5, 'J') + aeq = np.testing.assert_array_equal + aeq(qpos > np.zeros(3), np.asarray([True, True, True])) + aeq(qneg > np.zeros(3), np.asarray([False, False, False])) + aeq(ureg.Quantity(np.arange(-1, 2), ureg.J) > np.zeros(3), + np.asarray([False, False, True])) + aeq(ureg.Quantity(np.arange(-1, 2), ureg.J) > np.zeros(3), + np.asarray([False, False, True])) + self.assertRaises(ValueError, + ureg.Quantity(np.arange(-1, 2), ureg.J).__gt__, + np.zeros(4)) + + def test_offset_gt_zero(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = False + q0 = ureg.Quantity(-273.15, 'degC') + q1 = ureg.Quantity(0, 'degC') + q2 = ureg.Quantity(5, 'degC') + self.assertRaises(OffsetUnitCalculusError, q0.__gt__, 0) + self.assertRaises(OffsetUnitCalculusError, q1.__gt__, 0) + 
self.assertRaises(OffsetUnitCalculusError, q2.__gt__, 0) + self.assertRaises(DimensionalityError, q1.__gt__, + ureg.Quantity(0, '')) + + def test_offset_autoconvert_gt_zero(self): + ureg = self.ureg + ureg.autoconvert_offset_to_baseunit = True + q0 = ureg.Quantity(-273.15, 'degC') + q1 = ureg.Quantity(0, 'degC') + q2 = ureg.Quantity(5, 'degC') + self.assertFalse(q0 > 0) + self.assertTrue(q1 > 0) + self.assertTrue(q2 > 0) + self.assertRaises(DimensionalityError, q1.__gt__, + ureg.Quantity(0, '')) \ No newline at end of file diff --git a/pint/testsuite/test_umath.py b/pint/testsuite/test_umath.py index 0d0c544..589aaa4 100644 --- a/pint/testsuite/test_umath.py +++ b/pint/testsuite/test_umath.py @@ -613,7 +613,7 @@ class TestFloatingUfuncs(TestUFuncs): (self.q1, self.qm, self.qless)) def test_isfinite(self): - self._testn(np.isreal, + self._testn(np.isfinite, (self.q1, self.qm, self.qless)) def test_isinf(self):
Unit tests fail with numpy 1.13.1

Refs: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=876921

Thanks for fixing.

```
S.S................................................./usr/lib/python2.7/unittest/case.py:340: RuntimeWarning: TestResult has no addExpectedFailure method, reporting as passes
  RuntimeWarning)
.......S...S............................SSSSSSSSSSS.................S...................................................................................................................................................E.E...E.................................................................................................................................................................................................................................................................................................../build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py:287: RuntimeWarning: to_compact applied to non numerical types has an undefined behavior.
  self.assertQuantityAlmostIdentical(q.to_compact(unit=unit),
.......................................F..F..........................................................................................................................................................................
======================================================================
ERROR: test_inplace_exponentiation (pint.testsuite.test_quantity.TestOffsetUnitMath) [with input = ((10, u'degC'), (2, u'')); expected_output = [u'error', (80173.92249999999, u'kelvin**2')]]
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 116, in new_method
    return method(self, *param_values)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 137, in newfunc
    return func(*arg, **kwargs)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py", line 1165, in test_inplace_exponentiation
    self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/quantity.py", line 961, in __ipow__
    self._magnitude **= _to_magnitude(other, self.force_ndarray)
TypeError: unsupported operand type(s) for ** or pow(): 'numpy.ndarray' and 'Quantity'

======================================================================
ERROR: test_inplace_exponentiation (pint.testsuite.test_quantity.TestOffsetUnitMath) [with input = ((10, u'kelvin'), (2, u'')); expected_output = [(100.0, u'kelvin**2'), (100.0, u'kelvin**2')]]
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 116, in new_method
    return method(self, *param_values)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 137, in newfunc
    return func(*arg, **kwargs)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py", line 1165, in test_inplace_exponentiation
    self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/quantity.py", line 961, in __ipow__
    self._magnitude **= _to_magnitude(other, self.force_ndarray)
TypeError: unsupported operand type(s) for ** or pow(): 'numpy.ndarray' and 'Quantity'

======================================================================
ERROR: test_inplace_exponentiation (pint.testsuite.test_quantity.TestOffsetUnitMath) [with input = ((10, u'degC'), (500.0, u'millikelvin/kelvin')); expected_output = [u'error', (16.827061537891872, u'kelvin**0.5')]]
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 116, in new_method
    return method(self, *param_values)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 137, in newfunc
    return func(*arg, **kwargs)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py", line 1165, in test_inplace_exponentiation
    self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/quantity.py", line 961, in __ipow__
    self._magnitude **= _to_magnitude(other, self.force_ndarray)
TypeError: unsupported operand type(s) for ** or pow(): 'numpy.ndarray' and 'Quantity'

======================================================================
FAIL: test_isfinite (pint.testsuite.test_umath.TestFloatingUfuncs)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 617, in test_isfinite
    (self.q1, self.qm, self.qless))
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 101, in _testn
    self._test1(func, ok_with, raise_with, output_units=None, results=results)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 85, in _test1
    self.assertQuantityAlmostEqual(qm, res, rtol=rtol, msg=err_msg)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/__init__.py", line 117, in assertQuantityAlmostEqual
    np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
  File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 1395, in assert_allclose
    verbose=verbose, header=header, equal_nan=equal_nan)
  File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 778, in assert_array_compare
    raise AssertionError(msg)
AssertionError: Not equal to tolerance rtol=1e-06, atol=0
At isreal with [ 1. 2. 3. 4.]
joule
(mismatch 100.0%)
 x: array(False, dtype=bool)
 y: array([ True, True, True, True], dtype=bool)

======================================================================
FAIL: test_isreal (pint.testsuite.test_umath.TestFloatingUfuncs)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 609, in test_isreal
    (self.q1, self.qm, self.qless))
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 101, in _testn
    self._test1(func, ok_with, raise_with, output_units=None, results=results)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 85, in _test1
    self.assertQuantityAlmostEqual(qm, res, rtol=rtol, msg=err_msg)
  File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/__init__.py", line 117, in assertQuantityAlmostEqual
    np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
  File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 1395, in assert_allclose
    verbose=verbose, header=header, equal_nan=equal_nan)
  File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 778, in assert_array_compare
    raise AssertionError(msg)
AssertionError: Not equal to tolerance rtol=1e-06, atol=0
At isreal with [ 1. 2. 3. 4.]
joule
(mismatch 100.0%)
 x: array(False, dtype=bool)
 y: array([ True, True, True, True], dtype=bool)

----------------------------------------------------------------------
Ran 779 tests in 40.510s
```
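For context on the two FAIL entries: as the assertion output shows, `np.isreal` applied to the Quantity's magnitude under numpy 1.13 came back as a scalar `array(False)` instead of an elementwise boolean array, and the test patch above accordingly fixes `test_isfinite` to call `np.isfinite` rather than `np.isreal`. A minimal plain-numpy sketch of the elementwise result the tests expect (the failure only surfaces once a pint Quantity wraps the array):

```python
import numpy as np

x = np.array([1., 2., 3., 4.])

# An elementwise ufunc check returns a boolean array of the same shape,
# which is what the suite's assertQuantityAlmostEqual compares against.
print(np.isfinite(x))  # [ True  True  True  True]
```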
0.0
90174a33c00a1364fcf2af9c59adf7859b0706b2
[ "pint/testsuite/test_quantity.py::TestCompareZero::test_equal_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_gt_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_autoconvert_equal_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_autoconvert_gt_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_equal_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_gt_zero" ]
[ "pint/testsuite/test_quantity.py::TestQuantity::test_both_symbol", "pint/testsuite/test_quantity.py::TestQuantity::test_context_attr", "pint/testsuite/test_quantity.py::TestQuantity::test_default_formatting", "pint/testsuite/test_quantity.py::TestQuantity::test_dimensionless_units", "pint/testsuite/test_quantity.py::TestQuantity::test_exponent_formatting", "pint/testsuite/test_quantity.py::TestQuantity::test_format_compact", "pint/testsuite/test_quantity.py::TestQuantity::test_ipython", "pint/testsuite/test_quantity.py::TestQuantity::test_offset", "pint/testsuite/test_quantity.py::TestQuantity::test_offset_delta", "pint/testsuite/test_quantity.py::TestQuantity::test_pickle", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_bool", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison_convert", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_creation", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_format", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_hash", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_repr", "pint/testsuite/test_quantity.py::TestQuantity::test_to_base_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_derived_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_dimensionally_simple_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_fractional_exponent_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_square_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_limits_magnitudes", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_power_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_unit_parameter", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_float", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_fraction", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_abs_round", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_float_complex", "pint/testsuite/test_quantity.py::TestDimensions::test_dimensionality", "pint/testsuite/test_quantity.py::TestDimensions::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestDimensions::test_inclusion", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_dimensionality", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_inclusion", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_dimensionality", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_inclusion", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00007", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00006", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00036", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00011", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00019", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00036", "pint/testsuite/test_quantity.py::TestDimensionReduction::test_mul_and_div_reduction", "pint/testsuite/test_quantity.py::TestDimensionReduction::test_nocoerce_creation", "pint/testsuite/test_quantity.py::TestDimensionReduction::test_reduction_to_dimensionless", "pint/testsuite/test_quantity.py::TestTimedelta::test_add_sub", "pint/testsuite/test_quantity.py::TestTimedelta::test_iadd_isub" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2018-04-13 17:39:43+00:00
bsd-3-clause
2,717
hgrecco__pint-658
diff --git a/docs/wrapping.rst b/docs/wrapping.rst index dd9a39f..6ce6411 100644 --- a/docs/wrapping.rst +++ b/docs/wrapping.rst @@ -246,3 +246,12 @@ In the decorator format: ... def pendulum_period(length): ... return 2*math.pi*math.sqrt(length/G) +If you just want to check the dimensionality of a quantity, you can do so with the built-in 'check' function. + +.. doctest:: + + >>> distance = 1 * ureg.m + >>> distance.check('[length]') + True + >>> distance.check('[time]') + False diff --git a/pint/quantity.py b/pint/quantity.py index 88bfdac..ef25509 100644 --- a/pint/quantity.py +++ b/pint/quantity.py @@ -294,6 +294,11 @@ class _Quantity(PrettyIPython, SharedRegistryObject): return self._dimensionality + def check(self, dimension): + """Return true if the quantity's dimension matches passed dimension. + """ + return self.dimensionality == dimension + @classmethod def from_tuple(cls, tup): return cls(tup[0], UnitsContainer(tup[1])) diff --git a/pint/registry_helpers.py b/pint/registry_helpers.py index 4a8b862..5ac0dbd 100644 --- a/pint/registry_helpers.py +++ b/pint/registry_helpers.py @@ -130,6 +130,20 @@ def _parse_wrap_args(args, registry=None): return _converter +def _apply_defaults(func, args, kwargs): + """Apply default keyword arguments. + + Named keywords may have been left blank. This function applies the default + values so that every argument is defined. + """ + + sig = signature(func) + bound_arguments = sig.bind(*args) + for param in sig.parameters.values(): + if param.name not in bound_arguments.arguments: + bound_arguments.arguments[param.name] = param.default + args = [bound_arguments.arguments[key] for key in sig.parameters.keys()] + return args, {} def wraps(ureg, ret, args, strict=True): """Wraps a function to become pint-aware. @@ -171,18 +185,7 @@ def wraps(ureg, ret, args, strict=True): @functools.wraps(func, assigned=assigned, updated=updated) def wrapper(*values, **kw): - - # Named keywords may have been left blank. Wherever the named keyword is blank, - # fill it in with the default value. - sig = signature(func) - bound_arguments = sig.bind(*values, **kw) - - for param in sig.parameters.values(): - if param.name not in bound_arguments.arguments: - bound_arguments.arguments[param.name] = param.default - - values = [bound_arguments.arguments[key] for key in sig.parameters.keys()] - kw = {} + values, kw = _apply_defaults(func, values, kw) # In principle, the values are used as is # When then extract the magnitudes when needed. @@ -228,13 +231,17 @@ def check(ureg, *args): @functools.wraps(func, assigned=assigned, updated=updated) def wrapper(*values, **kwargs): - for dim, value in zip_longest(dimensions, values): + values, kwargs = _apply_defaults(func, values, kwargs) + if len(dimensions) > len(values): + raise TypeError("%s takes %i parameters, but %i dimensions were passed" + % (func.__name__, len(values), len(dimensions))) + for dim, value in zip(dimensions, values): if dim is None: continue - val_dim = ureg.get_dimensionality(value) - if val_dim != dim: + if not ureg.Quantity(value).check(dim): + val_dim = ureg.get_dimensionality(value) raise DimensionalityError(value, 'a quantity of', val_dim, dim) return func(*values, **kwargs)
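A note on the `registry_helpers.py` hunk above: the default-filling logic that used to live inline in `wraps` is factored out into `_apply_defaults` so that `check` can reuse it. A self-contained sketch of that binding idea (the helper name and example function are illustrative, not pint's API):

```python
from inspect import signature

def apply_defaults(func, args):
    # Bind the positional args, then fill in every parameter the caller
    # left out with its declared default (mirrors pint's _apply_defaults).
    sig = signature(func)
    bound = sig.bind(*args)
    for param in sig.parameters.values():
        if param.name not in bound.arguments:
            bound.arguments[param.name] = param.default
    return [bound.arguments[name] for name in sig.parameters]

def pendulum_period(length, g=9.81):
    return (length / g) ** 0.5

print(apply_defaults(pendulum_period, (1.0,)))  # [1.0, 9.81]
```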
hgrecco/pint
ba5cc0e76243c3e1e2b82999a71425ce3c6f3c1e
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index 0cefce9..4dbcb98 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -625,3 +625,38 @@ class TestIssuesNP(QuantityTestCase): d2 = get_displacement(Q_(2, 's'), Q_(1, 'deg/s')) self.assertAlmostEqual(d2, Q_(2,' deg')) + + def test_issue655a(self): + ureg = UnitRegistry() + distance = 1 * ureg.m + time = 1 * ureg.s + velocity = distance / time + self.assertEqual(distance.check('[length]'), True) + self.assertEqual(distance.check('[time]'), False) + self.assertEqual(velocity.check('[length] / [time]'), True) + self.assertEqual(velocity.check('1 / [time] * [length]'), True) + + def test_issue(self): + import math + try: + from inspect import signature + except ImportError: + # Python2 does not have the inspect library. Import the backport + from funcsigs import signature + + ureg = UnitRegistry() + Q_ = ureg.Quantity + @ureg.check('[length]', '[length]/[time]^2') + def pendulum_period(length, G=Q_(1, 'standard_gravity')): + print(length) + return (2*math.pi*(length/G)**.5).to('s') + l = 1 * ureg.m + # Assume earth gravity + t = pendulum_period(l) + self.assertAlmostEqual(t, Q_('2.0064092925890407 second')) + # Use moon gravity + moon_gravity = Q_(1.625, 'm/s^2') + t = pendulum_period(l, moon_gravity) + self.assertAlmostEqual(t, Q_('4.928936075204336 second')) + + diff --git a/pint/testsuite/test_unit.py b/pint/testsuite/test_unit.py index 0e7b17f..b9e3b06 100644 --- a/pint/testsuite/test_unit.py +++ b/pint/testsuite/test_unit.py @@ -437,13 +437,13 @@ class TestRegistry(QuantityTestCase): g2 = ureg.check('[speed]')(gfunc) self.assertRaises(DimensionalityError, g2, 3.0, 1) - self.assertRaises(DimensionalityError, g2, 2 * ureg.parsec) + self.assertRaises(TypeError, g2, 2 * ureg.parsec) self.assertRaises(DimensionalityError, g2, 2 * ureg.parsec, 1.0) self.assertEqual(g2(2.0 * ureg.km / ureg.hour, 2), 1 * ureg.km / ureg.hour) g3 = ureg.check('[speed]', '[time]', '[mass]')(gfunc) - self.assertRaises(DimensionalityError, g3, 1 * ureg.parsec, 1 * ureg.angstrom) - self.assertRaises(DimensionalityError, g3, 1 * ureg.parsec, 1 * ureg.angstrom, 1 * ureg.kilogram) + self.assertRaises(TypeError, g3, 1 * ureg.parsec, 1 * ureg.angstrom) + self.assertRaises(TypeError, g3, 1 * ureg.parsec, 1 * ureg.angstrom, 1 * ureg.kilogram) def test_to_ref_vs_to(self): self.ureg.autoconvert_offset_to_baseunit = True
A check function that's not a decorator

Is there a way we could implement a check function for stand-alone use, not just for wrapping functions?

```python
>>> from pint import _DEFAULT_REGISTRY
>>> ureg = _DEFAULT_REGISTRY
>>> Q_ = ureg.Quantity
>>> distance = Q_('1 meter')
>>> time = Q_('1 second')
>>> ureg.check(distance, '[length]')
True
>>> ureg.check(time, '[length]')
False
```
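The patch above answers this with a `Quantity.check(dimension)` method (a plain dimensionality comparison) rather than a registry-level function; a usage sketch assuming the patched behaviour, mirroring the new tests:

```python
from pint import UnitRegistry

ureg = UnitRegistry()
distance = 1 * ureg.meter
velocity = distance / (1 * ureg.second)

# check() returns True iff the quantity's dimensionality matches.
assert distance.check('[length]')
assert not distance.check('[time]')
assert velocity.check('[length] / [time]')
```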
0.0
ba5cc0e76243c3e1e2b82999a71425ce3c6f3c1e
[ "pint/testsuite/test_unit.py::TestRegistry::test_check", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_check" ]
[ "pint/testsuite/test_issues.py::TestIssues::test_alternative_angstrom_definition", "pint/testsuite/test_issues.py::TestIssues::test_angstrom_creation", "pint/testsuite/test_issues.py::TestIssues::test_issue104", "pint/testsuite/test_issues.py::TestIssues::test_issue105", "pint/testsuite/test_issues.py::TestIssues::test_issue121", "pint/testsuite/test_issues.py::TestIssues::test_issue170", "pint/testsuite/test_issues.py::TestIssues::test_issue29", "pint/testsuite/test_issues.py::TestIssues::test_issue52", "pint/testsuite/test_issues.py::TestIssues::test_issue523", "pint/testsuite/test_issues.py::TestIssues::test_issue54", "pint/testsuite/test_issues.py::TestIssues::test_issue54_related", "pint/testsuite/test_issues.py::TestIssues::test_issue61", "pint/testsuite/test_issues.py::TestIssues::test_issue61_notNP", "pint/testsuite/test_issues.py::TestIssues::test_issue66", "pint/testsuite/test_issues.py::TestIssues::test_issue66b", "pint/testsuite/test_issues.py::TestIssues::test_issue69", "pint/testsuite/test_issues.py::TestIssues::test_issue85", "pint/testsuite/test_issues.py::TestIssues::test_issue86", "pint/testsuite/test_issues.py::TestIssues::test_issue93", "pint/testsuite/test_issues.py::TestIssues::test_issues86b", "pint/testsuite/test_issues.py::TestIssues::test_micro_creation", "pint/testsuite/test_unit.py::TestUnit::test_creation", "pint/testsuite/test_unit.py::TestUnit::test_deepcopy", "pint/testsuite/test_unit.py::TestUnit::test_dimensionality", "pint/testsuite/test_unit.py::TestUnit::test_dimensionless", "pint/testsuite/test_unit.py::TestUnit::test_ipython", "pint/testsuite/test_unit.py::TestUnit::test_unit_casting", "pint/testsuite/test_unit.py::TestUnit::test_unit_cmp", "pint/testsuite/test_unit.py::TestUnit::test_unit_default_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_div", "pint/testsuite/test_unit.py::TestUnit::test_unit_eqs", "pint/testsuite/test_unit.py::TestUnit::test_unit_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_hash", "pint/testsuite/test_unit.py::TestUnit::test_unit_mul", "pint/testsuite/test_unit.py::TestUnit::test_unit_pow", "pint/testsuite/test_unit.py::TestUnit::test_unit_rdiv", "pint/testsuite/test_unit.py::TestUnit::test_unit_repr", "pint/testsuite/test_unit.py::TestRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistry::test_rep_and_parse", 
"pint/testsuite/test_unit.py::TestRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistry::test_wraps", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_context_sp", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_base_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_compatible_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_many", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_lazy", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wraps", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00001", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00002", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00003", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00004", 
"pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00005", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00006", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00007", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00008", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00009", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00010", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00011", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00012", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00013", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00014", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00015", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00016", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00017", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00018", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00019" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2018-07-20 01:55:48+00:00
bsd-3-clause
2,718
hgrecco__pint-809
diff --git a/pint/constants_en.txt b/pint/constants_en.txt index 19616a0..dba75ae 100644 --- a/pint/constants_en.txt +++ b/pint/constants_en.txt @@ -10,7 +10,7 @@ speed_of_light = 299792458 * meter / second = c standard_gravity = 9.806650 * meter / second ** 2 = g_0 = g_n = gravity vacuum_permeability = 4 * pi * 1e-7 * newton / ampere ** 2 = mu_0 = magnetic_constant vacuum_permittivity = 1 / (mu_0 * c **2 ) = epsilon_0 = electric_constant -Z_0 = mu_0 * c = impedance_of_free_space = characteristic_impedance_of_vacuum +impedance_of_free_space = mu_0 * c = Z_0 = characteristic_impedance_of_vacuum # 0.000 000 29 e-34 planck_constant = 6.62606957e-34 J s = h @@ -29,7 +29,7 @@ molar_gas_constant = 8.3144621 J mol^-1 K^-1 = R fine_structure_constant = 7.2973525698e-3 # 0.000 000 27 e23 -avogadro_number = 6.02214129e23 mol^-1 =N_A +avogadro_number = 6.02214129e23 mol^-1 = N_A # 0.000 0013 e-23 boltzmann_constant = 1.3806488e-23 J K^-1 = k diff --git a/pint/default_en.txt b/pint/default_en.txt index e796636..b329087 100644 --- a/pint/default_en.txt +++ b/pint/default_en.txt @@ -58,17 +58,17 @@ count = [] [acceleration] = [length] / [time] ** 2 # Angle -turn = 2 * pi * radian = revolution = cycle = circle +turn = 2 * pi * radian = _ = revolution = cycle = circle degree = pi / 180 * radian = deg = arcdeg = arcdegree = angular_degree arcminute = arcdeg / 60 = arcmin = arc_minute = angular_minute -arcsecond = arcmin / 60 = arcsec = arc_second = angular_second +arcsecond = arcmin / 60 = arcsec = arc_second = angular_second steradian = radian ** 2 = sr # Area [area] = [length] ** 2 are = 100 * m**2 barn = 1e-28 * m ** 2 = b -cmil = 5.067075e-10 * m ** 2 = circular_mils +circular_mil = 5.067075e-10 * m ** 2 = cmil = circular_mils darcy = 9.869233e-13 * m ** 2 hectare = 100 * are = ha @@ -81,8 +81,8 @@ molar = mol / (1e-3 * m ** 3) = M katal = mole / second = kat # EM -esu = 1 * erg**0.5 * centimeter**0.5 = statcoulombs = statC = franklin = Fr -esu_per_second = 1 * esu / second = statampere +esu = 1 * erg**0.5 * centimeter**0.5 = statcoulomb = statC = franklin = Fr +esu_per_second = 1 * esu / second = _ = statampere = statA ampere_turn = 1 * A gilbert = 10 / (4 * pi ) * ampere_turn coulomb = ampere * second = C @@ -96,15 +96,15 @@ henry = weber / ampere = H elementary_charge = 1.602176487e-19 * coulomb = e chemical_faraday = 9.64957e4 * coulomb physical_faraday = 9.65219e4 * coulomb -faraday = 96485.3399 * coulomb = C12_faraday +faraday = 96485.3399 * coulomb = _ = C12_faraday gamma = 1e-9 * tesla gauss = 1e-4 * tesla -maxwell = 1e-8 * weber = mx +maxwell = 1e-8 * weber = Mx oersted = 1000 / (4 * pi) * A / m = Oe statfarad = 1.112650e-12 * farad = statF = stF stathenry = 8.987554e11 * henry = statH = stH statmho = 1.112650e-12 * siemens = statS = stS -statohm = 8.987554e11 * ohm +statohm = 8.987554e11 * ohm = statΩ = stΩ statvolt = 2.997925e2 * volt = statV = stV unit_pole = 1.256637e-7 * weber @@ -112,17 +112,17 @@ unit_pole = 1.256637e-7 * weber [energy] = [force] * [length] joule = newton * meter = J erg = dyne * centimeter -btu = 1.05505585262e3 * joule = Btu = BTU = british_thermal_unit +british_thermal_unit = 1.05505585262e3 * joule = Btu = BTU = btu electron_volt = 1.60217653e-19 * J = eV quadrillion_btu = 10**15 * btu = quad -thm = 100000 * BTU = therm = EC_therm +therm = 100000 * BTU = thm = EC_therm calorie = 4.184 * joule = cal = thermochemical_calorie international_steam_table_calorie = 4.1868 * joule ton_TNT = 4.184e9 * joule = tTNT US_therm = 1.054804e8 * joule watt_hour = watt * hour = Wh = 
watthour -hartree = 4.35974394e-18 * joule = = Eh = E_h = hartree_energy -toe = 41.868e9 * joule = tonne_of_oil_equivalent +hartree = 4.35974394e-18 * joule = E_h = Eh = hartree_energy +tonne_of_oil_equivalent = 41.868e9 * joule = toe # Force [force] = [mass] * [acceleration] @@ -132,7 +132,7 @@ force_kilogram = g_0 * kilogram = kgf = kilogram_force = pond force_gram = g_0 * gram = gf = gram_force force_ounce = g_0 * ounce = ozf = ounce_force force_pound = g_0 * lb = lbf = pound_force -force_metric_ton = g_0 * t = metric_ton_force = force_t = t_force +force_metric_ton = g_0 * t = tf = metric_ton_force = force_t = t_force poundal = lb * feet / second ** 2 = pdl kip = 1000*lbf @@ -144,7 +144,7 @@ counts_per_second = count / second = cps # Heat #RSI = degK * meter ** 2 / watt -#clo = 0.155 * RSI = clos +#clo = 0.155 * RSI = _ = clos #R_value = foot ** 2 * degF * hour / btu # Information @@ -153,7 +153,7 @@ baud = bit / second = Bd = bps # Irradiance peak_sun_hour = 1000 * watt_hour / meter**2 = PSH -langley = thermochemical_calorie / centimeter**2 = Langley +langley = thermochemical_calorie / centimeter**2 = Ly # Length angstrom = 1e-10 * meter = Å = ångström = Å @@ -164,7 +164,7 @@ astronomical_unit = 149597870691 * meter = au # Mass carat = 200 * milligram metric_ton = 1000 * kilogram = t = tonne -atomic_mass_unit = 1.660538782e-27 * kilogram = u = amu = dalton = Da +atomic_mass_unit = 1.660538782e-27 * kilogram = u = amu = dalton = Da bag = 94 * lb # Textile @@ -195,13 +195,13 @@ boiler_horsepower = 33475 * btu / hour metric_horsepower = 75 * force_kilogram * meter / second electric_horsepower = 746 * watt hydraulic_horsepower = 550 * feet * lbf / second -refrigeration_ton = 12000 * btu / hour = ton_of_refrigeration +refrigeration_ton = 12000 * btu / hour = _ = ton_of_refrigeration # Pressure [pressure] = [force] / [area] -Hg = gravity * 13.59510 * gram / centimeter ** 3 = mercury = conventional_mercury +mercury = gravity * 13.59510 * gram / centimeter ** 3 = Hg = conventional_mercury mercury_60F = gravity * 13.5568 * gram / centimeter ** 3 -H2O = gravity * 1000 * kilogram / meter ** 3 = h2o = water = conventional_water +water = gravity * 1000 * kilogram / meter ** 3 = H2O = h2o = conventional_water water_4C = gravity * 999.972 * kilogram / meter ** 3 = water_39F water_60F = gravity * 999.001 * kilogram / m ** 3 pascal = newton / meter ** 2 = Pa @@ -211,31 +211,31 @@ technical_atmosphere = kilogram * gravity / centimeter ** 2 = at torr = atm / 760 pound_force_per_square_inch = pound * gravity / inch ** 2 = psi kip_per_square_inch = kip / inch ** 2 = ksi -barye = 0.1 * newton / meter ** 2 = barie = barad = barrie = baryd = Ba -mm_Hg = millimeter * Hg = mmHg = millimeter_Hg = millimeter_Hg_0C -cm_Hg = centimeter * Hg = cmHg = centimeter_Hg -in_Hg = inch * Hg = inHg = inch_Hg = inch_Hg_32F +barye = 0.1 * newton / meter ** 2 = Ba = barie = barad = barrie = baryd +millimeter_Hg = millimeter * Hg = mmHg = mm_Hg = millimeter_Hg_0C +centimeter_Hg = centimeter * Hg = cmHg = cm_Hg +inch_Hg = inch * Hg = inHg = in_Hg = inch_Hg_32F inch_Hg_60F = inch * mercury_60F inch_H2O_39F = inch * water_39F inch_H2O_60F = inch * water_60F -footH2O = ft * water -cmH2O = centimeter * water -foot_H2O = ft * water = ftH2O +centimeter_water = centimeter * water = cmH2O +foot_H2O = ft * water = ftH2O = footH2O standard_liter_per_minute = 1.68875 * Pa * m ** 3 / s = slpm = slm # Radiation -Bq = Hz = becquerel +becquerel = Hz = Bq curie = 3.7e10 * Bq = Ci rutherford = 1e6*Bq = Rd -Gy = joule / kilogram = gray = Sv = 
sievert +gray = joule / kilogram = Gy +sievert = joule / kilogram = Sv rem = 1e-2 * sievert rads = 1e-2 * gray roentgen = 2.58e-4 * coulomb / kilogram # Temperature -degC = kelvin; offset: 273.15 = celsius -degR = 5 / 9 * kelvin; offset: 0 = rankine -degF = 5 / 9 * kelvin; offset: 255.372222 = fahrenheit +degree_Celsius = kelvin; offset: 273.15 = °C = degC = celsius +degree_Rankine = 5 / 9 * kelvin; offset: 0 = °R = degR = rankine +degree_Fahrenheit = 5 / 9 * kelvin; offset: 255.372222 = °F = degF = fahrenheit # Time minute = 60 * second = min @@ -253,12 +253,12 @@ sidereal_second = sidereal_minute / 60 sidereal_year = 366.25636042 * sidereal_day sidereal_month = 27.321661 * sidereal_day tropical_month = 27.321661 * day -synodic_month = 29.530589 * day = lunar_month +synodic_month = 29.530589 * day = _ = lunar_month common_year = 365 * day leap_year = 366 * day julian_year = 365.25 * day gregorian_year = 365.2425 * day -millenium = 1000 * year = millenia = milenia = milenium +millennium = 1000 * year = _ = millennia eon = 1e9 * year work_year = 2056 * hour work_month = work_year / 12 @@ -279,7 +279,7 @@ rhe = 10 / (Pa * s) # Volume [volume] = [length] ** 3 liter = 1e-3 * m ** 3 = l = L = litre -cc = centimeter ** 3 = cubic_centimeter +cubic_centimeter = centimeter ** 3 = cc stere = meter ** 3 @@ -353,23 +353,23 @@ stere = meter ** 3 survey_mile = 5280 survey_foot acre = 43560 survey_foot ** 2 - square_rod = 1 rod ** 2 = sq_rod = sq_pole = sq_perch + square_rod = 1 rod ** 2 = sq_rd = sq_rod = sq_pole = sq_perch fathom = 6 survey_foot us_statute_mile = 5280 survey_foot league = 3 us_statute_mile furlong = us_statute_mile / 8 - acre_foot = acre * survey_foot = acre_feet + acre_foot = acre * survey_foot = _ =acre_feet @end @group USCSDryVolume - dry_pint = 33.6003125 cubic_inch = dpi = US_dry_pint + dry_pint = 33.6003125 cubic_inch = dpt = US_dry_pint dry_quart = 2 dry_pint = dqt = US_dry_quart dry_gallon = 8 dry_pint = dgal = US_dry_gallon peck = 16 dry_pint = pk bushel = 64 dry_pint = bu - dry_barrel = 7056 cubic_inch = US_dry_barrel + dry_barrel = 7056 cubic_inch = _ = US_dry_barrel @end @group USCSLiquidVolume @@ -405,8 +405,8 @@ stere = meter ** 3 long_hundredweight = 112 avoirdupois_pound = lg_cwt short_ton = 2000 avoirdupois_pound long_ton = 2240 avoirdupois_pound - force_short_ton = short_ton * g_0 = short_ton_force - force_long_ton = long_ton * g_0 = long_ton_force + force_short_ton = short_ton * g_0 = _ = short_ton_force + force_long_ton = long_ton * g_0 = _ = long_ton_force @end @group Troy @@ -433,14 +433,14 @@ stere = meter ** 3 @group AvoirdupoisUS using Avoirdupois US_hundredweight = short_hundredweight = US_cwt US_ton = short_ton = ton - US_ton_force = force_short_ton = ton_force = force_ton + US_ton_force = force_short_ton = _ = ton_force = force_ton @end @group Printer # Length - pixel = [printing_unit] = dot = px = pel = picture_element + pixel = [printing_unit] = _ = dot = px = pel = picture_element pixels_per_centimeter = pixel / cm = PPCM - pixels_per_inch = pixel / inch = dots_per_inch = PPI = ppi = DPI = printers_dpi + pixels_per_inch = pixel / inch = ppi = dots_per_inch = PPI = DPI = printers_dpi bits_per_pixel = bit / pixel = bpp point = yard / 216 / 12 = pp = printers_point @@ -450,7 +450,7 @@ stere = meter ** 3 @group ImperialVolume imperial_fluid_ounce = imperial_pint / 20 = imperial_floz = UK_fluid_ounce - imperial_fluid_drachm = imperial_fluid_ounce / 8 = imperial_fluid_dram + imperial_fluid_drachm = imperial_fluid_ounce / 8 = imperial_fldr = imperial_fluid_dram 
imperial_gill = imperial_pint / 4 = imperial_gi = UK_gill imperial_cup = imperial_pint / 2 = imperial_cp = UK_cup imperial_pint = 568.26125 * milliliter = imperial_pt = UK_pint diff --git a/pint/definitions.py b/pint/definitions.py index e8e07c9..e510a1b 100644 --- a/pint/definitions.py +++ b/pint/definitions.py @@ -52,9 +52,12 @@ class Definition(object): value.decode('utf-8') except UnicodeEncodeError: result.remove(value) - value, aliases = result[0], tuple(result[1:]) + value, aliases = result[0], tuple([x for x in result[1:] if x != '']) symbol, aliases = (aliases[0], aliases[1:]) if aliases else (None, aliases) + if symbol == '_': + symbol = None + aliases = tuple([x for x in aliases if x != '_']) if name.startswith('['): return DimensionDefinition(name, symbol, aliases, value) diff --git a/pint/registry.py b/pint/registry.py index 6c9d953..0129ba5 100644 --- a/pint/registry.py +++ b/pint/registry.py @@ -279,7 +279,8 @@ class BaseRegistry(meta.with_metaclass(_Meta)): else: d_symbol = None - d_aliases = tuple('Δ' + alias for alias in definition.aliases) + d_aliases = tuple('Δ' + alias for alias in definition.aliases) + \ + tuple('delta_' + alias for alias in definition.aliases) d_reference = UnitsContainer(dict((ref, value) for ref, value in definition.reference.items()))
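The `definitions.py` hunk above is the mechanism behind the new `_` placeholder: it is accepted in the symbol or alias slots of a definition line and then discarded. A standalone re-implementation of just that splitting rule (a hypothetical helper, not pint's API):

```python
def split_rhs(rhs):
    # 'value [= symbol] [= alias] ...' -> (value, symbol_or_None, aliases)
    parts = [p.strip() for p in rhs.split('=')]
    value, rest = parts[0], tuple(p for p in parts[1:] if p != '')
    symbol, aliases = (rest[0], rest[1:]) if rest else (None, rest)
    if symbol == '_':          # '_' means "no symbol"
        symbol = None
    aliases = tuple(a for a in aliases if a != '_')
    return value, symbol, aliases

print(split_rhs('1000 * year = _ = millennia'))
# ('1000 * year', None, ('millennia',))
```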
hgrecco/pint
fffc07105b379b51414932f3794d9de19e37293a
diff --git a/pint/testsuite/test_definitions.py b/pint/testsuite/test_definitions.py index 573ee21..3b73fd8 100644 --- a/pint/testsuite/test_definitions.py +++ b/pint/testsuite/test_definitions.py @@ -70,6 +70,16 @@ class TestDefinition(BaseTestCase): self.assertEqual(x.converter.offset, 255.372222) self.assertEqual(x.reference, UnitsContainer(kelvin=1)) + x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _') + self.assertIsInstance(x, UnitDefinition) + self.assertEqual(x.name, 'turn') + self.assertEqual(x.aliases, ('revolution', 'cycle')) + self.assertEqual(x.symbol, 'turn') + self.assertFalse(x.is_base) + self.assertIsInstance(x.converter, ScaleConverter) + self.assertEqual(x.converter.scale, 6.28) + self.assertEqual(x.reference, UnitsContainer(radian=1)) + def test_dimension_definition(self): x = DimensionDefinition('[time]', '', (), converter='') self.assertTrue(x.is_base)
Define unit without symbol

Is it currently possible to define a unit without a symbol (but with aliases) in a text file? Currently we have behavior such as:

```
>>> print('{:}'.format(ureg['1 millenium']))
1 millenium
>>> print('{:~}'.format(ureg['1 millenium']))
1 millenia
```

Surely no one intended "millenia" to be the symbol for millennium (the multiple misspelling is another matter). The unit is defined as:

```
millenium = 1000 * year = millenia = milenia = milenium
```

It is possible to set an empty symbol with

```
millenium = 1000 * year = = millenia = milenia = milenium
```

but that still gives a unit with a symbol (that happens to be an empty string). Is there another way? Or should this be the preferred way, and then the code should change an empty symbol to `None`?
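With the patch above, the preferred spelling is a `_` placeholder, and since `Definition.symbol` falls back to the unit name when no symbol is set (as the test patch asserts for `turn`), short (`~`) formatting no longer picks up the first alias. A usage sketch assuming the patched parser (the unit name is made up to avoid clashing with the defaults):

```python
from pint import UnitRegistry

ureg = UnitRegistry()
ureg.define('kiloyear = 1000 * year = _ = millenniums')  # '_' means "no symbol"

q = ureg('1 kiloyear')
print('{:}'.format(q))   # 1 kiloyear
print('{:~}'.format(q))  # also '1 kiloyear': the symbol falls back to the name
```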
0.0
fffc07105b379b51414932f3794d9de19e37293a
[ "pint/testsuite/test_definitions.py::TestDefinition::test_unit_definition" ]
[ "pint/testsuite/test_definitions.py::TestDefinition::test_baseunit_definition", "pint/testsuite/test_definitions.py::TestDefinition::test_dimension_definition", "pint/testsuite/test_definitions.py::TestDefinition::test_invalid", "pint/testsuite/test_definitions.py::TestDefinition::test_prefix_definition" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-05-16 18:57:01+00:00
bsd-3-clause
2,719
hgrecco__pint-868
diff --git a/docs/defining.rst b/docs/defining.rst index 50c177b..7aba5bb 100644 --- a/docs/defining.rst +++ b/docs/defining.rst @@ -75,12 +75,28 @@ unit, including non-metric ones (e.g. kiloinch is valid for Pint). This simplifies definitions files enormously without introducing major problems. Pint, like Python, believes that we are all consenting adults. +Derived dimensions are defined as follows:: + + [density] = [mass] / [volume] + +Note that primary dimensions don't need to be declared; they can be +defined for the first time as part of a unit definition. + +Finally, one may add aliases to an already existing unit definition:: + + @alias meter = metro = metr + +This is particularly useful when one wants to enrich definitions from defaults_en.txt +with new aliases from a custom file. It can also be used for translations (like in the +example above) as long as one is happy to have the localized units automatically +converted to English when they are parsed. + Programmatically ---------------- -You can easily add units to the registry programmatically. Let's add a dog_year -(sometimes written as dy) equivalent to 52 (human) days: +You can easily add units, dimensions, or aliases to the registry programmatically. +Let's add a dog_year (sometimes written as dy) equivalent to 52 (human) days: .. doctest:: @@ -111,4 +127,14 @@ You can also add prefixes programmatically: where the number indicates the multiplication factor. -.. warning:: Units and prefixes added programmatically are forgotten when the program ends. +Same for aliases and derived dimensions: + +.. doctest:: + + >>> ureg.define('@alias meter = metro = metr') + >>> ureg.define('[hypervolume] = [length ** 4]') + + +.. warning:: + Units, prefixes, aliases and dimensions added programmatically are forgotten when the + program ends. diff --git a/pint/default_en.txt b/pint/default_en.txt index fecc9e0..8adabe7 100644 --- a/pint/default_en.txt +++ b/pint/default_en.txt @@ -3,9 +3,11 @@ # Language: english # :copyright: 2013,2019 by Pint Authors, see AUTHORS for more details. -# Definition syntax -# ================= -# <canonical name> = <relation to another unit> [= <symbol>] [= <alias>] [ = <alias> ] [...] +# Syntax +# ====== +# Units +# ----- +# <canonical name> = <relation to another unit or dimension> [= <symbol>] [= <alias>] [ = <alias> ] [...] # # The canonical name and aliases should be expressed in singular form. # Pint automatically deals with plurals built by adding 's' to the singular form; plural @@ -17,6 +19,38 @@ # Example: # millennium = 1e3 * year = _ = millennia # +# +# Prefixes +# -------- +# <prefix>- = <amount> [= <symbol>] [= <alias>] [ = <alias> ] [...] +# +# Example: +# deca- = 1e+1 = da- = deka- +# +# +# Derived dimensions +# ------------------ +# [dimension name] = <relation to other dimensions> +# +# Example: +# [density] = [mass] / [volume] +# +# Note that primary dimensions don't need to be declared; they can be +# defined or the first time in a unit definition. +# E.g. see below `meter = [length]` +# +# +# Additional aliases +# ------------------ +# @alias <canonical name or previous alias> = <alias> [ = <alias> ] [...] +# +# Used to add aliases to already existing unit definitions. +# Particularly useful when one wants to enrich definitions +# from defaults_en.txt with custom aliases. 
+# +# Example: +# @alias meter = my_meter + # See also: https://pint.readthedocs.io/en/latest/defining.html @defaults diff --git a/pint/definitions.py b/pint/definitions.py index e510a1b..811a471 100644 --- a/pint/definitions.py +++ b/pint/definitions.py @@ -52,6 +52,12 @@ class Definition(object): value.decode('utf-8') except UnicodeEncodeError: result.remove(value) + + # @alias name = alias1 = alias2 = ... + if name.startswith("@alias "): + name = name[len("@alias "):].lstrip() + return AliasDefinition(name, tuple(result)) + value, aliases = result[0], tuple([x for x in result[1:] if x != '']) symbol, aliases = (aliases[0], aliases[1:]) if aliases else (None, aliases) @@ -83,6 +89,10 @@ class Definition(object): def aliases(self): return self._aliases + def add_aliases(self, *alias): + alias = tuple(a for a in alias if a not in self._aliases) + self._aliases = self._aliases + alias + @property def converter(self): return self._converter @@ -166,3 +176,12 @@ class DimensionDefinition(Definition): super(DimensionDefinition, self).__init__(name, symbol, aliases, converter=None) + + +class AliasDefinition(Definition): + """Additional alias(es) for an already existing unit + """ + def __init__(self, name, aliases): + super(AliasDefinition, self).__init__( + name=name, symbol=None, aliases=aliases, converter=None + ) diff --git a/pint/registry.py b/pint/registry.py index 0129ba5..1a24442 100644 --- a/pint/registry.py +++ b/pint/registry.py @@ -54,7 +54,7 @@ from .util import (logger, pi_theorem, solve_dependencies, ParserHelper, from .compat import tokenizer, string_types, meta from .definitions import (Definition, UnitDefinition, PrefixDefinition, - DimensionDefinition) + DimensionDefinition, AliasDefinition) from .converters import ScaleConverter from .errors import (DimensionalityError, UndefinedUnitError, DefinitionSyntaxError, RedefinitionError) @@ -263,6 +263,11 @@ class BaseRegistry(meta.with_metaclass(_Meta)): elif isinstance(definition, PrefixDefinition): d, di = self._prefixes, None + elif isinstance(definition, AliasDefinition): + d, di = self._units, self._units_casei + self._define_alias(definition, d, di) + return d[definition.name], d, di + else: raise TypeError('{} is not a valid definition.'.format(definition)) @@ -325,6 +330,13 @@ class BaseRegistry(meta.with_metaclass(_Meta)): if casei_unit_dict is not None: casei_unit_dict[key.lower()].add(key) + def _define_alias(self, definition, unit_dict, casei_unit_dict): + unit = unit_dict[definition.name] + unit.add_aliases(*definition.aliases) + for alias in unit.aliases: + unit_dict[alias] = unit + casei_unit_dict[alias.lower()].add(alias) + def _register_parser(self, prefix, parserfunc): """Register a loader for a given @ directive.. @@ -367,7 +379,7 @@ class BaseRegistry(meta.with_metaclass(_Meta)): ifile = SourceIterator(file) for no, line in ifile: - if line and line[0] == '@': + if line.startswith('@') and not line.startswith('@alias'): if line.startswith('@import'): if is_resource: path = line[7:].strip()
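For reference, the `@alias` parsing added to `definitions.py` above just strips the directive prefix and treats everything after the first `=` as aliases; a standalone illustrative sketch (not pint's actual API):

```python
def parse_alias(line):
    # '@alias <existing name> = <alias> [= <alias> ...]'
    name, _, rhs = line.partition('=')
    target = name.strip()[len('@alias '):].lstrip()
    aliases = tuple(a.strip() for a in rhs.split('='))
    return target, aliases

print(parse_alias('@alias meter = metro = metr'))
# ('meter', ('metro', 'metr'))
```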
hgrecco/pint
f442426dc8515655806ceaf0abd0a4cce7494cc6
diff --git a/pint/testsuite/test_definitions.py b/pint/testsuite/test_definitions.py
index 3b73fd8..8a66a4e 100644
--- a/pint/testsuite/test_definitions.py
+++ b/pint/testsuite/test_definitions.py
@@ -5,7 +5,7 @@ from __future__ import division, unicode_literals, print_function, absolute_impo
 from pint.util import (UnitsContainer)
 from pint.converters import (ScaleConverter, OffsetConverter)
 from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
-                              DimensionDefinition)
+                              DimensionDefinition, AliasDefinition)
 
 from pint.testsuite import BaseTestCase
 
@@ -88,3 +88,9 @@ class TestDefinition(BaseTestCase):
         x = Definition.from_string('[speed] = [length]/[time]')
         self.assertIsInstance(x, DimensionDefinition)
         self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))
+
+    def test_alias_definition(self):
+        x = Definition.from_string("@alias meter = metro = metr")
+        self.assertIsInstance(x, AliasDefinition)
+        self.assertEqual(x.name, "meter")
+        self.assertEqual(x.aliases, ("metro", "metr"))
diff --git a/pint/testsuite/test_unit.py b/pint/testsuite/test_unit.py
index 5ce35c5..9c6bf51 100644
--- a/pint/testsuite/test_unit.py
+++ b/pint/testsuite/test_unit.py
@@ -648,3 +648,30 @@ class TestConvertWithOffset(QuantityTestCase, ParameterizedTestCase):
             if src != dst:
                 self.assertQuantityAlmostEqual(convert(expected, dst, src),
                                                value, atol=0.001)
+
+    def test_alias(self):
+        # Use load_definitions
+        ureg = UnitRegistry([
+            "canonical = [] = can = alias1 = alias2\n",
+            # overlapping aliases
+            "@alias canonical = alias2 = alias3\n",
+            # Against another alias
+            "@alias alias3 = alias4\n",
+        ])
+
+        # Use define
+        ureg.define("@alias canonical = alias5")
+
+        # Test that new aliases work
+        # Test that pre-existing aliases and symbol are not eliminated
+        for a in ("can", "alias1", "alias2", "alias3", "alias4", "alias5"):
+            self.assertEqual(ureg.Unit(a), ureg.Unit("canonical"))
+
+        # Test that aliases defined multiple times are not duplicated
+        self.assertEqual(
+            ureg._units["canonical"].aliases,
+            ("alias1", "alias2", "alias3", "alias4", "alias5")
+        )
+
+        # Define against unknown name
+        self.assertRaises(KeyError, ureg.define, "@alias notexist = something")
Add aliases to already existing definitions

I'd like to write a custom definitions file that adds new aliases to definitions from defaults_en.txt - as opposed to creating entirely new units. However, I could not find a way to do it without scouting for the original definition line and writing it again whole.

For example, defaults_en.txt contains:
```
calorie = 4.184 * joule = cal = thermochemical_calorie = cal_th
```
If I want to add an alias, I have to write in my custom definitions file:
```
calorie = 4.184 * joule = cal = thermochemical_calorie = cal_th = CUSTOM_CAL
```
whereas I'd like to just write something like:
```
calorie = ... = CUSTOM_CAL
```
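For reference, a hedged sketch of how the feature introduced by the patch above answers this request (it assumes a patched pint; `CUSTOM_CAL` is the alias name proposed in the issue):

```python
from pint import UnitRegistry

ureg = UnitRegistry()

# One line, in a custom definitions file or via define(), instead of
# restating the whole calorie definition.
ureg.define("@alias calorie = CUSTOM_CAL")

assert ureg.Unit("CUSTOM_CAL") == ureg.Unit("calorie")
print(ureg("1 CUSTOM_CAL").to("joule"))  # 4.184 joule
```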
0.0
f442426dc8515655806ceaf0abd0a4cce7494cc6
[ "pint/testsuite/test_definitions.py::TestDefinition::test_alias_definition", "pint/testsuite/test_definitions.py::TestDefinition::test_baseunit_definition", "pint/testsuite/test_definitions.py::TestDefinition::test_dimension_definition", "pint/testsuite/test_definitions.py::TestDefinition::test_invalid", "pint/testsuite/test_definitions.py::TestDefinition::test_prefix_definition", "pint/testsuite/test_definitions.py::TestDefinition::test_unit_definition", "pint/testsuite/test_unit.py::TestUnit::test_creation", "pint/testsuite/test_unit.py::TestUnit::test_deepcopy", "pint/testsuite/test_unit.py::TestUnit::test_dimensionality", "pint/testsuite/test_unit.py::TestUnit::test_dimensionless", "pint/testsuite/test_unit.py::TestUnit::test_ipython", "pint/testsuite/test_unit.py::TestUnit::test_unit_casting", "pint/testsuite/test_unit.py::TestUnit::test_unit_cmp", "pint/testsuite/test_unit.py::TestUnit::test_unit_default_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_div", "pint/testsuite/test_unit.py::TestUnit::test_unit_eqs", "pint/testsuite/test_unit.py::TestUnit::test_unit_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_hash", "pint/testsuite/test_unit.py::TestUnit::test_unit_mul", "pint/testsuite/test_unit.py::TestUnit::test_unit_pow", "pint/testsuite/test_unit.py::TestUnit::test_unit_rdiv", "pint/testsuite/test_unit.py::TestUnit::test_unit_repr", "pint/testsuite/test_unit.py::TestRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistry::test_check", "pint/testsuite/test_unit.py::TestRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistry::test_wraps", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_context_sp", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_base_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_compatible_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_many", 
"pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_check", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_lazy", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wraps", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_alias", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00001", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00002", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00003", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00004", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00005", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00006", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00007", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00008", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00009", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00010", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00011", 
"pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00012", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00013", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00014", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00015", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00016", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00017", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00018", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00019" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-09-03 17:03:38+00:00
bsd-3-clause
2,720
hgrecco__pint-877
diff --git a/pint/util.py b/pint/util.py
index 587517a..d89b957 100644
--- a/pint/util.py
+++ b/pint/util.py
@@ -327,9 +327,16 @@ class UnitsContainer(Mapping):
 
     def __eq__(self, other):
         if isinstance(other, UnitsContainer):
-            # Not the same as hash(self); see ParserHelper.__hash__ and __eq__
-            return UnitsContainer.__hash__(self) == UnitsContainer.__hash__(other)
-        if isinstance(other, string_types):
+            # UnitsContainer.__hash__(self) is not the same as hash(self); see
+            # ParserHelper.__hash__ and __eq__.
+            # Different hashes guarantee that the actual contents are different, but
+            # identical hashes give no guarantee of equality.
+            # e.g. in CPython, hash(-1) == hash(-2)
+            if UnitsContainer.__hash__(self) != UnitsContainer.__hash__(other):
+                return False
+            other = other._d
+
+        elif isinstance(other, string_types):
             other = ParserHelper.from_string(other)
             other = other._d
 
@@ -504,9 +511,11 @@ class ParserHelper(UnitsContainer):
         self._d, self._hash, self.scale = state
 
     def __eq__(self, other):
-        if isinstance(other, self.__class__):
-            return self.scale == other.scale and\
+        if isinstance(other, ParserHelper):
+            return (
+                self.scale == other.scale and
                 super(ParserHelper, self).__eq__(other)
+            )
         elif isinstance(other, string_types):
             return self == ParserHelper.from_string(other)
         elif isinstance(other, Number):
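The shape of the fix, in isolation: a cached hash may prove inequality quickly, but never equality. The sketch below is a stand-alone illustration of that pattern, not pint code; `FrozenCounter` is a hypothetical name:

```python
class FrozenCounter:
    """Toy immutable mapping with a cached hash."""

    def __init__(self, d):
        self._d = dict(d)
        self._hash = hash(frozenset(self._d.items()))

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        if not isinstance(other, FrozenCounter):
            return NotImplemented
        if self._hash != other._hash:
            # Fast path: different hashes do guarantee inequality.
            return False
        # Slow path: equal hashes guarantee nothing; compare contents.
        return self._d == other._d


# On CPython hash(-1) == hash(-2), so these two collide yet must compare unequal.
assert FrozenCounter({"[mass]": -1}) != FrozenCounter({"[mass]": -2})
```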
hgrecco/pint
d655f3251c259eea52beb74cbb16e694eda197ed
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py
index 6ca7035..e031fe5 100644
--- a/pint/testsuite/test_issues.py
+++ b/pint/testsuite/test_issues.py
@@ -11,7 +11,7 @@ from pint import UnitRegistry
 from pint.unit import UnitsContainer
 from pint.util import ParserHelper
 
-from pint.compat import np, long_type
+from pint.compat import np
 from pint.errors import UndefinedUnitError, DimensionalityError
 from pint.testsuite import QuantityTestCase, helpers
 
@@ -699,3 +699,21 @@ class TestIssues(QuantityTestCase):
         ureg2.define('test123 = 456 kg')
         assert ureg1('1 test123').to('kg').magnitude == 123
         assert ureg2('1 test123').to('kg').magnitude == 456
+
+    def test_issue876(self):
+        # Same hash must not imply equality.
+
+        # As an implementation detail of CPython, hash(-1) == hash(-2).
+        # This test is useless in potential alternative Python implementations where
+        # hash(-1) != hash(-2); one would need to find hash collisions specific for each
+        # implementation
+
+        a = UnitsContainer({"[mass]": -1})
+        b = UnitsContainer({"[mass]": -2})
+        c = UnitsContainer({"[mass]": -3})
+
+        # Guarantee working on alternative Python implementations
+        assert (hash(-1) == hash(-2)) == (hash(a) == hash(b))
+        assert (hash(-1) == hash(-3)) == (hash(a) == hash(c))
+        assert a != b
+        assert a != c
Bug in compound unit dimensionality/unit reduction after caching?

I came across a bizarre bug that popped up on the master branch recently:

```python
import pint
ureg = pint.UnitRegistry()
print(ureg('joule').to_base_units())
print(ureg('joule * second ** 2 / kilogram / meter').to_base_units())
```

gives the following traceback

```
DimensionalityError                       Traceback (most recent call last)
<ipython-input-18-62589ad7e050> in <module>
----> 1 print((1 * ureg.joule * ureg.second ** 2 / ureg.kilogram / ureg.meter).to_base_units())

~/dev/pint/pint/quantity.py in to_base_units(self)
    478         _, other = self._REGISTRY._get_base_units(self._units)
    479
--> 480         magnitude = self._convert_magnitude_not_inplace(other)
    481
    482         return self.__class__(magnitude, other)

~/dev/pint/pint/quantity.py in _convert_magnitude_not_inplace(self, other, *contexts, **ctx_kwargs)
    406             return self._REGISTRY.convert(self._magnitude, self._units, other)
    407
--> 408         return self._REGISTRY.convert(self._magnitude, self._units, other)
    409
    410     def _convert_magnitude(self, other, *contexts, **ctx_kwargs):

~/dev/pint/pint/registry.py in convert(self, value, src, dst, inplace)
    707             return value
    708
--> 709         return self._convert(value, src, dst, inplace)
    710
    711     def _convert(self, value, src, dst, inplace=False, check_dimensionality=True):

~/dev/pint/pint/registry.py in _convert(self, value, src, dst, inplace)
   1237             value, src = src._magnitude, src._units
   1238
-> 1239         return super(ContextRegistry, self)._convert(value, src, dst, inplace)
   1240
   1241     def _get_compatible_units(self, input_units, group_or_system):

~/dev/pint/pint/registry.py in _convert(self, value, src, dst, inplace)
    990
    991         if not (src_offset_unit or dst_offset_unit):
--> 992             return super(NonMultiplicativeRegistry, self)._convert(value, src, dst, inplace)
    993
    994         src_dim = self._get_dimensionality(src)

~/dev/pint/pint/registry.py in _convert(self, value, src, dst, inplace, check_dimensionality)
    729             # then the conversion cannot be performed.
    730             if src_dim != dst_dim:
--> 731                 raise DimensionalityError(src, dst, src_dim, dst_dim)
    732
    733         # Here src and dst have only multiplicative units left. Thus we can

DimensionalityError: Cannot convert from 'joule * second ** 2 / kilogram / meter' ([length]) to 'dimensionless' (dimensionless)
```

but it works fine after removing the `joule` line.

After digging through the commit history, it seems to lead back to https://github.com/hgrecco/pint/commit/a9a97ba98167a6a20df874a14343d303a3cd2163, since any point in the history I checked including that commit fails with this error, and any without it returns the expected result of `1.0 meter` for the second line. Glancing through the changes made there, it makes sense that it is tied to this seemingly cache-related issue. That being said, I have no clue what is particularly going wrong here or what exactly in https://github.com/hgrecco/pint/commit/a9a97ba98167a6a20df874a14343d303a3cd2163 could have caused this.

In case it helps with troubleshooting...

...the error no longer arises when any of the compound pieces of the last unit are removed.

...it still arises with the following:
```python
import pint
ureg = pint.UnitRegistry()
print((1 * ureg.joule).to_base_units())
print((1 * ureg.joule * ureg.second ** 2 / ureg.kilogram / ureg.meter).to_base_units())
```

...but does not arise with the following:
```python
import pint
ureg = pint.UnitRegistry()
print((1 * ureg.joule).to_base_units())
print((1 * ureg.joule * ureg.second ** 2 / ureg.kilogram / ureg.meter).to('m'))
```

Another oddity is that, despite what the last non-failing example may suggest, this first arose in MetPy through a function that does not use `.to_base_units`, but rather just `.to`. However, after spending a decent amount of time on it, I can't seem to come up with a short example that replicates the failure with `.to`.

ping @crusaderky, in case you may have any insight here with the seemingly problematic commit from https://github.com/hgrecco/pint/pull/864.
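A hedged note tying this report to the patch above (the causal link is inferred from the fix and its regression test, not stated in the issue itself):

```python
# CPython reserves -1 as an internal error marker for hash functions,
# so hash(-1) silently becomes hash(-2).
assert hash(-1) == hash(-2)
assert -1 != -2

# An equality check that trusted hashes alone could therefore conflate
# distinct exponent dictionaries, poisoning hash-keyed caches such as
# the one used by _get_base_units.
```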
0.0
d655f3251c259eea52beb74cbb16e694eda197ed
[ "pint/testsuite/test_issues.py::TestIssues::test_issue876" ]
[ "pint/testsuite/test_issues.py::TestIssues::test_alternative_angstrom_definition", "pint/testsuite/test_issues.py::TestIssues::test_angstrom_creation", "pint/testsuite/test_issues.py::TestIssues::test_issue104", "pint/testsuite/test_issues.py::TestIssues::test_issue105", "pint/testsuite/test_issues.py::TestIssues::test_issue121", "pint/testsuite/test_issues.py::TestIssues::test_issue170", "pint/testsuite/test_issues.py::TestIssues::test_issue252", "pint/testsuite/test_issues.py::TestIssues::test_issue29", "pint/testsuite/test_issues.py::TestIssues::test_issue323", "pint/testsuite/test_issues.py::TestIssues::test_issue339", "pint/testsuite/test_issues.py::TestIssues::test_issue354_356_370", "pint/testsuite/test_issues.py::TestIssues::test_issue45", "pint/testsuite/test_issues.py::TestIssues::test_issue468", "pint/testsuite/test_issues.py::TestIssues::test_issue50", "pint/testsuite/test_issues.py::TestIssues::test_issue52", "pint/testsuite/test_issues.py::TestIssues::test_issue523", "pint/testsuite/test_issues.py::TestIssues::test_issue532", "pint/testsuite/test_issues.py::TestIssues::test_issue54", "pint/testsuite/test_issues.py::TestIssues::test_issue54_related", "pint/testsuite/test_issues.py::TestIssues::test_issue61", "pint/testsuite/test_issues.py::TestIssues::test_issue61_notNP", "pint/testsuite/test_issues.py::TestIssues::test_issue62", "pint/testsuite/test_issues.py::TestIssues::test_issue625a", "pint/testsuite/test_issues.py::TestIssues::test_issue625b", "pint/testsuite/test_issues.py::TestIssues::test_issue625c", "pint/testsuite/test_issues.py::TestIssues::test_issue655a", "pint/testsuite/test_issues.py::TestIssues::test_issue655b", "pint/testsuite/test_issues.py::TestIssues::test_issue66", "pint/testsuite/test_issues.py::TestIssues::test_issue66b", "pint/testsuite/test_issues.py::TestIssues::test_issue69", "pint/testsuite/test_issues.py::TestIssues::test_issue783", "pint/testsuite/test_issues.py::TestIssues::test_issue85", "pint/testsuite/test_issues.py::TestIssues::test_issue856", "pint/testsuite/test_issues.py::TestIssues::test_issue856b", "pint/testsuite/test_issues.py::TestIssues::test_issue86", "pint/testsuite/test_issues.py::TestIssues::test_issue93", "pint/testsuite/test_issues.py::TestIssues::test_issues86b", "pint/testsuite/test_issues.py::TestIssues::test_micro_creation" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-09-11 09:57:06+00:00
bsd-3-clause
2,721
hgrecco__pint-891
diff --git a/pint/matplotlib.py b/pint/matplotlib.py
index 0d28d43..5b51cf9 100644
--- a/pint/matplotlib.py
+++ b/pint/matplotlib.py
@@ -13,6 +13,8 @@ from __future__ import absolute_import
 
 import matplotlib.units
 
+from .util import iterable, sized
+
 
 class PintAxisInfo(matplotlib.units.AxisInfo):
     """Support default axis and tick labeling and default limits."""
@@ -31,7 +33,7 @@ class PintConverter(matplotlib.units.ConversionInterface):
 
     def convert(self, value, unit, axis):
         """Convert :`Quantity` instances for matplotlib to use."""
-        if hasattr(value,"__iter__"):
+        if iterable(value):
             return [self._convert_value(v, unit, axis) for v in value]
         else:
             return self._convert_value(value, unit, axis)
@@ -51,7 +53,7 @@ class PintConverter(matplotlib.units.ConversionInterface):
 
     @staticmethod
     def default_units(x, axis):
         """Get the default unit to use for the given combination of unit and axis."""
-        if hasattr(x,"__iter__") and len(x) > 0:
+        if iterable(x) and sized(x):
             return getattr(x[0], 'units', None)
         return getattr(x, 'units', None)
diff --git a/pint/quantity.py b/pint/quantity.py
index 3373552..01d7592 100644
--- a/pint/quantity.py
+++ b/pint/quantity.py
@@ -148,22 +148,24 @@ class _Quantity(PrettyIPython, SharedRegistryObject):
         inst.__used = False
         inst.__handling = None
 
-        # Only instances where the magnitude is iterable should have __iter__()
-        if hasattr(inst._magnitude,"__iter__"):
-            inst.__iter__ = cls._iter
+        return inst
 
-    def _iter(self):
-        """
-        Will be become __iter__() for instances with iterable magnitudes
-        """
-        # # Allow exception to propagate in case of non-iterable magnitude
-        it_mag = iter(self.magnitude)
-        return iter((self.__class__(mag, self._units) for mag in it_mag))
 
     @property
     def debug_used(self):
         return self.__used
 
+    def __iter__(self):
+        # Make sure that, if self.magnitude is not iterable, we raise TypeError as soon as one
+        # calls iter(self) without waiting for the first element to be drawn from the iterator
+        it_magnitude = iter(self.magnitude)
+
+        def it_outer():
+            for element in it_magnitude:
+                yield self.__class__(element, self._units)
+
+        return it_outer()
+
     def __copy__(self):
         ret = self.__class__(copy.copy(self._magnitude), self._units)
         ret.__used = self.__used
diff --git a/pint/util.py b/pint/util.py
index d89b957..5480b68 100644
--- a/pint/util.py
+++ b/pint/util.py
@@ -810,3 +810,32 @@ class BlockIterator(SourceIterator):
         return lineno, line
 
     next = __next__
+
+
+def iterable(y):
+    """Check whether or not an object can be iterated over.
+
+    Vendored from numpy under the terms of the BSD 3-Clause License. (Copyright
+    (c) 2005-2019, NumPy Developers.)
+
+    :param value: Input object.
+    :param type: object
+    """
+    try:
+        iter(y)
+    except TypeError:
+        return False
+    return True
+
+
+def sized(y):
+    """Check whether or not an object has a defined length.
+
+    :param value: Input object.
+    :param type: object
+    """
+    try:
+        len(y)
+    except TypeError:
+        return False
+    return True
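A minimal sketch of the behaviour this patch establishes, mirroring the accompanying tests (it assumes numpy is installed and a pint build with this change):

```python
import numpy as np
from pint import UnitRegistry

ureg = UnitRegistry()

# Array magnitude: iteration yields element-wise Quantities.
q = ureg.Quantity(np.array([0, 1, 2, 3]), "m")
assert next(iter(q)) == ureg.Quantity(0, "m")
assert np.iterable(q)

# Scalar magnitude: iter() now raises TypeError immediately, so
# np.iterable() correctly reports False instead of True.
assert not np.iterable(ureg.Quantity(1, "m"))
```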
hgrecco/pint
998417f3f99711009b5e4fe5d3a5c841561dcad8
diff --git a/pint/testsuite/test_numpy.py b/pint/testsuite/test_numpy.py
index 360b29b..200115c 100644
--- a/pint/testsuite/test_numpy.py
+++ b/pint/testsuite/test_numpy.py
@@ -235,6 +235,10 @@ class TestNumpyMethods(QuantityTestCase):
         for q, v in zip(self.q.flatten(), [1, 2, 3, 4]):
             self.assertEqual(q, v * self.ureg.m)
 
+    def test_iterable(self):
+        self.assertTrue(np.iterable(self.q))
+        self.assertFalse(np.iterable(1 * self.ureg.m))
+
     def test_reversible_op(self):
         """
         """
diff --git a/pint/testsuite/test_quantity.py b/pint/testsuite/test_quantity.py
index 41c6903..e5fc076 100644
--- a/pint/testsuite/test_quantity.py
+++ b/pint/testsuite/test_quantity.py
@@ -392,6 +392,19 @@ class TestQuantity(QuantityTestCase):
         u_array_5 = self.Q_.from_list(u_seq)
         self.assertTrue(all(u_array_5 == u_array_ref))
 
+    @helpers.requires_numpy()
+    def test_iter(self):
+        # Verify that iteration gives element as Quantity with same units
+        x = self.Q_([0, 1, 2, 3], 'm')
+        self.assertQuantityEqual(next(iter(x)), self.Q_(0, 'm'))
+
+    def test_notiter(self):
+        # Verify that iter() crashes immediately, without needing to draw any
+        # element from it, if the magnitude isn't iterable
+        x = self.Q_(1, 'm')
+        with self.assertRaises(TypeError):
+            iter(x)
+
 
 class TestQuantityToCompact(QuantityTestCase):
 
diff --git a/pint/testsuite/test_util.py b/pint/testsuite/test_util.py
index 915fc66..da6c02e 100644
--- a/pint/testsuite/test_util.py
+++ b/pint/testsuite/test_util.py
@@ -9,7 +9,7 @@ from decimal import Decimal
 from pint.testsuite import BaseTestCase, QuantityTestCase
 from pint.util import (string_preprocessor, find_shortest_path, matrix_to_string,
                        transpose, tokenizer, find_connected_nodes, ParserHelper,
-                       UnitsContainer, to_units_container)
+                       UnitsContainer, to_units_container, iterable, sized)
 
 
 class TestUnitsContainer(QuantityTestCase):
@@ -333,3 +333,22 @@ class TestMatrix(BaseTestCase):
 
     def test_transpose(self):
         self.assertEqual(transpose([[1, 2], [3, 4]]), [[1, 3], [2, 4]])
+
+
+class TestOtherUtils(BaseTestCase):
+
+    def test_iterable(self):
+
+        # Test with list, string, generator, and scalar
+        self.assertTrue(iterable([0, 1, 2, 3]))
+        self.assertTrue(iterable('test'))
+        self.assertTrue(iterable((i for i in range(5))))
+        self.assertFalse(iterable(0))
+
+    def test_sized(self):
+
+        # Test with list, string, generator, and scalar
+        self.assertTrue(sized([0, 1, 2, 3]))
+        self.assertTrue(sized('test'))
+        self.assertFalse(sized((i for i in range(5))))
+        self.assertFalse(sized(0))
iter for single value....

Perhaps related to #55, but I can't see where that was resolved...

I have the input to a function that can either be a single value or an array. I just want the first value of the array or the value. Note that this has to work with or without pint.

If I do

```python
import pint
ureg = pint.UnitRegistry()
ureg.setup_matplotlib(True)
width = 0.25 * ureg.cm
print(np.iterable(width))
print(width[0])
```

I get:

```
True
Traceback (most recent call last):
  File "/Users/jklymak/pint/pint/quantity.py", line 1400, in __getitem__
    value = self._magnitude[key]
TypeError: 'float' object is not subscriptable

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "testBar.py", line 13, in <module>
    print(width[0])
  File "/Users/jklymak/pint/pint/quantity.py", line 1404, in __getitem__
    "supports indexing".format(self._magnitude))
TypeError: Neither Quantity object nor its magnitude (0.25)supports indexing
```

Note that `iter(width)` also works fine... So maybe I'm just missing something, but how do I check if `width` is a singleton?
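With the fix above in place, the closing question has a direct answer. A sketch, assuming numpy is installed; `first_value` is a hypothetical helper, not pint API:

```python
import numpy as np
from pint import UnitRegistry

ureg = UnitRegistry()

def first_value(x):
    # np.iterable() becomes a reliable singleton check once __iter__
    # fails eagerly for scalar magnitudes.
    return x[0] if np.iterable(x) else x

width = 0.25 * ureg.cm
assert first_value(width) == width
assert first_value([1, 2, 3] * ureg.cm) == 1 * ureg.cm
```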
0.0
998417f3f99711009b5e4fe5d3a5c841561dcad8
[ "pint/testsuite/test_quantity.py::TestQuantity::test_both_symbol", "pint/testsuite/test_quantity.py::TestQuantity::test_context_attr", "pint/testsuite/test_quantity.py::TestQuantity::test_convert_from", "pint/testsuite/test_quantity.py::TestQuantity::test_default_formatting", "pint/testsuite/test_quantity.py::TestQuantity::test_dimensionless_units", "pint/testsuite/test_quantity.py::TestQuantity::test_exponent_formatting", "pint/testsuite/test_quantity.py::TestQuantity::test_format_compact", "pint/testsuite/test_quantity.py::TestQuantity::test_ipython", "pint/testsuite/test_quantity.py::TestQuantity::test_notiter", "pint/testsuite/test_quantity.py::TestQuantity::test_offset", "pint/testsuite/test_quantity.py::TestQuantity::test_offset_delta", "pint/testsuite/test_quantity.py::TestQuantity::test_pickle", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_bool", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison_convert", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_creation", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_format", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_hash", "pint/testsuite/test_quantity.py::TestQuantity::test_quantity_repr", "pint/testsuite/test_quantity.py::TestQuantity::test_to_base_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_derived_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_dimensionally_simple_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_fractional_exponent_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_fractional_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_square_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_limits_magnitudes", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_nonnumeric_magnitudes", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_power_units", "pint/testsuite/test_quantity.py::TestQuantityToCompact::test_unit_parameter", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_float", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_fraction", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_abs_round", "pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_float_complex", "pint/testsuite/test_quantity.py::TestDimensions::test_dimensionality", "pint/testsuite/test_quantity.py::TestDimensions::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestDimensions::test_inclusion", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_dimensionality", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_inclusion", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_dimensionality", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_get_dimensionality", "pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_inclusion", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00003", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00003", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00033", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00008", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00016", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00036", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00001", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00002", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00003", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00004", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00005", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00006", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00007", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00008", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00009", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00010", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00011", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00012", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00013", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00014", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00015", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00016", 
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00017", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00018", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00019", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00020", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00021", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00022", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00023", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00024", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00025", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00026", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00027", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00028", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00029", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00030", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00031", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00032", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00033", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00034", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00035", "pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00036", "pint/testsuite/test_quantity.py::TestDimensionReduction::test_mul_and_div_reduction", "pint/testsuite/test_quantity.py::TestDimensionReduction::test_nocoerce_creation", "pint/testsuite/test_quantity.py::TestDimensionReduction::test_reduction_to_dimensionless", "pint/testsuite/test_quantity.py::TestTimedelta::test_add_sub", "pint/testsuite/test_quantity.py::TestTimedelta::test_iadd_isub", "pint/testsuite/test_quantity.py::TestCompareZero::test_equal_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_gt_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_autoconvert_equal_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_autoconvert_gt_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_equal_zero", "pint/testsuite/test_quantity.py::TestCompareZero::test_offset_gt_zero", "pint/testsuite/test_util.py::TestUnitsContainer::test_invalid", "pint/testsuite/test_util.py::TestUnitsContainer::test_string_comparison", "pint/testsuite/test_util.py::TestUnitsContainer::test_unitcontainer_arithmetic", "pint/testsuite/test_util.py::TestUnitsContainer::test_unitcontainer_bool", "pint/testsuite/test_util.py::TestUnitsContainer::test_unitcontainer_comp", "pint/testsuite/test_util.py::TestUnitsContainer::test_unitcontainer_creation", "pint/testsuite/test_util.py::TestUnitsContainer::test_unitcontainer_repr", "pint/testsuite/test_util.py::TestToUnitsContainer::test_dict_conversion", "pint/testsuite/test_util.py::TestToUnitsContainer::test_quantity_conversion", "pint/testsuite/test_util.py::TestToUnitsContainer::test_str_conversion", "pint/testsuite/test_util.py::TestToUnitsContainer::test_uc_conversion", "pint/testsuite/test_util.py::TestToUnitsContainer::test_unit_conversion", "pint/testsuite/test_util.py::TestParseHelper::test_basic", "pint/testsuite/test_util.py::TestParseHelper::test_calculate", "pint/testsuite/test_util.py::TestParseHelper::test_eval_token", 
"pint/testsuite/test_util.py::TestStringProcessor::test_joined_multiplication", "pint/testsuite/test_util.py::TestStringProcessor::test_names", "pint/testsuite/test_util.py::TestStringProcessor::test_numbers", "pint/testsuite/test_util.py::TestStringProcessor::test_per", "pint/testsuite/test_util.py::TestStringProcessor::test_space_multiplication", "pint/testsuite/test_util.py::TestStringProcessor::test_square_cube", "pint/testsuite/test_util.py::TestGraph::test_shortest_path", "pint/testsuite/test_util.py::TestGraph::test_start_not_in_graph", "pint/testsuite/test_util.py::TestMatrix::test_matrix_to_string", "pint/testsuite/test_util.py::TestMatrix::test_transpose", "pint/testsuite/test_util.py::TestOtherUtils::test_iterable", "pint/testsuite/test_util.py::TestOtherUtils::test_sized" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-10-16 23:23:23+00:00
bsd-3-clause
2,722
hgrecco__pint-911
diff --git a/pint/registry.py b/pint/registry.py
index 4324c7d..23b1108 100644
--- a/pint/registry.py
+++ b/pint/registry.py
@@ -101,6 +101,8 @@ class BaseRegistry(meta.with_metaclass(_Meta)):
                           'warn', 'raise', 'ignore'
     :type on_redefinition: str
     :param auto_reduce_dimensions: If True, reduce dimensionality on appropriate operations.
+    :param preprocessors: list of callables which are iteratively ran on any input expression
+                          or unit string
     """
 
     #: Map context prefix to function
@@ -116,13 +118,15 @@ class BaseRegistry(meta.with_metaclass(_Meta)):
                        'parse_unit_name', 'parse_units', 'parse_expression',
                        'convert']
 
-    def __init__(self, filename='', force_ndarray=False, on_redefinition='warn', auto_reduce_dimensions=False):
+    def __init__(self, filename='', force_ndarray=False, on_redefinition='warn',
+                 auto_reduce_dimensions=False, preprocessors=None):
 
         self._register_parsers()
         self._init_dynamic_classes()
 
         self._filename = filename
         self.force_ndarray = force_ndarray
+        self.preprocessors = preprocessors or []
 
         #: Action to take in case a unit is redefined. 'warn', 'raise', 'ignore'
         self._on_redefinition = on_redefinition
@@ -813,6 +817,8 @@ class BaseRegistry(meta.with_metaclass(_Meta)):
             :class:`pint.UndefinedUnitError` if a unit is not in the registry
             :class:`ValueError` if the expression is invalid.
         """
+        for p in self.preprocessors:
+            input_string = p(input_string)
         units = self._parse_units(input_string, as_delta)
         return self.Unit(units)
 
@@ -881,6 +887,8 @@ class BaseRegistry(meta.with_metaclass(_Meta)):
         if not input_string:
             return self.Quantity(1)
 
+        for p in self.preprocessors:
+            input_string = p(input_string)
         input_string = string_preprocessor(input_string)
         gen = tokenizer(input_string)
 
@@ -1514,19 +1522,22 @@ class UnitRegistry(SystemRegistry, ContextRegistry, NonMultiplicativeRegistry):
                           'warn', 'raise', 'ignore'
     :type on_redefinition: str
     :param auto_reduce_dimensions: If True, reduce dimensionality on appropriate operations.
+    :param preprocessors: list of callables which are iteratively ran on any input expression
+                          or unit string
     """
 
     def __init__(self, filename='', force_ndarray=False, default_as_delta=True,
                  autoconvert_offset_to_baseunit=False,
                  on_redefinition='warn', system=None,
-                 auto_reduce_dimensions=False):
+                 auto_reduce_dimensions=False, preprocessors=None):
 
         super(UnitRegistry, self).__init__(filename=filename, force_ndarray=force_ndarray,
                                            on_redefinition=on_redefinition,
                                            default_as_delta=default_as_delta,
                                            autoconvert_offset_to_baseunit=autoconvert_offset_to_baseunit,
                                            system=system,
-                                           auto_reduce_dimensions=auto_reduce_dimensions)
+                                           auto_reduce_dimensions=auto_reduce_dimensions,
+                                           preprocessors=preprocessors)
 
     def pi_theorem(self, quantities):
         """Builds dimensionless quantities using the Buckingham π theorem
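A minimal sketch of the new hook in use. The UDUNITS-style regex is lifted from the accompanying test patch; the `udunits_power` name is illustrative:

```python
import functools
import re

from pint import UnitRegistry

# Rewrite UDUNITS-style exponents ("m2", "s-2") into pint's "**" notation
# before any parsing happens.
udunits_power = functools.partial(
    re.sub,
    r"(?<=[A-Za-z])(?![A-Za-z])(?<![0-9\-][eE])(?<![0-9\-])(?=[0-9\-])",
    "**",
)

ureg = UnitRegistry(preprocessors=[udunits_power])
assert ureg.parse_units("m2") == ureg.parse_units("m**2")
assert ureg.parse_expression("42 m2") == ureg.Quantity(42, "m**2")
```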
hgrecco/pint
5f89246c3376be6a0088ebbb83dec0640f6883c8
diff --git a/pint/testsuite/test_unit.py b/pint/testsuite/test_unit.py index 9c6bf51..0879627 100644 --- a/pint/testsuite/test_unit.py +++ b/pint/testsuite/test_unit.py @@ -3,7 +3,9 @@ from __future__ import division, unicode_literals, print_function, absolute_import import copy +import functools import math +import re from pint.registry import (UnitRegistry, LazyRegistry) from pint.util import (UnitsContainer, ParserHelper) @@ -274,6 +276,28 @@ class TestRegistry(QuantityTestCase): self.assertEqual(parse('kelvin*meter', as_delta=True), UnitsContainer(kelvin=1, meter=1)) self.assertEqual(parse('kelvin*meter', as_delta=False), UnitsContainer(kelvin=1, meter=1)) + def test_parse_expression_with_preprocessor(self): + # Add parsing of UDUNITS-style power + self.ureg.preprocessors.append(functools.partial( + re.sub, r'(?<=[A-Za-z])(?![A-Za-z])(?<![0-9\-][eE])(?<![0-9\-])(?=[0-9\-])', '**')) + # Test equality + self.assertEqual(self.ureg.parse_expression('42 m2'), self.Q_(42, UnitsContainer(meter=2.))) + self.assertEqual(self.ureg.parse_expression('1e6 Hz s-2'), self.Q_(1e6, UnitsContainer(second=-3.))) + self.assertEqual(self.ureg.parse_expression('3 metre3'), self.Q_(3, UnitsContainer(meter=3.))) + # Clean up and test previously expected value + self.ureg.preprocessors.pop() + self.assertEqual(self.ureg.parse_expression('1e6 Hz s-2'), self.Q_(999998., UnitsContainer())) + + def test_parse_unit_with_preprocessor(self): + # Add parsing of UDUNITS-style power + self.ureg.preprocessors.append(functools.partial( + re.sub, r'(?<=[A-Za-z])(?![A-Za-z])(?<![0-9\-][eE])(?<![0-9\-])(?=[0-9\-])', '**')) + # Test equality + self.assertEqual(self.ureg.parse_units('m2'), UnitsContainer(meter=2.)) + self.assertEqual(self.ureg.parse_units('m-2'), UnitsContainer(meter=-2.)) + # Clean up + self.ureg.preprocessors.pop() + def test_name(self): self.assertRaises(UndefinedUnitError, self.ureg.get_name, 'asdf')
How to use sign %?

## Define Sign %

I define:

```python
from pint import UnitRegistry
ureg = UnitRegistry()
ureg.define('percent = 1 = %')
x = 20 * u('1 %')
print('x: {0}'.format(x))
print('x: {:L}'.format(x))
```

and the output is:

```
x: 20
ValueError: Unknown format code 'L' for object of type 'int'
```

I need:

```
x: 20%
x: 20\%
```
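One way the preprocessors hook from this patch addresses the request. Mapping `%` to the word `percent`, and the `0.01` scale, are user-level choices shown here as assumptions, not pint defaults:

```python
from pint import UnitRegistry

# Rewrite '%' into a parseable unit name before tokenization.
ureg = UnitRegistry(preprocessors=[lambda s: s.replace("%", " percent ")])
ureg.define("percent = 0.01 = %")  # or '= 1' to keep the issue's semantics

x = 20 * ureg("1 %")           # the '%' survives parsing now
print("x: {:~}".format(x))     # short-symbol form, e.g. "x: 20 %"
print("x: {:L}".format(x))     # no longer raises: x is a Quantity, not an int
```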
0.0
5f89246c3376be6a0088ebbb83dec0640f6883c8
[ "pint/testsuite/test_unit.py::TestRegistry::test_parse_expression_with_preprocessor", "pint/testsuite/test_unit.py::TestRegistry::test_parse_unit_with_preprocessor", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_expression_with_preprocessor", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_unit_with_preprocessor" ]
[ "pint/testsuite/test_unit.py::TestUnit::test_creation", "pint/testsuite/test_unit.py::TestUnit::test_deepcopy", "pint/testsuite/test_unit.py::TestUnit::test_dimensionality", "pint/testsuite/test_unit.py::TestUnit::test_dimensionless", "pint/testsuite/test_unit.py::TestUnit::test_ipython", "pint/testsuite/test_unit.py::TestUnit::test_unit_casting", "pint/testsuite/test_unit.py::TestUnit::test_unit_cmp", "pint/testsuite/test_unit.py::TestUnit::test_unit_default_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_div", "pint/testsuite/test_unit.py::TestUnit::test_unit_eqs", "pint/testsuite/test_unit.py::TestUnit::test_unit_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_hash", "pint/testsuite/test_unit.py::TestUnit::test_unit_mul", "pint/testsuite/test_unit.py::TestUnit::test_unit_pow", "pint/testsuite/test_unit.py::TestUnit::test_unit_rdiv", "pint/testsuite/test_unit.py::TestUnit::test_unit_repr", "pint/testsuite/test_unit.py::TestRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistry::test_check", "pint/testsuite/test_unit.py::TestRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistry::test_wraps", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_context_sp", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_base_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_compatible_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_many", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_check", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_define", 
"pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_lazy", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wraps", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_alias", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00001", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00002", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00003", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00004", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00005", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00006", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00007", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00008", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00009", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00010", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00011", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00012", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00013", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00014", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00015", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00016", 
"pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00017", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00018", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00019" ]
{ "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-12-04 02:55:42+00:00
bsd-3-clause
2,723
hgrecco__pint-pandas-204
diff --git a/CHANGES b/CHANGES index 4ce6b16..f896b8e 100644 --- a/CHANGES +++ b/CHANGES @@ -4,7 +4,7 @@ pint-pandas Changelog 0.6 (unreleased) ---------------- -- Nothing changed yet. +- Fix astype issue #196 0.5 (2023-09-07) diff --git a/pint_pandas/pint_array.py b/pint_pandas/pint_array.py index 7154be5..fcbfe61 100644 --- a/pint_pandas/pint_array.py +++ b/pint_pandas/pint_array.py @@ -417,7 +417,9 @@ class PintArray(ExtensionArray, ExtensionOpsMixin): return self._to_array_of_quantity(copy=copy) if is_string_dtype(dtype): return pd.array([str(x) for x in self.quantity], dtype=dtype) - return pd.array(self.quantity, dtype, copy) + if isinstance(self._data, ExtensionArray): + return self._data.astype(dtype, copy=copy) + return pd.array(self.quantity.m, dtype, copy) @property def units(self):
hgrecco/pint-pandas
ef8a1209699d4533299303b800982578e8322242
diff --git a/pint_pandas/testsuite/test_issues.py b/pint_pandas/testsuite/test_issues.py index 95d85b2..d8d6ce0 100644 --- a/pint_pandas/testsuite/test_issues.py +++ b/pint_pandas/testsuite/test_issues.py @@ -185,3 +185,12 @@ class TestIssue174(BaseExtensionTests): expected_2 = pd.Series([3, 12], dtype="pint[m]") tm.assert_series_equal(col_sum, expected_2) + + [email protected]("dtype", [pd.Float64Dtype(), "float"]) +def test_issue_194(dtype): + s0 = pd.Series([1.0, 2.5], dtype=dtype) + s1 = s0.astype("pint[dimensionless]") + s2 = s1.astype(dtype) + + tm.assert_series_equal(s0, s2)
series.astype(float) not returning series of floats # Start: series with dimensionless values ```python import pandas as pd import pint import pint_pandas s0 = pd.Series([1.0, 2.5]) s1 = s0.astype('pint[dimensionless]') s1 # 0 1.0 # 1 2.5 # dtype: pint[] ``` # Issue: conversion back to floats ```python s2 = s1.astype(float) ``` Previously, the resulting Series had a `float` data type. Now, the data type is slightly different, which causes the comparison with `s0` to fail. ``` s2.dtype # PandasDtype('float64') pd.testing.assert_series_equal(s0, s2) #"dtypes are different" ``` Another issue is that `s2` cannot be converted to a string: ```python s2 #AttributeError: 'numpy.ndarray' object has no attribute '_formatter' ``` Excuse me as I currently cannot test this with the latest `pandas` version; I've used `2.1` instead. `pint-pandas` `0.5`, and `pint` `0.22`.
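To make the round trip concrete, here is a minimal check mirroring the `test_issue_194` regression test in the test patch above (it assumes a `pint-pandas` build containing the fix):

```python
import pandas as pd
import pint_pandas  # noqa: F401 -- importing registers the "pint[...]" extension dtype

s0 = pd.Series([1.0, 2.5], dtype="float")
s1 = s0.astype("pint[dimensionless]")
s2 = s1.astype("float")

# With the fix, converting back yields a plain float Series again,
# so the round trip compares equal (previously the dtypes differed).
pd.testing.assert_series_equal(s0, s2)
```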
0.0
ef8a1209699d4533299303b800982578e8322242
[ "pint_pandas/testsuite/test_issues.py::test_issue_194[float]" ]
[ "pint_pandas/testsuite/test_issues.py::TestIssue80::test_div", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[min]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[max]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[sum]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[mean]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[median]", "pint_pandas/testsuite/test_issues.py::test_issue_86", "pint_pandas/testsuite/test_issues.py::test_issue_71", "pint_pandas/testsuite/test_issues.py::test_issue_88", "pint_pandas/testsuite/test_issues.py::test_issue_127", "pint_pandas/testsuite/test_issues.py::test_issue_194[dtype0]" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-09-10 18:03:24+00:00
bsd-3-clause
2,724
hgrecco__pint-pandas-207
diff --git a/CHANGES b/CHANGES index f896b8e..7bfcbf9 100644 --- a/CHANGES +++ b/CHANGES @@ -4,6 +4,7 @@ pint-pandas Changelog 0.6 (unreleased) ---------------- +- Fix dequantify duplicate column failure #202 - Fix astype issue #196 diff --git a/pint_pandas/pint_array.py b/pint_pandas/pint_array.py index fcbfe61..6fb96ee 100644 --- a/pint_pandas/pint_array.py +++ b/pint_pandas/pint_array.py @@ -1,7 +1,6 @@ import copy import re import warnings -from collections import OrderedDict from importlib.metadata import version import numpy as np @@ -981,23 +980,35 @@ class PintDataFrameAccessor(object): df_columns = df.columns.to_frame() df_columns["units"] = [ - formatter_func(df[col].dtype) - if isinstance(df[col].dtype, PintType) + formatter_func(df.dtypes.iloc[i]) + if isinstance(df.dtypes.iloc[i], PintType) else NO_UNIT - for col in df.columns + for i, col in enumerate(df.columns) ] - data_for_df = OrderedDict() + data_for_df = [] for i, col in enumerate(df.columns): - if isinstance(df[col].dtype, PintType): - data_for_df[tuple(df_columns.iloc[i])] = df[col].values.data + if isinstance(df.dtypes.iloc[i], PintType): + data_for_df.append( + pd.Series( + data=df.iloc[:, i].values.data, + name=tuple(df_columns.iloc[i]), + index=df.index, + copy=False, + ) + ) else: - data_for_df[tuple(df_columns.iloc[i])] = df[col].values - - df_new = DataFrame(data_for_df, columns=data_for_df.keys()) + data_for_df.append( + pd.Series( + data=df.iloc[:, i].values, + name=tuple(df_columns.iloc[i]), + index=df.index, + copy=False, + ) + ) + df_new = pd.concat(data_for_df, axis=1, copy=False) df_new.columns.names = df.columns.names + ["unit"] - df_new.index = df.index return df_new
hgrecco/pint-pandas
2e087e625cdc666540e8c06e26b31956911c1ead
diff --git a/pint_pandas/testsuite/test_issues.py b/pint_pandas/testsuite/test_issues.py index d8d6ce0..fbcd0c6 100644 --- a/pint_pandas/testsuite/test_issues.py +++ b/pint_pandas/testsuite/test_issues.py @@ -194,3 +194,38 @@ def test_issue_194(dtype): s2 = s1.astype(dtype) tm.assert_series_equal(s0, s2) + + +class TestIssue202(BaseExtensionTests): + def test_dequantify(self): + df = pd.DataFrame() + df["test"] = pd.Series([1, 2, 3], dtype="pint[kN]") + df.insert(0, "test", df["test"], allow_duplicates=True) + + expected = pd.DataFrame.from_dict( + data={ + "index": [0, 1, 2], + "columns": [("test", "kilonewton")], + "data": [[1], [2], [3]], + "index_names": [None], + "column_names": [None, "unit"], + }, + orient="tight", + dtype="Int64", + ) + result = df.iloc[:, 1:].pint.dequantify() + tm.assert_frame_equal(expected, result) + + expected = pd.DataFrame.from_dict( + data={ + "index": [0, 1, 2], + "columns": [("test", "kilonewton"), ("test", "kilonewton")], + "data": [[1, 1], [2, 2], [3, 3]], + "index_names": [None], + "column_names": [None, "unit"], + }, + orient="tight", + dtype="Int64", + ) + result = df.pint.dequantify() + tm.assert_frame_equal(expected, result)
pint.dequantify() fails for duplicate column names `df.pint.dequantify()` breaks unexpectedly (and error msg is cryptic) if column names contain duplicates. ```python df = pd.DataFrame() df['test']= pd.Series([1,2,3], dtype='pint[kN]') df.insert(0,'test',df['test'],allow_duplicates=True) ``` `df`: >   | test | test -- | -- | -- 0|1 | 1 1|2 | 2 2|3 | 3 ```python df.iloc[:,1:].pint.dequantify() ``` --| test --|-- unit| kN 0|1 1|2 2|3 ```python df.pint.dequantify() ``` > 'DataFrame' object has no attribute 'dtype' pint_pandas.__version__ = 0.4, python 3.10
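The root cause the patch above works around is general pandas behaviour: label-based access on duplicate column names returns a DataFrame, not a Series, so `.dtype` and `.values` break. A minimal illustration (plain pandas, no pint needed):

```python
import pandas as pd

df = pd.DataFrame([[1, 1], [2, 2], [3, 3]], columns=["test", "test"])

print(type(df["test"]))    # <class 'pandas.core.frame.DataFrame'> -- both columns come back
# df["test"].dtype         # would raise: 'DataFrame' object has no attribute 'dtype'

# Positional access sidesteps the duplicate labels, which is what the fix switches to:
print(df.dtypes.iloc[0])   # int64
print(df.iloc[:, 0].name)  # 'test'
```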
0.0
2e087e625cdc666540e8c06e26b31956911c1ead
[ "pint_pandas/testsuite/test_issues.py::TestIssue202::test_dequantify" ]
[ "pint_pandas/testsuite/test_issues.py::TestIssue80::test_div", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[min]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[max]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[sum]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[mean]", "pint_pandas/testsuite/test_issues.py::TestIssue80::test_reductions[median]", "pint_pandas/testsuite/test_issues.py::test_issue_86", "pint_pandas/testsuite/test_issues.py::test_issue_71", "pint_pandas/testsuite/test_issues.py::test_issue_88", "pint_pandas/testsuite/test_issues.py::test_issue_127", "pint_pandas/testsuite/test_issues.py::test_issue_194[dtype0]", "pint_pandas/testsuite/test_issues.py::test_issue_194[float]" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-10-20 08:21:28+00:00
bsd-3-clause
2,725
hh-h__aiohttp-swagger3-98
diff --git a/.travis.yml b/.travis.yml index c9bb4cc..4eb9d8d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,7 +12,6 @@ matrix: - python: nightly fast_finish: true allow_failures: - - python: 3.10 - python: 3.10-dev - python: 3.11-dev - python: nightly diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 562f3d7..890f6f1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,11 @@ Changelog ========= +0.7.1 (30-01-2021) +------------------ + +- requestBody can be optional (#97) + 0.7.0 (04-11-2021) ------------------ diff --git a/aiohttp_swagger3/__init__.py b/aiohttp_swagger3/__init__.py index 98d8dcc..ba70487 100644 --- a/aiohttp_swagger3/__init__.py +++ b/aiohttp_swagger3/__init__.py @@ -9,7 +9,7 @@ __all__ = ( "ValidatorError", "__version__", ) -__version__ = "0.7.0" +__version__ = "0.7.1" __author__ = "Valetov Konstantin" from .exceptions import ValidatorError diff --git a/aiohttp_swagger3/swagger_route.py b/aiohttp_swagger3/swagger_route.py index 1f15226..ef1c732 100644 --- a/aiohttp_swagger3/swagger_route.py +++ b/aiohttp_swagger3/swagger_route.py @@ -12,6 +12,8 @@ from .validators import MISSING, Validator, ValidatorError, schema_to_validator, _SwaggerHandler = Callable[..., Awaitable[web.StreamResponse]] +REQUEST_BODY_NAME: str = "body" + class RequestValidationFailed(web.HTTPBadRequest): """This exception can be caught in a aiohttp middleware. @@ -50,6 +52,7 @@ class SwaggerRoute: "hp", "cp", "bp", + "is_body_required", "auth", "params", ) @@ -68,6 +71,7 @@ class SwaggerRoute: method_section = self._swagger.spec["paths"][path][method] parameters = method_section.get("parameters") body = method_section.get("requestBody") + self.is_body_required = body and body.get("required", False) method_security = method_section.get("security") security = method_security if method_security is not None else self._swagger.spec.get("security", []) components = self._swagger.spec.get("components", {}) @@ -104,14 +108,14 @@ class SwaggerRoute: self._swagger._get_media_type_handler(media_type) value = body["content"][media_type] self.bp[media_type] = Parameter( - "body", + REQUEST_BODY_NAME, schema_to_validator(value["schema"]), body.get("required", False), ) self.params = set(_get_fn_parameters(self.handler)) async def parse(self, request: web.Request) -> Dict: - params = {} + params: Dict = {} if "request" in self.params: params["request"] = request request_key = self._swagger.request_key @@ -157,29 +161,39 @@ class SwaggerRoute: params[param.name] = value # body parameters if self.bp: - if "Content-Type" not in request.headers: - if next(iter(self.bp.values())).required: - errors["body"] = "is required" - else: - media_type, _ = cgi.parse_header(request.headers["Content-Type"]) - if media_type not in self.bp: - errors["body"] = f"no handler for {media_type}" + if request.body_exists: + if "Content-Type" not in request.headers: + if next(iter(self.bp.values())).required: + errors[REQUEST_BODY_NAME] = "is required" else: - handler = self._swagger._get_media_type_handler(media_type) - param = self.bp[media_type] - try: - v, has_raw = await handler(request) - except ValidatorError as e: - errors[param.name] = e.error + media_type, _ = cgi.parse_header(request.headers["Content-Type"]) + if media_type not in self.bp: + errors[REQUEST_BODY_NAME] = f"no handler for {media_type}" else: + handler = self._swagger._get_media_type_handler(media_type) + param = self.bp[media_type] try: - value = param.validator.validate(v, has_raw) + v, has_raw = await handler(request) except ValidatorError as e: 
errors[param.name] = e.error else: - request[request_key][param.name] = value - if param.name in self.params: - params[param.name] = value + try: + value = param.validator.validate(v, has_raw) + except ValidatorError as e: + errors[param.name] = e.error + else: + request[request_key][param.name] = value + if param.name in self.params: + params[param.name] = value + + elif self.is_body_required: + errors[REQUEST_BODY_NAME] = "is required" + + else: + request[request_key][REQUEST_BODY_NAME] = None + if REQUEST_BODY_NAME in self.params: + params[REQUEST_BODY_NAME] = None + # header parameters if self.hp: for param in self.hp: diff --git a/docs/conf.py b/docs/conf.py index ac1b90c..ab151ec 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,11 +18,11 @@ sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- project = 'aiohttp-swagger3' -copyright = '2021, Konstantin Valetov' +copyright = '2022, Konstantin Valetov' author = 'Konstantin Valetov' # The full version, including alpha/beta/rc tags -release = '0.7.0' +release = '0.7.1' # -- General configuration ---------------------------------------------------
hh-h/aiohttp-swagger3
4ce20d2eb102bbbe7c95a22060a558fc22003618
diff --git a/tests/test_docs_request_bodies.py b/tests/test_docs_request_bodies.py index 5371a31..22e70d5 100644 --- a/tests/test_docs_request_bodies.py +++ b/tests/test_docs_request_bodies.py @@ -1211,6 +1211,44 @@ async def test_no_content_type(swagger_docs, aiohttp_client): assert await resp.json() is None +async def test_no_content_type_body_required(swagger_docs, aiohttp_client): + async def handler(request, body: Dict): + """ + --- + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - required + properties: + required: + type: integer + + responses: + '200': + description: OK. + + """ + return web.json_response(body) + + swagger = swagger_docs() + swagger.add_route("POST", "/r", handler) + + client = await aiohttp_client(swagger._app) + + resp = await client.post("/r", skip_auto_headers=("Content-Type",)) + assert resp.status == 400 + error = error_to_json(await resp.text()) + assert error == {"body": "is required"} + + resp = await client.post("/r", skip_auto_headers=("Content-Type",), data="payload") + assert resp.status == 400 + assert error == {"body": "is required"} + + async def test_required_no_content_type(swagger_docs, aiohttp_client): async def handler(request, body: Dict): """ @@ -1245,7 +1283,7 @@ async def test_required_no_content_type(swagger_docs, aiohttp_client): assert error == {"body": "is required"} -async def test_optional_body(swagger_docs, aiohttp_client): +async def test_no_handler(swagger_docs, aiohttp_client): async def handler(request, body: Optional[Dict] = None): """ --- @@ -1363,3 +1401,78 @@ async def test_nullable_ref(swagger_docs_with_components, aiohttp_client): resp = await client.post("/r", json=body) assert resp.status == 200 assert await resp.json() == body + + +async def test_optional_body_implicit(swagger_docs, aiohttp_client): + async def handler(request, body: Optional[Dict]): + """ + --- + requestBody: + content: + application/json: + schema: + type: object + required: + - required + properties: + required: + type: integer + + responses: + '200': + description: OK. + + """ + return web.json_response(body) + + swagger = swagger_docs() + swagger.add_route("POST", "/r", handler) + + client = await aiohttp_client(swagger._app) + + resp = await client.post("/r") + assert resp.status == 200 + assert await resp.json() is None + + body = {"required": 10} + resp = await client.post("/r", json=body) + assert resp.status == 200 + assert await resp.json() == body + + +async def test_optional_body_explicit(swagger_docs, aiohttp_client): + async def handler(request, body: Optional[Dict]): + """ + --- + requestBody: + required: false + content: + application/json: + schema: + type: object + required: + - required + properties: + required: + type: integer + + responses: + '200': + description: OK. + + """ + return web.json_response(body) + + swagger = swagger_docs() + swagger.add_route("POST", "/r", handler) + + client = await aiohttp_client(swagger._app) + + resp = await client.post("/r") + assert resp.status == 200 + assert await resp.json() is None + + body = {"required": 10} + resp = await client.post("/r", json=body) + assert resp.status == 200 + assert await resp.json() == body
Definitions with requestBody / required: false cannot be used Schemas with `requestBody` where `required` is set to false cannot be used, as the library fails to handle them. The library first raises a 400 with a complaint about the content type `application/octet-stream` not being supported. I would already expect this to work, as it is the default for an empty body. When explicitly setting `application/json` the library fails with another 400 `Expecting value: line 1 column 1 (char 0)`. Below is a modification of the `test_nullable_ref` test case from `test_docs_request_bodies` to reproduce the issue: ```python async def test_non_required(swagger_docs_with_components, aiohttp_client): routes = web.RouteTableDef() @routes.post("/r") async def handler(request, body: Dict): """ --- requestBody: required: false content: application/json: schema: type: object required: - pet properties: pet: nullable: true allOf: - $ref: '#/components/schemas/Pet' responses: '200': description: OK. """ return web.json_response(body) swagger = swagger_docs_with_components() swagger.add_routes(routes) client = await aiohttp_client(swagger._app) resp = await client.post("/r", headers={"content-type": "application/json"}) assert resp.status == 200 assert await resp.text() == "" body = { "pet": None, } resp = await client.post("/r", json=body) assert resp.status == 200 assert await resp.json() == body body = { "pet": { "name": "lizzy", "age": 12, } } resp = await client.post("/r", json=body) assert resp.status == 200 assert await resp.json() == body ```
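The merged fix keys the behaviour off `request.body_exists` plus the spec's `required` flag. Here is that decision flow reduced to a self-contained sketch — `resolve_body` and its arguments are illustrative names, not the library's API:

```python
from typing import Any, Dict, Optional

def resolve_body(body_exists: bool, body_required: bool,
                 raw_body: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    # Simplified mirror of the SwaggerRoute.parse() branching in the patch above.
    if body_exists:
        return raw_body  # the real code routes this through media-type validation
    if body_required:
        raise ValueError("body: is required")
    return None          # optional body omitted: the handler receives body=None

assert resolve_body(False, False, None) is None                  # optional body may be absent
assert resolve_body(True, True, {"pet": None}) == {"pet": None}  # present body passes through
```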
0.0
4ce20d2eb102bbbe7c95a22060a558fc22003618
[ "tests/test_docs_request_bodies.py::test_optional_body_implicit[pyloop]", "tests/test_docs_request_bodies.py::test_optional_body_explicit[pyloop]" ]
[ "tests/test_docs_request_bodies.py::test_body[pyloop]", "tests/test_docs_request_bodies.py::test_body_with_additional_properties[pyloop]", "tests/test_docs_request_bodies.py::test_body_with_no_additional_properties[pyloop]", "tests/test_docs_request_bodies.py::test_body_with_object_additional_properties[pyloop]", "tests/test_docs_request_bodies.py::test_deep_nested[pyloop]", "tests/test_docs_request_bodies.py::test_nullable[pyloop]", "tests/test_docs_request_bodies.py::test_one_of_object[pyloop]", "tests/test_docs_request_bodies.py::test_any_of_object[pyloop]", "tests/test_docs_request_bodies.py::test_all_of_object[pyloop]", "tests/test_docs_request_bodies.py::test_array_in_object[pyloop]", "tests/test_docs_request_bodies.py::test_float_as_int[pyloop]", "tests/test_docs_request_bodies.py::test_min_max_properties[pyloop]", "tests/test_docs_request_bodies.py::test_body_with_optional_properties[pyloop]", "tests/test_docs_request_bodies.py::test_media_type_with_charset[pyloop]", "tests/test_docs_request_bodies.py::test_incorrect_json_body[pyloop]", "tests/test_docs_request_bodies.py::test_form_data[pyloop]", "tests/test_docs_request_bodies.py::test_object_can_have_optional_props[pyloop]", "tests/test_docs_request_bodies.py::test_no_content_type[pyloop]", "tests/test_docs_request_bodies.py::test_no_content_type_body_required[pyloop]", "tests/test_docs_request_bodies.py::test_required_no_content_type[pyloop]", "tests/test_docs_request_bodies.py::test_no_handler[pyloop]", "tests/test_docs_request_bodies.py::test_wrong_body[pyloop]", "tests/test_docs_request_bodies.py::test_nullable_ref[pyloop]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-01-30 10:57:04+00:00
apache-2.0
2,726
hhursev__recipe-scrapers-415
diff --git a/README.rst b/README.rst index 28f7782f..68fb495f 100644 --- a/README.rst +++ b/README.rst @@ -145,7 +145,7 @@ Scrapers available for: - `https://hellofresh.com/ <https://hellofresh.com>`_ - `https://hellofresh.co.uk/ <https://hellofresh.co.uk>`_ - `https://www.hellofresh.de/ <https://www.hellofresh.de/>`_ -- `https://www.hellofresh.fr/ <https://www.hellofresh.de/>`_ +- `https://www.hellofresh.fr/ <https://www.hellofresh.fr/>`_ - `https://hostthetoast.com/ <https://hostthetoast.com/>`_ - `https://receitas.ig.com.br/ <https://receitas.ig.com.br>`_ - `https://indianhealthyrecipes.com <https://www.indianhealthyrecipes.com>`_ diff --git a/recipe_scrapers/domesticateme.py b/recipe_scrapers/domesticateme.py index 5b4bdb20..0e3827ef 100644 --- a/recipe_scrapers/domesticateme.py +++ b/recipe_scrapers/domesticateme.py @@ -4,7 +4,7 @@ from ._abstract import AbstractScraper class DomesticateMe(AbstractScraper): @classmethod def host(cls): - return "https://domesticate-me.com/" + return "domesticate-me.com" def title(self): return self.schema.title() diff --git a/recipe_scrapers/heb.py b/recipe_scrapers/heb.py index b407d746..17cf0b34 100644 --- a/recipe_scrapers/heb.py +++ b/recipe_scrapers/heb.py @@ -5,7 +5,7 @@ from ._utils import get_minutes, get_yields, normalize_string class HEB(AbstractScraper): @classmethod def host(self, domain="com"): - return f"www.heb.{domain}" + return f"heb.{domain}" def title(self): return self.soup.find("h1", {"class": "title"}).get_text() diff --git a/recipe_scrapers/streetkitchen.py b/recipe_scrapers/streetkitchen.py index 306a48c0..48f9c457 100644 --- a/recipe_scrapers/streetkitchen.py +++ b/recipe_scrapers/streetkitchen.py @@ -5,7 +5,7 @@ from ._utils import get_yields, normalize_string class StreetKitchen(AbstractScraper): @classmethod def host(cls): - return "https://streetkitchen.hu" + return "streetkitchen.hu" def title(self): return self.soup.find("h1", {"class": "entry-title"}).get_text() diff --git a/recipe_scrapers/wholefoods.py b/recipe_scrapers/wholefoods.py index bfb3a18f..b8ff5164 100644 --- a/recipe_scrapers/wholefoods.py +++ b/recipe_scrapers/wholefoods.py @@ -4,7 +4,7 @@ from ._abstract import AbstractScraper class WholeFoods(AbstractScraper): @classmethod def host(self, domain="com"): - return f"www.wholefoodsmarket.{domain}" + return f"wholefoodsmarket.{domain}" def title(self): return self.schema.title()
hhursev/recipe-scrapers
c4783179ed96e98722fb7572b2fcfefe24323d1b
diff --git a/tests/test_domesticateme.py b/tests/test_domesticateme.py index 4e4eec3a..8fa2d8da 100644 --- a/tests/test_domesticateme.py +++ b/tests/test_domesticateme.py @@ -7,7 +7,7 @@ class TestDomesticateMeScraper(ScraperTest): scraper_class = DomesticateMe def test_host(self): - self.assertEqual("https://domesticate-me.com/", self.harvester_class.host()) + self.assertEqual("domesticate-me.com", self.harvester_class.host()) def test_canonical_url(self): self.assertEqual( diff --git a/tests/test_heb.py b/tests/test_heb.py index a9646f15..7b94dfac 100644 --- a/tests/test_heb.py +++ b/tests/test_heb.py @@ -7,7 +7,7 @@ class TestHEBScraper(ScraperTest): scraper_class = HEB def test_host(self): - self.assertEqual("www.heb.com", self.harvester_class.host()) + self.assertEqual("heb.com", self.harvester_class.host()) def test_canonical_url(self): self.assertEqual( diff --git a/tests/test_streetkitchen.py b/tests/test_streetkitchen.py index 0e1b4a03..f0a45fdc 100644 --- a/tests/test_streetkitchen.py +++ b/tests/test_streetkitchen.py @@ -7,7 +7,7 @@ class TestStreetKitchenScraper(ScraperTest): scraper_class = StreetKitchen def test_host(self): - self.assertEqual("https://streetkitchen.hu", self.harvester_class.host()) + self.assertEqual("streetkitchen.hu", self.harvester_class.host()) def test_language(self): self.assertEqual("hu", self.harvester_class.language()) diff --git a/tests/test_wholefoods.py b/tests/test_wholefoods.py index c7427463..0761146d 100644 --- a/tests/test_wholefoods.py +++ b/tests/test_wholefoods.py @@ -7,11 +7,11 @@ class TestWholeFoodsScraper(ScraperTest): scraper_class = WholeFoods def test_host(self): - self.assertEqual("www.wholefoodsmarket.com", self.harvester_class.host()) + self.assertEqual("wholefoodsmarket.com", self.harvester_class.host()) def test_host_domain(self): self.assertEqual( - "www.wholefoodsmarket.co.uk", self.harvester_class.host(domain="co.uk") + "wholefoodsmarket.co.uk", self.harvester_class.host(domain="co.uk") ) def test_title(self):
mindmegette.hu and other hosts contain www I see that the scraper for mindmegette.hu [contains `www.` in its host](https://github.com/hhursev/recipe-scrapers/blob/master/recipe_scrapers/mindmegette.py#L8) unlike other hosts. I'm currently developing an application using your fantastic library, and I want to do URI validation to select the right method. As most domains are without the `www.` prefix, I'm doing ```python netloc = urllib.parse.urlparse(url).netloc url_ = netloc[4:] if netloc.startswith('www.') else netloc if url_ not in SCRAPERS.keys(): [...] ``` I could try to instead check availability with both `www` and without, but it seems more consistent to remove the prefix from the host. I'm ok to do a PR to fix this. Thanks! Cyril **Pre-filing checks** - [x] I have searched for open issues that report the same problem - [x] I have checked that the bug affects the latest version of the library **The URL of the recipe(s) that are not being scraped correctly** - https://www.mindmegette.hu/fetas-paradicsomos-omlett.recept/ **The version of Python you're using** Python 3.8.11 **The operating system of your environment** Linux Mint - [ ] I'd like to try fixing this scraper myself - [x] I'd like guidance to help me develop a fix - [ ] I'd prefer if the `recipe-scrapers` team try to fix this
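A caveat worth spelling out (and the reason the snippet above avoids `str.strip('www.')`): `strip` removes characters from the *set* `{'w', '.'}` off both ends, not the literal prefix, which mangles hosts that happen to start with those letters. A quick demonstration plus a safe helper (`drop_www` is a hypothetical name; works on Python 3.8):

```python
# str.strip takes a character set, not a prefix:
assert "www.weightwatchers.com".strip("www.") == "eightwatchers.com"  # mangled!

def drop_www(netloc: str) -> str:
    """Remove a literal leading 'www.' only."""
    return netloc[4:] if netloc.startswith("www.") else netloc

assert drop_www("www.weightwatchers.com") == "weightwatchers.com"
assert drop_www("streetkitchen.hu") == "streetkitchen.hu"
```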
0.0
c4783179ed96e98722fb7572b2fcfefe24323d1b
[ "tests/test_domesticateme.py::TestDomesticateMeScraper::test_host", "tests/test_heb.py::TestHEBScraper::test_host", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_host", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_host", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_host_domain" ]
[ "tests/test_domesticateme.py::TestDomesticateMeScraper::test_canonical_url", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_image", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_ingredients", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_instructions", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_rating", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_title", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_total_time", "tests/test_domesticateme.py::TestDomesticateMeScraper::test_yields", "tests/test_heb.py::TestHEBScraper::test_canonical_url", "tests/test_heb.py::TestHEBScraper::test_image", "tests/test_heb.py::TestHEBScraper::test_ingredients", "tests/test_heb.py::TestHEBScraper::test_instructions", "tests/test_heb.py::TestHEBScraper::test_title", "tests/test_heb.py::TestHEBScraper::test_total_time", "tests/test_heb.py::TestHEBScraper::test_yields", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_canonical_url", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_image", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_ingredients", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_instructions", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_language", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_title", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_total_time", "tests/test_streetkitchen.py::TestStreetKitchenScraper::test_yields", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_image", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_ingredients", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_instructions", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_title", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_total_time", "tests/test_wholefoods.py::TestWholeFoodsScraper::test_yields" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-07-23 11:29:52+00:00
mit
2,727
hhursev__recipe-scrapers-500
diff --git a/recipe_scrapers/_utils.py b/recipe_scrapers/_utils.py index 07f826bf..c84a4ac2 100644 --- a/recipe_scrapers/_utils.py +++ b/recipe_scrapers/_utils.py @@ -3,8 +3,19 @@ import re from ._exceptions import ElementNotFoundInHtml +FRACTIONS = { + "¼": 0.25, + "½": 0.50, + "¾": 0.75, + "⅓": 0.33, + "⅔": 0.66, + "⅕": 0.20, + "⅖": 0.40, + "⅗": 0.60, +} + TIME_REGEX = re.compile( - r"(\D*(?P<hours>\d+)\s*(hours|hrs|hr|h|óra))?(\D*(?P<minutes>\d+)\s*(minutes|mins|min|m|perc))?", + r"(\D*(?P<hours>[\d.\s/?¼½¾⅓⅔⅕⅖⅗]+)\s*(hours|hrs|hr|h|óra))?(\D*(?P<minutes>\d+)\s*(minutes|mins|min|m|perc))?", re.IGNORECASE, ) @@ -41,13 +52,39 @@ def get_minutes(element, return_zero_on_not_found=False): time_text = time_text.split("-", 2)[ 1 ] # sometimes formats are like this: '12-15 minutes' + if " to " in time_text: + time_text = time_text.split("to", 2)[ + 1 + ] # sometimes formats are like this: '12 to 15 minutes' if "h" in time_text: time_text = time_text.replace("h", "hours") + "minutes" matched = TIME_REGEX.search(time_text) minutes = int(matched.groupdict().get("minutes") or 0) - minutes += 60 * int(matched.groupdict().get("hours") or 0) + hours_matched = matched.groupdict().get("hours") + + if hours_matched: + hours_matched = hours_matched.strip() + if any([symbol in FRACTIONS.keys() for symbol in hours_matched]): + hours = 0 + for fraction, value in FRACTIONS.items(): + if fraction in hours_matched: + hours += value + hours_matched = hours_matched.replace(fraction, "") + hours += int(hours_matched) if hours_matched else 0 + elif "/" in hours_matched: + # for example "1 1/2" is matched + hours_matched_split = hours_matched.split(" ") + hours = 0 + if len(hours_matched_split) == 2: + hours += int(hours_matched_split[0]) + fraction = hours_matched_split[-1:][0].split("/") + hours += float(int(fraction[0]) / int(fraction[1])) + else: + hours = float(hours_matched) + + minutes += round(60 * hours, 0) return minutes
hhursev/recipe-scrapers
941f062572b15264be3fdf802b66c7eaaba7b291
diff --git a/tests/library/test_utils.py b/tests/library/test_utils.py index 3853c27d..a016e9ce 100644 --- a/tests/library/test_utils.py +++ b/tests/library/test_utils.py @@ -25,3 +25,47 @@ class TestUtils(unittest.TestCase): def test_get_minutes_int_in_string_literal(self): text = "90" self.assertEqual(90, get_minutes(text)) + + def test_get_minutes_fraction_in_hours_with_dot_notation(self): + text = "1.5 hours" + self.assertEqual(90, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_unicode_character_halves(self): + text = "1½ hours" + self.assertEqual(90, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_unicode_character_three_fours(self): + text = "1¾ hours" + self.assertEqual(105, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_unicode_character_one_fours(self): + text = "1¼ hours" + self.assertEqual(75, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_unicode_character_two_thirds(self): + text = "1⅔ hours" + self.assertEqual(100, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_digits_with_slash(self): + text = "1 1/2 hours" + self.assertEqual(90, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_digits_with_slash_three_fours(self): + text = "1 3/4 hours" + self.assertEqual(105, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_digits_with_slash_one_fours(self): + text = "1 1/4 hours" + self.assertEqual(75, get_minutes(text)) + + def test_get_minutes_fraction_with_fraction_digits_with_slash_two_thirds(self): + text = "1 2/3 hours" + self.assertEqual(100, get_minutes(text)) + + def test_get_minutes_handles_dashes(self): + text = "15 - 20 minutes" + self.assertEqual(20, get_minutes(text)) + + def test_get_minutes_handles_to(self): + text = "15 to 20 minutes" + self.assertEqual(20, get_minutes(text))
Handle fractions and decimals in get_minutes() Currently the get_minutes() utility fails to parse hour durations when parsing duration attributes. Examples - "1.5 hours" - "1 1/2 hours" - "1½ hours" I believe the code below works, but it is probably inefficient and I'm not confident about the full ramifications elsewhere in the code or which corner cases would introduce further defects. ``` import re from unicodedata import decomposition TIME_REGEX = re.compile( r"(\D*(?P<hours>\d*.?(\s\d)?\/?\d+)\s*(hours|hrs|hr|h|óra))?(\D*(?P<minutes>\d+)\s*(minutes|mins|min|m|perc))?", re.IGNORECASE, ) def get_minutes(element, return_zero_on_not_found=False): if element is None: # to be removed if return_zero_on_not_found: return 0 raise ElementNotFoundInHtml(element) try: return int(element) except Exception: pass if isinstance(element, str): time_text = element else: time_text = element.get_text() if time_text.startswith("P") and "T" in time_text: time_text = time_text.split("T", 2)[1] if "-" in time_text: time_text = time_text.split("-", 2)[ 1 ] # sometimes formats are like this: '12-15 minutes' if " to " in time_text: time_text = time_text.split("to", 2)[ 1 ] # sometimes formats are like this: '12 to 15 minutes' empty = '' for x in time_text: if 'fraction' in decomposition(x): f = decomposition(x[-1:]).split() empty += f" {f[1].replace('003', '')}/{f[3].replace('003', '')}" else: empty += x time_text = empty matched = TIME_REGEX.search(time_text) minutes = int(matched.groupdict().get("minutes") or 0) hours = matched.groupdict().get("hours") if hours and "/" in hours: number = hours.split(" ") if len(number) == 2: minutes += 60*int(number[0]) fraction = number[-1:][0].split("/") minutes += 60 * float(int(fraction[0])/int(fraction[1])) elif hours: minutes += 60 * float(hours) return int(minutes) ``` - [ ] I'd like to try fixing this scraper myself - [ ] I'd like guidance to help me develop a fix - [x] I'd prefer if the `recipe-scrapers` team try to fix this
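The fix that was eventually merged (see this record's patch) takes a simpler route than the snippet above: a lookup table for unicode vulgar fractions plus manual handling of `a b/c` strings. A condensed, runnable sketch of that logic — names shortened, not the library's exact code:

```python
FRACTIONS = {"¼": 0.25, "½": 0.50, "¾": 0.75, "⅓": 0.33, "⅔": 0.66}

def hours_to_minutes(hours_text: str) -> int:
    """Convert matched hour text like '1.5', '1 1/2' or '1½' to minutes."""
    hours_text = hours_text.strip()
    hours = 0.0
    # Unicode fraction characters contribute their value and are removed.
    for symbol, value in FRACTIONS.items():
        if symbol in hours_text:
            hours += value
            hours_text = hours_text.replace(symbol, "")
    if "/" in hours_text:
        parts = hours_text.split()
        if len(parts) == 2:           # mixed number like '1 1/2'
            hours += int(parts[0])
        num, den = parts[-1].split("/")
        hours += int(num) / int(den)
    elif hours_text:                  # plain integer or decimal, e.g. '1.5'
        hours += float(hours_text)
    return round(60 * hours)

assert hours_to_minutes("1.5") == 90
assert hours_to_minutes("1 1/2") == 90
assert hours_to_minutes("1½") == 90
```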
0.0
941f062572b15264be3fdf802b66c7eaaba7b291
[ "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_in_hours_with_dot_notation", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash_one_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash_three_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash_two_thirds", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_halves", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_one_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_three_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_two_thirds", "tests/library/test_utils.py::TestUtils::test_get_minutes_handles_to" ]
[ "tests/library/test_utils.py::TestUtils::test_get_minutes_english_abbreviation", "tests/library/test_utils.py::TestUtils::test_get_minutes_english_description", "tests/library/test_utils.py::TestUtils::test_get_minutes_handles_dashes", "tests/library/test_utils.py::TestUtils::test_get_minutes_int_in_string_literal", "tests/library/test_utils.py::TestUtils::test_get_minutes_long_iso_format", "tests/library/test_utils.py::TestUtils::test_get_minutes_short_iso_format" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-02-19 15:30:59+00:00
mit
2,728
hhursev__recipe-scrapers-723
diff --git a/recipe_scrapers/__init__.py b/recipe_scrapers/__init__.py index 77547339..74a95f39 100644 --- a/recipe_scrapers/__init__.py +++ b/recipe_scrapers/__init__.py @@ -472,6 +472,15 @@ SCRAPERS = { } +def get_supported_urls() -> set[str]: + return set(SCRAPERS.keys()) + + +def scraper_exists_for(url_path: str) -> bool: + host_name = get_host_name(url_path) + return host_name in get_supported_urls() + + def scrape_me(url_path: str, **options: dict[str, Any]) -> AbstractScraper: host_name = get_host_name(url_path)
hhursev/recipe-scrapers
08a95ab0687f0bab83359b3c8ce6a4f1a7def335
diff --git a/.github/workflows/unittests.yaml b/.github/workflows/unittests.yaml index 43a0f7e6..4a299907 100644 --- a/.github/workflows/unittests.yaml +++ b/.github/workflows/unittests.yaml @@ -43,10 +43,10 @@ jobs: with: # coveralls repo token github-token: "SmlfzlVJy4ow55rduU7IU5GmmFCfAdGeq" - - if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' + - if: ${{ !github.event.pull_request.head.repo.fork }} && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' name: Create coverage xml report (needed for codacy) run: pip install coverage && coverage xml - - if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' + - if: ${{ !github.event.pull_request.head.repo.fork }} && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' name: Report code coverage to codacy uses: codacy/codacy-coverage-reporter-action@v1 with: diff --git a/tests/library/test_main_methods.py b/tests/library/test_main_methods.py new file mode 100644 index 00000000..e85ab439 --- /dev/null +++ b/tests/library/test_main_methods.py @@ -0,0 +1,19 @@ +import unittest + +from recipe_scrapers import get_supported_urls, scraper_exists_for +from recipe_scrapers._utils import get_host_name + + +class TestMainMethods(unittest.TestCase): + def test_get_supported_urls(self): + urls = get_supported_urls() + self.assertGreater(len(urls), 200) + self.assertIn(get_host_name("https://www.hellofresh.nl/"), urls) + self.assertIn(get_host_name("https://hellofresh.com/"), urls) + + def test_scraper_exists_for(self): + self.assertFalse(scraper_exists_for("example.com")) + self.assertTrue(scraper_exists_for("https://www.hellofresh.nl/")) + self.assertTrue( + scraper_exists_for("https://eatsmarter.de/rezepte/gruenkohl-kokos-suppe") + )
[feature] Get list of supported urls When integrating this into other programs, it would be nice to get a current list of supported URLs from this package, ideally even with a flag that shows whether it works by schema or custom parsing. If I am correct, the only way to test this currently is either hand-parsing the README or trying the URL with and without wild-mode and interpreting the results, right? Or have I missed something? This library looks very promising, thanks for your work. I would also be willing to implement this and more parsers as well.
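The API added by this record's patch is small; usage, per the accompanying tests, looks like this (assumes a `recipe-scrapers` release containing the change):

```python
from recipe_scrapers import get_supported_urls, scraper_exists_for

print(len(get_supported_urls()))                         # several hundred registered host names
print(scraper_exists_for("https://www.hellofresh.nl/"))  # True -- host is registered
print(scraper_exists_for("https://example.com/"))        # False -- no scraper for this host
```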
0.0
08a95ab0687f0bab83359b3c8ce6a4f1a7def335
[ "tests/library/test_main_methods.py::TestMainMethods::test_scraper_exists_for", "tests/library/test_main_methods.py::TestMainMethods::test_get_supported_urls" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-01-19 15:44:48+00:00
mit
2,729
hhursev__recipe-scrapers-839
diff --git a/recipe_scrapers/realsimple.py b/recipe_scrapers/realsimple.py index 29ef67ff..cfed0b8a 100644 --- a/recipe_scrapers/realsimple.py +++ b/recipe_scrapers/realsimple.py @@ -20,12 +20,26 @@ class RealSimple(AbstractScraper): return self.schema.total_time() def yields(self): - return get_yields( - self.soup.find("div", string=re.compile(r"Yield:")).parent.get_text() - ) + yield_container = self.soup.find("div", string=re.compile(r"Yield:")) + if yield_container and yield_container.parent: + return get_yields(yield_container.parent.get_text()) + + return self.schema.yields() def ingredients(self): - return self.schema.ingredients() + ingredient_elements = self.soup.findAll( + "li", {"class": "mntl-structured-ingredients__list-item"} + ) + extracted_ingredients = [ + element.get_text(strip=True, separator=" ") + for element in ingredient_elements + if element.get_text() + ] + + if extracted_ingredients: + return extracted_ingredients + else: + return self.schema.ingredients() def instructions(self): return self.schema.instructions()
hhursev/recipe-scrapers
8ced0227b3b16c532fc5ebf3060c99ee0452adab
diff --git a/tests/test_realsimple.py b/tests/test_realsimple.py index d41ff6c2..b75b1a6a 100644 --- a/tests/test_realsimple.py +++ b/tests/test_realsimple.py @@ -34,26 +34,24 @@ class TestRealSimpleScraper(ScraperTest): self.assertEqual("10 servings", self.harvester_class.yields()) def test_ingredients(self): - self.assertEqual( - [ - "Cake:", - "18 graham crackers (2 sleeves)", - "0.5 cup (1 stick) unsalted butter, melted", - "0.25 teaspoon kosher salt", - "1 cup plus 4 tablespoons sugar", - "3 8-ounce packages cream cheese, at room temperature", - "2 cups sour cream, at room temperature", - "1.5 teaspoons pure vanilla extract", - "3 large eggs, at room temperature", - "Cherry sauce:", - "1 10-ounce bag frozen cherries", - "0.5 cup sugar", - "0.25 teaspoon kosher salt", - "1 tablespoons cornstarch", - "2 tablespoons fresh lemon juice", - ], - self.harvester_class.ingredients(), - ) + expected_ingredients = [ + "Cake:", + "18 graham crackers (2 sleeves)", + "½ cup (1 stick) unsalted butter, melted", + "¼ teaspoon kosher salt", + "1 cup plus 4 tablespoons sugar", + "3 8-ounce packages cream cheese, at room temperature", + "2 cups sour cream, at room temperature", + "1 ½ teaspoons pure vanilla extract", + "3 large eggs, at room temperature", + "Cherry sauce:", + "1 10-ounce bag frozen cherries", + "½ cup sugar", + "¼ teaspoon kosher salt", + "1 tablespoons cornstarch", + "2 tablespoons fresh lemon juice", + ] + self.assertEqual(expected_ingredients, self.harvester_class.ingredients()) def test_instructions(self): self.assertEqual(
Real Simple Ingredients **Pre-filing checks** - [x] I have searched for open issues that report the same problem - [x] I have checked that the bug affects the latest version of the library **The URL of the recipe(s) that are not being scraped correctly** - https://www.realsimple.com/crispy-peanut-tofu-with-sugar-snap-peas-and-peppers-recipe-7229744 ... **The results you expect to see** Expect an ingredient list. ... **The results (including any Python error messages) that you are seeing** recipe.ingredients() ['1 cup long-grain white rice'] ... The script is only pulling the first ingredient in the recipe. I'm having a harder time figuring out the fix on this one.
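The fix scrapes the structured-ingredient list items directly instead of relying only on the schema data. A standalone sketch of that extraction — the class name comes from the patch, but the sample HTML here is illustrative only, not taken from the site:

```python
from bs4 import BeautifulSoup

html = """
<ul>
  <li class="mntl-structured-ingredients__list-item"><span>1 cup</span> <span>long-grain white rice</span></li>
  <li class="mntl-structured-ingredients__list-item"><span>2 tablespoons</span> <span>peanut butter</span></li>
</ul>
"""
soup = BeautifulSoup(html, "html.parser")
items = soup.find_all("li", {"class": "mntl-structured-ingredients__list-item"})

# strip=True trims each text fragment; separator=" " rejoins quantity and name.
print([li.get_text(strip=True, separator=" ") for li in items])
# ['1 cup long-grain white rice', '2 tablespoons peanut butter']
```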
0.0
8ced0227b3b16c532fc5ebf3060c99ee0452adab
[ "tests/test_realsimple.py::TestRealSimpleScraper::test_ingredients" ]
[ "tests/test_realsimple.py::ScraperTest::test_consistent_ingredients_lists", "tests/test_realsimple.py::TestRealSimpleScraper::test_consistent_ingredients_lists", "tests/test_realsimple.py::TestRealSimpleScraper::test_title", "tests/test_realsimple.py::TestRealSimpleScraper::test_total_time", "tests/test_realsimple.py::TestRealSimpleScraper::test_yields", "tests/test_realsimple.py::TestRealSimpleScraper::test_host", "tests/test_realsimple.py::TestRealSimpleScraper::test_canonical_url", "tests/test_realsimple.py::TestRealSimpleScraper::test_instructions", "tests/test_realsimple.py::TestRealSimpleScraper::test_image", "tests/test_realsimple.py::TestRealSimpleScraper::test_author" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2023-09-05 19:17:16+00:00
mit
2,730
hhursev__recipe-scrapers-915
diff --git a/recipe_scrapers/_utils.py b/recipe_scrapers/_utils.py index fe3a401a..064046bd 100644 --- a/recipe_scrapers/_utils.py +++ b/recipe_scrapers/_utils.py @@ -75,6 +75,9 @@ def get_minutes(element, return_zero_on_not_found=False): # noqa: C901: TODO matched = TIME_REGEX.search(time_text) + if matched is None or not any(matched.groupdict().values()): + return None + minutes = int(matched.groupdict().get("minutes") or 0) hours_matched = matched.groupdict().get("hours") days_matched = matched.groupdict().get("days") diff --git a/recipe_scrapers/nihhealthyeating.py b/recipe_scrapers/nihhealthyeating.py index 2f89803c..435b3615 100644 --- a/recipe_scrapers/nihhealthyeating.py +++ b/recipe_scrapers/nihhealthyeating.py @@ -23,7 +23,9 @@ class NIHHealthyEating(AbstractScraper): if time_table is None: raise ElementNotFoundInHtml("Table with times was not found.") - return sum([get_minutes(td) for td in time_table.find_all("td")]) + return sum( + [get_minutes(td) for td in time_table.find_all("td") if get_minutes(td)] + ) def yields(self): # This content must be present for all recipes on this website. diff --git a/recipe_scrapers/tastykitchen.py b/recipe_scrapers/tastykitchen.py index 30774625..2531a3ca 100644 --- a/recipe_scrapers/tastykitchen.py +++ b/recipe_scrapers/tastykitchen.py @@ -12,12 +12,9 @@ class TastyKitchen(AbstractScraper): return self.soup.find("h1", {"itemprop": "name"}).get_text() def total_time(self): - return sum( - [ - get_minutes(self.soup.find("time", {"itemprop": "prepTime"})), - get_minutes(self.soup.find("time", {"itemprop": "cookTime"})), - ] - ) + prep_time = get_minutes(self.soup.find("time", {"itemprop": "prepTime"})) or 0 + cook_time = get_minutes(self.soup.find("time", {"itemprop": "cookTime"})) or 0 + return prep_time + cook_time def yields(self): return get_yields(self.soup.find("span", {"itemprop": "yield"}))
hhursev/recipe-scrapers
7509c98e39ed717c187226c7b4a02a4a6be984fe
diff --git a/tests/library/test_utils.py b/tests/library/test_utils.py index 1147865c..f561d0d2 100644 --- a/tests/library/test_utils.py +++ b/tests/library/test_utils.py @@ -72,6 +72,10 @@ class TestUtils(unittest.TestCase): text = "15 to 20 minutes" self.assertEqual(20, get_minutes(text)) + def test_get_minutes_imprecise_description(self): + text = "Pá-Pum" + self.assertEqual(None, get_minutes(text)) + iso8601_fixtures = { "PT1H": 60, "PT20M": 20, diff --git a/tests/test_kitchenstories.py b/tests/test_kitchenstories.py index 56cf1eb4..73cfc9db 100644 --- a/tests/test_kitchenstories.py +++ b/tests/test_kitchenstories.py @@ -27,7 +27,7 @@ class TestKitchenStoriesScraper(ScraperTest): self.assertEqual(80, self.harvester_class.total_time()) def test_cook_time(self): - self.assertEqual(0, self.harvester_class.cook_time()) + self.assertEqual(None, self.harvester_class.cook_time()) def test_prep_time(self): self.assertEqual(20, self.harvester_class.prep_time()) diff --git a/tests/test_panelinha_2.py b/tests/test_panelinha_2.py index 21785723..4d15d811 100644 --- a/tests/test_panelinha_2.py +++ b/tests/test_panelinha_2.py @@ -16,7 +16,7 @@ class TestPanelinhaScraper(ScraperTest): self.assertEqual(self.harvester_class.author(), "Panelinha") def test_total_time(self): - self.assertEqual(0, self.harvester_class.total_time()) + self.assertEqual(None, self.harvester_class.total_time()) def test_yields(self): self.assertEqual("2 servings", self.harvester_class.yields()) diff --git a/tests/test_weightwatchers.py b/tests/test_weightwatchers.py index 0b2bc234..dddc4ad6 100644 --- a/tests/test_weightwatchers.py +++ b/tests/test_weightwatchers.py @@ -27,7 +27,7 @@ class TestWeightwatchersScraper(ScraperTest): self.assertEqual(25, self.harvester_class.total_time()) def test_cook_time(self): - self.assertEqual(0, self.harvester_class.cook_time()) + self.assertEqual(None, self.harvester_class.cook_time()) def test_prep_time(self): self.assertEqual(25, self.harvester_class.prep_time())
bug: get_minutes returns precise-zero-minutes during fallback text matching case **Pre-filing checks** - [x] I have searched for open issues that report the same problem - [x] I have checked that the bug affects the latest version of the library **The URL of the recipe(s) that are not being scraped correctly** - https://panelinha.com.br/receita/arroz-sirio-com-frango **The results you expect to see** The `total_time` should _probably_ be returned as `None` -- the recipe describes it as "Pá-Pum" ('quickly'), however that's not the same as zero-time-at-all, and so it could be misleading to return zero. I don't think we should attempt to estimate the duration, so `None` seems like the next-best alternative at the moment. **The results (including any Python error messages) that you are seeing** `total_time` returns zero for the text "Pá-Pum" in the recipe timing information (credit to @jknndy for discovering and [reporting that](https://github.com/hhursev/recipe-scrapers/pull/905/files#r1366177652) during #905).
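The guard the patch adds is easy to state on its own: if the regex finds no hour or minute groups at all, return `None` instead of a hard zero. A toy version with a deliberately simplified pattern — this is not the library's real `TIME_REGEX`:

```python
import re

# Deliberately simplified duration pattern -- illustration only.
SIMPLE_TIME = re.compile(r"(?:(?P<hours>\d+)\s*h\w*)?\s*(?:(?P<minutes>\d+)\s*m\w*)?",
                         re.IGNORECASE)

def get_minutes_or_none(text: str):
    matched = SIMPLE_TIME.search(text)
    if matched is None or not any(matched.groupdict().values()):
        return None  # nothing recognisable, e.g. "Pá-Pum"
    return int(matched["minutes"] or 0) + 60 * int(matched["hours"] or 0)

assert get_minutes_or_none("1 hour 30 minutes") == 90
assert get_minutes_or_none("Pá-Pum") is None
```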
0.0
7509c98e39ed717c187226c7b4a02a4a6be984fe
[ "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_cook_time", "tests/library/test_utils.py::TestUtils::test_get_minutes_imprecise_description", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_cook_time", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_total_time" ]
[ "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_ingredients", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_multiple_instructions", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_nutrients", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_description", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_total_time", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_category", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_canonical_url", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_instructions", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_image", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_language", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_consistent_ingredients_lists", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_title", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_ratings", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_cuisine", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_author", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_host", "tests/test_kitchenstories.py::TestKitchenStoriesScraper::test_prep_time", "tests/test_kitchenstories.py::ScraperTest::test_multiple_instructions", "tests/test_kitchenstories.py::ScraperTest::test_consistent_ingredients_lists", "tests/library/test_utils.py::TestUtils::test_list_public_methods", "tests/library/test_utils.py::TestUtils::test_get_minutes_english_abbreviation", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash_three_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash_two_thirds", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_one_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_three_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_two_thirds", "tests/library/test_utils.py::TestUtils::test_get_minutes_english_description_with_and", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_unicode_character_halves", "tests/library/test_utils.py::TestUtils::test_get_minutes_long_iso_format", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash_one_fours", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_with_fraction_digits_with_slash", "tests/library/test_utils.py::TestUtils::test_get_minutes_handles_iso8601", "tests/library/test_utils.py::TestUtils::test_get_minutes_english_description", "tests/library/test_utils.py::TestUtils::test_get_minutes_fraction_in_hours_with_dot_notation", "tests/library/test_utils.py::TestUtils::test_get_minutes_int_in_string_literal", "tests/library/test_utils.py::TestUtils::test_get_minutes_handles_dashes", "tests/library/test_utils.py::TestUtils::test_get_minutes_handles_to", "tests/library/test_utils.py::TestUtils::test_get_minutes_short_iso_format", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_nutrients", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_image", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_author", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_ingredients_count", 
"tests/test_weightwatchers.py::TestWeightwatchersScraper::test_prep_time", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_ingredients", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_title", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_consistent_ingredients_lists", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_category", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_total_time", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_instructions", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_multiple_instructions", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_difficulty", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_yields", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_host", "tests/test_weightwatchers.py::TestWeightwatchersScraper::test_description", "tests/test_weightwatchers.py::ScraperTest::test_multiple_instructions", "tests/test_weightwatchers.py::ScraperTest::test_consistent_ingredients_lists", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_yields", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_author", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_multiple_instructions", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_ingredients", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_title", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_consistent_ingredients_lists", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_host", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_instructions", "tests/test_panelinha_2.py::TestPanelinhaScraper::test_image", "tests/test_panelinha_2.py::ScraperTest::test_multiple_instructions", "tests/test_panelinha_2.py::ScraperTest::test_consistent_ingredients_lists" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-10-25 16:28:29+00:00
mit
2,731
hidrokit__hidrokit-121
diff --git a/CITATION.cff b/CITATION.cff index caf5fad..550bb8c 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -9,6 +9,6 @@ date-released: 2020-01-15 doi: 10.5281/zenodo.3276220 license: MIT message: "Jika menggunakan perangkat lunak ini, mohon sitasi menggunakan metadata ini." -repository-code: "https://github.com/taruma/hidrokit" +repository-code: "https://github.com/hidrokit/hidrokit" title: "hidrokit: Analisis Hidrologi dengan Python" -version: "0.3.5" +version: "0.3.6" diff --git a/hidrokit/__version__.py b/hidrokit/__version__.py index f033401..27e660b 100644 --- a/hidrokit/__version__.py +++ b/hidrokit/__version__.py @@ -1,4 +1,4 @@ -VERSION = (0, 3, 5) +VERSION = (0, 3, 6) DEV_STATUS = "" __version__ = '.'.join(map(str, VERSION)) + DEV_STATUS diff --git a/hidrokit/prep/timeseries.py b/hidrokit/prep/timeseries.py index fa2bb42..85e8720 100644 --- a/hidrokit/prep/timeseries.py +++ b/hidrokit/prep/timeseries.py @@ -34,7 +34,7 @@ def _timestep_single(array, index=0, timesteps=2, keep_first=True): x.append(array[start:end, index]) if not keep_first: - x.pop(-1) + x.pop(0) return np.array(x).transpose()
hidrokit/hidrokit
6f2cfd89a468405d1a6cd7e7ffe022dbc0c70951
diff --git a/tests/test_prep_timeseries.py b/tests/test_prep_timeseries.py index 95efa7b..4a69894 100644 --- a/tests/test_prep_timeseries.py +++ b/tests/test_prep_timeseries.py @@ -83,9 +83,9 @@ def test__timestep_multi(): # Argument: timesteps=4, keep_first=False array_result_3 = np.array( [ + [40, 30, 20, 10, 43, 33, 23, 13, 45, 35, 25, 15], [50, 40, 30, 20, 53, 43, 33, 23, 55, 45, 35, 25], - [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35], - [70, 60, 50, 40, 73, 63, 53, 43, 76, 65, 55, 45] + [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35] ] ) @@ -152,9 +152,9 @@ def test_timestep_table(): 'C_tmin1', 'C_tmin2', 'C_tmin3', 'C_tmin4'], data=np.array( [ + [40, 30, 20, 10, 43, 33, 23, 13, 45, 35, 25, 15], [50, 40, 30, 20, 53, 43, 33, 23, 55, 45, 35, 25], - [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35], - [70, 60, 50, 40, 73, 63, 53, 43, 76, 65, 55, 45] + [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35] ] ) )
bug: timeseries.timestep_table error when using keep_first ### System * hidrokit version: 0.4.0 * Python distribution: ``` :: python version: 3.7.7 :: numpy version: 1.18.5 :: pandas version: 1.0.5 :: matplotlib version: 3.2.2 :: hydroerr version: 1.24 :: hidrokit version: 0.4.0 :: tensorflow version: 2.1.0 :: keras version: 2.2.4-tf ``` * Operating system: Windows 10 ### Problem Summary The output when using `keep_first=False` is not as expected __when using True__ ![image](https://user-images.githubusercontent.com/1007910/87144619-3ec15600-c2d2-11ea-99d8-b8398540008f.png) __when using False__ ![image](https://user-images.githubusercontent.com/1007910/87144713-66b0b980-c2d2-11ea-8c71-9aa4ad7c8c2b.png) The value of `ch_A_tmin1` should be `4.18994` __first 8 rows of the table__ ![image](https://user-images.githubusercontent.com/1007910/87144787-8811a580-c2d2-11ea-8644-d4cd640b9430.png)
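The patch, test changes, and bug report above all describe the same off-by-one: when `keep_first=False`, the lag-0 (current timestep) column must be dropped, but `x.pop(-1)` dropped the deepest lag instead, so every remaining column carried the wrong label (e.g. `ch_A_tmin1` showing the unlagged value). The body of `_timestep_single` is not fully visible in the diff, so the following is a minimal reconstruction for illustration; the loop bounds are assumptions inferred from the corrected test expectations, and only the `pop` call is taken verbatim from the patch.

```python
import numpy as np

def timestep_single_sketch(array, index=0, timesteps=2, keep_first=True):
    """Illustrative reconstruction of _timestep_single.

    Hypothetical code: the slice bounds are inferred from the test
    patch above, not copied from the project's actual implementation.
    """
    n = array.shape[0]
    x = []
    # Lag 0 (the current timestep) is appended first; deeper lags follow.
    for lag in range(timesteps + 1):
        start = timesteps - lag
        end = n - lag
        x.append(array[start:end, index])
    if not keep_first:
        x.pop(0)  # drop the lag-0 column; the buggy pop(-1) dropped the deepest lag
    return np.array(x).transpose()

column_a = np.arange(10, 80, 10).reshape(-1, 1)  # values 10..70
print(timestep_single_sketch(column_a, timesteps=4, keep_first=False))
# [[40 30 20 10]
#  [50 40 30 20]
#  [60 50 40 30]]   <- matches the corrected expectations in the test patch
```

Running the same sketch with the buggy `x.pop(-1)` yields rows starting at `[50, 40, 30, 20]`, exactly the old, incorrect test expectation.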
0.0
6f2cfd89a468405d1a6cd7e7ffe022dbc0c70951
[ "tests/test_prep_timeseries.py::test__timestep_multi", "tests/test_prep_timeseries.py::test_timestep_table" ]
[ "tests/test_prep_timeseries.py::test__timestep_single" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-07-12 20:01:31+00:00
mit
2,732
hidrokit__hidrokit-122
diff --git a/CITATION.cff b/CITATION.cff index caf5fad..550bb8c 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -9,6 +9,6 @@ date-released: 2020-01-15 doi: 10.5281/zenodo.3276220 license: MIT message: "Jika menggunakan perangkat lunak ini, mohon sitasi menggunakan metadata ini." -repository-code: "https://github.com/taruma/hidrokit" +repository-code: "https://github.com/hidrokit/hidrokit" title: "hidrokit: Analisis Hidrologi dengan Python" -version: "0.3.5" +version: "0.3.6" diff --git a/hidrokit/__version__.py b/hidrokit/__version__.py index f033401..27e660b 100644 --- a/hidrokit/__version__.py +++ b/hidrokit/__version__.py @@ -1,4 +1,4 @@ -VERSION = (0, 3, 5) +VERSION = (0, 3, 6) DEV_STATUS = "" __version__ = '.'.join(map(str, VERSION)) + DEV_STATUS diff --git a/hidrokit/prep/timeseries.py b/hidrokit/prep/timeseries.py index fa2bb42..85e8720 100644 --- a/hidrokit/prep/timeseries.py +++ b/hidrokit/prep/timeseries.py @@ -34,7 +34,7 @@ def _timestep_single(array, index=0, timesteps=2, keep_first=True): x.append(array[start:end, index]) if not keep_first: - x.pop(-1) + x.pop(0) return np.array(x).transpose()
hidrokit/hidrokit
504e283604ce2c726cbb9bce0af5c41fe233f4e5
diff --git a/tests/test_prep_timeseries.py b/tests/test_prep_timeseries.py index 95efa7b..4a69894 100644 --- a/tests/test_prep_timeseries.py +++ b/tests/test_prep_timeseries.py @@ -83,9 +83,9 @@ def test__timestep_multi(): # Argument: timesteps=4, keep_first=False array_result_3 = np.array( [ + [40, 30, 20, 10, 43, 33, 23, 13, 45, 35, 25, 15], [50, 40, 30, 20, 53, 43, 33, 23, 55, 45, 35, 25], - [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35], - [70, 60, 50, 40, 73, 63, 53, 43, 76, 65, 55, 45] + [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35] ] ) @@ -152,9 +152,9 @@ def test_timestep_table(): 'C_tmin1', 'C_tmin2', 'C_tmin3', 'C_tmin4'], data=np.array( [ + [40, 30, 20, 10, 43, 33, 23, 13, 45, 35, 25, 15], [50, 40, 30, 20, 53, 43, 33, 23, 55, 45, 35, 25], - [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35], - [70, 60, 50, 40, 73, 63, 53, 43, 76, 65, 55, 45] + [60, 50, 40, 30, 63, 53, 43, 33, 65, 55, 45, 35] ] ) )
bug: timeseries.timestep_table error when using keep_first ### System * hidrokit version: 0.4.0 * Python distribution: ``` :: python version: 3.7.7 :: numpy version: 1.18.5 :: pandas version: 1.0.5 :: matplotlib version: 3.2.2 :: hydroerr version: 1.24 :: hidrokit version: 0.4.0 :: tensorflow version: 2.1.0 :: keras version: 2.2.4-tf ``` * Operating system: Windows 10 ### Problem Summary The output when using `keep_first=False` is not as expected __when using True__ ![image](https://user-images.githubusercontent.com/1007910/87144619-3ec15600-c2d2-11ea-99d8-b8398540008f.png) __when using False__ ![image](https://user-images.githubusercontent.com/1007910/87144713-66b0b980-c2d2-11ea-8c71-9aa4ad7c8c2b.png) The value of `ch_A_tmin1` should be `4.18994` __first 8 rows of the table__ ![image](https://user-images.githubusercontent.com/1007910/87144787-8811a580-c2d2-11ea-8644-d4cd640b9430.png)
0.0
504e283604ce2c726cbb9bce0af5c41fe233f4e5
[ "tests/test_prep_timeseries.py::test__timestep_multi", "tests/test_prep_timeseries.py::test_timestep_table" ]
[ "tests/test_prep_timeseries.py::test__timestep_single" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-07-12 22:11:52+00:00
mit
2,733
hkraal__ssht-11
diff --git a/ssht/plugins.py b/ssht/plugins.py index e321935..0edfb7a 100644 --- a/ssht/plugins.py +++ b/ssht/plugins.py @@ -34,6 +34,13 @@ class Host(object): return '{0}@{1}'.format(self.user, self.hostname) return self.hostname + def match(self, needle): + for search_field in ['hostname', 'ipv4', 'ipv6']: + value = getattr(self, search_field, None) + if value is not None and needle in value: + return True + return False + def __repr__(self): if self.ipv4 is not None: return '<Host: hostname={0}, ipv4={1}>'.format( @@ -54,8 +61,15 @@ class Parser(object): def get_files(self, ext='.json'): return [x for x in os.listdir(self._path) if x.endswith(ext)] - def search(self, name): - return [x for x in self._hosts if name in x.hostname] + def search(self, needle): + ''' + Return Host objects which have properties matching the needle + ''' + results = [] + for host in self._hosts: + if host.match(needle): + results.append(host) + return results class JsonParser(Parser): # pragma: nocover
hkraal/ssht
7cf175ee2e5944dbe28046cf2dde8391555f7ca5
diff --git a/tests/test_parser.py b/tests/test_parser.py index 315a4ee..6409173 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -15,13 +15,27 @@ class TestParser: assert parser.get_files('.mysql') == ['test.mysql'] assert parser.get_files('.fake') == [] - def test_search(self): + def test_search_hostname(self): parser = Parser('/tmp') parser._hosts = [Host('host01.example.com'), Host('host02.example.com')] assert parser.search('host01')[0].hostname == 'host01.example.com' assert len(parser.search('example.com')) == 2 + def test_search_ipv4(self): + parser = Parser('/tmp') + parser._hosts = [Host('host01.example.com', ipv4='192.168.0.1'), + Host('host02.example.com', ipv4='192.168.0.2')] + assert parser.search('192.168.0.1')[0].hostname == 'host01.example.com' + assert len(parser.search('192.168.0')) == 2 + + def test_search_ipv6(self): + parser = Parser('/tmp') + parser._hosts = [Host('host01.example.com', ipv6='fe80::41:dead:beef:cafe'), + Host('host02.example.com', ipv6='fe80::41:dead:beef:daff')] + assert parser.search('dead:beef:cafe')[0].hostname == 'host01.example.com' + assert len(parser.search('dead:beef:')) == 2 + class TestJsonParser: pass diff --git a/tests/test_ssht.py b/tests/test_ssht.py index 3292f48..28f8e76 100644 --- a/tests/test_ssht.py +++ b/tests/test_ssht.py @@ -90,8 +90,8 @@ class TestConnect: class TestSsht(): def test_select_host(self, mocker): - hosts = [Host(hostname='host01.example.com'), - Host(hostname='host02.example.com', user='admin')] + hosts = [Host(hostname='host01.example.com', ipv4='127.0.0.1'), + Host(hostname='host02.example.com', ipv4='127.0.0.2', user='admin')] mocker.patch('ssht.ssht.get_answer', return_value='1') assert select_host(hosts).hostname == 'host01.example.com' @@ -100,7 +100,7 @@ class TestSsht(): mocker.patch('ssht.ssht.get_answer', return_value='2') assert select_host(hosts).hostname == 'host02.example.com' assert select_host(hosts).user == 'admin' - + mocker.patch('ssht.ssht.get_answer', return_value='') assert select_host(hosts) is None
Use more fields for searching Searching based on IP does not yield any result. Although `ssht <ip>` is not very useful in itself, it should be possible since: - a user might be tempted to use `ssht` instead of `ssh` - a user might want to get feedback about the server they're connecting to
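To make the change concrete, here is a hypothetical usage sketch of the patched `Parser.search`; the `Host` keyword arguments mirror the test patch above, while the hostnames and addresses are invented for illustration.

```python
# Hypothetical usage of the patched classes; the hosts below are made up.
from ssht.plugins import Host, Parser

parser = Parser('/tmp')
parser._hosts = [
    Host('web01.example.com', ipv4='192.168.0.1'),
    Host('db01.example.com', ipv6='fe80::41:dead:beef:cafe'),
]

# Host.match checks hostname, ipv4, and ipv6, so a substring search
# against any of the three fields now returns results.
assert parser.search('web01')[0].hostname == 'web01.example.com'
assert parser.search('192.168.0.1')[0].hostname == 'web01.example.com'
assert parser.search('dead:beef')[0].hostname == 'db01.example.com'
assert parser.search('no-such-host') == []
```

Keeping the per-field logic in `Host.match` rather than inline in `Parser.search` also means any future searchable field only needs to be added to one list.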
0.0
7cf175ee2e5944dbe28046cf2dde8391555f7ca5
[ "tests/test_parser.py::TestParser::test_search_ipv4", "tests/test_parser.py::TestParser::test_search_ipv6" ]
[ "tests/test_parser.py::TestParser::test_get_files", "tests/test_parser.py::TestParser::test_search_hostname", "tests/test_ssht.py::TestConnect::test_hostname", "tests/test_ssht.py::TestConnect::test_default_ipv4", "tests/test_ssht.py::TestConnect::test_port", "tests/test_ssht.py::TestConnect::test_ipv4_", "tests/test_ssht.py::TestConnect::test_ipv6", "tests/test_ssht.py::TestConnect::test_user", "tests/test_ssht.py::TestSsht::test_select_host", "tests/test_ssht.py::TestSsht::test_get_log_level_default", "tests/test_ssht.py::TestSsht::test_get_log_level_debug" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2017-03-22 13:15:38+00:00
mit
2,734