Dataset columns (name, type, and observed min/max):

    instance_id               string   length 10 – 57
    patch                     string   length 261 – 37.7k
    repo                      string   length 7 – 53
    base_commit               string   length 40 – 40
    hints_text                string   301 distinct values
    test_patch                string   length 212 – 2.22M
    problem_statement         string   length 23 – 37.7k
    version                   string   1 distinct value
    environment_setup_commit  string   length 40 – 40
    FAIL_TO_PASS              list     length 1 – 4.94k
    PASS_TO_PASS              list     length 0 – 7.82k
    meta                      dict
    created_at                string   length 25 – 25
    license                   string   8 distinct values
    __index_level_0__         int64    range 0 – 6.41k

The rows below give each instance's fields in this column order (hints_text
does not appear in the rows below).
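As a reading aid, here is a minimal sketch of loading a dataset with this schema via the `datasets` library; the Hub path "org/dataset-name" is a placeholder, not the real name of this dataset:

```python
# Minimal sketch: load and inspect one instance of a dataset with this schema.
# "org/dataset-name" is a placeholder; substitute the real Hub path.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")
row = ds[0]

print(row["instance_id"])               # e.g. "beetbox__beets-5034"
print(row["repo"], row["base_commit"])  # repo and commit to check out
print(len(row["FAIL_TO_PASS"]))         # tests the gold patch must make pass
```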
beetbox__beets-5034
diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py index 5e445613..abd7aa0a 100644 --- a/beets/ui/__init__.py +++ b/beets/ui/__init__.py @@ -591,7 +591,7 @@ def colorize(color_name, text): """Colorize text if colored output is enabled. (Like _colorize but conditional.) """ - if config["ui"]["color"]: + if config["ui"]["color"] and "NO_COLOR" not in os.environ: global COLORS if not COLORS: # Read all color configurations and set global variable COLORS. diff --git a/beets/ui/commands.py b/beets/ui/commands.py index 63f25fca..26eb5320 100755 --- a/beets/ui/commands.py +++ b/beets/ui/commands.py @@ -1506,6 +1506,20 @@ import_cmd.parser.add_option( action="store_false", help="do not skip already-imported directories", ) +import_cmd.parser.add_option( + "-R", + "--incremental-skip-later", + action="store_true", + dest="incremental_skip_later", + help="do not record skipped files during incremental import", +) +import_cmd.parser.add_option( + "-r", + "--noincremental-skip-later", + action="store_false", + dest="incremental_skip_later", + help="record skipped files during incremental import", +) import_cmd.parser.add_option( "--from-scratch", dest="from_scratch", diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py index 6e20cc21..c892a604 100644 --- a/beetsplug/smartplaylist.py +++ b/beetsplug/smartplaylist.py @@ -49,6 +49,7 @@ class SmartPlaylistPlugin(BeetsPlugin): "prefix": "", "urlencode": False, "pretend_paths": False, + "extm3u": False, } ) @@ -71,6 +72,17 @@ class SmartPlaylistPlugin(BeetsPlugin): action="store_true", help="display query results but don't write playlist files.", ) + spl_update.parser.add_option( + "--extm3u", + action="store_true", + help="add artist/title as m3u8 comments to playlists.", + ) + spl_update.parser.add_option( + "--no-extm3u", + action="store_false", + dest="extm3u", + help="do not add artist/title as extm3u comments to playlists.", + ) spl_update.func = self.update_cmd return [spl_update] @@ -99,7 +111,7 @@ class SmartPlaylistPlugin(BeetsPlugin): else: self._matched_playlists = self._unmatched_playlists - self.update_playlists(lib, opts.pretend) + self.update_playlists(lib, opts.extm3u, opts.pretend) def build_queries(self): """ @@ -185,7 +197,7 @@ class SmartPlaylistPlugin(BeetsPlugin): self._unmatched_playlists -= self._matched_playlists - def update_playlists(self, lib, pretend=False): + def update_playlists(self, lib, extm3u=None, pretend=False): if pretend: self._log.info( "Showing query results for {0} smart playlists...", @@ -230,7 +242,7 @@ class SmartPlaylistPlugin(BeetsPlugin): if relative_to: item_path = os.path.relpath(item.path, relative_to) if item_path not in m3us[m3u_name]: - m3us[m3u_name].append(item_path) + m3us[m3u_name].append({"item": item, "path": item_path}) if pretend and self.config["pretend_paths"]: print(displayable_path(item_path)) elif pretend: @@ -244,13 +256,23 @@ class SmartPlaylistPlugin(BeetsPlugin): os.path.join(playlist_dir, bytestring_path(m3u)) ) mkdirall(m3u_path) + extm3u = extm3u is None and self.config["extm3u"] or extm3u with open(syspath(m3u_path), "wb") as f: - for path in m3us[m3u]: + if extm3u: + f.write(b"#EXTM3U\n") + for entry in m3us[m3u]: + path = entry["path"] + item = entry["item"] if self.config["forward_slash"].get(): path = path_as_posix(path) if self.config["urlencode"]: path = bytestring_path(pathname2url(path)) - f.write(prefix + path + b"\n") + comment = "" + if extm3u: + comment = "#EXTINF:{},{} - {}\n".format( + int(item.length), item.artist, item.title + ) + 
f.write(comment.encode("utf-8") + prefix + path + b"\n") # Send an event when playlists were updated. send_event("smartplaylist_update") diff --git a/docs/changelog.rst b/docs/changelog.rst index 23342329..d7530379 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -13,7 +13,7 @@ Major new features: * The beets importer UI received a major overhaul. Several new configuration options are available for customizing layout and colors: :ref:`ui_options`. - :bug:`3721` + :bug:`3721` :bug:`5028` New features: @@ -147,6 +147,8 @@ New features: * :doc:`/plugins/lyrics`: Add LRCLIB as a new lyrics provider and a new `synced` option to prefer synced lyrics over plain lyrics. * :ref:`import-cmd`: Expose import.quiet_fallback as CLI option. +* :ref:`import-cmd`: Expose `import.incremental_skip_later` as CLI option. +* :doc:`/plugins/smartplaylist`: Add new config option `smartplaylist.extm3u`. Bug fixes: @@ -267,8 +269,10 @@ Bug fixes: :bug:`4822` * Fix bug where an interrupted import process poisons the database, causing a null path that can't be removed. -* Fix bug where empty artist and title fields would return None instead of an - empty list in the discord plugin. :bug:`4973` + :bug:`4906` +* :doc:`/plugins/discogs`: Fix bug where empty artist and title fields would + return None instead of an empty list. + :bug:`4973` * Fix bug regarding displaying tracks that have been changed not being displayed unless the detail configuration is enabled. diff --git a/docs/plugins/smartplaylist.rst b/docs/plugins/smartplaylist.rst index e687a68a..6a78124e 100644 --- a/docs/plugins/smartplaylist.rst +++ b/docs/plugins/smartplaylist.rst @@ -118,3 +118,4 @@ other configuration options are: - **urlencoded**: URL-encode all paths. Default: ``no``. - **pretend_paths**: When running with ``--pretend``, show the actual file paths that will be written to the m3u file. Default: ``false``. +- **extm3u**: Generate extm3u/m3u8 playlists. Default ``no``. diff --git a/docs/reference/cli.rst b/docs/reference/cli.rst index a2997c70..8caf7076 100644 --- a/docs/reference/cli.rst +++ b/docs/reference/cli.rst @@ -115,6 +115,15 @@ Optional command flags: time, when no subdirectories will be skipped. So consider enabling the ``incremental`` configuration option. +* If you don't want to record skipped files during an *incremental* import, use + the ``--incremental-skip-later`` flag which corresponds to the + ``incremental_skip_later`` configuration option. + Setting the flag prevents beets from persisting skip decisions during a + non-interactive import so that a user can make a decision regarding + previously skipped files during a subsequent interactive import run. + To record skipped files during incremental import explicitly, use the + ``--noincremental-skip-later`` option. + * When beets applies metadata to your music, it will retain the value of any existing tags that weren't overwritten, and import them into the database. You may prefer to only use existing metadata for finding matches, and to erase it
beetbox/beets
e5d10004ae08bcbbaa4ee1397a4d889e8b3b52de
diff --git a/test/plugins/test_smartplaylist.py b/test/plugins/test_smartplaylist.py index a3a03b54..f3660126 100644 --- a/test/plugins/test_smartplaylist.py +++ b/test/plugins/test_smartplaylist.py @@ -19,7 +19,7 @@ from shutil import rmtree from tempfile import mkdtemp from test import _common from test.helper import TestHelper -from unittest.mock import MagicMock, Mock +from unittest.mock import MagicMock, Mock, PropertyMock from beets import config from beets.dbcore import OrQuery @@ -191,6 +191,56 @@ class SmartPlaylistTest(_common.TestCase): self.assertEqual(content, b"/tagada.mp3\n") + def test_playlist_update_extm3u(self): + spl = SmartPlaylistPlugin() + + i = MagicMock() + type(i).artist = PropertyMock(return_value="fake artist") + type(i).title = PropertyMock(return_value="fake title") + type(i).length = PropertyMock(return_value=300.123) + type(i).path = PropertyMock(return_value=b"/tagada.mp3") + i.evaluate_template.side_effect = lambda pl, _: pl.replace( + b"$title", + b"ta:ga:da", + ).decode() + + lib = Mock() + lib.replacements = CHAR_REPLACE + lib.items.return_value = [i] + lib.albums.return_value = [] + + q = Mock() + a_q = Mock() + pl = b"$title-my<playlist>.m3u", (q, None), (a_q, None) + spl._matched_playlists = [pl] + + dir = bytestring_path(mkdtemp()) + config["smartplaylist"]["extm3u"] = True + config["smartplaylist"]["prefix"] = "http://beets:8337/files" + config["smartplaylist"]["relative_to"] = False + config["smartplaylist"]["playlist_dir"] = py3_path(dir) + try: + spl.update_playlists(lib) + except Exception: + rmtree(syspath(dir)) + raise + + lib.items.assert_called_once_with(q, None) + lib.albums.assert_called_once_with(a_q, None) + + m3u_filepath = path.join(dir, b"ta_ga_da-my_playlist_.m3u") + self.assertExists(m3u_filepath) + with open(syspath(m3u_filepath), "rb") as f: + content = f.read() + rmtree(syspath(dir)) + + self.assertEqual( + content, + b"#EXTM3U\n" + + b"#EXTINF:300,fake artist - fake title\n" + + b"http://beets:8337/files/tagada.mp3\n", + ) + class SmartPlaylistCLITest(_common.TestCase, TestHelper): def setUp(self):
Bring back NO_COLOR support? Back in 2019 I added support for the [NO_COLOR environment variable](https://no-color.org/). This was removed in be290e54441987fdb80437f7982f4d5ad01bac59 as part of a refactor. Unless I'm mistaken it seems there's no way to disable colors from the command line anymore. It must be done in the config file. That's really inconvenient. What was the reason for removing `NO_COLOR` support? Could it be brought back?
0.0
e5d10004ae08bcbbaa4ee1397a4d889e8b3b52de
[ "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_playlist_update_extm3u" ]
[ "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_build_queries", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_build_queries_with_sorts", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_db_changes", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_matches", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_playlist_update", "test/plugins/test_smartplaylist.py::SmartPlaylistCLITest::test_splupdate" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_git_commit_hash", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-12-07 09:29:31+00:00
mit
1,322
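The gold patch in this instance teaches smartplaylist to emit extended M3U: a `#EXTM3U` header plus a `#EXTINF:<seconds>,<artist> - <title>` comment before each path, which is exactly what the new test asserts. A standalone sketch of that output format follows; the `Track` class is an illustrative stand-in for a beets `Item`, not beets API:

```python
from dataclasses import dataclass

@dataclass
class Track:
    # Illustrative stand-in for a beets Item; only the fields EXTINF needs.
    artist: str
    title: str
    length: float  # duration in seconds
    path: str

def write_extm3u(tracks, out_path):
    """Write an extended M3U playlist in the shape the patched plugin produces."""
    with open(out_path, "w", encoding="utf-8") as f:
        f.write("#EXTM3U\n")
        for t in tracks:
            # EXTINF carries the whole-second runtime, then "artist - title".
            f.write(f"#EXTINF:{int(t.length)},{t.artist} - {t.title}\n")
            f.write(t.path + "\n")

# Mirrors the test fixture: 300.123 s is truncated to 300 in the comment.
write_extm3u([Track("fake artist", "fake title", 300.123, "/tagada.mp3")],
             "playlist.m3u")
```

Note also the patch's `extm3u = extm3u is None and self.config["extm3u"] or extm3u` line: the old and/or-as-ternary idiom, used here to fall back to the config value when no CLI flag was given.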
beetbox__beets-5050
diff --git a/beets/plugins.py b/beets/plugins.py index 270da975..e1ac7d61 100644 --- a/beets/plugins.py +++ b/beets/plugins.py @@ -444,14 +444,29 @@ def import_stages(): # New-style (lazy) plugin-provided fields. +def _check_conflicts_and_merge(plugin, plugin_funcs, funcs): + """Check the provided template functions for conflicts and merge into funcs. + + Raises a `PluginConflictException` if a plugin defines template functions + for fields that another plugin has already defined template functions for. + """ + if plugin_funcs: + if not plugin_funcs.keys().isdisjoint(funcs.keys()): + conflicted_fields = ", ".join(plugin_funcs.keys() & funcs.keys()) + raise PluginConflictException( + f"Plugin {plugin.name} defines template functions for " + f"{conflicted_fields} that conflict with another plugin." + ) + funcs.update(plugin_funcs) + + def item_field_getters(): """Get a dictionary mapping field names to unary functions that compute the field's value. """ funcs = {} for plugin in find_plugins(): - if plugin.template_fields: - funcs.update(plugin.template_fields) + _check_conflicts_and_merge(plugin, plugin.template_fields, funcs) return funcs @@ -459,8 +474,7 @@ def album_field_getters(): """As above, for album fields.""" funcs = {} for plugin in find_plugins(): - if plugin.album_template_fields: - funcs.update(plugin.album_template_fields) + _check_conflicts_and_merge(plugin, plugin.album_template_fields, funcs) return funcs diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py index 120361d3..12a1c921 100644 --- a/beetsplug/smartplaylist.py +++ b/beetsplug/smartplaylist.py @@ -45,6 +45,7 @@ class SmartPlaylistPlugin(BeetsPlugin): "playlist_dir": ".", "auto": True, "playlists": [], + "uri_format": None, "forward_slash": False, "prefix": "", "urlencode": False, @@ -109,6 +110,12 @@ class SmartPlaylistPlugin(BeetsPlugin): action="store_true", help="URL-encode all paths.", ) + spl_update.parser.add_option( + "--uri-format", + dest="uri_format", + type="string", + help="playlist item URI template, e.g. 
http://beets:8337/item/$id/file.", + ) spl_update.parser.add_option( "--output", type="string", @@ -247,6 +254,8 @@ class SmartPlaylistPlugin(BeetsPlugin): playlist_dir = self.config["playlist_dir"].as_filename() playlist_dir = bytestring_path(playlist_dir) + tpl = self.config["uri_format"].get() + prefix = bytestring_path(self.config["prefix"].as_str()) relative_to = self.config["relative_to"].get() if relative_to: relative_to = normpath(relative_to) @@ -275,18 +284,26 @@ class SmartPlaylistPlugin(BeetsPlugin): m3u_name = sanitize_path(m3u_name, lib.replacements) if m3u_name not in m3us: m3us[m3u_name] = [] - item_path = item.path - if relative_to: - item_path = os.path.relpath(item.path, relative_to) - if item_path not in m3us[m3u_name]: - m3us[m3u_name].append({"item": item, "path": item_path}) + item_uri = item.path + if tpl: + item_uri = tpl.replace("$id", str(item.id)).encode("utf-8") + else: + if relative_to: + item_uri = os.path.relpath(item_uri, relative_to) + if self.config["forward_slash"].get(): + item_uri = path_as_posix(item_uri) + if self.config["urlencode"]: + item_uri = bytestring_path(pathname2url(item_uri)) + item_uri = prefix + item_uri + + if item_uri not in m3us[m3u_name]: + m3us[m3u_name].append({"item": item, "uri": item_uri}) if pretend and self.config["pretend_paths"]: - print(displayable_path(item_path)) + print(displayable_path(item_uri)) elif pretend: print(item) if not pretend: - prefix = bytestring_path(self.config["prefix"].as_str()) # Write all of the accumulated track lists to files. for m3u in m3us: m3u_path = normpath( @@ -303,18 +320,13 @@ class SmartPlaylistPlugin(BeetsPlugin): if m3u8: f.write(b"#EXTM3U\n") for entry in m3us[m3u]: - path = entry["path"] item = entry["item"] - if self.config["forward_slash"].get(): - path = path_as_posix(path) - if self.config["urlencode"]: - path = bytestring_path(pathname2url(path)) comment = "" if m3u8: comment = "#EXTINF:{},{} - {}\n".format( int(item.length), item.artist, item.title ) - f.write(comment.encode("utf-8") + prefix + path + b"\n") + f.write(comment.encode("utf-8") + entry["uri"] + b"\n") # Send an event when playlists were updated. send_event("smartplaylist_update") diff --git a/docs/changelog.rst b/docs/changelog.rst index c88a1009..86dc97b0 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -148,8 +148,9 @@ New features: `synced` option to prefer synced lyrics over plain lyrics. * :ref:`import-cmd`: Expose import.quiet_fallback as CLI option. * :ref:`import-cmd`: Expose `import.incremental_skip_later` as CLI option. -* :doc:`/plugins/smartplaylist`: Add new config option `smartplaylist.output`. * :doc:`/plugins/smartplaylist`: Expose config options as CLI options. +* :doc:`/plugins/smartplaylist`: Add new option `smartplaylist.output`. +* :doc:`/plugins/smartplaylist`: Add new option `smartplaylist.uri_format`. Bug fixes: @@ -277,6 +278,15 @@ Bug fixes: * Fix bug regarding displaying tracks that have been changed not being displayed unless the detail configuration is enabled. +For plugin developers: + +* beets now explicitly prevents multiple plugins to define replacement + functions for the same field. When previously defining `template_fields` + for the same field in two plugins, the last loaded plugin would silently + overwrite the function defined by the other plugin. + Now, beets will raise an exception when this happens. + :bug:`5002` + For packagers: * As noted above, the minimum Python version is now 3.7. 
diff --git a/docs/plugins/smartplaylist.rst b/docs/plugins/smartplaylist.rst index 365b5af3..a40d1888 100644 --- a/docs/plugins/smartplaylist.rst +++ b/docs/plugins/smartplaylist.rst @@ -118,9 +118,13 @@ other configuration options are: - **urlencode**: URL-encode all paths. Default: ``no``. - **pretend_paths**: When running with ``--pretend``, show the actual file paths that will be written to the m3u file. Default: ``false``. +- **uri_format**: Template with an ``$id`` placeholder used generate a + playlist item URI, e.g. ``http://beets:8337/item/$id/file``. + When this option is specified, the local path-related options ``prefix``, + ``relative_to``, ``forward_slash`` and ``urlencode`` are ignored. - **output**: Specify the playlist format: m3u|m3u8. Default ``m3u``. For many configuration options, there is a corresponding CLI option, e.g. ``--playlist-dir``, ``--relative-to``, ``--prefix``, ``--forward-slash``, -``--urlencode``, ``--output``, ``--pretend-paths``. +``--urlencode``, ``--uri-format``, ``--output``, ``--pretend-paths``. CLI options take precedence over those specified within the configuration file.
beetbox/beets
adf4b9779ac465e35d6ee2d9c9db4c81eec319cf
diff --git a/test/plugins/test_smartplaylist.py b/test/plugins/test_smartplaylist.py index 96eac625..921ae815 100644 --- a/test/plugins/test_smartplaylist.py +++ b/test/plugins/test_smartplaylist.py @@ -241,6 +241,51 @@ class SmartPlaylistTest(_common.TestCase): + b"http://beets:8337/files/tagada.mp3\n", ) + def test_playlist_update_uri_format(self): + spl = SmartPlaylistPlugin() + + i = MagicMock() + type(i).id = PropertyMock(return_value=3) + type(i).path = PropertyMock(return_value=b"/tagada.mp3") + i.evaluate_template.side_effect = lambda pl, _: pl.replace( + b"$title", b"ta:ga:da" + ).decode() + + lib = Mock() + lib.replacements = CHAR_REPLACE + lib.items.return_value = [i] + lib.albums.return_value = [] + + q = Mock() + a_q = Mock() + pl = b"$title-my<playlist>.m3u", (q, None), (a_q, None) + spl._matched_playlists = [pl] + + dir = bytestring_path(mkdtemp()) + tpl = "http://beets:8337/item/$id/file" + config["smartplaylist"]["uri_format"] = tpl + config["smartplaylist"]["playlist_dir"] = py3_path(dir) + # The following options should be ignored when uri_format is set + config["smartplaylist"]["relative_to"] = "/data" + config["smartplaylist"]["prefix"] = "/prefix" + config["smartplaylist"]["urlencode"] = True + try: + spl.update_playlists(lib) + except Exception: + rmtree(syspath(dir)) + raise + + lib.items.assert_called_once_with(q, None) + lib.albums.assert_called_once_with(a_q, None) + + m3u_filepath = path.join(dir, b"ta_ga_da-my_playlist_.m3u") + self.assertExists(m3u_filepath) + with open(syspath(m3u_filepath), "rb") as f: + content = f.read() + rmtree(syspath(dir)) + + self.assertEqual(content, b"http://beets:8337/item/3/file\n") + class SmartPlaylistCLITest(_common.TestCase, TestHelper): def setUp(self):
beetsplug: only one plugin can set `template_fields` per field ### Problem I ran into this when using the new `advancedrewrite` plugin on my production system together with the old `rewrite` plugin. When using these two (or any two other plugins modifying `BeetsPlugin.template_fields`) together and specifying rules that both target the same field (e.g., `artist`), only the rules of one plugin concerning this field will be applied. Even worse, which plugin will be used depends on the plugin loading order (the one loaded last will be used), which seems to be random-ish: in the importer, the `rewrite` plugin is loaded last, when just invoking `beet`, the `advancedrewrite` plugin is loaded last. The rules for that specific field from the previously loaded plugin(s) are silently dropped. ### Investigation I've pinned this bug down to the following line: [`beets.plugins.item_field_getters`](https://github.com/beetbox/beets/blob/36454a3883c9e5c656805152d2220e9496a7455c/beets/plugins.py#L454). The reason this breaks is that the `update` function on the `dict` overwrites previous getters, instead of merging them somehow. Of course, the problem lies in the fact that rewrites can generally affect each other and do thus need a deterministic way of applying them. ### Suggested solution (implemented in #5003) I suggest addressing this issue the following way: * Rework the getter functions applied to `template_fields` or `album_template_fields` so that they either return a replacement value **or `None`** (possibly breaking change for 3rd-party plugins). * Merge getter functions into a dict with lists of functions in `plugins.item_field_getters`. * When evaluating the getters in `beets.dbcore.Model._get`, the **list is iterated and the value of the first value that isn't `None` is used**. * To make sure the value that's used is actually deterministic, the getters can be sorted by the class names of the plugin providing them (alphabetically). In my opinion, that's not ideal, but it's at least consistent. ### Setup * OS: Ubuntu Server 22.04 * Python version: 3.10 * beets version: 1.6.1-master (2023-11-23) * Turning off plugins made problem go away: not applicable, bug specifically affects plugin handling
0.0
adf4b9779ac465e35d6ee2d9c9db4c81eec319cf
[ "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_playlist_update_uri_format" ]
[ "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_build_queries", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_build_queries_with_sorts", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_db_changes", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_matches", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_playlist_update", "test/plugins/test_smartplaylist.py::SmartPlaylistTest::test_playlist_update_output_m3u8", "test/plugins/test_smartplaylist.py::SmartPlaylistCLITest::test_splupdate" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-12-15 15:14:00+00:00
mit
1,323
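The plugins.py change in this instance rejects two plugins defining template functions for the same field by intersecting dict key views, which support set operations. A self-contained sketch of that idiom; the plugin and field names here are made up:

```python
def merge_field_getters(merged, plugin_name, plugin_funcs):
    """Merge one plugin's field getters into `merged`, refusing silent overwrites."""
    overlap = merged.keys() & plugin_funcs.keys()  # dict views act like sets
    if overlap:
        raise RuntimeError(
            f"plugin {plugin_name} redefines template functions for: "
            + ", ".join(sorted(overlap))
        )
    merged.update(plugin_funcs)

funcs = {}
merge_field_getters(funcs, "rewrite", {"artist": lambda item: item.artist.title()})
# A second plugin claiming "artist" now raises instead of silently winning
# based on load order:
# merge_field_getters(funcs, "advancedrewrite", {"artist": lambda item: "X"})
```

Because the check runs on every call to `item_field_getters()`, the conflict surfaces deterministically, regardless of plugin load order.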
beetbox__confuse-78
diff --git a/confuse.py b/confuse.py index 8d5d875..5ec3936 100644 --- a/confuse.py +++ b/confuse.py @@ -1681,6 +1681,8 @@ def as_template(value): return OneOf(value) elif value is float: return Number() + elif isinstance(value, float): + return Number(value) elif value is None: return Template() elif value is dict:
beetbox/confuse
5528e7ae70850be07ecaca25826c972c195ee90b
diff --git a/test/test_valid.py b/test/test_valid.py index f1452f3..f5a520f 100644 --- a/test/test_valid.py +++ b/test/test_valid.py @@ -163,6 +163,11 @@ class AsTemplateTest(unittest.TestCase): self.assertIsInstance(typ, confuse.Number) self.assertEqual(typ.default, confuse.REQUIRED) + def test_concrete_float_as_template(self): + typ = confuse.as_template(2.) + self.assertIsInstance(typ, confuse.Number) + self.assertEqual(typ.default, 2.) + def test_none_as_template(self): typ = confuse.as_template(None) self.assertIs(type(typ), confuse.Template)
as_template does not convert float If it encounters an integer, it will convert it to an integer type validator https://github.com/beetbox/confuse/blob/5528e7ae70850be07ecaca25826c972c195ee90b/confuse.py#L1666-L1669 If it encounters a float, it throws an error https://github.com/beetbox/confuse/blob/5528e7ae70850be07ecaca25826c972c195ee90b/confuse.py#L1682-L1683 ``` File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1665, in as_template return MappingTemplate(value) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1209, in __init__ subtemplates[key] = as_template(typ) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1665, in as_template return MappingTemplate(value) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1209, in __init__ subtemplates[key] = as_template(typ) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1665, in as_template return MappingTemplate(value) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1209, in __init__ subtemplates[key] = as_template(typ) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1693, in as_template raise ValueError(u'cannot convert to template: {0!r}'.format(value)) ValueError: cannot convert to template: 0.5 ```
0.0
5528e7ae70850be07ecaca25826c972c195ee90b
[ "test/test_valid.py::AsTemplateTest::test_concrete_float_as_template" ]
[ "test/test_valid.py::ValidConfigTest::test_attribute_access", "test/test_valid.py::ValidConfigTest::test_default_value", "test/test_valid.py::ValidConfigTest::test_int_default_shortcut", "test/test_valid.py::ValidConfigTest::test_int_template_shortcut", "test/test_valid.py::ValidConfigTest::test_missing_required_value_raises_error_on_validate", "test/test_valid.py::ValidConfigTest::test_nested_attribute_access", "test/test_valid.py::ValidConfigTest::test_nested_dict_template", "test/test_valid.py::ValidConfigTest::test_none_as_default", "test/test_valid.py::ValidConfigTest::test_undeclared_key_ignored_from_input", "test/test_valid.py::ValidConfigTest::test_undeclared_key_raises_keyerror", "test/test_valid.py::ValidConfigTest::test_validate_individual_value", "test/test_valid.py::ValidConfigTest::test_validate_simple_dict", "test/test_valid.py::ValidConfigTest::test_wrong_type_raises_error_on_validate", "test/test_valid.py::AsTemplateTest::test_concrete_int_as_template", "test/test_valid.py::AsTemplateTest::test_concrete_string_as_template", "test/test_valid.py::AsTemplateTest::test_dict_as_template", "test/test_valid.py::AsTemplateTest::test_dict_type_as_template", "test/test_valid.py::AsTemplateTest::test_enum_type_as_template", "test/test_valid.py::AsTemplateTest::test_float_type_as_tempalte", "test/test_valid.py::AsTemplateTest::test_list_as_template", "test/test_valid.py::AsTemplateTest::test_list_type_as_template", "test/test_valid.py::AsTemplateTest::test_nested_dict_as_template", "test/test_valid.py::AsTemplateTest::test_none_as_template", "test/test_valid.py::AsTemplateTest::test_other_type_as_template", "test/test_valid.py::AsTemplateTest::test_plain_int_as_template", "test/test_valid.py::AsTemplateTest::test_plain_string_as_template", "test/test_valid.py::AsTemplateTest::test_set_as_template", "test/test_valid.py::AsTemplateTest::test_set_type_as_template", "test/test_valid.py::StringTemplateTest::test_check_string_type", "test/test_valid.py::StringTemplateTest::test_pattern_matching", "test/test_valid.py::StringTemplateTest::test_string_default_shortcut", "test/test_valid.py::StringTemplateTest::test_string_default_value", "test/test_valid.py::StringTemplateTest::test_string_template_shortcut", "test/test_valid.py::StringTemplateTest::test_validate_string", "test/test_valid.py::NumberTest::test_validate_float_as_number", "test/test_valid.py::NumberTest::test_validate_int_as_number", "test/test_valid.py::NumberTest::test_validate_string_as_number", "test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_dict", "test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_list", "test/test_valid.py::ChoiceTest::test_validate_good_choice_in_dict", "test/test_valid.py::ChoiceTest::test_validate_good_choice_in_list", "test/test_valid.py::OneOfTest::test_default_value", "test/test_valid.py::OneOfTest::test_validate_bad_template", "test/test_valid.py::OneOfTest::test_validate_first_good_choice_in_list", "test/test_valid.py::OneOfTest::test_validate_good_choice_in_list", "test/test_valid.py::OneOfTest::test_validate_no_choice_in_list", "test/test_valid.py::StrSeqTest::test_invalid_sequence_type", "test/test_valid.py::StrSeqTest::test_invalid_type", "test/test_valid.py::StrSeqTest::test_string_list", "test/test_valid.py::StrSeqTest::test_string_tuple", "test/test_valid.py::StrSeqTest::test_whitespace_separated_string", "test/test_valid.py::FilenameTest::test_filename_relative_to_self", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling", 
"test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_siblings", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_template", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_with_recursion", "test/test_valid.py::FilenameTest::test_filename_relative_to_working_dir", "test/test_valid.py::FilenameTest::test_filename_with_default_source", "test/test_valid.py::FilenameTest::test_filename_with_file_source", "test/test_valid.py::FilenameTest::test_filename_with_non_file_source", "test/test_valid.py::FilenameTest::test_filename_working_dir_overrides_sibling", "test/test_valid.py::FilenameTest::test_filename_wrong_type", "test/test_valid.py::BaseTemplateTest::test_base_template_accepts_any_value", "test/test_valid.py::BaseTemplateTest::test_base_template_required", "test/test_valid.py::BaseTemplateTest::test_base_template_with_default", "test/test_valid.py::TypeTemplateTest::test_correct_type", "test/test_valid.py::TypeTemplateTest::test_default_value", "test/test_valid.py::TypeTemplateTest::test_incorrect_type", "test/test_valid.py::TypeTemplateTest::test_missing_required_value", "test/test_valid.py::SequenceTest::test_dict_list", "test/test_valid.py::SequenceTest::test_int_list", "test/test_valid.py::SequenceTest::test_invalid_item" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-04-21 17:09:37+00:00
mit
1,324
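The one-line confuse fix above hinges on checking the `float` type before concrete float instances in `as_template`'s dispatch. A cut-down sketch of just the numeric branches; the sentinel and class names mirror confuse, but this is not the full implementation:

```python
REQUIRED = object()  # sentinel for "no default", mirroring confuse.REQUIRED

class Number:
    def __init__(self, default=REQUIRED):
        self.default = default

def as_template(value):
    # Order matters: `float` the type means "required number", while a
    # concrete float like 0.5 becomes a Number with that value as default.
    if value is float:
        return Number()
    if isinstance(value, float):
        return Number(value)
    raise ValueError(f"cannot convert to template: {value!r}")

assert as_template(float).default is REQUIRED
assert as_template(0.5).default == 0.5
```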
beetbox__confuse-79
diff --git a/confuse.py b/confuse.py index 8d5d875..d15a7a7 100644 --- a/confuse.py +++ b/confuse.py @@ -1125,15 +1125,21 @@ class Template(object): May raise a `NotFoundError` if the value is missing (and the template requires it) or a `ConfigValueError` for invalid values. """ - if view.exists(): + try: value, _ = view.first() return self.convert(value, view) - elif self.default is REQUIRED: + except NotFoundError: + pass + + # get default value, raise if required + return self.get_default_value(view.name) + + def get_default_value(self, key_name='default'): + if self.default is REQUIRED: # Missing required value. This is an error. - raise NotFoundError(u"{0} not found".format(view.name)) - else: - # Missing value, but not required. - return self.default + raise NotFoundError(u"{} not found".format(key_name)) + # Missing value, but not required. + return self.default def convert(self, value, view): """Convert the YAML-deserialized value to a value of the desired @@ -1578,7 +1584,11 @@ class Filename(Template): return view.parent.get(next_template)[self.relative_to] def value(self, view, template=None): - path, source = view.first() + try: + path, source = view.first() + except NotFoundError: + return self.get_default_value(view.name) + if not isinstance(path, BASESTRING): self.fail( u'must be a filename, not {0}'.format(type(path).__name__), @@ -1615,8 +1625,11 @@ class Path(Filename): template. """ def value(self, view, template=None): + value = super(Path, self).value(view, template) + if value is None: + return import pathlib - return pathlib.Path(super(Path, self).value(view, template)) + return pathlib.Path(value) class TypeTemplate(Template): @@ -1681,6 +1694,8 @@ def as_template(value): return OneOf(value) elif value is float: return Number() + elif isinstance(value, float): + return Number(value) elif value is None: return Template() elif value is dict: diff --git a/docs/index.rst b/docs/index.rst index 72dc12a..9070785 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -371,6 +371,12 @@ The resulting YAML will contain "key: REDACTED" instead of the original data. Changelog --------- +v1.2.0 +'''''' + +- `float` values (like ``4.2``) can now be used in templates (just like + ``42`` works as an `int` template). + v1.1.0 '''''' diff --git a/setup.py b/setup.py index b82a3ac..f49345c 100644 --- a/setup.py +++ b/setup.py @@ -81,7 +81,7 @@ setup( long_description=_read("README.rst"), long_description_content_type='text/x-rst', install_requires=['pyyaml'], - tests_require=['tox'], + tests_require=['tox', 'pathlib'], py_modules=['confuse'], cmdclass={'test': test}, classifiers=[ diff --git a/tox.ini b/tox.ini index 6466e85..6f3c9dc 100644 --- a/tox.ini +++ b/tox.ini @@ -15,6 +15,7 @@ deps = nose nose-show-skipped pyyaml + pathlib [_flake8]
beetbox/confuse
5528e7ae70850be07ecaca25826c972c195ee90b
diff --git a/test/test_valid.py b/test/test_valid.py index f1452f3..ec7cf6b 100644 --- a/test/test_valid.py +++ b/test/test_valid.py @@ -163,6 +163,11 @@ class AsTemplateTest(unittest.TestCase): self.assertIsInstance(typ, confuse.Number) self.assertEqual(typ.default, confuse.REQUIRED) + def test_concrete_float_as_template(self): + typ = confuse.as_template(2.) + self.assertIsInstance(typ, confuse.Number) + self.assertEqual(typ.default, 2.) + def test_none_as_template(self): typ = confuse.as_template(None) self.assertIs(type(typ), confuse.Template) @@ -337,6 +342,21 @@ class StrSeqTest(unittest.TestCase): class FilenameTest(unittest.TestCase): + def test_default_value(self): + config = _root({}) + valid = config['foo'].get(confuse.Filename('foo/bar')) + self.assertEqual(valid, 'foo/bar') + + def test_default_none(self): + config = _root({}) + valid = config['foo'].get(confuse.Filename(None)) + self.assertEqual(valid, None) + + def test_missing_required_value(self): + config = _root({}) + with self.assertRaises(confuse.NotFoundError): + config['foo'].get(confuse.Filename()) + def test_filename_relative_to_working_dir(self): config = _root({'foo': 'bar'}) valid = config['foo'].get(confuse.Filename(cwd='/dev/null')) @@ -414,6 +434,30 @@ class FilenameTest(unittest.TestCase): config['foo'].get(confuse.Filename()) +class PathTest(unittest.TestCase): + def test_path_value(self): + import pathlib + config = _root({'foo': 'foo/bar'}) + valid = config['foo'].get(confuse.Path()) + self.assertEqual(valid, pathlib.Path(os.path.abspath('foo/bar'))) + + def test_default_value(self): + import pathlib + config = _root({}) + valid = config['foo'].get(confuse.Path('foo/bar')) + self.assertEqual(valid, pathlib.Path('foo/bar')) + + def test_default_none(self): + config = _root({}) + valid = config['foo'].get(confuse.Path(None)) + self.assertEqual(valid, None) + + def test_missing_required_value(self): + config = _root({}) + with self.assertRaises(confuse.NotFoundError): + config['foo'].get(confuse.Path()) + + class BaseTemplateTest(unittest.TestCase): def test_base_template_accepts_any_value(self): config = _root({'foo': 4.2})
confuse.Filename doesn't check "default" argument #### The Problem When you specify `confuse.Filename('/default/path/to/file')` you would expect that, if no config value was found, it would default to `'/default/path/to/file'`. Instead, it throws exceptions. When I set `my_path: null` in my `config_default.yaml`, it gives me: ``` File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1218, in value out[key] = typ.value(view[key], self) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1218, in value out[key] = typ.value(view[key], self) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1586, in value True File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1158, in fail u'{0}: {1}'.format(view.name, message) confuse.ConfigTypeError: rsa_key: must be a filename, not NoneType ``` and when I comment out the key entirely, I get: ``` File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1218, in value out[key] = typ.value(view[key], self) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1218, in value out[key] = typ.value(view[key], self) File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 1581, in value path, source = view.first() File "/usr/local/lib/python3.7/dist-packages/confuse.py", line 201, in first raise NotFoundError(u"{0} not found".format(self.name)) confuse.NotFoundError: rsa_key not found ``` #### The Culprit `Template` implements the `value` method which is what normally checks the default value: https://github.com/beetbox/confuse/blob/5528e7ae70850be07ecaca25826c972c195ee90b/confuse.py#L1122-L1136 `Filename(Template)` overrides `value` and does nothing with the value of `default`. https://github.com/beetbox/confuse/blob/5528e7ae70850be07ecaca25826c972c195ee90b/confuse.py#L1580-L1605 One option, though I admittedly a bit messy, would be to do what's done in `OneOf` and set template as an attribute and perform the logic in `convert`. https://github.com/beetbox/confuse/blob/5528e7ae70850be07ecaca25826c972c195ee90b/confuse.py#L1356-L1363
0.0
5528e7ae70850be07ecaca25826c972c195ee90b
[ "test/test_valid.py::AsTemplateTest::test_concrete_float_as_template", "test/test_valid.py::FilenameTest::test_default_none", "test/test_valid.py::FilenameTest::test_default_value", "test/test_valid.py::PathTest::test_default_none", "test/test_valid.py::PathTest::test_default_value" ]
[ "test/test_valid.py::ValidConfigTest::test_attribute_access", "test/test_valid.py::ValidConfigTest::test_default_value", "test/test_valid.py::ValidConfigTest::test_int_default_shortcut", "test/test_valid.py::ValidConfigTest::test_int_template_shortcut", "test/test_valid.py::ValidConfigTest::test_missing_required_value_raises_error_on_validate", "test/test_valid.py::ValidConfigTest::test_nested_attribute_access", "test/test_valid.py::ValidConfigTest::test_nested_dict_template", "test/test_valid.py::ValidConfigTest::test_none_as_default", "test/test_valid.py::ValidConfigTest::test_undeclared_key_ignored_from_input", "test/test_valid.py::ValidConfigTest::test_undeclared_key_raises_keyerror", "test/test_valid.py::ValidConfigTest::test_validate_individual_value", "test/test_valid.py::ValidConfigTest::test_validate_simple_dict", "test/test_valid.py::ValidConfigTest::test_wrong_type_raises_error_on_validate", "test/test_valid.py::AsTemplateTest::test_concrete_int_as_template", "test/test_valid.py::AsTemplateTest::test_concrete_string_as_template", "test/test_valid.py::AsTemplateTest::test_dict_as_template", "test/test_valid.py::AsTemplateTest::test_dict_type_as_template", "test/test_valid.py::AsTemplateTest::test_enum_type_as_template", "test/test_valid.py::AsTemplateTest::test_float_type_as_tempalte", "test/test_valid.py::AsTemplateTest::test_list_as_template", "test/test_valid.py::AsTemplateTest::test_list_type_as_template", "test/test_valid.py::AsTemplateTest::test_nested_dict_as_template", "test/test_valid.py::AsTemplateTest::test_none_as_template", "test/test_valid.py::AsTemplateTest::test_other_type_as_template", "test/test_valid.py::AsTemplateTest::test_plain_int_as_template", "test/test_valid.py::AsTemplateTest::test_plain_string_as_template", "test/test_valid.py::AsTemplateTest::test_set_as_template", "test/test_valid.py::AsTemplateTest::test_set_type_as_template", "test/test_valid.py::StringTemplateTest::test_check_string_type", "test/test_valid.py::StringTemplateTest::test_pattern_matching", "test/test_valid.py::StringTemplateTest::test_string_default_shortcut", "test/test_valid.py::StringTemplateTest::test_string_default_value", "test/test_valid.py::StringTemplateTest::test_string_template_shortcut", "test/test_valid.py::StringTemplateTest::test_validate_string", "test/test_valid.py::NumberTest::test_validate_float_as_number", "test/test_valid.py::NumberTest::test_validate_int_as_number", "test/test_valid.py::NumberTest::test_validate_string_as_number", "test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_dict", "test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_list", "test/test_valid.py::ChoiceTest::test_validate_good_choice_in_dict", "test/test_valid.py::ChoiceTest::test_validate_good_choice_in_list", "test/test_valid.py::OneOfTest::test_default_value", "test/test_valid.py::OneOfTest::test_validate_bad_template", "test/test_valid.py::OneOfTest::test_validate_first_good_choice_in_list", "test/test_valid.py::OneOfTest::test_validate_good_choice_in_list", "test/test_valid.py::OneOfTest::test_validate_no_choice_in_list", "test/test_valid.py::StrSeqTest::test_invalid_sequence_type", "test/test_valid.py::StrSeqTest::test_invalid_type", "test/test_valid.py::StrSeqTest::test_string_list", "test/test_valid.py::StrSeqTest::test_string_tuple", "test/test_valid.py::StrSeqTest::test_whitespace_separated_string", "test/test_valid.py::FilenameTest::test_filename_relative_to_self", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling", 
"test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_siblings", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_template", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_with_recursion", "test/test_valid.py::FilenameTest::test_filename_relative_to_working_dir", "test/test_valid.py::FilenameTest::test_filename_with_default_source", "test/test_valid.py::FilenameTest::test_filename_with_file_source", "test/test_valid.py::FilenameTest::test_filename_with_non_file_source", "test/test_valid.py::FilenameTest::test_filename_working_dir_overrides_sibling", "test/test_valid.py::FilenameTest::test_filename_wrong_type", "test/test_valid.py::FilenameTest::test_missing_required_value", "test/test_valid.py::PathTest::test_missing_required_value", "test/test_valid.py::PathTest::test_path_value", "test/test_valid.py::BaseTemplateTest::test_base_template_accepts_any_value", "test/test_valid.py::BaseTemplateTest::test_base_template_required", "test/test_valid.py::BaseTemplateTest::test_base_template_with_default", "test/test_valid.py::TypeTemplateTest::test_correct_type", "test/test_valid.py::TypeTemplateTest::test_default_value", "test/test_valid.py::TypeTemplateTest::test_incorrect_type", "test/test_valid.py::TypeTemplateTest::test_missing_required_value", "test/test_valid.py::SequenceTest::test_dict_list", "test/test_valid.py::SequenceTest::test_int_list", "test/test_valid.py::SequenceTest::test_invalid_item" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-04-21 18:29:44+00:00
mit
1,325
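The fix in this instance routes every missing-value path through one `get_default_value` helper, so subclasses like `Filename` that override `value()` keep the default/required semantics instead of crashing. A minimal sketch of the pattern, with a fake view standing in for confuse's `ConfigView`:

```python
REQUIRED = object()

class NotFoundError(KeyError):
    pass

class FakeView:
    # Stand-in for confuse's ConfigView: first() raises when the key is absent.
    def __init__(self, name, value=None, exists=False):
        self.name, self._value, self._exists = name, value, exists

    def first(self):
        if not self._exists:
            raise NotFoundError(f"{self.name} not found")
        return self._value

class Filename:
    def __init__(self, default=REQUIRED):
        self.default = default

    def get_default_value(self, key_name="default"):
        if self.default is REQUIRED:
            raise NotFoundError(f"{key_name} not found")
        return self.default

    def value(self, view):
        try:
            path = view.first()
        except NotFoundError:
            # Fall back to the template's default instead of propagating.
            return self.get_default_value(view.name)
        return str(path)

assert Filename("foo/bar").value(FakeView("rsa_key")) == "foo/bar"
assert Filename(None).value(FakeView("rsa_key")) is None
```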
beetbox__confuse-97
diff --git a/confuse/core.py b/confuse/core.py index 193bfc3..5015705 100644 --- a/confuse/core.py +++ b/confuse/core.py @@ -335,7 +335,7 @@ class ConfigView(object): od[key] = view.get() return od - def get(self, template=None): + def get(self, template=templates.REQUIRED): """Retrieve the value for this view according to the template. The `template` against which the values are checked can be diff --git a/confuse/templates.py b/confuse/templates.py index 640ba66..984c341 100644 --- a/confuse/templates.py +++ b/confuse/templates.py @@ -626,6 +626,8 @@ def as_template(value): elif isinstance(value, float): return Number(value) elif value is None: + return Template(None) + elif value is REQUIRED: return Template() elif value is dict: return TypeTemplate(abc.Mapping) diff --git a/docs/index.rst b/docs/index.rst index 64f9d69..b8ac11b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -393,6 +393,10 @@ v1.3.0 - Break up the `confuse` module into a package. (All names should still be importable from `confuse`.) +- When using `None` as a template, the result is a value whose default is + `None`. Previously, this was equivalent to leaving the key off entirely, + i.e., a template with no default. To get the same effect now, use + `confuse.REQUIRED` in the template. v1.2.0 ''''''
beetbox/confuse
290a46f27eb081058ddb80afa5383b9253511f6d
diff --git a/test/test_valid.py b/test/test_valid.py index ec7cf6b..4ba8125 100644 --- a/test/test_valid.py +++ b/test/test_valid.py @@ -171,6 +171,11 @@ class AsTemplateTest(unittest.TestCase): def test_none_as_template(self): typ = confuse.as_template(None) self.assertIs(type(typ), confuse.Template) + self.assertEqual(typ.default, None) + + def test_required_as_template(self): + typ = confuse.as_template(confuse.REQUIRED) + self.assertIs(type(typ), confuse.Template) self.assertEqual(typ.default, confuse.REQUIRED) def test_dict_type_as_template(self):
None as Template should use None as default When I do this for a key that doesn't exist in `config_default.yaml`, I would expect the key to contain `None` by default. ```python config['something'].get({ 'key': None }) ``` But instead, I get: ``` confuse.NotFoundError: something.key not found ``` This is because of this line in `as_template`: https://github.com/beetbox/confuse/blob/b82d3faacb972b8964487a648805c3d0e06c0212/confuse.py#L1718-L1719 What I would propose: ```python # throws NotFoundError - '...' is often used to denote a missing value config['something'].get({ 'key': ... }) # returns None config['something'].get({ 'key': None }) ```
0.0
290a46f27eb081058ddb80afa5383b9253511f6d
[ "test/test_valid.py::AsTemplateTest::test_none_as_template", "test/test_valid.py::AsTemplateTest::test_required_as_template" ]
[ "test/test_valid.py::ValidConfigTest::test_attribute_access", "test/test_valid.py::ValidConfigTest::test_default_value", "test/test_valid.py::ValidConfigTest::test_int_default_shortcut", "test/test_valid.py::ValidConfigTest::test_int_template_shortcut", "test/test_valid.py::ValidConfigTest::test_missing_required_value_raises_error_on_validate", "test/test_valid.py::ValidConfigTest::test_nested_attribute_access", "test/test_valid.py::ValidConfigTest::test_nested_dict_template", "test/test_valid.py::ValidConfigTest::test_none_as_default", "test/test_valid.py::ValidConfigTest::test_undeclared_key_ignored_from_input", "test/test_valid.py::ValidConfigTest::test_undeclared_key_raises_keyerror", "test/test_valid.py::ValidConfigTest::test_validate_individual_value", "test/test_valid.py::ValidConfigTest::test_validate_simple_dict", "test/test_valid.py::ValidConfigTest::test_wrong_type_raises_error_on_validate", "test/test_valid.py::AsTemplateTest::test_concrete_float_as_template", "test/test_valid.py::AsTemplateTest::test_concrete_int_as_template", "test/test_valid.py::AsTemplateTest::test_concrete_string_as_template", "test/test_valid.py::AsTemplateTest::test_dict_as_template", "test/test_valid.py::AsTemplateTest::test_dict_type_as_template", "test/test_valid.py::AsTemplateTest::test_enum_type_as_template", "test/test_valid.py::AsTemplateTest::test_float_type_as_tempalte", "test/test_valid.py::AsTemplateTest::test_list_as_template", "test/test_valid.py::AsTemplateTest::test_list_type_as_template", "test/test_valid.py::AsTemplateTest::test_nested_dict_as_template", "test/test_valid.py::AsTemplateTest::test_other_type_as_template", "test/test_valid.py::AsTemplateTest::test_plain_int_as_template", "test/test_valid.py::AsTemplateTest::test_plain_string_as_template", "test/test_valid.py::AsTemplateTest::test_set_as_template", "test/test_valid.py::AsTemplateTest::test_set_type_as_template", "test/test_valid.py::StringTemplateTest::test_check_string_type", "test/test_valid.py::StringTemplateTest::test_pattern_matching", "test/test_valid.py::StringTemplateTest::test_string_default_shortcut", "test/test_valid.py::StringTemplateTest::test_string_default_value", "test/test_valid.py::StringTemplateTest::test_string_template_shortcut", "test/test_valid.py::StringTemplateTest::test_validate_string", "test/test_valid.py::NumberTest::test_validate_float_as_number", "test/test_valid.py::NumberTest::test_validate_int_as_number", "test/test_valid.py::NumberTest::test_validate_string_as_number", "test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_dict", "test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_list", "test/test_valid.py::ChoiceTest::test_validate_good_choice_in_dict", "test/test_valid.py::ChoiceTest::test_validate_good_choice_in_list", "test/test_valid.py::OneOfTest::test_default_value", "test/test_valid.py::OneOfTest::test_validate_bad_template", "test/test_valid.py::OneOfTest::test_validate_first_good_choice_in_list", "test/test_valid.py::OneOfTest::test_validate_good_choice_in_list", "test/test_valid.py::OneOfTest::test_validate_no_choice_in_list", "test/test_valid.py::StrSeqTest::test_invalid_sequence_type", "test/test_valid.py::StrSeqTest::test_invalid_type", "test/test_valid.py::StrSeqTest::test_string_list", "test/test_valid.py::StrSeqTest::test_string_tuple", "test/test_valid.py::StrSeqTest::test_whitespace_separated_string", "test/test_valid.py::FilenameTest::test_default_none", "test/test_valid.py::FilenameTest::test_default_value", 
"test/test_valid.py::FilenameTest::test_filename_relative_to_self", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_siblings", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_template", "test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_with_recursion", "test/test_valid.py::FilenameTest::test_filename_relative_to_working_dir", "test/test_valid.py::FilenameTest::test_filename_with_default_source", "test/test_valid.py::FilenameTest::test_filename_with_file_source", "test/test_valid.py::FilenameTest::test_filename_with_non_file_source", "test/test_valid.py::FilenameTest::test_filename_working_dir_overrides_sibling", "test/test_valid.py::FilenameTest::test_filename_wrong_type", "test/test_valid.py::FilenameTest::test_missing_required_value", "test/test_valid.py::PathTest::test_default_none", "test/test_valid.py::PathTest::test_default_value", "test/test_valid.py::PathTest::test_missing_required_value", "test/test_valid.py::PathTest::test_path_value", "test/test_valid.py::BaseTemplateTest::test_base_template_accepts_any_value", "test/test_valid.py::BaseTemplateTest::test_base_template_required", "test/test_valid.py::BaseTemplateTest::test_base_template_with_default", "test/test_valid.py::TypeTemplateTest::test_correct_type", "test/test_valid.py::TypeTemplateTest::test_default_value", "test/test_valid.py::TypeTemplateTest::test_incorrect_type", "test/test_valid.py::TypeTemplateTest::test_missing_required_value", "test/test_valid.py::SequenceTest::test_dict_list", "test/test_valid.py::SequenceTest::test_int_list", "test/test_valid.py::SequenceTest::test_invalid_item" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-06-09 21:59:16+00:00
mit
1,326
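After this change (confuse v1.3.0, per the changelog hunk in the patch), `None` inside a dict template means "default to None", and `confuse.REQUIRED` is the explicit way to demand a value. A usage sketch against an empty configuration, assuming confuse 1.3+:

```python
import confuse

config = confuse.RootView([])  # an empty configuration with no sources

# None now yields a None default instead of raising:
print(config.get({"key": None}))  # {'key': None}

# confuse.REQUIRED restores the old "must be present" behavior:
try:
    config.get({"key": confuse.REQUIRED})
except confuse.NotFoundError as exc:
    print(exc)  # key not found
```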
beetbox__mediafile-64
diff --git a/.gitignore b/.gitignore index 14035f2..29a33a4 100644 --- a/.gitignore +++ b/.gitignore @@ -74,6 +74,7 @@ target/ # virtualenv venv/ ENV/ +.venv/ # Spyder project settings .spyderproject diff --git a/mediafile.py b/mediafile.py index ca70c94..c834346 100644 --- a/mediafile.py +++ b/mediafile.py @@ -600,8 +600,8 @@ class ListStorageStyle(StorageStyle): object to each. Subclasses may overwrite ``fetch`` and ``store``. ``fetch`` must - return a (possibly empty) list and ``store`` receives a serialized - list of values as the second argument. + return a (possibly empty) list or `None` if the tag does not exist. + ``store`` receives a serialized list of values as the second argument. The `serialize` and `deserialize` methods (from the base `StorageStyle`) are still called with individual values. This class @@ -610,15 +610,23 @@ class ListStorageStyle(StorageStyle): def get(self, mutagen_file): """Get the first value in the field's value list. """ + values = self.get_list(mutagen_file) + if values is None: + return None + try: - return self.get_list(mutagen_file)[0] + return values[0] except IndexError: return None def get_list(self, mutagen_file): """Get a list of all values for the field using this style. """ - return [self.deserialize(item) for item in self.fetch(mutagen_file)] + raw_values = self.fetch(mutagen_file) + if raw_values is None: + return None + + return [self.deserialize(item) for item in raw_values] def fetch(self, mutagen_file): """Get the list of raw (serialized) values. @@ -626,19 +634,27 @@ class ListStorageStyle(StorageStyle): try: return mutagen_file[self.key] except KeyError: - return [] + return None def set(self, mutagen_file, value): """Set an individual value as the only value for the field using this style. """ - self.set_list(mutagen_file, [value]) + if value is None: + self.store(mutagen_file, None) + else: + self.set_list(mutagen_file, [value]) def set_list(self, mutagen_file, values): """Set all values for the field using this style. `values` should be an iterable. """ - self.store(mutagen_file, [self.serialize(value) for value in values]) + if values is None: + self.delete(mutagen_file) + else: + self.store( + mutagen_file, [self.serialize(value) for value in values] + ) def store(self, mutagen_file, values): """Set the list of all raw (serialized) values for this field. @@ -1317,7 +1333,7 @@ class ListMediaField(MediaField): values = style.get_list(mediafile.mgfile) if values: return [_safe_cast(self.out_type, value) for value in values] - return [] + return None def __set__(self, mediafile, values): for style in self.styles(mediafile.mgfile):
beetbox/mediafile
809579c35488f105bd7d87ee9b2e240110402d39
diff --git a/test/test_mediafile.py b/test/test_mediafile.py index 10bbf80..fa474bd 100644 --- a/test/test_mediafile.py +++ b/test/test_mediafile.py @@ -160,7 +160,7 @@ class ImageStructureTestMixin(ArtTestMixin): mediafile.save() mediafile = MediaFile(mediafile.filename) - self.assertEqual(len(mediafile.images), 0) + self.assertIsNone(mediafile.images) def test_guess_cover(self): mediafile = self._mediafile_fixture('image')
List fields don't return None if the tag doesn't exist If a list tag does not exist, it returns an empty list rather than `None`, but if a non-list tag does not exist, `None` is returned. I believe non-existent list tags should also return `None` so that this behavior is consistent.
0.0
809579c35488f105bd7d87ee9b2e240110402d39
[ "test/test_mediafile.py::MP3Test::test_delete_image_structures", "test/test_mediafile.py::MP4Test::test_delete_image_structures", "test/test_mediafile.py::WMATest::test_delete_image_structures", "test/test_mediafile.py::OggTest::test_delete_image_structures", "test/test_mediafile.py::FlacTest::test_delete_image_structures", "test/test_mediafile.py::ApeTest::test_delete_image_structures" ]
[ "test/test_mediafile.py::MP3Test::test_add_image_structure", "test/test_mediafile.py::MP3Test::test_add_tiff_image", "test/test_mediafile.py::MP3Test::test_append_genre_list", "test/test_mediafile.py::MP3Test::test_bitrate_mode", "test/test_mediafile.py::MP3Test::test_delete_art", "test/test_mediafile.py::MP3Test::test_delete_nonexisting", "test/test_mediafile.py::MP3Test::test_delete_packed_total", "test/test_mediafile.py::MP3Test::test_delete_partial_date", "test/test_mediafile.py::MP3Test::test_delete_tag", "test/test_mediafile.py::MP3Test::test_delete_year", "test/test_mediafile.py::MP3Test::test_encoder_info", "test/test_mediafile.py::MP3Test::test_encoder_settings", "test/test_mediafile.py::MP3Test::test_guess_cover", "test/test_mediafile.py::MP3Test::test_overwrite_full", "test/test_mediafile.py::MP3Test::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::MP3Test::test_read_audio_properties", "test/test_mediafile.py::MP3Test::test_read_empty", "test/test_mediafile.py::MP3Test::test_read_full", "test/test_mediafile.py::MP3Test::test_read_genre_list", "test/test_mediafile.py::MP3Test::test_read_image_structures", "test/test_mediafile.py::MP3Test::test_read_nonexisting", "test/test_mediafile.py::MP3Test::test_read_track_without_total", "test/test_mediafile.py::MP3Test::test_save_nonexisting", "test/test_mediafile.py::MP3Test::test_set_image_structure", "test/test_mediafile.py::MP3Test::test_set_jpg_art", "test/test_mediafile.py::MP3Test::test_set_png_art", "test/test_mediafile.py::MP3Test::test_unknown_apic_type", "test/test_mediafile.py::MP3Test::test_unparseable_date", "test/test_mediafile.py::MP3Test::test_update_empty", "test/test_mediafile.py::MP3Test::test_update_full", "test/test_mediafile.py::MP3Test::test_write_counters_without_total", "test/test_mediafile.py::MP3Test::test_write_date_components", "test/test_mediafile.py::MP3Test::test_write_dates", "test/test_mediafile.py::MP3Test::test_write_empty", "test/test_mediafile.py::MP3Test::test_write_genre_list", "test/test_mediafile.py::MP3Test::test_write_genre_list_get_first", "test/test_mediafile.py::MP3Test::test_write_incomplete_date_components", "test/test_mediafile.py::MP3Test::test_write_packed", "test/test_mediafile.py::MP4Test::test_add_image_structure", "test/test_mediafile.py::MP4Test::test_add_tiff_image_fails", "test/test_mediafile.py::MP4Test::test_append_genre_list", "test/test_mediafile.py::MP4Test::test_delete_art", "test/test_mediafile.py::MP4Test::test_delete_nonexisting", "test/test_mediafile.py::MP4Test::test_delete_packed_total", "test/test_mediafile.py::MP4Test::test_delete_partial_date", "test/test_mediafile.py::MP4Test::test_delete_tag", "test/test_mediafile.py::MP4Test::test_delete_year", "test/test_mediafile.py::MP4Test::test_guess_cover", "test/test_mediafile.py::MP4Test::test_overwrite_full", "test/test_mediafile.py::MP4Test::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::MP4Test::test_read_audio_properties", "test/test_mediafile.py::MP4Test::test_read_empty", "test/test_mediafile.py::MP4Test::test_read_full", "test/test_mediafile.py::MP4Test::test_read_genre_list", "test/test_mediafile.py::MP4Test::test_read_image_structures", "test/test_mediafile.py::MP4Test::test_read_nonexisting", "test/test_mediafile.py::MP4Test::test_read_track_without_total", "test/test_mediafile.py::MP4Test::test_save_nonexisting", "test/test_mediafile.py::MP4Test::test_set_image_structure", "test/test_mediafile.py::MP4Test::test_set_jpg_art", "test/test_mediafile.py::MP4Test::test_set_png_art", 
"test/test_mediafile.py::MP4Test::test_unparseable_date", "test/test_mediafile.py::MP4Test::test_update_empty", "test/test_mediafile.py::MP4Test::test_update_full", "test/test_mediafile.py::MP4Test::test_write_counters_without_total", "test/test_mediafile.py::MP4Test::test_write_date_components", "test/test_mediafile.py::MP4Test::test_write_dates", "test/test_mediafile.py::MP4Test::test_write_empty", "test/test_mediafile.py::MP4Test::test_write_genre_list", "test/test_mediafile.py::MP4Test::test_write_genre_list_get_first", "test/test_mediafile.py::MP4Test::test_write_incomplete_date_components", "test/test_mediafile.py::MP4Test::test_write_packed", "test/test_mediafile.py::AlacTest::test_append_genre_list", "test/test_mediafile.py::AlacTest::test_delete_art", "test/test_mediafile.py::AlacTest::test_delete_nonexisting", "test/test_mediafile.py::AlacTest::test_delete_packed_total", "test/test_mediafile.py::AlacTest::test_delete_partial_date", "test/test_mediafile.py::AlacTest::test_delete_tag", "test/test_mediafile.py::AlacTest::test_delete_year", "test/test_mediafile.py::AlacTest::test_overwrite_full", "test/test_mediafile.py::AlacTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::AlacTest::test_read_audio_properties", "test/test_mediafile.py::AlacTest::test_read_empty", "test/test_mediafile.py::AlacTest::test_read_full", "test/test_mediafile.py::AlacTest::test_read_genre_list", "test/test_mediafile.py::AlacTest::test_read_nonexisting", "test/test_mediafile.py::AlacTest::test_save_nonexisting", "test/test_mediafile.py::AlacTest::test_set_jpg_art", "test/test_mediafile.py::AlacTest::test_set_png_art", "test/test_mediafile.py::AlacTest::test_unparseable_date", "test/test_mediafile.py::AlacTest::test_update_empty", "test/test_mediafile.py::AlacTest::test_update_full", "test/test_mediafile.py::AlacTest::test_write_counters_without_total", "test/test_mediafile.py::AlacTest::test_write_date_components", "test/test_mediafile.py::AlacTest::test_write_dates", "test/test_mediafile.py::AlacTest::test_write_empty", "test/test_mediafile.py::AlacTest::test_write_genre_list", "test/test_mediafile.py::AlacTest::test_write_genre_list_get_first", "test/test_mediafile.py::AlacTest::test_write_incomplete_date_components", "test/test_mediafile.py::AlacTest::test_write_packed", "test/test_mediafile.py::MusepackTest::test_append_genre_list", "test/test_mediafile.py::MusepackTest::test_delete_art", "test/test_mediafile.py::MusepackTest::test_delete_nonexisting", "test/test_mediafile.py::MusepackTest::test_delete_packed_total", "test/test_mediafile.py::MusepackTest::test_delete_partial_date", "test/test_mediafile.py::MusepackTest::test_delete_tag", "test/test_mediafile.py::MusepackTest::test_delete_year", "test/test_mediafile.py::MusepackTest::test_overwrite_full", "test/test_mediafile.py::MusepackTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::MusepackTest::test_read_audio_properties", "test/test_mediafile.py::MusepackTest::test_read_empty", "test/test_mediafile.py::MusepackTest::test_read_full", "test/test_mediafile.py::MusepackTest::test_read_genre_list", "test/test_mediafile.py::MusepackTest::test_read_nonexisting", "test/test_mediafile.py::MusepackTest::test_save_nonexisting", "test/test_mediafile.py::MusepackTest::test_set_jpg_art", "test/test_mediafile.py::MusepackTest::test_set_png_art", "test/test_mediafile.py::MusepackTest::test_unparseable_date", "test/test_mediafile.py::MusepackTest::test_update_empty", "test/test_mediafile.py::MusepackTest::test_update_full", 
"test/test_mediafile.py::MusepackTest::test_write_counters_without_total", "test/test_mediafile.py::MusepackTest::test_write_date_components", "test/test_mediafile.py::MusepackTest::test_write_dates", "test/test_mediafile.py::MusepackTest::test_write_empty", "test/test_mediafile.py::MusepackTest::test_write_genre_list", "test/test_mediafile.py::MusepackTest::test_write_genre_list_get_first", "test/test_mediafile.py::MusepackTest::test_write_incomplete_date_components", "test/test_mediafile.py::MusepackTest::test_write_packed", "test/test_mediafile.py::WMATest::test_add_image_structure", "test/test_mediafile.py::WMATest::test_add_tiff_image", "test/test_mediafile.py::WMATest::test_append_genre_list", "test/test_mediafile.py::WMATest::test_delete_art", "test/test_mediafile.py::WMATest::test_delete_nonexisting", "test/test_mediafile.py::WMATest::test_delete_packed_total", "test/test_mediafile.py::WMATest::test_delete_partial_date", "test/test_mediafile.py::WMATest::test_delete_tag", "test/test_mediafile.py::WMATest::test_delete_year", "test/test_mediafile.py::WMATest::test_guess_cover", "test/test_mediafile.py::WMATest::test_overwrite_full", "test/test_mediafile.py::WMATest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::WMATest::test_read_audio_properties", "test/test_mediafile.py::WMATest::test_read_empty", "test/test_mediafile.py::WMATest::test_read_full", "test/test_mediafile.py::WMATest::test_read_genre_list", "test/test_mediafile.py::WMATest::test_read_image_structures", "test/test_mediafile.py::WMATest::test_read_nonexisting", "test/test_mediafile.py::WMATest::test_read_pure_tags", "test/test_mediafile.py::WMATest::test_save_nonexisting", "test/test_mediafile.py::WMATest::test_set_image_structure", "test/test_mediafile.py::WMATest::test_set_jpg_art", "test/test_mediafile.py::WMATest::test_set_png_art", "test/test_mediafile.py::WMATest::test_unparseable_date", "test/test_mediafile.py::WMATest::test_update_empty", "test/test_mediafile.py::WMATest::test_update_full", "test/test_mediafile.py::WMATest::test_write_counters_without_total", "test/test_mediafile.py::WMATest::test_write_date_components", "test/test_mediafile.py::WMATest::test_write_dates", "test/test_mediafile.py::WMATest::test_write_empty", "test/test_mediafile.py::WMATest::test_write_genre_list", "test/test_mediafile.py::WMATest::test_write_genre_list_get_first", "test/test_mediafile.py::WMATest::test_write_incomplete_date_components", "test/test_mediafile.py::WMATest::test_write_packed", "test/test_mediafile.py::OggTest::test_add_image_structure", "test/test_mediafile.py::OggTest::test_add_tiff_image", "test/test_mediafile.py::OggTest::test_append_genre_list", "test/test_mediafile.py::OggTest::test_date_tag_with_slashes", "test/test_mediafile.py::OggTest::test_delete_art", "test/test_mediafile.py::OggTest::test_delete_nonexisting", "test/test_mediafile.py::OggTest::test_delete_packed_total", "test/test_mediafile.py::OggTest::test_delete_partial_date", "test/test_mediafile.py::OggTest::test_delete_tag", "test/test_mediafile.py::OggTest::test_delete_year", "test/test_mediafile.py::OggTest::test_guess_cover", "test/test_mediafile.py::OggTest::test_legacy_coverart_tag", "test/test_mediafile.py::OggTest::test_overwrite_full", "test/test_mediafile.py::OggTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::OggTest::test_read_audio_properties", "test/test_mediafile.py::OggTest::test_read_date_from_year_tag", "test/test_mediafile.py::OggTest::test_read_empty", 
"test/test_mediafile.py::OggTest::test_read_full", "test/test_mediafile.py::OggTest::test_read_genre_list", "test/test_mediafile.py::OggTest::test_read_image_structures", "test/test_mediafile.py::OggTest::test_read_nonexisting", "test/test_mediafile.py::OggTest::test_save_nonexisting", "test/test_mediafile.py::OggTest::test_set_image_structure", "test/test_mediafile.py::OggTest::test_set_jpg_art", "test/test_mediafile.py::OggTest::test_set_png_art", "test/test_mediafile.py::OggTest::test_unparseable_date", "test/test_mediafile.py::OggTest::test_update_empty", "test/test_mediafile.py::OggTest::test_update_full", "test/test_mediafile.py::OggTest::test_write_counters_without_total", "test/test_mediafile.py::OggTest::test_write_date_components", "test/test_mediafile.py::OggTest::test_write_date_to_year_tag", "test/test_mediafile.py::OggTest::test_write_dates", "test/test_mediafile.py::OggTest::test_write_empty", "test/test_mediafile.py::OggTest::test_write_genre_list", "test/test_mediafile.py::OggTest::test_write_genre_list_get_first", "test/test_mediafile.py::OggTest::test_write_incomplete_date_components", "test/test_mediafile.py::OggTest::test_write_packed", "test/test_mediafile.py::FlacTest::test_add_image_structure", "test/test_mediafile.py::FlacTest::test_add_tiff_image", "test/test_mediafile.py::FlacTest::test_append_genre_list", "test/test_mediafile.py::FlacTest::test_delete_art", "test/test_mediafile.py::FlacTest::test_delete_nonexisting", "test/test_mediafile.py::FlacTest::test_delete_packed_total", "test/test_mediafile.py::FlacTest::test_delete_partial_date", "test/test_mediafile.py::FlacTest::test_delete_tag", "test/test_mediafile.py::FlacTest::test_delete_year", "test/test_mediafile.py::FlacTest::test_guess_cover", "test/test_mediafile.py::FlacTest::test_overwrite_full", "test/test_mediafile.py::FlacTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::FlacTest::test_read_audio_properties", "test/test_mediafile.py::FlacTest::test_read_empty", "test/test_mediafile.py::FlacTest::test_read_full", "test/test_mediafile.py::FlacTest::test_read_genre_list", "test/test_mediafile.py::FlacTest::test_read_image_structures", "test/test_mediafile.py::FlacTest::test_read_nonexisting", "test/test_mediafile.py::FlacTest::test_read_track_without_total", "test/test_mediafile.py::FlacTest::test_save_nonexisting", "test/test_mediafile.py::FlacTest::test_set_image_structure", "test/test_mediafile.py::FlacTest::test_set_jpg_art", "test/test_mediafile.py::FlacTest::test_set_png_art", "test/test_mediafile.py::FlacTest::test_unparseable_date", "test/test_mediafile.py::FlacTest::test_update_empty", "test/test_mediafile.py::FlacTest::test_update_full", "test/test_mediafile.py::FlacTest::test_write_counters_without_total", "test/test_mediafile.py::FlacTest::test_write_date_components", "test/test_mediafile.py::FlacTest::test_write_dates", "test/test_mediafile.py::FlacTest::test_write_empty", "test/test_mediafile.py::FlacTest::test_write_genre_list", "test/test_mediafile.py::FlacTest::test_write_genre_list_get_first", "test/test_mediafile.py::FlacTest::test_write_incomplete_date_components", "test/test_mediafile.py::FlacTest::test_write_packed", "test/test_mediafile.py::ApeTest::test_add_image_structure", "test/test_mediafile.py::ApeTest::test_add_tiff_image", "test/test_mediafile.py::ApeTest::test_append_genre_list", "test/test_mediafile.py::ApeTest::test_delete_art", "test/test_mediafile.py::ApeTest::test_delete_nonexisting", "test/test_mediafile.py::ApeTest::test_delete_packed_total", 
"test/test_mediafile.py::ApeTest::test_delete_partial_date", "test/test_mediafile.py::ApeTest::test_delete_tag", "test/test_mediafile.py::ApeTest::test_delete_year", "test/test_mediafile.py::ApeTest::test_guess_cover", "test/test_mediafile.py::ApeTest::test_overwrite_full", "test/test_mediafile.py::ApeTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::ApeTest::test_read_audio_properties", "test/test_mediafile.py::ApeTest::test_read_empty", "test/test_mediafile.py::ApeTest::test_read_full", "test/test_mediafile.py::ApeTest::test_read_genre_list", "test/test_mediafile.py::ApeTest::test_read_image_structures", "test/test_mediafile.py::ApeTest::test_read_nonexisting", "test/test_mediafile.py::ApeTest::test_save_nonexisting", "test/test_mediafile.py::ApeTest::test_set_image_structure", "test/test_mediafile.py::ApeTest::test_set_jpg_art", "test/test_mediafile.py::ApeTest::test_set_png_art", "test/test_mediafile.py::ApeTest::test_unparseable_date", "test/test_mediafile.py::ApeTest::test_update_empty", "test/test_mediafile.py::ApeTest::test_update_full", "test/test_mediafile.py::ApeTest::test_write_counters_without_total", "test/test_mediafile.py::ApeTest::test_write_date_components", "test/test_mediafile.py::ApeTest::test_write_dates", "test/test_mediafile.py::ApeTest::test_write_empty", "test/test_mediafile.py::ApeTest::test_write_genre_list", "test/test_mediafile.py::ApeTest::test_write_genre_list_get_first", "test/test_mediafile.py::ApeTest::test_write_incomplete_date_components", "test/test_mediafile.py::ApeTest::test_write_packed", "test/test_mediafile.py::WavpackTest::test_append_genre_list", "test/test_mediafile.py::WavpackTest::test_delete_art", "test/test_mediafile.py::WavpackTest::test_delete_nonexisting", "test/test_mediafile.py::WavpackTest::test_delete_packed_total", "test/test_mediafile.py::WavpackTest::test_delete_partial_date", "test/test_mediafile.py::WavpackTest::test_delete_tag", "test/test_mediafile.py::WavpackTest::test_delete_year", "test/test_mediafile.py::WavpackTest::test_overwrite_full", "test/test_mediafile.py::WavpackTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::WavpackTest::test_read_audio_properties", "test/test_mediafile.py::WavpackTest::test_read_empty", "test/test_mediafile.py::WavpackTest::test_read_full", "test/test_mediafile.py::WavpackTest::test_read_genre_list", "test/test_mediafile.py::WavpackTest::test_read_nonexisting", "test/test_mediafile.py::WavpackTest::test_save_nonexisting", "test/test_mediafile.py::WavpackTest::test_set_jpg_art", "test/test_mediafile.py::WavpackTest::test_set_png_art", "test/test_mediafile.py::WavpackTest::test_unparseable_date", "test/test_mediafile.py::WavpackTest::test_update_empty", "test/test_mediafile.py::WavpackTest::test_update_full", "test/test_mediafile.py::WavpackTest::test_write_counters_without_total", "test/test_mediafile.py::WavpackTest::test_write_date_components", "test/test_mediafile.py::WavpackTest::test_write_dates", "test/test_mediafile.py::WavpackTest::test_write_empty", "test/test_mediafile.py::WavpackTest::test_write_genre_list", "test/test_mediafile.py::WavpackTest::test_write_genre_list_get_first", "test/test_mediafile.py::WavpackTest::test_write_incomplete_date_components", "test/test_mediafile.py::WavpackTest::test_write_packed", "test/test_mediafile.py::OpusTest::test_append_genre_list", "test/test_mediafile.py::OpusTest::test_delete_art", "test/test_mediafile.py::OpusTest::test_delete_nonexisting", "test/test_mediafile.py::OpusTest::test_delete_packed_total", 
"test/test_mediafile.py::OpusTest::test_delete_partial_date", "test/test_mediafile.py::OpusTest::test_delete_tag", "test/test_mediafile.py::OpusTest::test_delete_year", "test/test_mediafile.py::OpusTest::test_overwrite_full", "test/test_mediafile.py::OpusTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::OpusTest::test_read_audio_properties", "test/test_mediafile.py::OpusTest::test_read_empty", "test/test_mediafile.py::OpusTest::test_read_full", "test/test_mediafile.py::OpusTest::test_read_genre_list", "test/test_mediafile.py::OpusTest::test_read_nonexisting", "test/test_mediafile.py::OpusTest::test_save_nonexisting", "test/test_mediafile.py::OpusTest::test_set_jpg_art", "test/test_mediafile.py::OpusTest::test_set_png_art", "test/test_mediafile.py::OpusTest::test_unparseable_date", "test/test_mediafile.py::OpusTest::test_update_empty", "test/test_mediafile.py::OpusTest::test_update_full", "test/test_mediafile.py::OpusTest::test_write_counters_without_total", "test/test_mediafile.py::OpusTest::test_write_date_components", "test/test_mediafile.py::OpusTest::test_write_dates", "test/test_mediafile.py::OpusTest::test_write_empty", "test/test_mediafile.py::OpusTest::test_write_genre_list", "test/test_mediafile.py::OpusTest::test_write_genre_list_get_first", "test/test_mediafile.py::OpusTest::test_write_incomplete_date_components", "test/test_mediafile.py::OpusTest::test_write_packed", "test/test_mediafile.py::AIFFTest::test_append_genre_list", "test/test_mediafile.py::AIFFTest::test_delete_art", "test/test_mediafile.py::AIFFTest::test_delete_nonexisting", "test/test_mediafile.py::AIFFTest::test_delete_packed_total", "test/test_mediafile.py::AIFFTest::test_delete_partial_date", "test/test_mediafile.py::AIFFTest::test_delete_tag", "test/test_mediafile.py::AIFFTest::test_delete_year", "test/test_mediafile.py::AIFFTest::test_overwrite_full", "test/test_mediafile.py::AIFFTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::AIFFTest::test_read_audio_properties", "test/test_mediafile.py::AIFFTest::test_read_empty", "test/test_mediafile.py::AIFFTest::test_read_full", "test/test_mediafile.py::AIFFTest::test_read_genre_list", "test/test_mediafile.py::AIFFTest::test_read_nonexisting", "test/test_mediafile.py::AIFFTest::test_save_nonexisting", "test/test_mediafile.py::AIFFTest::test_set_jpg_art", "test/test_mediafile.py::AIFFTest::test_set_png_art", "test/test_mediafile.py::AIFFTest::test_unparseable_date", "test/test_mediafile.py::AIFFTest::test_update_empty", "test/test_mediafile.py::AIFFTest::test_update_full", "test/test_mediafile.py::AIFFTest::test_write_counters_without_total", "test/test_mediafile.py::AIFFTest::test_write_date_components", "test/test_mediafile.py::AIFFTest::test_write_dates", "test/test_mediafile.py::AIFFTest::test_write_empty", "test/test_mediafile.py::AIFFTest::test_write_genre_list", "test/test_mediafile.py::AIFFTest::test_write_genre_list_get_first", "test/test_mediafile.py::AIFFTest::test_write_incomplete_date_components", "test/test_mediafile.py::AIFFTest::test_write_packed", "test/test_mediafile.py::WAVETest::test_append_genre_list", "test/test_mediafile.py::WAVETest::test_delete_art", "test/test_mediafile.py::WAVETest::test_delete_nonexisting", "test/test_mediafile.py::WAVETest::test_delete_packed_total", "test/test_mediafile.py::WAVETest::test_delete_partial_date", "test/test_mediafile.py::WAVETest::test_delete_tag", "test/test_mediafile.py::WAVETest::test_delete_year", "test/test_mediafile.py::WAVETest::test_overwrite_full", 
"test/test_mediafile.py::WAVETest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::WAVETest::test_read_audio_properties", "test/test_mediafile.py::WAVETest::test_read_empty", "test/test_mediafile.py::WAVETest::test_read_full", "test/test_mediafile.py::WAVETest::test_read_genre_list", "test/test_mediafile.py::WAVETest::test_read_nonexisting", "test/test_mediafile.py::WAVETest::test_save_nonexisting", "test/test_mediafile.py::WAVETest::test_set_jpg_art", "test/test_mediafile.py::WAVETest::test_set_png_art", "test/test_mediafile.py::WAVETest::test_unparseable_date", "test/test_mediafile.py::WAVETest::test_update_empty", "test/test_mediafile.py::WAVETest::test_update_full", "test/test_mediafile.py::WAVETest::test_write_counters_without_total", "test/test_mediafile.py::WAVETest::test_write_date_components", "test/test_mediafile.py::WAVETest::test_write_dates", "test/test_mediafile.py::WAVETest::test_write_empty", "test/test_mediafile.py::WAVETest::test_write_genre_list", "test/test_mediafile.py::WAVETest::test_write_genre_list_get_first", "test/test_mediafile.py::WAVETest::test_write_incomplete_date_components", "test/test_mediafile.py::WAVETest::test_write_packed", "test/test_mediafile.py::DSFTest::test_append_genre_list", "test/test_mediafile.py::DSFTest::test_delete_art", "test/test_mediafile.py::DSFTest::test_delete_nonexisting", "test/test_mediafile.py::DSFTest::test_delete_packed_total", "test/test_mediafile.py::DSFTest::test_delete_partial_date", "test/test_mediafile.py::DSFTest::test_delete_tag", "test/test_mediafile.py::DSFTest::test_delete_year", "test/test_mediafile.py::DSFTest::test_overwrite_full", "test/test_mediafile.py::DSFTest::test_r128_gain_stored_as_q8_number", "test/test_mediafile.py::DSFTest::test_read_audio_properties", "test/test_mediafile.py::DSFTest::test_read_empty", "test/test_mediafile.py::DSFTest::test_read_full", "test/test_mediafile.py::DSFTest::test_read_genre_list", "test/test_mediafile.py::DSFTest::test_read_nonexisting", "test/test_mediafile.py::DSFTest::test_save_nonexisting", "test/test_mediafile.py::DSFTest::test_set_jpg_art", "test/test_mediafile.py::DSFTest::test_set_png_art", "test/test_mediafile.py::DSFTest::test_unparseable_date", "test/test_mediafile.py::DSFTest::test_update_empty", "test/test_mediafile.py::DSFTest::test_update_full", "test/test_mediafile.py::DSFTest::test_write_counters_without_total", "test/test_mediafile.py::DSFTest::test_write_date_components", "test/test_mediafile.py::DSFTest::test_write_dates", "test/test_mediafile.py::DSFTest::test_write_empty", "test/test_mediafile.py::DSFTest::test_write_genre_list", "test/test_mediafile.py::DSFTest::test_write_genre_list_get_first", "test/test_mediafile.py::DSFTest::test_write_incomplete_date_components", "test/test_mediafile.py::DSFTest::test_write_packed", "test/test_mediafile.py::MediaFieldTest::test_fields_in_readable_fields", "test/test_mediafile.py::MediaFieldTest::test_known_fields", "test/test_mediafile.py::MediaFieldTest::test_properties_from_fields", "test/test_mediafile.py::MediaFieldTest::test_properties_from_readable_fields" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-10-20 18:53:51+00:00
mit
1,327
beeware__briefcase-1237
diff --git a/changes/1232.misc.rst b/changes/1232.misc.rst new file mode 100644 index 00000000..8d8bf701 --- /dev/null +++ b/changes/1232.misc.rst @@ -0,0 +1,1 @@ +If `git` fails to update a template in the cookiecutter cache, the `git` command and output are now captured in the Briefcase log file. diff --git a/changes/1236.bugfix.rst b/changes/1236.bugfix.rst new file mode 100644 index 00000000..0518430a --- /dev/null +++ b/changes/1236.bugfix.rst @@ -0,0 +1,1 @@ +Filenames and directories in rpm package definitions are quoted in order to include files with filenames that include whitespace. diff --git a/src/briefcase/commands/base.py b/src/briefcase/commands/base.py index cedf5412..b434bc21 100644 --- a/src/briefcase/commands/base.py +++ b/src/briefcase/commands/base.py @@ -772,10 +772,12 @@ Did you run Briefcase in a project directory that contains {filename.name!r}?""" # Attempt to update the repository remote = repo.remote(name="origin") remote.fetch() - except self.tools.git.exc.GitCommandError: + except self.tools.git.exc.GitCommandError as e: # We are offline, or otherwise unable to contact - # the origin git repo. It's OK to continue; but warn - # the user that the template may be stale. + # the origin git repo. It's OK to continue; but + # capture the error in the log and warn the user + # that the template may be stale. + self.logger.debug(str(e)) self.logger.warning( """ ************************************************************************* @@ -789,6 +791,7 @@ Did you run Briefcase in a project directory that contains {filename.name!r}?""" ************************************************************************* """ ) + try: # Check out the branch for the required version tag. head = remote.refs[branch] diff --git a/src/briefcase/platforms/linux/system.py b/src/briefcase/platforms/linux/system.py index 0867eb3d..944d6c01 100644 --- a/src/briefcase/platforms/linux/system.py +++ b/src/briefcase/platforms/linux/system.py @@ -981,9 +981,9 @@ class LinuxSystemPackageCommand(LinuxSystemMixin, PackageCommand): if filename.is_dir(): if app.app_name in path.parts: - f.write(f"%dir /{path}\n") + f.write(f'%dir "/{path}"\n') else: - f.write(f"/{path}\n") + f.write(f'"/{path}"\n') # Add the changelog content to the bottom of the spec file. f.write("\n%changelog\n")
beeware/briefcase
9a6b61cfe2eb0fff424793454d1d61350dcb84bc
diff --git a/tests/platforms/linux/system/test_package__rpm.py b/tests/platforms/linux/system/test_package__rpm.py index ad0d1f08..534b9a26 100644 --- a/tests/platforms/linux/system/test_package__rpm.py +++ b/tests/platforms/linux/system/test_package__rpm.py @@ -204,21 +204,21 @@ def test_rpm_package(package_command, first_app_rpm, tmp_path): "cp -r usr %{buildroot}/usr", "", "%files", - "/usr/bin/first-app", - "%dir /usr/lib/first-app", - "%dir /usr/lib/first-app/app", - "/usr/lib/first-app/app/support.so", - "%dir /usr/lib/first-app/app_packages", - "%dir /usr/lib/first-app/app_packages/firstlib", - "/usr/lib/first-app/app_packages/firstlib/first.so", - "/usr/lib/first-app/app_packages/firstlib/first.so.1.0", - "%dir /usr/lib/first-app/app_packages/secondlib", - "/usr/lib/first-app/app_packages/secondlib/second_a.so", - "/usr/lib/first-app/app_packages/secondlib/second_b.so", - "%dir /usr/share/doc/first-app", - "/usr/share/doc/first-app/UserManual", - "/usr/share/doc/first-app/license", - "/usr/share/man/man1/first-app.1.gz", + '"/usr/bin/first-app"', + '%dir "/usr/lib/first-app"', + '%dir "/usr/lib/first-app/app"', + '"/usr/lib/first-app/app/support.so"', + '%dir "/usr/lib/first-app/app_packages"', + '%dir "/usr/lib/first-app/app_packages/firstlib"', + '"/usr/lib/first-app/app_packages/firstlib/first.so"', + '"/usr/lib/first-app/app_packages/firstlib/first.so.1.0"', + '%dir "/usr/lib/first-app/app_packages/secondlib"', + '"/usr/lib/first-app/app_packages/secondlib/second_a.so"', + '"/usr/lib/first-app/app_packages/secondlib/second_b.so"', + '%dir "/usr/share/doc/first-app"', + '"/usr/share/doc/first-app/UserManual"', + '"/usr/share/doc/first-app/license"', + '"/usr/share/man/man1/first-app.1.gz"', "", "%changelog", "First App Changelog", @@ -352,21 +352,21 @@ def test_rpm_re_package(package_command, first_app_rpm, tmp_path): "cp -r usr %{buildroot}/usr", "", "%files", - "/usr/bin/first-app", - "%dir /usr/lib/first-app", - "%dir /usr/lib/first-app/app", - "/usr/lib/first-app/app/support.so", - "%dir /usr/lib/first-app/app_packages", - "%dir /usr/lib/first-app/app_packages/firstlib", - "/usr/lib/first-app/app_packages/firstlib/first.so", - "/usr/lib/first-app/app_packages/firstlib/first.so.1.0", - "%dir /usr/lib/first-app/app_packages/secondlib", - "/usr/lib/first-app/app_packages/secondlib/second_a.so", - "/usr/lib/first-app/app_packages/secondlib/second_b.so", - "%dir /usr/share/doc/first-app", - "/usr/share/doc/first-app/UserManual", - "/usr/share/doc/first-app/license", - "/usr/share/man/man1/first-app.1.gz", + '"/usr/bin/first-app"', + '%dir "/usr/lib/first-app"', + '%dir "/usr/lib/first-app/app"', + '"/usr/lib/first-app/app/support.so"', + '%dir "/usr/lib/first-app/app_packages"', + '%dir "/usr/lib/first-app/app_packages/firstlib"', + '"/usr/lib/first-app/app_packages/firstlib/first.so"', + '"/usr/lib/first-app/app_packages/firstlib/first.so.1.0"', + '%dir "/usr/lib/first-app/app_packages/secondlib"', + '"/usr/lib/first-app/app_packages/secondlib/second_a.so"', + '"/usr/lib/first-app/app_packages/secondlib/second_b.so"', + '%dir "/usr/share/doc/first-app"', + '"/usr/share/doc/first-app/UserManual"', + '"/usr/share/doc/first-app/license"', + '"/usr/share/man/man1/first-app.1.gz"', "", "%changelog", "First App Changelog", @@ -521,21 +521,21 @@ def test_rpm_package_extra_requirements(package_command, first_app_rpm, tmp_path "cp -r usr %{buildroot}/usr", "", "%files", - "/usr/bin/first-app", - "%dir /usr/lib/first-app", - "%dir /usr/lib/first-app/app", - 
"/usr/lib/first-app/app/support.so", - "%dir /usr/lib/first-app/app_packages", - "%dir /usr/lib/first-app/app_packages/firstlib", - "/usr/lib/first-app/app_packages/firstlib/first.so", - "/usr/lib/first-app/app_packages/firstlib/first.so.1.0", - "%dir /usr/lib/first-app/app_packages/secondlib", - "/usr/lib/first-app/app_packages/secondlib/second_a.so", - "/usr/lib/first-app/app_packages/secondlib/second_b.so", - "%dir /usr/share/doc/first-app", - "/usr/share/doc/first-app/UserManual", - "/usr/share/doc/first-app/license", - "/usr/share/man/man1/first-app.1.gz", + '"/usr/bin/first-app"', + '%dir "/usr/lib/first-app"', + '%dir "/usr/lib/first-app/app"', + '"/usr/lib/first-app/app/support.so"', + '%dir "/usr/lib/first-app/app_packages"', + '%dir "/usr/lib/first-app/app_packages/firstlib"', + '"/usr/lib/first-app/app_packages/firstlib/first.so"', + '"/usr/lib/first-app/app_packages/firstlib/first.so.1.0"', + '%dir "/usr/lib/first-app/app_packages/secondlib"', + '"/usr/lib/first-app/app_packages/secondlib/second_a.so"', + '"/usr/lib/first-app/app_packages/secondlib/second_b.so"', + '%dir "/usr/share/doc/first-app"', + '"/usr/share/doc/first-app/UserManual"', + '"/usr/share/doc/first-app/license"', + '"/usr/share/man/man1/first-app.1.gz"', "", "%changelog", "First App Changelog",
Python package generation generates an invalid rpm specfile when there are spaces in the filename ### Describe the bug The `briefcase package` command fails with messages such as: ``` error: File must begin with "/": manifest.xml error: File must begin with "/": (dev).tmpl ``` The spec file lines matching those strings contain a space character: ``` /usr/lib64/rpmtest/app_packages/setuptools/command/launcher manifest.xml /usr/lib64/rpmtest/app_packages/setuptools/script (dev).tmpl ``` ### Steps to reproduce In a Linux environment (reproduced on Almalinux 9) 1. Create a new project with `briefcase new` 2. Add `redislite` to the [tool.briefcase.app.rpmtest] requires 3. Create the Application using `briefcase create` 4. Build the project using `briefcase build` 5. Attempt to create an rpm package using `briefcase package` ### Expected behavior An rpm package gets created ### Screenshots _No response_ ### Environment - Almalinux 9.1: - Python version: 3.9.14 - Software versions: - Briefcase: 0.3.14 ### Logs ``` [briefcase.2023_04_25-20_43_53.package.log](https://github.com/beeware/briefcase/files/11326841/briefcase.2023_04_25-20_43_53.package.log) ``` ### Additional context _No response_
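A minimal sketch of the quoting approach the patch above takes in `system.py`, shown as a standalone helper (the function name `spec_files_entry` and the example path are illustrative, not Briefcase's API):

```python
from pathlib import PurePosixPath

def spec_files_entry(path: PurePosixPath, is_dir: bool, app_name: str) -> str:
    # Quote the absolute path so rpmbuild treats a filename containing
    # whitespace (e.g. "script (dev).tmpl") as a single %files entry.
    quoted = f'"/{path}"'
    if is_dir and app_name in path.parts:
        return f"%dir {quoted}"
    return quoted

# Without the quotes, rpmbuild splits this entry at the space and reports
# 'error: File must begin with "/": (dev).tmpl':
print(spec_files_entry(
    PurePosixPath("usr/lib/rpmtest/app_packages/setuptools/script (dev).tmpl"),
    is_dir=False,
    app_name="rpmtest",
))
```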
0.0
9a6b61cfe2eb0fff424793454d1d61350dcb84bc
[ "tests/platforms/linux/system/test_package__rpm.py::test_rpm_package", "tests/platforms/linux/system/test_package__rpm.py::test_rpm_re_package", "tests/platforms/linux/system/test_package__rpm.py::test_rpm_package_extra_requirements" ]
[ "tests/platforms/linux/system/test_package__rpm.py::test_verify_no_docker", "tests/platforms/linux/system/test_package__rpm.py::test_verify_rpmbuild_missing", "tests/platforms/linux/system/test_package__rpm.py::test_verify_docker", "tests/platforms/linux/system/test_package__rpm.py::test_rpm_package_no_long_description", "tests/platforms/linux/system/test_package__rpm.py::test_rpm_package_failure", "tests/platforms/linux/system/test_package__rpm.py::test_no_changelog" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-04-25 21:07:07+00:00
bsd-3-clause
1,328
beeware__briefcase-1348
diff --git a/changes/1347.bugfix.rst b/changes/1347.bugfix.rst new file mode 100644 index 00000000..46493e9f --- /dev/null +++ b/changes/1347.bugfix.rst @@ -0,0 +1,1 @@ +An error no longer occurs when creating a new Briefcase project while ``cookiecutter>=2.2.0`` is installed. diff --git a/src/briefcase/commands/new.py b/src/briefcase/commands/new.py index c0993059..aa24ce22 100644 --- a/src/briefcase/commands/new.py +++ b/src/briefcase/commands/new.py @@ -468,8 +468,8 @@ What GUI toolkit do you want to use for this project?""", # include the version of Briefcase as well as the source of the template. context.update( { - "template": template, - "branch": branch, + "template_source": template, + "template_branch": branch, "briefcase_version": briefcase.__version__, } )
beeware/briefcase
73148bfa6012356ffa23f6a0c0e719e8113e795f
diff --git a/tests/commands/new/test_new_app.py b/tests/commands/new/test_new_app.py index eb5e8faf..5d8b3803 100644 --- a/tests/commands/new/test_new_app.py +++ b/tests/commands/new/test_new_app.py @@ -76,8 +76,8 @@ def test_new_app( # The expected app context # should now also contain the # default template and branch - "template": "https://github.com/beeware/briefcase-template", - "branch": expected_branch, + "template_source": "https://github.com/beeware/briefcase-template", + "template_branch": expected_branch, "briefcase_version": briefcase_version, }, ) @@ -129,8 +129,8 @@ def test_new_app_missing_template(monkeypatch, new_command, tmp_path): # The expected app context # should now also contain the # default template and branch - "template": "https://github.com/beeware/briefcase-template", - "branch": "v37.42.7", + "template_source": "https://github.com/beeware/briefcase-template", + "template_branch": "v37.42.7", "briefcase_version": "37.42.7", }, ) @@ -196,8 +196,8 @@ def test_new_app_dev(monkeypatch, new_command, tmp_path, briefcase_version): # The expected app context # should now also contain the # default template and branch - "template": "https://github.com/beeware/briefcase-template", - "branch": "v37.42.7", + "template_source": "https://github.com/beeware/briefcase-template", + "template_branch": "v37.42.7", "briefcase_version": briefcase_version, }, ), @@ -213,8 +213,8 @@ def test_new_app_dev(monkeypatch, new_command, tmp_path, briefcase_version): # The expected app context # should now also contain the # default template and branch - "template": "https://github.com/beeware/briefcase-template", - "branch": "v37.42.7", + "template_source": "https://github.com/beeware/briefcase-template", + "template_branch": "v37.42.7", "briefcase_version": briefcase_version, }, ), @@ -261,8 +261,8 @@ def test_new_app_with_template(monkeypatch, new_command, tmp_path): # The expected app context # should now also contain the # template and branch - "template": "https://example.com/other.git", - "branch": "v37.42.7", + "template_source": "https://example.com/other.git", + "template_branch": "v37.42.7", "briefcase_version": "37.42.7", }, ) @@ -313,8 +313,8 @@ def test_new_app_with_invalid_template(monkeypatch, new_command, tmp_path): # The expected app context # should now also contain the # template and branch - "template": "https://example.com/other.git", - "branch": "v37.42.7", + "template_source": "https://example.com/other.git", + "template_branch": "v37.42.7", "briefcase_version": "37.42.7", }, ) @@ -368,8 +368,8 @@ def test_new_app_with_invalid_template_branch(monkeypatch, new_command, tmp_path # The expected app context # should now also contain the # template and branch - "template": "https://example.com/other.git", - "branch": "v37.42.7", + "template_source": "https://example.com/other.git", + "template_branch": "v37.42.7", "briefcase_version": "37.42.7", }, ) @@ -414,8 +414,8 @@ def test_new_app_with_branch(monkeypatch, new_command, tmp_path): # The expected app context # should now also contain the # template and branch - "template": "https://github.com/beeware/briefcase-template", - "branch": "experimental", + "template_source": "https://github.com/beeware/briefcase-template", + "template_branch": "experimental", "briefcase_version": "37.42.7", }, )
Using `template` as a key in the cookiecutter `context` has special meaning in versions >=2.2.0 ### Impact The `briefcase new` command is unusable with `cookiecutter>=2.2.0`. ### Describe the bug The release of cookiecutter 2.2.0 (and 2.2.1) introduces special meaning for `template` in a cookiecutter `context`. This was to add support for nested templates in cookiecutter/cookiecutter#1770. Briefcase is currently [passing](https://github.com/beeware/briefcase/blob/73148bfa6012356ffa23f6a0c0e719e8113e795f/src/briefcase/commands/new.py#L471) a URL or filepath here from `briefcase new` so the source of the template is included in `pyproject.toml` of the rolled out project. Example trace from running `briefcase new`: ``` Traceback (most recent call last): File "/home/russell/github/beeware/briefcase/venv-3.10-briefcase/bin/briefcase", line 8, in <module> sys.exit(main()) File "/home/russell/github/beeware/briefcase/src/briefcase/__main__.py", line 25, in main command(**options) File "/home/russell/github/beeware/briefcase/src/briefcase/commands/new.py", line 531, in __call__ return self.new_app( File "/home/russell/github/beeware/briefcase/src/briefcase/commands/new.py", line 497, in new_app self.generate_template( File "/home/russell/github/beeware/briefcase/src/briefcase/commands/base.py", line 891, in generate_template self.tools.cookiecutter( File "/home/russell/github/beeware/briefcase/venv-3.10-briefcase/lib/python3.10/site-packages/cookiecutter/main.py", line 116, in cookiecutter ).group(1) AttributeError: 'NoneType' object has no attribute 'group' ``` ### Steps to reproduce 1. Install latest cookiecutter: `python -m pip install -U cookiecutter==2.2.0` 2. Run `briefcase new --no-input` ### Expected behavior The template is rolled out properly without an error. ### Screenshots _No response_ ### Environment - Operating System: pop os 22.04 - Python version: 3.10.12 - Software versions: - Briefcase: `0.3.14` and `0.3.15.dev385+g5ed30106.d20230706` ### Logs 0.3.14 [briefcase.2023_07_06-13_37_09.new.log](https://github.com/beeware/briefcase/files/11970740/briefcase.2023_07_06-13_37_09.new.log) 0.3.15.dev385+g5ed30106.d20230706 [briefcase.2023_07_06-13_39_17.new.log](https://github.com/beeware/briefcase/files/11970755/briefcase.2023_07_06-13_39_17.new.log) ### Additional context It isn't at least immediately clear to me if cookiecutter considered existing uses of `template` in a `context`; however, I am also not particularly sure if Briefcase's use of cookiecutter is especially different from intended use cases. Nonetheless, it doesn't look like Briefcase's use of `template` in `context` should be accommodated in cookiecutter....and instead, we should just change `template` to `template_source` or something. This also means any new installation of Briefcase will encounter this exception. A new release is probably necessary to mitigate this issue.
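A sketch of the fix, mirroring the patch above; the free function and the explicit `version` parameter are illustrative stand-ins for the code in `new.py`:

```python
def extend_context(context: dict, template: str, branch: str, version: str) -> dict:
    # cookiecutter >= 2.2.0 reserves the top-level "template" key for its
    # nested-template feature, so the template's origin is recorded under
    # non-colliding names instead.
    context.update(
        {
            "template_source": template,
            "template_branch": branch,
            "briefcase_version": version,
        }
    )
    return context

context = extend_context(
    {"formal_name": "Hello World"},
    template="https://github.com/beeware/briefcase-template",
    branch="v0.3.15",
    version="0.3.15",
)
```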
0.0
73148bfa6012356ffa23f6a0c0e719e8113e795f
[ "tests/commands/new/test_new_app.py::test_new_app[37.42.1-v37.42.1]", "tests/commands/new/test_new_app.py::test_new_app[37.42.2.dev0+gad61a29.d20220919-v37.42.2]", "tests/commands/new/test_new_app.py::test_new_app[37.42.3.dev73+gad61a29.d20220919-v37.42.3]", "tests/commands/new/test_new_app.py::test_new_app[37.42.4a1-v37.42.4]", "tests/commands/new/test_new_app.py::test_new_app[37.42.5b2-v37.42.5]", "tests/commands/new/test_new_app.py::test_new_app[37.42.6rc3-v37.42.6]", "tests/commands/new/test_new_app.py::test_new_app[37.42.7.post1-v37.42.7]", "tests/commands/new/test_new_app.py::test_new_app_missing_template", "tests/commands/new/test_new_app.py::test_new_app_dev[37.42.7.dev0+gad61a29.d20220919]", "tests/commands/new/test_new_app.py::test_new_app_dev[37.42.7.dev73+gad61a29.d20220919]", "tests/commands/new/test_new_app.py::test_new_app_with_template", "tests/commands/new/test_new_app.py::test_new_app_with_invalid_template", "tests/commands/new/test_new_app.py::test_new_app_with_invalid_template_branch", "tests/commands/new/test_new_app.py::test_new_app_with_branch" ]
[ "tests/commands/new/test_new_app.py::test_abort_if_directory_exists" ]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
2023-07-07 15:17:37+00:00
bsd-3-clause
1,329
beeware__briefcase-1373
diff --git a/changes/1361.bugfix.rst b/changes/1361.bugfix.rst new file mode 100644 index 00000000..168ebb78 --- /dev/null +++ b/changes/1361.bugfix.rst @@ -0,0 +1,1 @@ +AppImage builds now use ``LINUXDEPLOY_OUTPUT_VERSION`` rather than ``VERSION`` to inject the version number. diff --git a/src/briefcase/platforms/linux/appimage.py b/src/briefcase/platforms/linux/appimage.py index f9916a5f..5af145b2 100644 --- a/src/briefcase/platforms/linux/appimage.py +++ b/src/briefcase/platforms/linux/appimage.py @@ -258,7 +258,7 @@ class LinuxAppImageBuildCommand(LinuxAppImageMixin, BuildCommand): try: # For some reason, the version has to be passed in as an # environment variable, *not* in the configuration. - env["VERSION"] = app.version + env["LINUXDEPLOY_OUTPUT_VERSION"] = app.version # The internals of the binary aren't inherently visible, so # there's no need to package copyright files. These files # appear to be missing by default in the OS dev packages anyway,
beeware/briefcase
405c8b6f884c3e47f0107e5de8b41d2440bb8456
diff --git a/tests/platforms/linux/appimage/test_build.py b/tests/platforms/linux/appimage/test_build.py index c2d78db5..617cf9f0 100644 --- a/tests/platforms/linux/appimage/test_build.py +++ b/tests/platforms/linux/appimage/test_build.py @@ -186,7 +186,7 @@ def test_build_appimage(build_command, first_app, tmp_path, sub_stream_kw): ], env={ "PATH": "/usr/local/bin:/usr/bin:/path/to/somewhere", - "VERSION": "0.0.1", + "LINUXDEPLOY_OUTPUT_VERSION": "0.0.1", "DISABLE_COPYRIGHT_FILES_DEPLOYMENT": "1", "APPIMAGE_EXTRACT_AND_RUN": "1", "ARCH": "wonky", @@ -276,7 +276,7 @@ def test_build_appimage_with_plugin(build_command, first_app, tmp_path, sub_stre env={ "PATH": f"{gtk_plugin_path.parent}:{app_dir.parent}:/usr/local/bin:/usr/bin:/path/to/somewhere", "DEPLOY_GTK_VERSION": "3", - "VERSION": "0.0.1", + "LINUXDEPLOY_OUTPUT_VERSION": "0.0.1", "DISABLE_COPYRIGHT_FILES_DEPLOYMENT": "1", "APPIMAGE_EXTRACT_AND_RUN": "1", "ARCH": "wonky", @@ -353,7 +353,7 @@ def test_build_failure(build_command, first_app, tmp_path, sub_stream_kw): ], env={ "PATH": "/usr/local/bin:/usr/bin:/path/to/somewhere", - "VERSION": "0.0.1", + "LINUXDEPLOY_OUTPUT_VERSION": "0.0.1", "DISABLE_COPYRIGHT_FILES_DEPLOYMENT": "1", "APPIMAGE_EXTRACT_AND_RUN": "1", "ARCH": "wonky", @@ -416,7 +416,7 @@ def test_build_appimage_in_docker(build_command, first_app, tmp_path, sub_stream "--volume", f"{build_command.data_path}:/home/brutus/.cache/briefcase:z", "--env", - "VERSION=0.0.1", + "LINUXDEPLOY_OUTPUT_VERSION=0.0.1", "--env", "DISABLE_COPYRIGHT_FILES_DEPLOYMENT=1", "--env", @@ -540,7 +540,7 @@ def test_build_appimage_with_plugins_in_docker( ":/app:/docker/bin:/docker/sbin" ), "--env", - "VERSION=0.0.1", + "LINUXDEPLOY_OUTPUT_VERSION=0.0.1", "--env", "DISABLE_COPYRIGHT_FILES_DEPLOYMENT=1", "--env", @@ -671,7 +671,7 @@ def test_build_appimage_with_support_package_update( ], env={ "PATH": "/usr/local/bin:/usr/bin:/path/to/somewhere", - "VERSION": "0.0.1", + "LINUXDEPLOY_OUTPUT_VERSION": "0.0.1", "DISABLE_COPYRIGHT_FILES_DEPLOYMENT": "1", "APPIMAGE_EXTRACT_AND_RUN": "1", "ARCH": "wonky",
LinuxDeploy replaced `VERSION` Environment Variable with `LINUXDEPLOY_OUTPUT_VERSION` ### Describe the bug LinuxDeploy has [replaced](https://github.com/linuxdeploy/linuxdeploy-plugin-appimage/commit/9aafc9c98f7267b2a5d714ccaee79d52decaf4db) `VERSION` with `LINUXDEPLOY_OUTPUT_VERSION`; I believe this is because one of the AppImage tools uses `VERSION` for something else. For the time being, `VERSION` will be used as a fallback with the warning below: ``` [appimage/stderr] Warning: please use $LINUXDEPLOY_OUTPUT_VERSION instead of $VERSION ``` ### Steps to reproduce 1. Run `briefcase upgrade linuxdeploy` 2. See output from `briefcase build linux appimage` ### Expected behavior The new setting is used. Setting both avoids the warning. ### Screenshots _No response_ ### Environment - Operating System: pop os 22.04 - Python version: 3.11.4 - Software versions: - Briefcase: `0.3.16.dev11+gede56e67` ### Logs _No response_ ### Additional context _No response_
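A sketch of one defensive variant: the patch above simply switches to the new name, while the issue notes that setting both variables also silences the warning on older linuxdeploy releases. The helper name is illustrative:

```python
import os

def linuxdeploy_env(version: str) -> dict:
    env = os.environ.copy()
    # Read by linuxdeploy-plugin-appimage after the rename:
    env["LINUXDEPLOY_OUTPUT_VERSION"] = version
    # Legacy name, still honoured (with a deprecation warning) by older builds:
    env["VERSION"] = version
    return env

env = linuxdeploy_env("0.0.1")
```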
0.0
405c8b6f884c3e47f0107e5de8b41d2440bb8456
[ "tests/platforms/linux/appimage/test_build.py::test_build_appimage", "tests/platforms/linux/appimage/test_build.py::test_build_appimage_with_plugin", "tests/platforms/linux/appimage/test_build.py::test_build_failure", "tests/platforms/linux/appimage/test_build.py::test_build_appimage_in_docker", "tests/platforms/linux/appimage/test_build.py::test_build_appimage_with_plugins_in_docker", "tests/platforms/linux/appimage/test_build.py::test_build_appimage_with_support_package_update" ]
[ "tests/platforms/linux/appimage/test_build.py::test_verify_tools_wrong_platform", "tests/platforms/linux/appimage/test_build.py::test_verify_tools_download_failure" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2023-07-22 15:23:04+00:00
bsd-3-clause
1,330
beeware__briefcase-1378
diff --git a/changes/1157.bugfix.rst b/changes/1157.bugfix.rst new file mode 100644 index 00000000..cb9c13db --- /dev/null +++ b/changes/1157.bugfix.rst @@ -0,0 +1,1 @@ +Briefcase will detect if you attempt to launch an Android app on a device whose OS doesn't meet minimum version requirements. diff --git a/src/briefcase/integrations/android_sdk.py b/src/briefcase/integrations/android_sdk.py index 59bf8e77..2147ab07 100644 --- a/src/briefcase/integrations/android_sdk.py +++ b/src/briefcase/integrations/android_sdk.py @@ -706,7 +706,6 @@ connection. output = self.tools.subprocess.check_output( [os.fsdecode(self.emulator_path), "-list-avds"] ).strip() - # AVD names are returned one per line. if len(output) == 0: return [] @@ -720,7 +719,6 @@ connection. output = self.tools.subprocess.check_output( [os.fsdecode(self.adb_path), "devices", "-l"] ).strip() - # Process the output of `adb devices -l`. # The first line is header information. # Each subsequent line is a single device descriptor. @@ -1326,7 +1324,7 @@ class ADB: # checking that they are valid, then parsing output to notice errors. # This keeps performance good in the success case. try: - return self.tools.subprocess.check_output( + output = self.tools.subprocess.check_output( [ os.fsdecode(self.tools.android_sdk.adb_path), "-s", @@ -1338,6 +1336,14 @@ class ADB: ], quiet=quiet, ) + # adb returns status code 0 in the case of failure. The only tangible evidence + # of failure is the message "Failure [INSTALL_FAILED_OLDER_SDK]" in the + # console output; so if that message exists in the output, raise an exception. + if "Failure [INSTALL_FAILED_OLDER_SDK]" in output: + raise BriefcaseCommandError( + "Your device doesn't meet the minimum SDK requirements of this app." + ) + return output except subprocess.CalledProcessError as e: if any(DEVICE_NOT_FOUND.match(line) for line in e.output.split("\n")): raise InvalidDeviceError("device id", self.device) from e
beeware/briefcase
9c0c752a970eaae437df39091d20f96074c728c0
diff --git a/tests/integrations/android_sdk/ADB/test_run.py b/tests/integrations/android_sdk/ADB/test_run.py index 0a828ebf..a6b114d5 100644 --- a/tests/integrations/android_sdk/ADB/test_run.py +++ b/tests/integrations/android_sdk/ADB/test_run.py @@ -5,7 +5,7 @@ from pathlib import Path import pytest -from briefcase.exceptions import InvalidDeviceError +from briefcase.exceptions import BriefcaseCommandError, InvalidDeviceError def test_simple_command(mock_tools, adb, tmp_path): @@ -99,3 +99,20 @@ def test_error_handling(mock_tools, adb, name, exception, tmp_path): ], quiet=False, ) + + +def test_older_sdk_error(mock_tools, adb): + """Failure [INSTALL_FAILED_OLDER_SDK] needs to be caught manually.""" + mock_tools.subprocess.check_output.return_value = "\n".join( + [ + "Performing Push Install", + "C:/.../app-debug.apk: 1 file pushed, 0 skipped. 5.5 MB/s (33125287 bytes in 5.768s)", + " pkg: /data/local/tmp/app-debug.apk", + "Failure [INSTALL_FAILED_OLDER_SDK]", + ] + ) + with pytest.raises( + BriefcaseCommandError, + match=r"Your device doesn't meet the minimum SDK requirements of this app", + ): + adb.run("example", "command")
Briefcase Does Not Detect Failure to Install an APK on an Android version which is too old ### Describe the bug During `briefcase run android`, the attempt to install the APK on the device can fail if the API level of the APK is beyond what the device can support. Briefcase does not detect this error and eventually errors out when attempting to run the app. ``` >>> Running Command: subprocess.py:665 >>> 'C:\Users\domin\AppData\Local\BeeWare\briefcase\Cache\tools\android_sdk\platform-tools\adb.exe' -s PLI5T31F7E install -r subprocess.py:666 'C:\Users\domin\beeware-tutorial\smartmirrortab\android\gradle\SmartMirrorTab\app\build\outputs\apk\debug\app-debug.apk' >>> Working Directory: subprocess.py:673 >>> C:\Users\domin\beeware-tutorial\smartmirrortab subprocess.py:674 [17:31:11] >>> Command Output: subprocess.py:690 >>> Performing Push Install subprocess.py:692 >>> C:\Users\domin\beeware-tutorial\smartmirrortab\android\gradle\SmartMirrorTab\app\build\outputs\apk\debug\app-debug.apk: 1 file pushed, 0 subprocess.py:692 skipped. 5.5 MB/s (33125287 bytes in 5.768s) >>> pkg: /data/local/tmp/app-debug.apk subprocess.py:692 >>> subprocess.py:692 >>> Failure [INSTALL_FAILED_OLDER_SDK] subprocess.py:692 >>> subprocess.py:692 >>> Return code: 0 subprocess.py:701 Installing new app version... done gradle.py:336 subprocess.py:664 >>> Running Command: subprocess.py:665 >>> 'C:\Users\domin\AppData\Local\BeeWare\briefcase\Cache\tools\android_sdk\platform-tools\adb.exe' -s PLI5T31F7E shell am start subprocess.py:666 com.example.smartmirrortab/org.beeware.android.MainActivity -a android.intent.action.MAIN -c android.intent.category.LAUNCHER >>> Working Directory: subprocess.py:673 >>> C:\Users\domin\beeware-tutorial\smartmirrortab subprocess.py:674 >>> Command Output: subprocess.py:690 >>> Starting: Intent { act=android.intent.action.MAIN cat=[android.intent.category.LAUNCHER] subprocess.py:692 cmp=com.example.smartmirrortab/org.beeware.android.MainActivity } >>> subprocess.py:692 >>> Error type 3 subprocess.py:692 >>> subprocess.py:692 >>> Error: Activity class {com.example.smartmirrortab/org.beeware.android.MainActivity} does not exist. subprocess.py:692 >>> subprocess.py:692 >>> Return code: 0 subprocess.py:701 [17:31:12] Launching app... gradle.py:355 __main__.py:30 Activity class not found while starting app. __main__.py:31 __main__.py:31 `adb` output: __main__.py:31 __main__.py:31 Starting: Intent { act=android.intent.action.MAIN cat=[android.intent.category.LAUNCHER] __main__.py:31 cmp=com.example.smartmirrortab/org.beeware.android.MainActivity } __main__.py:31 Error type 3 __main__.py:31 __main__.py:31 Error: Activity class {com.example.smartmirrortab/org.beeware.android.MainActivity} does not exist. __main__.py:31 ``` ### Steps to reproduce Attempt `briefcase run android` with a device running Android 4. ### Expected behavior Briefcase should detect the failure to install the APK and inform the user as such. ### Screenshots _No response_ ### Environment - Operating System: Windows 10 - Python version: Python 3.8 - Software versions: - Briefcase: 0.3.12 ### Logs [briefcase.2023_03_30-17_31_12.run.log](https://github.com/beeware/briefcase/files/11113658/briefcase.2023_03_30-17_31_12.run.log) ### Additional context Reported in [discord](https://discord.com/channels/836455665257021440/836455665257021443/1091023037513605261).
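A standalone sketch of the detection the patch above adds: since `adb install` can exit 0 even when the install fails, the console output has to be scanned for the failure marker. The function name and exception type here are placeholders:

```python
import subprocess

def adb_install(adb_path: str, device: str, apk_path: str) -> str:
    output = subprocess.check_output(
        [adb_path, "-s", device, "install", "-r", apk_path],
        text=True,
    )
    # adb prints this failure while still exiting 0, so check_output
    # alone won't raise; inspect the output explicitly.
    if "Failure [INSTALL_FAILED_OLDER_SDK]" in output:
        raise RuntimeError(
            "The device doesn't meet the minimum SDK requirements of this app."
        )
    return output
```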
0.0
9c0c752a970eaae437df39091d20f96074c728c0
[ "tests/integrations/android_sdk/ADB/test_run.py::test_older_sdk_error" ]
[ "tests/integrations/android_sdk/ADB/test_run.py::test_simple_command", "tests/integrations/android_sdk/ADB/test_run.py::test_quiet_command", "tests/integrations/android_sdk/ADB/test_run.py::test_error_handling[device-not-found-InvalidDeviceError]", "tests/integrations/android_sdk/ADB/test_run.py::test_error_handling[arbitrary-adb-error-unknown-command-CalledProcessError]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2023-07-23 13:25:03+00:00
bsd-3-clause
1,331
beeware__briefcase-1514
diff --git a/changes/1513.bugfix.rst b/changes/1513.bugfix.rst new file mode 100644 index 00000000..ecd8498e --- /dev/null +++ b/changes/1513.bugfix.rst @@ -0,0 +1,1 @@ +``flatpak-builder`` 1.3+ can now be correctly identified. diff --git a/src/briefcase/integrations/flatpak.py b/src/briefcase/integrations/flatpak.py index f1c70640..c7ba438e 100644 --- a/src/briefcase/integrations/flatpak.py +++ b/src/briefcase/integrations/flatpak.py @@ -85,7 +85,10 @@ You must install both flatpak and flatpak-builder. ["flatpak-builder", "--version"] ).strip("\n") - parts = output.split(" ") + # flatpak-builder 1.3 changed the output of --version + # from "flatpak-builder 1.2.X" to "flatpak-builder-1.3.X". + # Converge on the new-style format. + parts = output.replace(" ", "-").rsplit("-", 1) try: if parts[0] == "flatpak-builder": version = parts[1].split(".") diff --git a/src/briefcase/platforms/linux/system.py b/src/briefcase/platforms/linux/system.py index b431379c..12792e86 100644 --- a/src/briefcase/platforms/linux/system.py +++ b/src/briefcase/platforms/linux/system.py @@ -927,7 +927,7 @@ class LinuxSystemPackageCommand(LinuxSystemMixin, PackageCommand): f"Package: {app.app_name}", f"Version: {app.version}", f"Architecture: {self.deb_abi(app)}", - f"Maintainer: {app.author } <{app.author_email}>", + f"Maintainer: {app.author} <{app.author_email}>", f"Homepage: {app.url}", f"Description: {app.description}", f" {debian_multiline_description(app.long_description)}",
beeware/briefcase
a03d53aa2f799268be6f9fbda4c25457b1343e91
diff --git a/tests/integrations/flatpak/test_Flatpak__verify.py b/tests/integrations/flatpak/test_Flatpak__verify.py index 4f60bea7..888dfffd 100644 --- a/tests/integrations/flatpak/test_Flatpak__verify.py +++ b/tests/integrations/flatpak/test_Flatpak__verify.py @@ -157,11 +157,20 @@ def test_flatpak_builder_old(mock_tools): ) -def test_installed(mock_tools): [email protected]( + "flatpak_builder_version", + [ + # Ubuntu 22.04; flatpak-builder < 1.3 + "flatpak-builder 1.2.2", + # Fedora 38; flatpak-builder >= 1.3 + "flatpak-builder-1.3.3", + ], +) +def test_installed(mock_tools, flatpak_builder_version, capsys): """If flatpak is installed, it can be verified.""" mock_tools.subprocess.check_output.side_effect = [ "Flatpak 1.12.7", - "flatpak-builder 1.2.2", + flatpak_builder_version, ] Flatpak.verify(mock_tools) @@ -174,6 +183,14 @@ def test_installed(mock_tools): any_order=False, ) + # We shouldn't get any warnings about unknown versions. + output = capsys.readouterr() + assert ( + "** WARNING: Unable to determine the version of flatpak-builder" + not in output.out + ) + assert output.err == "" + @pytest.mark.parametrize( "flatpak_version",
Recent Flatpak builder versions cannot be interpreted ### Describe the bug Fedora 38: ``` [root@9560ae46967b helloworld]# flatpak-builder --version flatpak-builder-1.3.3 ``` Ubuntu 22.04: ``` ❯ flatpak-builder --version flatpak-builder 1.2.2 ``` Error message: ``` ************************************************************************* ** WARNING: Unable to determine the version of flatpak-builder ** ************************************************************************* Briefcase will proceed, assuming everything is OK. If you experience problems, this is almost certainly the cause of those problems. Please report this as a bug at: https://github.com/beeware/briefcase/issues/new In your report, please including the output from running: flatpak-builder --version from the command prompt. ************************************************************************* ``` ### Steps to reproduce Run `briefcase build linux flatpak` on Fedora 38 ### Expected behavior Recent Flatpak builder versions are accepted. ### Screenshots _No response_ ### Environment - Operating System: fedora 38 - Python version: 3.10 - Software versions: - Briefcase: 0.3.16 ### Logs _No response_ ### Additional context _No response_
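A sketch of the normalization trick the patch above uses: rewriting spaces to dashes first lets a single `rsplit` parse both known output formats. The function name and tuple return are illustrative:

```python
def flatpak_builder_version(output: str):
    # Ubuntu 22.04 prints "flatpak-builder 1.2.2"; Fedora 38 prints
    # "flatpak-builder-1.3.3". Converge on the dashed form, then split
    # the version number off the end.
    parts = output.strip().replace(" ", "-").rsplit("-", 1)
    if parts[0] == "flatpak-builder":
        major, minor, *_ = parts[1].split(".")
        return int(major), int(minor)
    return None

assert flatpak_builder_version("flatpak-builder 1.2.2") == (1, 2)
assert flatpak_builder_version("flatpak-builder-1.3.3") == (1, 3)
```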
0.0
a03d53aa2f799268be6f9fbda4c25457b1343e91
[ "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed[flatpak-builder-1.3.3]" ]
[ "tests/integrations/flatpak/test_Flatpak__verify.py::test_short_circuit", "tests/integrations/flatpak/test_Flatpak__verify.py::test_unsupported_os[Darwin]", "tests/integrations/flatpak/test_Flatpak__verify.py::test_unsupported_os[Windows]", "tests/integrations/flatpak/test_Flatpak__verify.py::test_unsupported_os[wonky]", "tests/integrations/flatpak/test_Flatpak__verify.py::test_flatpak_not_installed", "tests/integrations/flatpak/test_Flatpak__verify.py::test_flatpak_error", "tests/integrations/flatpak/test_Flatpak__verify.py::test_flatpak_old", "tests/integrations/flatpak/test_Flatpak__verify.py::test_flatpak_builder_not_installed", "tests/integrations/flatpak/test_Flatpak__verify.py::test_flatpak_builder_error", "tests/integrations/flatpak/test_Flatpak__verify.py::test_flatpak_builder_old", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed[flatpak-builder", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed_unknown_flatpak_version[not-flatpak", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed_unknown_flatpak_version[Flatpak]", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed_unknown_flatpak_version[Flatpak", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed_unknown_builder_version[not-flatpak-builder", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed_unknown_builder_version[flatpak-builder]", "tests/integrations/flatpak/test_Flatpak__verify.py::test_installed_unknown_builder_version[flatpak-builder" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-10-27 04:47:06+00:00
bsd-3-clause
1,332
beeware__briefcase-1709
diff --git a/changes/1674.feature.rst b/changes/1674.feature.rst new file mode 100644 index 00000000..e03ab6cb --- /dev/null +++ b/changes/1674.feature.rst @@ -0,0 +1,1 @@ +The contents of ``pyproject.toml`` is now included in the log file. diff --git a/changes/1708.feature.rst b/changes/1708.feature.rst new file mode 100644 index 00000000..f77632c6 --- /dev/null +++ b/changes/1708.feature.rst @@ -0,0 +1,1 @@ +When deep debug is activated via ``-vv``, ``pip`` now installs requirements for the app with verbose logging. diff --git a/src/briefcase/commands/create.py b/src/briefcase/commands/create.py index 81e98697..da05e986 100644 --- a/src/briefcase/commands/create.py +++ b/src/briefcase/commands/create.py @@ -524,6 +524,7 @@ class CreateCommand(BaseCommand): "--no-user", f"--target={app_packages_path}", ] + + (["-vv"] if self.logger.is_deep_debug else []) + self._extra_pip_args(app) + pip_args, check=True, diff --git a/src/briefcase/commands/dev.py b/src/briefcase/commands/dev.py index f4d78b13..f37e36ec 100644 --- a/src/briefcase/commands/dev.py +++ b/src/briefcase/commands/dev.py @@ -99,6 +99,7 @@ class DevCommand(RunAppMixin, BaseCommand): "install", "--upgrade", ] + + (["-vv"] if self.logger.is_deep_debug else []) + requires, check=True, encoding="UTF-8", diff --git a/src/briefcase/console.py b/src/briefcase/console.py index c05c3a0c..b2c48905 100644 --- a/src/briefcase/console.py +++ b/src/briefcase/console.py @@ -398,9 +398,9 @@ class Log: self.warning(f"Log saved to {log_filepath}") self.print.to_console() - def _build_log(self, command): + def _build_log(self, command) -> str: """Accumulate all information to include in the log file.""" - # add the exception stacktrace to end of log if one was captured + # Add the exception stacktraces to end of log if any were captured if self.stacktraces: # using print.log.print() instead of print.to_log() to avoid # timestamp and code location inclusion for the stacktrace box. @@ -415,6 +415,7 @@ class Log: new_line_start=True, ) + # Retrieve additional logging added by the Command if self.log_file_extras: with command.input.wait_bar( "Collecting extra information for log...", @@ -428,21 +429,30 @@ class Log: except Exception: self.error(traceback.format_exc()) - # build log header and export buffered log from Rich - uname = platform.uname() + # Capture env vars removing any potentially sensitive information sanitized_env_vars = "\n".join( f"\t{env_var}={value if not SENSITIVE_SETTING_RE.search(env_var) else '********************'}" for env_var, value in sorted(command.tools.os.environ.items()) ) + + # Capture pyproject.toml if one exists in the current directory + try: + with open(Path.cwd() / "pyproject.toml", encoding="utf-8") as f: + pyproject_toml = f.read().strip() + except OSError as e: + pyproject_toml = str(e) + + # Build log with buffered log from Rich + uname = platform.uname() return ( f"Date/Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S %Z')}\n" f"Command line: {' '.join(sys.argv)}\n" - f"\n" + "\n" f"OS Release: {uname.system} {uname.release}\n" f"OS Version: {uname.version}\n" f"Architecture: {uname.machine}\n" f"Platform: {platform.platform(aliased=True)}\n" - f"\n" + "\n" f"Python exe: {sys.executable}\n" # replace line breaks with spaces (use chr(10) since '\n' isn't allowed in f-strings...) 
f"Python version: {sys.version.replace(chr(10), ' ')}\n" @@ -452,15 +462,18 @@ class Log: f"Virtual env: {hasattr(sys, 'real_prefix') or sys.base_prefix != sys.prefix}\n" # for conda, prefix and base_prefix are likely the same but contain a conda-meta dir. f"Conda env: {(Path(sys.prefix) / 'conda-meta').exists()}\n" - f"\n" + "\n" f"Briefcase: {__version__}\n" f"Target platform: {command.platform}\n" f"Target format: {command.output_format}\n" - f"\n" - f"Environment Variables:\n" + "\n" + "Environment Variables:\n" f"{sanitized_env_vars}\n" - f"\n" - f"Briefcase Log:\n" + "\n" + "pyproject.toml:\n" + f"{pyproject_toml}\n" + "\n" + "Briefcase Log:\n" f"{self.print.export_log()}" ) diff --git a/src/briefcase/platforms/web/static.py b/src/briefcase/platforms/web/static.py index bd10c383..a80aa39f 100644 --- a/src/briefcase/platforms/web/static.py +++ b/src/briefcase/platforms/web/static.py @@ -169,7 +169,8 @@ class StaticWebBuildCommand(StaticWebMixin, BuildCommand): self.wheel_path(app), "-r", self.bundle_path(app) / "requirements.txt", - ], + ] + + (["-vv"] if self.logger.is_deep_debug else []), check=True, encoding="UTF-8", )
beeware/briefcase
84f76008a68edb0c6366d3e79919ba7163b44f1b
diff --git a/tests/commands/create/test_install_app_requirements.py b/tests/commands/create/test_install_app_requirements.py index 2a357c32..59dda9dc 100644 --- a/tests/commands/create/test_install_app_requirements.py +++ b/tests/commands/create/test_install_app_requirements.py @@ -7,6 +7,7 @@ import pytest import tomli_w from briefcase.commands.create import _is_local_requirement +from briefcase.console import LogLevel from briefcase.exceptions import BriefcaseCommandError, RequirementsInstallError from briefcase.integrations.subprocess import Subprocess @@ -285,13 +286,17 @@ def test_app_packages_offline( assert myapp.test_requires is None [email protected]("logging_level", [LogLevel.INFO, LogLevel.DEEP_DEBUG]) def test_app_packages_install_requirements( create_command, myapp, app_packages_path, app_packages_path_index, + logging_level, ): """Requirements can be installed.""" + # Configure logging level + create_command.logger.verbosity = logging_level # Set up the app requirements myapp.requires = ["first", "second", "third"] @@ -319,10 +324,9 @@ def test_app_packages_install_requirements( "--upgrade", "--no-user", f"--target={app_packages_path}", - "first", - "second", - "third", - ], + ] + + (["-vv"] if logging_level == LogLevel.DEEP_DEBUG else []) + + ["first", "second", "third"], check=True, encoding="UTF-8", ) diff --git a/tests/commands/dev/test_install_dev_requirements.py b/tests/commands/dev/test_install_dev_requirements.py index b8a02705..a813a4cd 100644 --- a/tests/commands/dev/test_install_dev_requirements.py +++ b/tests/commands/dev/test_install_dev_requirements.py @@ -3,11 +3,16 @@ from subprocess import CalledProcessError import pytest +from briefcase.console import LogLevel from briefcase.exceptions import RequirementsInstallError -def test_install_requirements_no_error(dev_command, first_app): [email protected]("logging_level", [LogLevel.INFO, LogLevel.DEEP_DEBUG]) +def test_install_requirements_no_error(dev_command, first_app, logging_level): """Ensure run is executed properly to install requirements.""" + # Configure logging level + dev_command.logger.verbosity = logging_level + first_app.requires = ["package-one", "package_two", "packagethree"] dev_command.install_dev_requirements(app=first_app) @@ -22,10 +27,9 @@ def test_install_requirements_no_error(dev_command, first_app): "pip", "install", "--upgrade", - "package-one", - "package_two", - "packagethree", - ], + ] + + (["-vv"] if logging_level == LogLevel.DEEP_DEBUG else []) + + ["package-one", "package_two", "packagethree"], check=True, encoding="UTF-8", ) diff --git a/tests/console/test_Log.py b/tests/console/test_Log.py index 3515c137..e17f00fe 100644 --- a/tests/console/test_Log.py +++ b/tests/console/test_Log.py @@ -2,6 +2,7 @@ import contextlib import datetime import logging from io import TextIOBase +from pathlib import Path from unittest.mock import MagicMock, PropertyMock, call import pytest @@ -162,7 +163,7 @@ def test_save_log_to_file_do_not_log(command): assert len(logger.stacktraces) == 0 -def test_save_log_to_file_no_exception(mock_now, command, tmp_path): +def test_save_log_to_file_no_exception(mock_now, command, tmp_path, monkeypatch): """Log file contains everything printed to log; env vars are sanitized; no stacktrace if one is not captured.""" command.tools.os.environ = { @@ -203,13 +204,18 @@ def test_save_log_to_file_no_exception(mock_now, command, tmp_path): with command.tools.input.wait_bar("abort message..."): raise KeyboardInterrupt + project_root = tmp_path / "project_root" + 
project_root.mkdir(exist_ok=True) + monkeypatch.chdir(project_root) + with open("pyproject.toml", "w", encoding="utf-8") as f: + f.writelines(["[section]\n", "name = 'project'\n\n\n\n\n"]) + logger.save_log_to_file(command=command) log_filepath = tmp_path / "logs/briefcase.2022_06_25-16_12_29.dev.log" assert log_filepath.exists() - with open(log_filepath, encoding="utf-8") as log: - log_contents = log.read() + log_contents = log_filepath.read_text(encoding="utf-8") assert log_contents.startswith("Date/Time: 2022-06-25 16:12:29") assert "this is debug output" in log_contents @@ -235,6 +241,7 @@ def test_save_log_to_file_no_exception(mock_now, command, tmp_path): assert "wait message... done" in log_contents assert "abort message... started" in log_contents assert "abort message... aborted" in log_contents + assert "pyproject.toml:\n[section]\nname = 'project'\n\nBriefcase" in log_contents assert TRACEBACK_HEADER not in log_contents assert EXTRA_HEADER not in log_contents @@ -253,8 +260,7 @@ def test_save_log_to_file_with_exception(mock_now, command, tmp_path): log_filepath = tmp_path / "logs/briefcase.2022_06_25-16_12_29.dev.log" assert log_filepath.exists() - with open(log_filepath, encoding="utf-8") as log: - log_contents = log.read() + log_contents = log_filepath.read_text(encoding="utf-8") assert len(logger.stacktraces) == 1 assert log_contents.startswith("Date/Time: 2022-06-25 16:12:29") @@ -277,8 +283,7 @@ def test_save_log_to_file_with_multiple_exceptions(mock_now, command, tmp_path): log_filepath = tmp_path / "logs/briefcase.2022_06_25-16_12_29.dev.log" assert log_filepath.exists() - with open(log_filepath, encoding="utf-8") as log: - log_contents = log.read() + log_contents = log_filepath.read_text(encoding="utf-8") assert len(logger.stacktraces) == 4 assert log_contents.startswith("Date/Time: 2022-06-25 16:12:29") @@ -306,8 +311,7 @@ def test_save_log_to_file_extra(mock_now, command, tmp_path): logger.add_log_file_extra(extra) logger.save_log_to_file(command=command) log_filepath = tmp_path / "logs/briefcase.2022_06_25-16_12_29.dev.log" - with open(log_filepath, encoding="utf-8") as log: - log_contents = log.read() + log_contents = log_filepath.read_text(encoding="utf-8") assert EXTRA_HEADER in log_contents assert "Log extra 1" in log_contents @@ -334,6 +338,25 @@ def test_save_log_to_file_extra_interrupted(mock_now, command, tmp_path): assert log_filepath.stat().st_size == 0 +def test_save_log_to_file_missing_pyproject(mock_now, command, tmp_path, monkeypatch): + """Log file contains pyproject read exception if it's missing.""" + logger = Log() + logger.save_log = True + + # ensure in a directory without a pyproject.toml + monkeypatch.chdir(tmp_path) + Path(tmp_path / "pyproject.toml").unlink(missing_ok=True) + + logger.save_log_to_file(command=command) + + log_filepath = tmp_path / "logs/briefcase.2022_06_25-16_12_29.dev.log" + + assert log_filepath.exists() + log_contents = log_filepath.read_text(encoding="utf-8") + + assert "pyproject.toml:\n[Errno 2] No such file or directory" in log_contents + + def test_save_log_to_file_fail_to_make_logs_dir( mock_now, command, diff --git a/tests/platforms/web/static/test_build.py b/tests/platforms/web/static/test_build.py index 72f3f5c7..b4927ecc 100644 --- a/tests/platforms/web/static/test_build.py +++ b/tests/platforms/web/static/test_build.py @@ -10,7 +10,7 @@ else: # pragma: no-cover-if-gte-py311 import pytest -from briefcase.console import Console, Log +from briefcase.console import Console, Log, LogLevel from briefcase.exceptions 
import BriefcaseCommandError, BriefcaseConfigError from briefcase.integrations.subprocess import Subprocess from briefcase.platforms.web.static import StaticWebBuildCommand @@ -32,8 +32,12 @@ def build_command(tmp_path): return command -def test_build_app(build_command, first_app_generated, tmp_path): [email protected]("logging_level", [LogLevel.INFO, LogLevel.DEEP_DEBUG]) +def test_build_app(build_command, first_app_generated, logging_level, tmp_path): """An app can be built.""" + # Configure logging level + build_command.logger.verbosity = logging_level + bundle_path = tmp_path / "base_path/build/first-app/web/static" # Invoking build will create wheels as a side effect. @@ -110,7 +114,8 @@ def test_build_app(build_command, first_app_generated, tmp_path): bundle_path / "www/static/wheels", "-r", bundle_path / "requirements.txt", - ], + ] + + (["-vv"] if logging_level == LogLevel.DEEP_DEBUG else []), check=True, encoding="UTF-8", ),
Include `pyproject.toml` in the log file ### What is the problem or limitation you are having? When the Briefcase log file doesn't provide information I'm interested in, it will likely be in the project's `pyproject.toml`. For instance, what `requires` is set to. ### Describe the solution you'd like Append the contents of `pyproject.toml` to the log file. An alternative to this would be dumping the `AppConfig` object. ### Describe alternatives you've considered Continue requesting such information specifically. ### Additional context _No response_
0.0
84f76008a68edb0c6366d3e79919ba7163b44f1b
[ "tests/commands/create/test_install_app_requirements.py::test_app_packages_install_requirements[LogLevel.DEEP_DEBUG]", "tests/commands/dev/test_install_dev_requirements.py::test_install_requirements_no_error[LogLevel.DEEP_DEBUG]", "tests/console/test_Log.py::test_save_log_to_file_no_exception", "tests/console/test_Log.py::test_save_log_to_file_missing_pyproject", "tests/platforms/web/static/test_build.py::test_build_app[LogLevel.DEEP_DEBUG]" ]
[ "tests/commands/create/test_install_app_requirements.py::test_bad_path_index", "tests/commands/create/test_install_app_requirements.py::test_app_packages_no_requires", "tests/commands/create/test_install_app_requirements.py::test_app_packages_empty_requires", "tests/commands/create/test_install_app_requirements.py::test_app_packages_valid_requires", "tests/commands/create/test_install_app_requirements.py::test_app_packages_valid_requires_no_support_package", "tests/commands/create/test_install_app_requirements.py::test_app_packages_invalid_requires", "tests/commands/create/test_install_app_requirements.py::test_app_packages_offline", "tests/commands/create/test_install_app_requirements.py::test_app_packages_install_requirements[LogLevel.INFO]", "tests/commands/create/test_install_app_requirements.py::test_app_packages_replace_existing_requirements", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_no_requires", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_empty_requires", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_requires", "tests/commands/create/test_install_app_requirements.py::test__is_local_requirement_altsep_respected[None-asdf/xcvb-True]", "tests/commands/create/test_install_app_requirements.py::test__is_local_requirement_altsep_respected[None-asdf>xcvb-False]", "tests/commands/create/test_install_app_requirements.py::test__is_local_requirement_altsep_respected[>-asdf/xcvb-True]", "tests/commands/create/test_install_app_requirements.py::test__is_local_requirement_altsep_respected[>-asdf>xcvb-True]", "tests/commands/create/test_install_app_requirements.py::test__is_local_requirement_altsep_respected[>-asdf+xcvb-False]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[my-package]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[my-package==1.2.3]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[my-package<=1.2.3]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[my-package[optional]<=1.2.3]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[my-package[optional]<=1.2.3;", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[git+https://github.com/project/package]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[git+https://github.com/project/package#egg=my-package]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[git+https://github.com/project/package@deadbeef#egg=my-package]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[git+https://github.com/project/package@some-branch#egg=my-package]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[http://example.com/path/to/mypackage-1.2.3-py3-none-any.whl]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[https://example.com/path/to/mypackage-1.2.3-py3-none-any.whl]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_non_paths[my-package", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_paths_unix[/absolute/path/to/package]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_paths_unix[requirement1]", 
"tests/commands/create/test_install_app_requirements.py::test_app_requirements_paths_unix[requirement2]", "tests/commands/create/test_install_app_requirements.py::test_app_requirements_paths_unix[requirement3]", "tests/commands/create/test_install_app_requirements.py::test_app_packages_test_requires", "tests/commands/create/test_install_app_requirements.py::test_app_packages_test_requires_test_mode", "tests/commands/create/test_install_app_requirements.py::test_app_packages_only_test_requires_test_mode", "tests/commands/dev/test_install_dev_requirements.py::test_install_requirements_no_error[LogLevel.INFO]", "tests/commands/dev/test_install_dev_requirements.py::test_install_requirements_error", "tests/commands/dev/test_install_dev_requirements.py::test_no_requirements", "tests/commands/dev/test_install_dev_requirements.py::test_install_requirements_test_mode", "tests/commands/dev/test_install_dev_requirements.py::test_only_test_requirements", "tests/console/test_Log.py::test_log_level[-1-False-False-False]", "tests/console/test_Log.py::test_log_level[0-False-False-False]", "tests/console/test_Log.py::test_log_level[LogLevel.INFO-False-False-False]", "tests/console/test_Log.py::test_log_level[1-True-False-False]", "tests/console/test_Log.py::test_log_level[LogLevel.VERBOSE-True-False-False]", "tests/console/test_Log.py::test_log_level[2-True-True-False]", "tests/console/test_Log.py::test_log_level[LogLevel.DEBUG-True-True-False]", "tests/console/test_Log.py::test_log_level[3-True-True-True]", "tests/console/test_Log.py::test_log_level[LogLevel.DEEP_DEBUG-True-True-True]", "tests/console/test_Log.py::test_log_level[4-True-True-True]", "tests/console/test_Log.py::test_log_level[5-True-True-True]", "tests/console/test_Log.py::test_info_logging", "tests/console/test_Log.py::test_verbose_logging", "tests/console/test_Log.py::test_debug_logging", "tests/console/test_Log.py::test_capture_stacktrace", "tests/console/test_Log.py::test_capture_stacktrace_for_briefcaseerror[True]", "tests/console/test_Log.py::test_capture_stacktrace_for_briefcaseerror[False]", "tests/console/test_Log.py::test_save_log_to_file_do_not_log", "tests/console/test_Log.py::test_save_log_to_file_with_exception", "tests/console/test_Log.py::test_save_log_to_file_with_multiple_exceptions", "tests/console/test_Log.py::test_save_log_to_file_extra", "tests/console/test_Log.py::test_save_log_to_file_extra_interrupted", "tests/console/test_Log.py::test_save_log_to_file_fail_to_make_logs_dir", "tests/console/test_Log.py::test_save_log_to_file_fail_to_write_file", "tests/console/test_Log.py::test_log_with_context", "tests/console/test_Log.py::test_log_error_with_context", "tests/console/test_Log.py::test_stdlib_logging_config[LogLevel.DEEP_DEBUG-True]", "tests/console/test_Log.py::test_stdlib_logging_config[LogLevel.DEBUG-False]", "tests/console/test_Log.py::test_stdlib_logging_config[LogLevel.VERBOSE-False]", "tests/console/test_Log.py::test_stdlib_logging_config[LogLevel.INFO-False]", "tests/console/test_Log.py::test_stdlib_logging_only_one", "tests/console/test_Log.py::test_stdlib_logging_handler_writes_to_debug", "tests/platforms/web/static/test_build.py::test_build_app[LogLevel.INFO]", "tests/platforms/web/static/test_build.py::test_build_app_custom_pyscript_toml", "tests/platforms/web/static/test_build.py::test_build_app_invalid_custom_pyscript_toml", "tests/platforms/web/static/test_build.py::test_build_app_missing_wheel_dir", "tests/platforms/web/static/test_build.py::test_build_app_no_requirements", 
"tests/platforms/web/static/test_build.py::test_app_package_fail", "tests/platforms/web/static/test_build.py::test_dependency_fail" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-03-24 18:58:33+00:00
bsd-3-clause
1,333
beeware__briefcase-437
diff --git a/changes/397.bugfix.rst b/changes/397.bugfix.rst new file mode 100644 index 00000000..ae513cae --- /dev/null +++ b/changes/397.bugfix.rst @@ -0,0 +1,1 @@ +iOS builds will now warn if the Xcode command line tools are the active developer directory. diff --git a/src/briefcase/integrations/xcode.py b/src/briefcase/integrations/xcode.py index 11feebed..7c123dc9 100644 --- a/src/briefcase/integrations/xcode.py +++ b/src/briefcase/integrations/xcode.py @@ -148,6 +148,7 @@ Re-run Briefcase once that installation is complete. try: output = command.subprocess.check_output( ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, universal_newlines=True ) @@ -198,16 +199,28 @@ Re-run Briefcase once that installation is complete. """) - except subprocess.CalledProcessError: - raise BriefcaseCommandError(""" -Xcode is not installed. + except subprocess.CalledProcessError as e: + if " is a command line tools instance" in e.output: + raise BriefcaseCommandError(""" +Xcode is installed, but the active developer directory is a +command line tools instance. To make Xcode the active developer +directory, run: -You should be shown a dialog prompting you to install Xcode and the -command line tools. Select "Get Xcode" to install Xcode from the app store. + $ sudo xcode-select -switch /Applications/Xcode.app -You can install Xcode from the macOS App Store. +and then re-run Briefcase. +""") + else: + raise BriefcaseCommandError(""" +The Xcode install appears to exist, but Briefcase was unable to +determine the current Xcode version. Running: -Re-run Briefcase once that installation is complete. + $ xcodebuild -version + +should return the current Xcode version, but it raised an error. + +You may need to re-install Xcode. Re-run Briefcase once that +installation is complete. """)
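A small sketch (not Briefcase's actual implementation) of the error-handling technique in this patch: merge stderr into stdout so the `CalledProcessError` carries the tool's message, then branch on that text to give a targeted error:

```python
import subprocess


def xcode_version():
    """Run xcodebuild -version, distinguishing the two failure modes."""
    try:
        return subprocess.check_output(
            ["xcodebuild", "-version"],
            stderr=subprocess.STDOUT,  # capture xcode-select's stderr message in e.output
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        if " is a command line tools instance" in e.output:
            raise RuntimeError(
                "Command line tools are the active developer directory; "
                "run: sudo xcode-select -switch /Applications/Xcode.app"
            )
        raise RuntimeError("xcodebuild exists but could not report a version")
```

Without `stderr=subprocess.STDOUT`, xcode-select's diagnostic would go straight to the terminal and `e.output` would be empty, making the two failure modes indistinguishable.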
beeware/briefcase
00204f3c2558d96037df5c725287154a528f6aa0
diff --git a/tests/integrations/xcode/test_ensure_xcode_is_installed.py b/tests/integrations/xcode/test_ensure_xcode_is_installed.py index ba379cf2..b4d0272c 100644 --- a/tests/integrations/xcode/test_ensure_xcode_is_installed.py +++ b/tests/integrations/xcode/test_ensure_xcode_is_installed.py @@ -34,20 +34,49 @@ def test_not_installed(tmp_path): command.subprocess.check_output.assert_not_called() -def test_exists_but_not_installed(xcode): +def test_exists_but_command_line_tools_selected(xcode): + "If the Xcode folder exists, but cmd-line tools are selected, raise an error." + command = mock.MagicMock() + command.subprocess.check_output.side_effect = subprocess.CalledProcessError( + cmd=['xcodebuild', '-version'], + returncode=1 + ) + command.subprocess.check_output.side_effect.output = ( + "xcode-select: error: tool 'xcodebuild' requires Xcode, but " + "active developer directory '/Library/Developer/CommandLineTools' " + "is a command line tools instance\n" + ) + + with pytest.raises(BriefcaseCommandError, match=r"xcode-select -switch"): + ensure_xcode_is_installed(command, xcode_location=xcode) + + # xcode-select was invoked + command.subprocess.check_output.assert_called_once_with( + ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + + +def test_exists_but_corrupted(xcode): "If the Xcode folder exists, but xcodebuild breaks, raise an error." command = mock.MagicMock() command.subprocess.check_output.side_effect = subprocess.CalledProcessError( cmd=['xcodebuild', '-version'], returncode=1 ) + command.subprocess.check_output.side_effect.output = "Badness occurred." - with pytest.raises(BriefcaseCommandError): + with pytest.raises( + BriefcaseCommandError, + match=r"should return the current Xcode version" + ): ensure_xcode_is_installed(command, xcode_location=xcode) # xcode-select was invoked command.subprocess.check_output.assert_called_once_with( ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, universal_newlines=True, ) @@ -63,6 +92,7 @@ def test_installed_no_minimum_version(xcode): # xcode-select was invoked command.subprocess.check_output.assert_called_once_with( ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, universal_newlines=True, ) @@ -122,6 +152,7 @@ def test_installed_with_minimum_version_success(min_version, version, capsys, xc # xcode-select was invoked command.subprocess.check_output.assert_called_once_with( ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, universal_newlines=True, ) @@ -160,6 +191,7 @@ def test_installed_with_minimum_version_failure(min_version, version, xcode): # xcode-select was invoked command.subprocess.check_output.assert_called_once_with( ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, universal_newlines=True, ) @@ -179,6 +211,7 @@ def test_unexpected_version_output(capsys, xcode): # xcode-select was invoked command.subprocess.check_output.assert_called_once_with( ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, universal_newlines=True, )
Error message is misleading if XCode is installed but Cmdline tools are active Originally reported as a [PR on beeware](https://github.com/beeware/beeware/pull/49) I was following the beeware tutorial and I got the following error: ``` (beeware-venv) $ briefcase create iOS xcode-select: error: tool 'xcodebuild' requires Xcode, but active developer directory '/Library/Developer/CommandLineTools' is a command line tools instance Xcode is not installed. You should be shown a dialog prompting you to install Xcode and the command line tools. Select "Get Xcode" to install Xcode from the app store. You can install Xcode from the macOS App Store. Re-run Briefcase once that installation is complete. ``` It took me a while to figure out how to solve this because I already had Xcode installed. The solution was: ``` (beeware-venv) $ sudo xcode-select -s /Applications/Xcode.app/Contents/Developer ``` The problem is that XCode *is* installed, but the command line tools are currently the active install. The error message needs to differentiate between: * XCode command line tools are installed, but XCode isn't installed * XCode command line tools *and* XCode are installed, but command line tools are active. and present an appropriate error message in both cases.
0.0
00204f3c2558d96037df5c725287154a528f6aa0
[ "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_exists_but_command_line_tools_selected", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_exists_but_corrupted", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_no_minimum_version", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version0-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version1-11.2.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version2-11.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version3-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version4-8.2.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version5-8.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version6-11.2.5]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version7-11.3.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version8-12.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version9-11.2.5]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version10-11.3.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version11-12.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version12-11.2.5]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version13-11.3.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version14-12.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version15-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version16-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version17-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version18-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version19-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version20-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version0-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version1-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version2-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version3-8.2.1]", 
"tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version4-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version5-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_unexpected_version_output" ]
[ "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_not_installed" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-06-28 01:51:35+00:00
bsd-3-clause
1,334
beeware__briefcase-684
diff --git a/changes/460.bugfix.rst b/changes/460.bugfix.rst new file mode 100644 index 00000000..bd578ac8 --- /dev/null +++ b/changes/460.bugfix.rst @@ -0,0 +1,1 @@ +Bundle identifiers are now validated to ensure they don't contain reserved words. diff --git a/src/briefcase/commands/new.py b/src/briefcase/commands/new.py index 5d984b77..451abec7 100644 --- a/src/briefcase/commands/new.py +++ b/src/briefcase/commands/new.py @@ -7,14 +7,12 @@ from urllib.parse import urlparse from cookiecutter import exceptions as cookiecutter_exceptions -from briefcase.config import is_valid_app_name +from briefcase.config import is_valid_app_name, is_valid_bundle_identifier from briefcase.exceptions import NetworkFailure from .base import BaseCommand, BriefcaseCommandError from .create import InvalidTemplateRepository -VALID_BUNDLE_RE = re.compile(r'[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)+$') - def titlecase(s): """ @@ -138,12 +136,15 @@ class NewCommand(BaseCommand): :returns: True. If there are any validation problems, raises ValueError with a diagnostic message. """ - if not VALID_BUNDLE_RE.match(candidate): + if not is_valid_bundle_identifier(candidate): raise ValueError( - "Bundle should be a reversed domain name. It must contain at " - "least 2 dot-separated sections, and each section may only " - "include letters, numbers, and hyphens." + f"{candidate!r} is not a valid bundle identifier.\n\n" + "The bundle should be a reversed domain name. It must contain at least 2\n" + "dot-separated sections; each section may only include letters, numbers,\n" + "and hyphens; and each section may not contain any reserved words (like\n" + "'switch', or 'while')." ) + return True def make_domain(self, bundle): diff --git a/src/briefcase/config.py b/src/briefcase/config.py index 5fb26f9e..220af0d7 100644 --- a/src/briefcase/config.py +++ b/src/briefcase/config.py @@ -159,6 +159,21 @@ def is_valid_app_name(app_name): return False +VALID_BUNDLE_RE = re.compile(r'[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)+$') + + +def is_valid_bundle_identifier(bundle): + # Ensure the bundle identifier follows the basi + if not VALID_BUNDLE_RE.match(bundle): + return False + + for part in bundle.split('.'): + if is_reserved_keyword(part): + return False + + return True + + # This is the canonical definition from PEP440, modified to include named groups PEP440_CANONICAL_VERSION_PATTERN_RE = re.compile( r'^((?P<epoch>[1-9][0-9]*)!)?' @@ -295,6 +310,15 @@ class AppConfig(BaseConfig): "numbers, '-' and '_'; must start with a letter; and cannot end with '-' or '_')." ) + if not is_valid_bundle_identifier(self.bundle): + raise BriefcaseConfigError( + f"{self.bundle!r} is not a valid bundle identifier.\n\n" + "The bundle should be a reversed domain name. It must contain at least 2\n" + "dot-separated sections; each section may only include letters, numbers,\n" + "and hyphens; and each section may not contain any reserved words (like\n" + "'switch', or 'while')." + ) + # Version number is PEP440 compliant: if not is_pep440_canonical_version(self.version): raise BriefcaseConfigError(
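An illustrative re-implementation of the validation added in this patch. The patch's `is_reserved_keyword` helper covers reserved words from several languages; this sketch checks only Python keywords plus a tiny sample set of Java keywords, so the Java list here is an assumption for demonstration:

```python
import keyword
import re

VALID_BUNDLE_RE = re.compile(r"[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)+$")
JAVA_KEYWORDS = {"switch", "while", "class", "import"}  # illustrative sample only


def is_valid_bundle_identifier(bundle):
    """Reversed-domain syntax check, plus a reserved-word check per section."""
    if not VALID_BUNDLE_RE.match(bundle):
        return False
    return not any(
        keyword.iskeyword(part) or part in JAVA_KEYWORDS
        for part in bundle.split(".")
    )


assert is_valid_bundle_identifier("com.example.more")
assert not is_valid_bundle_identifier("com.switch.example")  # Java keyword
assert not is_valid_bundle_identifier("com.pass")            # Python keyword
assert not is_valid_bundle_identifier("home")                # only one section
```

Validating each dot-separated section independently is what lets the error surface at `briefcase new` time, rather than deep inside a Gradle build as described in the issue below.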
beeware/briefcase
22372cab4fbc4265b40bb18983c9143fbe4ffc28
diff --git a/tests/commands/new/test_validate_bundle.py b/tests/commands/new/test_validate_bundle.py index a07eb574..112399f3 100644 --- a/tests/commands/new/test_validate_bundle.py +++ b/tests/commands/new/test_validate_bundle.py @@ -23,6 +23,7 @@ def test_valid_bundle(new_command, bundle): 'com.hello_world', # underscore 'com.hello,world', # comma 'com.hello world!', # exclamation point + 'com.pass.example', # Reserved word ] ) def test_invalid_bundle(new_command, bundle): diff --git a/tests/config/test_AppConfig.py b/tests/config/test_AppConfig.py index 4e7e8f6e..9c39b4ca 100644 --- a/tests/config/test_AppConfig.py +++ b/tests/config/test_AppConfig.py @@ -133,6 +133,53 @@ def test_invalid_app_name(name): ) [email protected]( + 'bundle', + [ + 'com.example', + 'com.example.more', + 'com.example42.more', + 'com.example-42.more', + ] +) +def test_valid_bundle(bundle): + try: + AppConfig( + app_name='myapp', + version="1.2.3", + bundle=bundle, + description="A simple app", + sources=['src/myapp'] + ) + except BriefcaseConfigError: + pytest.fail(f'{bundle} should be valid') + + [email protected]( + 'bundle', + [ + 'not a bundle!', # Free text. + 'home', # Only one section. + 'com.hello_world', # underscore + 'com.hello,world', # comma + 'com.hello world!', # exclamation point + 'com.pass', # Python reserved word + 'com.pass.example', # Python reserved word + 'com.switch', # Java reserved word + 'com.switch.example', # Java reserved word + ] +) +def test_invalid_bundle_identifier(bundle): + with pytest.raises(BriefcaseConfigError, match=r"is not a valid bundle identifier\."): + AppConfig( + app_name="myapp", + version="1.2.3", + bundle=bundle, + description="A simple app", + sources=['src/invalid'] + ) + + def test_valid_app_version(): try: AppConfig( diff --git a/tests/config/test_is_valid_bundle_identifier.py b/tests/config/test_is_valid_bundle_identifier.py new file mode 100644 index 00000000..0ad9f5fb --- /dev/null +++ b/tests/config/test_is_valid_bundle_identifier.py @@ -0,0 +1,36 @@ +import pytest + +from briefcase.config import is_valid_bundle_identifier + + [email protected]( + 'bundle', + [ + 'com.example', + 'com.example.more', + 'com.example42.more', + 'com.example-42.more', + ] +) +def test_valid_bundle(bundle): + "Test that valid bundles are accepted" + assert is_valid_bundle_identifier(bundle) + + [email protected]( + 'bundle', + [ + 'not a bundle!', # Free text. + 'home', # Only one section. + 'com.hello_world', # underscore + 'com.hello,world', # comma + 'com.hello world!', # exclamation point + 'com.pass', # Python reserved word + 'com.pass.example', # Python reserved word + 'com.switch', # Java reserved word + 'com.switch.example', # Java reserved word + ] +) +def test_invalid_bundle(bundle): + "Test that invalid bundles are rejected" + assert not is_valid_bundle_identifier(bundle)
On Android, if your app is called `x.y.switch`, it fails to build **Describe the bug** If you call your app `switch` (or another Java reserved word), you will be unable to build it on Android. **To Reproduce** You can reproduce by creating an app whose module_name is `switch`. For example, you can use a Toga demo app. Steps to reproduce the behavior: 1. Download the toga source code, then `cd` into `examples/switch` 2. Run `briefcase create android && briefcase build android` 3. Wait for the build to complete (about 60 seconds if you've used Briefcase on this system to make Android apps) 4. See an error: ``` > Task :app:processDebugResources FAILED FAILURE: Build failed with an exception. * What went wrong: Execution failed for task ':app:processDebugResources'. > A failure occurred while executing com.android.build.gradle.internal.tasks.Workers$ActionFacade > Package 'org.beeware.switch' from AndroidManifest.xml is not a valid Java package name as 'switch' is a Java keyword. * Try: Run with --stacktrace option to get the stack trace. Run with --info or --debug option to get more log output. Run with --scan to get full insights. * Get more help at https://help.gradle.org BUILD FAILED in 9s 15 actionable tasks: 15 executed Error while building project. ``` **Expected behavior** The app should build or briefcase should print an error at the `briefcase new` phase, or briefcase should print an error before trying to run `briefcase create android`. **Environment:** - Operating System: macOS 10.15 - Python version: 3.7 - Software versions: - Briefcase: 0.3.3 - Toga: current git (0.3.0pre21) **Additional context** Add any other context about the problem here.
0.0
22372cab4fbc4265b40bb18983c9143fbe4ffc28
[ "tests/commands/new/test_validate_bundle.py::test_valid_bundle[com.example]", "tests/commands/new/test_validate_bundle.py::test_valid_bundle[com.example.more]", "tests/commands/new/test_validate_bundle.py::test_valid_bundle[com.example42.more]", "tests/commands/new/test_validate_bundle.py::test_valid_bundle[com.example-42.more]", "tests/commands/new/test_validate_bundle.py::test_invalid_bundle[not", "tests/commands/new/test_validate_bundle.py::test_invalid_bundle[home]", "tests/commands/new/test_validate_bundle.py::test_invalid_bundle[com.hello_world]", "tests/commands/new/test_validate_bundle.py::test_invalid_bundle[com.hello,world]", "tests/commands/new/test_validate_bundle.py::test_invalid_bundle[com.hello", "tests/commands/new/test_validate_bundle.py::test_invalid_bundle[com.pass.example]", "tests/config/test_AppConfig.py::test_minimal_AppConfig", "tests/config/test_AppConfig.py::test_extra_attrs", "tests/config/test_AppConfig.py::test_valid_app_name[myapp]", "tests/config/test_AppConfig.py::test_valid_app_name[myApp]", "tests/config/test_AppConfig.py::test_valid_app_name[MyApp]", "tests/config/test_AppConfig.py::test_valid_app_name[MyAPP]", "tests/config/test_AppConfig.py::test_valid_app_name[my-app]", "tests/config/test_AppConfig.py::test_valid_app_name[my_app]", "tests/config/test_AppConfig.py::test_valid_app_name[myapp2]", "tests/config/test_AppConfig.py::test_valid_app_name[my2app]", "tests/config/test_AppConfig.py::test_invalid_app_name[!myapp]", "tests/config/test_AppConfig.py::test_invalid_app_name[my!app]", "tests/config/test_AppConfig.py::test_invalid_app_name[myapp!]", "tests/config/test_AppConfig.py::test_invalid_app_name[my$app]", "tests/config/test_AppConfig.py::test_invalid_app_name[-myApp]", "tests/config/test_AppConfig.py::test_invalid_app_name[myApp-]", "tests/config/test_AppConfig.py::test_invalid_app_name[_myApp]", "tests/config/test_AppConfig.py::test_invalid_app_name[myApp_]", "tests/config/test_AppConfig.py::test_valid_bundle[com.example]", "tests/config/test_AppConfig.py::test_valid_bundle[com.example.more]", "tests/config/test_AppConfig.py::test_valid_bundle[com.example42.more]", "tests/config/test_AppConfig.py::test_valid_bundle[com.example-42.more]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[not", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[home]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.hello_world]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.hello,world]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.hello", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.pass]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.pass.example]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.switch]", "tests/config/test_AppConfig.py::test_invalid_bundle_identifier[com.switch.example]", "tests/config/test_AppConfig.py::test_valid_app_version", "tests/config/test_AppConfig.py::test_invalid_app_version", "tests/config/test_AppConfig.py::test_module_name[myapp-myapp]", "tests/config/test_AppConfig.py::test_module_name[my-app-my_app]", "tests/config/test_AppConfig.py::test_package_name[com.example-com.example]", "tests/config/test_AppConfig.py::test_package_name[com.ex-ample-com.ex_ample]", "tests/config/test_AppConfig.py::test_duplicated_source[sources0]", "tests/config/test_AppConfig.py::test_duplicated_source[sources1]", "tests/config/test_AppConfig.py::test_duplicated_source[sources2]", 
"tests/config/test_AppConfig.py::test_duplicated_source[sources3]", "tests/config/test_AppConfig.py::test_no_source_for_app", "tests/config/test_is_valid_bundle_identifier.py::test_valid_bundle[com.example]", "tests/config/test_is_valid_bundle_identifier.py::test_valid_bundle[com.example.more]", "tests/config/test_is_valid_bundle_identifier.py::test_valid_bundle[com.example42.more]", "tests/config/test_is_valid_bundle_identifier.py::test_valid_bundle[com.example-42.more]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[not", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[home]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.hello_world]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.hello,world]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.hello", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.pass]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.pass.example]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.switch]", "tests/config/test_is_valid_bundle_identifier.py::test_invalid_bundle[com.switch.example]" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-03-22 06:27:16+00:00
bsd-3-clause
1,335
beeware__briefcase-695
diff --git a/changes/473.feature.rst b/changes/473.feature.rst new file mode 100644 index 00000000..1cd1f011 --- /dev/null +++ b/changes/473.feature.rst @@ -0,0 +1,1 @@ +Apps can be updated as part of a call to package. diff --git a/src/briefcase/commands/package.py b/src/briefcase/commands/package.py index ec77afeb..77784d6b 100644 --- a/src/briefcase/commands/package.py +++ b/src/briefcase/commands/package.py @@ -71,6 +71,12 @@ class PackageCommand(BaseCommand): return state def add_options(self, parser): + parser.add_argument( + '-u', + '--update', + action="store_true", + help='Update the app before building' + ) parser.add_argument( '-p', '--packaging-format',
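For context, a minimal stand-alone sketch of the option this patch adds — a boolean `-u`/`--update` flag that downstream command logic can use to trigger an app update before packaging. The parser here is self-contained, not Briefcase's:

```python
import argparse

parser = argparse.ArgumentParser(prog="briefcase package")
parser.add_argument(
    "-u", "--update",
    action="store_true",  # defaults to False when the flag is absent
    help="Update the app before building",
)

print(parser.parse_args(["-u"]).update)  # True
print(parser.parse_args([]).update)      # False
```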
beeware/briefcase
354a74edec3cc502d7548f02546c064e16e1a707
diff --git a/tests/commands/package/test_call.py b/tests/commands/package/test_call.py index 2ea9731a..9047cd3a 100644 --- a/tests/commands/package/test_call.py +++ b/tests/commands/package/test_call.py @@ -69,6 +69,7 @@ def test_no_args_package_two_app(package_command, first_app, second_app): "adhoc_sign": False, "identity": None, "sign_app": True, + # state of previous calls have been preserved. "package_state": "first", }, ), @@ -206,6 +207,7 @@ def test_no_sign_args_package_two_app(package_command, first_app, second_app): "adhoc_sign": False, "identity": None, "sign_app": False, + # state of previous calls have been preserved. "package_state": "first", }, ), @@ -252,6 +254,7 @@ def test_adhoc_sign_args_package_two_app(package_command, first_app, second_app) "adhoc_sign": True, "identity": None, "sign_app": True, + # state of previous calls have been preserved. "package_state": "first", }, ), @@ -296,6 +299,7 @@ def test_identity_sign_args_package_two_app(package_command, first_app, second_a "adhoc_sign": False, "identity": "test", "sign_app": True, + # state of previous calls have been preserved. "package_state": "first", }, ), @@ -331,3 +335,152 @@ def test_package_alternate_format(package_command, first_app): } ), ] + + +def test_update_package_one_app(package_command, first_app): + "If there is one app, and a -u argument, package updates the app" + # Add a single app + package_command.apps = { + "first": first_app, + } + + # Configure no command line options + options = package_command.parse_options(["-u"]) + + # Run the run command + package_command(**options) + + # The right sequence of things will be done + assert package_command.actions == [ + # Tools are verified + ("verify", ), + # Update (and then build) the first app + ( + "update", + "first", + { + "adhoc_sign": False, + "identity": None, + "sign_app": True, + } + ), + ( + "build", + "first", + { + "adhoc_sign": False, + "identity": None, + "sign_app": True, + "update_state": "first", + } + ), + # Package the first app + ( + "package", + "first", + { + "packaging_format": "pkg", + "adhoc_sign": False, + "identity": None, + "sign_app": True, + "update_state": "first", + "build_state": "first", + } + ), + ] + + +def test_update_package_two_app(package_command, first_app, second_app): + "If there are multiple apps, update and publish all of them" + # Add two apps + package_command.apps = { + "first": first_app, + "second": second_app, + } + + # Configure no command line options + options = package_command.parse_options(["--update"]) + + # Run the package command + package_command(**options) + + # The right sequence of things will be done + assert package_command.actions == [ + # Tools are verified + ("verify", ), + # Update (and then build) the first app + ( + "update", + "first", + { + "adhoc_sign": False, + "identity": None, + "sign_app": True, + } + ), + ( + "build", + "first", + { + "adhoc_sign": False, + "identity": None, + "sign_app": True, + "update_state": "first", + } + ), + # Package the first app + ( + "package", + "first", + { + "packaging_format": "pkg", + "adhoc_sign": False, + "identity": None, + "sign_app": True, + "update_state": "first", + "build_state": "first", + } + ), + # Update (and then build) the second app + ( + "update", + "second", + { + "adhoc_sign": False, + "identity": None, + "sign_app": True, + # state of previous calls have been preserved. 
+ "update_state": "first", + "build_state": "first", + "package_state": "first", + } + ), + ( + "build", + "second", + { + "adhoc_sign": False, + "identity": None, + "sign_app": True, + "update_state": "second", + # state of previous calls have been preserved. + "build_state": "first", + "package_state": "first", + } + ), + # package the second app + ( + "package", + "second", + { + 'packaging_format': 'pkg', + "adhoc_sign": False, + "identity": None, + "sign_app": True, + "update_state": "second", + "build_state": "second", + # state of previous calls have been preserved. + "package_state": "first", + }, + ), + ]
briefcase package -u returns unrecognized arguments: -u **Describe the bug** Beeware Tutorial 4 says that the `briefcase package` command accepts the `-u` option, but this does not work on Windows when attempting to build the MSI. **To Reproduce** 1. Complete Tutorial 4 to the end https://docs.beeware.org/en/latest/tutorial/tutorial-4.html#update-and-run-in-one-step See the error returned: ``` usage: briefcase package windows msi [-h] [-v] [-V] [--no-input] briefcase package windows msi: error: unrecognized arguments: -u ``` **Environment:** - Operating System: Windows 10 Pro, Version 2004 - Python version: Python 3.7.7 - Software versions: - Briefcase: 0.3.3 - Toga: 0.3.0.dev22
0.0
354a74edec3cc502d7548f02546c064e16e1a707
[ "tests/commands/package/test_call.py::test_update_package_one_app", "tests/commands/package/test_call.py::test_update_package_two_app" ]
[ "tests/commands/package/test_call.py::test_no_args_package_one_app", "tests/commands/package/test_call.py::test_no_args_package_two_app", "tests/commands/package/test_call.py::test_no_sign_package_one_app", "tests/commands/package/test_call.py::test_identity_arg_package_one_app", "tests/commands/package/test_call.py::test_adhoc_sign_package_one_app", "tests/commands/package/test_call.py::test_no_sign_args_package_two_app", "tests/commands/package/test_call.py::test_adhoc_sign_args_package_two_app", "tests/commands/package/test_call.py::test_identity_sign_args_package_two_app", "tests/commands/package/test_call.py::test_package_alternate_format" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2022-04-04 00:40:14+00:00
bsd-3-clause
1,336
beeware__briefcase-697
diff --git a/changes/696.bugfix.rst b/changes/696.bugfix.rst new file mode 100644 index 00000000..07c83479 --- /dev/null +++ b/changes/696.bugfix.rst @@ -0,0 +1,1 @@ +Android projects that have punctuation in their formal names can now build without error. diff --git a/src/briefcase/platforms/android/gradle.py b/src/briefcase/platforms/android/gradle.py index f7c9ab0f..632294d9 100644 --- a/src/briefcase/platforms/android/gradle.py +++ b/src/briefcase/platforms/android/gradle.py @@ -1,3 +1,4 @@ +import re import subprocess from briefcase.commands import ( @@ -13,6 +14,20 @@ from briefcase.exceptions import BriefcaseCommandError from briefcase.integrations.android_sdk import AndroidSDK +def safe_formal_name(name): + """Converts the name into a safe name on Android. + + Certain characters (``/\\:<>"?*|``) can't be used as app names + on Android; ``!`` causes problems with Android build tooling. + Also ensure that trailing, leading, and consecutive whitespace + caused by removing punctuation is collapsed. + + :param name: The candidate name + :returns: The safe version of the name. + """ + return re.sub(r'\s+', ' ', re.sub(r'[!/\\:<>"\?\*\|]', "", name)).strip() + + class GradleMixin: output_format = "gradle" platform = "android" @@ -28,6 +43,20 @@ class GradleMixin: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + def bundle_path(self, app): + """ + The path to the bundle for the app in the output format. + + The bundle is the template-generated source form of the app. + The path will usually be a directory, the existence of which is + indicative that the template has been rolled out for an app. + + This overrides the default behavior, using a "safe" formal name + + :param app: The app config + """ + return self.platform_path / self.output_format / safe_formal_name(app.formal_name) + def binary_path(self, app): return ( self.bundle_path(app) @@ -88,6 +117,7 @@ class GradleCreateCommand(GradleMixin, CreateCommand): return { 'version_code': version_code, + 'safe_formal_name': safe_formal_name(app.formal_name), }
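A stand-alone copy of the sanitization logic from this patch, so the regex behavior can be exercised directly. The character class strips the Android-prohibited punctuation (and `!`); the second pass collapses any whitespace runs left behind:

```python
import re


def safe_formal_name(name):
    """Strip characters Android forbids in project names, then tidy whitespace."""
    stripped = re.sub(r'[!/\\:<>"\?\*\|]', "", name)
    return re.sub(r"\s+", " ", stripped).strip()


assert safe_formal_name("Hello!") == "Hello"
assert safe_formal_name("Hello | World") == "Hello World"   # no double space left
assert safe_formal_name("Hello! (World?)") == "Hello (World)"
```

Doing the whitespace collapse after the punctuation strip matters: removing `|` from `"Hello | World"` would otherwise leave two adjacent spaces in the project name.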
beeware/briefcase
8c4723ddb429076a6c5dcbfa0896210d020b4262
diff --git a/tests/platforms/android/gradle/test_create.py b/tests/platforms/android/gradle/test_create.py index 429035aa..49c5ecb3 100644 --- a/tests/platforms/android/gradle/test_create.py +++ b/tests/platforms/android/gradle/test_create.py @@ -43,7 +43,8 @@ def test_version_code(create_command, first_app_config, version, build, version_ if build: first_app_config.build = build assert create_command.output_format_template_context(first_app_config) == { - 'version_code': version_code + 'version_code': version_code, + 'safe_formal_name': 'First App', } # Version code must be less than a 32 bit signed integer MAXINT. assert int(version_code) < 2147483647 diff --git a/tests/platforms/android/gradle/test_safe_formal_name.py b/tests/platforms/android/gradle/test_safe_formal_name.py new file mode 100644 index 00000000..b0919524 --- /dev/null +++ b/tests/platforms/android/gradle/test_safe_formal_name.py @@ -0,0 +1,38 @@ +import pytest + +from briefcase.platforms.android.gradle import safe_formal_name + + [email protected]( + 'formal_name, safe_name', + [ + ('Hello World', 'Hello World'), + + # The invalid list is all stripped + ('Hello/World/', 'HelloWorld'), + ('Hello\\World', 'HelloWorld'), + ('Hello:World', 'HelloWorld'), + ('Hello<World', 'HelloWorld'), + ('Hello>World', 'HelloWorld'), + ('Hello "World"', 'Hello World'), + ('Hello World?', 'Hello World'), + ('Hello|World', 'HelloWorld'), + ('Hello World!', 'Hello World'), + + # All invalid punctuation is removed + # Valid punctuation is preserved + ('Hello! (World?)', 'Hello (World)'), + + # Position of punctuation doesn't matter + ('Hello! World', 'Hello World'), + ('!Hello World', 'Hello World'), + + # If removing punctuation leads to double spaces, reduce the double spaces + ('Hello | World', 'Hello World'), + ('Hello World |', 'Hello World'), + ('| Hello World', 'Hello World'), + + ] +) +def test_safe_formal_name(formal_name, safe_name): + assert safe_formal_name(formal_name) == safe_name
Certain characters in formal names cause problems with Android Certain characters (`/\:<>"?*|`) are prohibited from project paths on Android. These names are legal on other platforms. In addition, the character `!` causes problems when used as part of a path name. **To Reproduce** Steps to reproduce the behavior: 1. Run `briefcase new`, using a formal name of `Hello!` or `Hello?` 2. Run `briefcase run android` Using `Hello!`, the build will fail with the error: ``` [hello] Building Android APK... Exception in thread "main" java.lang.RuntimeException: Could not determine wrapper version. at org.gradle.wrapper.GradleWrapperMain.wrapperVersion(GradleWrapperMain.java:111) at org.gradle.wrapper.GradleWrapperMain.main(GradleWrapperMain.java:61) Caused by: java.lang.RuntimeException: No build receipt resource found. at org.gradle.wrapper.GradleWrapperMain.wrapperVersion(GradleWrapperMain.java:97) ... 1 more Error while building project. ``` Using `Hello?`, the build will fail with the error: ``` [hello] Building Android APK... FAILURE: Build failed with an exception. * Where: Settings file '/Users/rkm/projects/beeware/briefcase/local/hello/android/gradle/Hello?/settings.gradle' line: 2 * What went wrong: A problem occurred evaluating settings 'Hello?'. > The project name 'Hello?' must not contain any of the following characters: [/, \, :, <, >, ", ?, *, |]. Set the 'rootProject.name' or adjust the 'include' statement (see https://docs.gradle.org/7.2/dsl/org.gradle.api.initialization.Settings.html#org.gradle.api.initialization.Settings:include(java.lang.String[]) for more details). * Try: Run with --stacktrace option to get the stack trace. Run with --info or --debug option to get more log output. Run with --scan to get full insights. * Get more help at https://help.gradle.org BUILD FAILED in 633ms Error while building project. ``` **Expected behavior** App should build and start. **Environment:** - Operating System: All - Python version: All - Software versions: - Briefcase: 0.3.6
0.0
8c4723ddb429076a6c5dcbfa0896210d020b4262
[ "tests/platforms/android/gradle/test_create.py::test_version_code[0.1-None-10000]", "tests/platforms/android/gradle/test_create.py::test_version_code[0.1a3-None-10000]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2-None-1020000]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2a3-None-1020000]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3-None-1020300]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3a3-None-1020300]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3b4-None-1020300]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3rc5-None-1020300]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3.dev6-None-1020300]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3.post7-None-1020300]", "tests/platforms/android/gradle/test_create.py::test_version_code[2019.1-None-2019010000]", "tests/platforms/android/gradle/test_create.py::test_version_code[2019.18-None-2019180000]", "tests/platforms/android/gradle/test_create.py::test_version_code[2019.4.18-None-2019041800]", "tests/platforms/android/gradle/test_create.py::test_version_code[0.1-3-10003]", "tests/platforms/android/gradle/test_create.py::test_version_code[0.1a3-42-10042]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2-42-1020042]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2a3-3-1020003]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3-3-1020303]", "tests/platforms/android/gradle/test_create.py::test_version_code[1.2.3b4-42-1020342]", "tests/platforms/android/gradle/test_create.py::test_version_code[2019.1-3-2019010003]", "tests/platforms/android/gradle/test_create.py::test_version_code[2019.1b4-42-2019010042]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello/World/-HelloWorld]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello\\\\World-HelloWorld]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello:World-HelloWorld]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello<World-HelloWorld]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello>World-HelloWorld]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello|World-HelloWorld]", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[Hello!", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[!Hello", "tests/platforms/android/gradle/test_safe_formal_name.py::test_safe_formal_name[|" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-04-04 06:12:11+00:00
bsd-3-clause
1,337
beeware__briefcase-698
diff --git a/changes/612.bugfix.rst b/changes/612.bugfix.rst new file mode 100644 index 00000000..ca7b1ed2 --- /dev/null +++ b/changes/612.bugfix.rst @@ -0,0 +1,1 @@ +When the formal name uses non-Latin characters, the suggested Class and App names are now valid. diff --git a/src/briefcase/commands/new.py b/src/briefcase/commands/new.py index 451abec7..bed4f8dd 100644 --- a/src/briefcase/commands/new.py +++ b/src/briefcase/commands/new.py @@ -1,6 +1,7 @@ import os import re import subprocess +import unicodedata from email.utils import parseaddr from typing import Optional from urllib.parse import urlparse @@ -79,9 +80,36 @@ class NewCommand(BaseCommand): :param formal_name: The formal name :returns: The app's class name """ - class_name = re.sub('[^0-9a-zA-Z_]+', '', formal_name) - if class_name[0].isdigit(): + # Identifiers (including class names) can be unicode. + # https://docs.python.org/3/reference/lexical_analysis.html#identifiers + xid_start = { + "Lu", # uppercase letters + "Ll", # lowercase letters + "Lt", # titlecase letters + "Lm", # modifier letters + "Lo", # other letters + "Nl", # letter numbers + } + xid_continue = xid_start.union({ + "Mn", # nonspacing marks + "Mc", # spacing combining marks + "Nd", # decimal number + "Pc", # connector punctuations + }) + + # Normalize to NFKC form, then remove any character that isn't + # in the allowed categories, or is the underscore character + class_name = ''.join( + ch for ch in unicodedata.normalize('NFKC', formal_name) + if unicodedata.category(ch) in xid_continue + or ch in {'_'} + ) + + # If the first character isn't in the 'start' character set, + # and it isn't already an underscore, prepend an underscore. + if unicodedata.category(class_name[0]) not in xid_start and class_name[0] != '_': class_name = '_' + class_name + return class_name def make_app_name(self, formal_name): @@ -91,7 +119,14 @@ class NewCommand(BaseCommand): :param formal_name: The formal name :returns: The candidate app name """ - return re.sub('[^0-9a-zA-Z_]+', '', formal_name).lstrip('_').lower() + normalized = unicodedata.normalize('NFKD', formal_name) + stripped = re.sub('[^0-9a-zA-Z_]+', '', normalized).lstrip('_') + if stripped: + return stripped.lower() + else: + # If stripping removes all the content, + # use a dummy app name as the suggestion. + return 'myapp' def validate_app_name(self, candidate): """
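A condensed sketch of the identifier-sanitizing approach in this patch: normalize to NFKC, keep only characters whose Unicode category is legal in a Python identifier, and prefix `_` when the first remaining character can't start one. This mirrors the patch's `make_class_name` but is trimmed for illustration:

```python
import unicodedata

XID_START = {"Lu", "Ll", "Lt", "Lm", "Lo", "Nl"}   # can begin an identifier
XID_CONTINUE = XID_START | {"Mn", "Mc", "Nd", "Pc"}  # can appear after the start


def make_class_name(formal_name):
    """Derive a valid (possibly non-Latin) class name from a formal name."""
    cleaned = "".join(
        ch for ch in unicodedata.normalize("NFKC", formal_name)
        if unicodedata.category(ch) in XID_CONTINUE or ch == "_"
    )
    if unicodedata.category(cleaned[0]) not in XID_START and cleaned[0] != "_":
        cleaned = "_" + cleaned
    return cleaned


assert make_class_name("Hello World") == "HelloWorld"
assert make_class_name("24 Jump Street") == "_24JumpStreet"  # digits can't lead
assert make_class_name("你好 世界").isidentifier()            # unicode stays valid
```

The key insight is that Python identifiers are defined by Unicode category, not by the ASCII range, so filtering on categories preserves names like `你好世界` that a `[^0-9a-zA-Z_]` regex would destroy.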
beeware/briefcase
bc3168c09a7eb5ea9725d55d2e781367d11c8e9f
diff --git a/tests/commands/new/test_make_app_name.py b/tests/commands/new/test_make_app_name.py index 0e314413..84a39e0d 100644 --- a/tests/commands/new/test_make_app_name.py +++ b/tests/commands/new/test_make_app_name.py @@ -8,6 +8,13 @@ import pytest ('Hello World!', 'helloworld'), ('Hello! World', 'helloworld'), ('Hello-World', 'helloworld'), + + # Internationalized names that can be unicode-simplified + ('Hallo Vögel', 'hallovogel'), + ('Bonjour Garçon', 'bonjourgarcon'), + + # Internationalized names that cannot be unicode-simplified + ('你好 世界', 'myapp'), ] ) def test_make_app_name(new_command, formal_name, candidate): diff --git a/tests/commands/new/test_make_class_name.py b/tests/commands/new/test_make_class_name.py index 79d2eee7..4f08966d 100644 --- a/tests/commands/new/test_make_class_name.py +++ b/tests/commands/new/test_make_class_name.py @@ -4,12 +4,38 @@ import pytest @pytest.mark.parametrize( 'formal_name, candidate', [ + # Some simple cases ('Hello World', 'HelloWorld'), ('Hello World!', 'HelloWorld'), ('Hello! World', 'HelloWorld'), ('Hello_World', 'Hello_World'), ('Hello-World', 'HelloWorld'), - ('24 Jump Street', '_24JumpStreet'), + + # Starting with a number + ('24 Jump Street', '_24JumpStreet'), # Unicode category Nd + # Starting with an underscore + ('Hello_World', 'Hello_World'), + ('_Hello_World', '_Hello_World'), + + # Unicode names + ('你好 世界', '你好世界'), + ('Hallo Vögel', 'HalloVögel'), + ('Bonjour Garçon', 'BonjourGarçon'), + + # Unicode codepoints that can be at the start of an identifier + ('\u02EC World', '\u02ECWorld'), # Unicode category Lm + ('\u3006 World', '\u3006World'), # Unicode category Lo + ('\u3021 World', '\u3021World'), # Unicode category Nl + # ('\u2118 World', '\u2118World'), # in Other_ID_Start + + # Unicode codepoints that cannot be at the start of an identifier + ('\u20E1 World', '_\u20E1World'), # Unicode Category Mn + ('\u0903 World', '_\u0903World'), # Unicode Category Mc + ('\u2040 World', '_\u2040World'), # Category Pc + # ('\u00B7 World', '_\u00B7World'), # in Other_ID_Continue + + # Characters that are converted by NFKC normalization + ('\u2135 World', '\u05d0World'), # Unicode category Lo ] ) def test_make_class_name(new_command, formal_name, candidate):
Non-Latin formal app names cause problems If you use non-Latin characters (e.g., `学口算`, or anything outside the ASCII set) in the formal app name, `briefcase new` fails. Similar problems occur if you manually define `pyproject.toml`, and then run `briefcase create`. This is because briefcase tries to create a "Class Name", as well as other artefacts by processing the formal app name; these conversions fail with non-Latin names. **To Reproduce** Steps to reproduce the behavior: 1. Run `briefcase new` 2. Specify an app name of `学口算`. or 1. Run `briefcase new` 2. Modify `pyproject.toml` so that the formal app name is `学口算` 3. Run `briefcase create` **Expected behavior** The formal name is human-readable, so any Unicode string should be valid.
0.0
bc3168c09a7eb5ea9725d55d2e781367d11c8e9f
[ "tests/commands/new/test_make_app_name.py::test_make_app_name[Hallo", "tests/commands/new/test_make_app_name.py::test_make_app_name[Bonjour", "tests/commands/new/test_make_app_name.py::test_make_app_name[\\u4f60\\u597d", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u4f60\\u597d", "tests/commands/new/test_make_class_name.py::test_make_class_name[Hallo", "tests/commands/new/test_make_class_name.py::test_make_class_name[Bonjour", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u02ec", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u3006", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u3021", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u20e1", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u0903", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u2040", "tests/commands/new/test_make_class_name.py::test_make_class_name[\\u2135" ]
[ "tests/commands/new/test_make_app_name.py::test_make_app_name[Hello", "tests/commands/new/test_make_app_name.py::test_make_app_name[Hello!", "tests/commands/new/test_make_app_name.py::test_make_app_name[Hello-World-helloworld]", "tests/commands/new/test_make_class_name.py::test_make_class_name[Hello", "tests/commands/new/test_make_class_name.py::test_make_class_name[Hello!", "tests/commands/new/test_make_class_name.py::test_make_class_name[Hello_World-Hello_World0]", "tests/commands/new/test_make_class_name.py::test_make_class_name[Hello-World-HelloWorld]", "tests/commands/new/test_make_class_name.py::test_make_class_name[24", "tests/commands/new/test_make_class_name.py::test_make_class_name[Hello_World-Hello_World1]", "tests/commands/new/test_make_class_name.py::test_make_class_name[_Hello_World-_Hello_World]" ]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
2022-04-04 08:31:21+00:00
bsd-3-clause
1,338
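The `make_class_name` change in the patch above can be distilled into a standalone sketch. This is not the briefcase implementation verbatim: the category sets and normalization steps are copied from the diff, but the surrounding class and error handling are omitted, and the assertions mirror cases from the record's test patch.

```python
import unicodedata

# Unicode categories permitted at the start / in the body of a Python
# identifier (simplified; Other_ID_Start/Other_ID_Continue are ignored).
XID_START = {"Lu", "Ll", "Lt", "Lm", "Lo", "Nl"}
XID_CONTINUE = XID_START | {"Mn", "Mc", "Nd", "Pc"}

def make_class_name(formal_name: str) -> str:
    # NFKC-normalize, then keep only identifier-safe characters (plus "_")
    class_name = "".join(
        ch for ch in unicodedata.normalize("NFKC", formal_name)
        if unicodedata.category(ch) in XID_CONTINUE or ch == "_"
    )
    # Prefix an underscore if the first character can't start an identifier
    # (like the patch, this assumes at least one character survived)
    if unicodedata.category(class_name[0]) not in XID_START and class_name[0] != "_":
        class_name = "_" + class_name
    return class_name

assert make_class_name("24 Jump Street") == "_24JumpStreet"
assert make_class_name("你好 世界") == "你好世界"
```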
beeware__briefcase-707
diff --git a/changes/668.bugfix.rst b/changes/668.bugfix.rst new file mode 100644 index 00000000..300feb98 --- /dev/null +++ b/changes/668.bugfix.rst @@ -0,0 +1,1 @@ +Xcode version checks are now more robust. diff --git a/setup.cfg b/setup.cfg index bbe517ad..8052b217 100644 --- a/setup.cfg +++ b/setup.cfg @@ -116,6 +116,7 @@ exclude=\ local/*,\ docs/*,\ build/*,\ + tests/apps/*,\ .eggs/*,\ .tox/* max-complexity = 10 diff --git a/src/briefcase/integrations/xcode.py b/src/briefcase/integrations/xcode.py index 8ea1f1e8..6c50422d 100644 --- a/src/briefcase/integrations/xcode.py +++ b/src/briefcase/integrations/xcode.py @@ -174,16 +174,17 @@ Re-run Briefcase once that installation is complete. ) if min_version is not None: - if output.startswith('Xcode '): + # Look for a line in the output that reads "Xcode X.Y.Z" + version_lines = [line for line in output.split('\n') if line.startswith('Xcode ')] + if version_lines: try: - # Split content up to the first \n - # then split the content after the first space + # Split the content after the first space # and split that content on the dots. # Append 0's to fill any gaps caused by # version numbers that don't have a minor version. version = tuple( int(v) - for v in output.split('\n')[0].split(' ')[1].split('.') + for v in version_lines[0].split(' ')[1].split('.') ) + (0, 0) if version < min_version:
beeware/briefcase
9d51ca01bf46e5689d941ca7c06c5034c724908e
diff --git a/tests/integrations/xcode/test_ensure_xcode_is_installed.py b/tests/integrations/xcode/test_ensure_xcode_is_installed.py index 181625fe..f82de173 100644 --- a/tests/integrations/xcode/test_ensure_xcode_is_installed.py +++ b/tests/integrations/xcode/test_ensure_xcode_is_installed.py @@ -26,9 +26,7 @@ def test_not_installed(tmp_path): # Test a location where Xcode *won't* be installed with pytest.raises(BriefcaseCommandError): - ensure_xcode_is_installed( - command, - ) + ensure_xcode_is_installed(command) def test_not_installed_hardcoded_path(tmp_path): @@ -113,6 +111,32 @@ def test_installed_no_minimum_version(xcode): ) +def test_installed_extra_output(capsys, xcode): + "If Xcode is installed but outputs extra content, the check is still satisfied." + # This specific output was seen in the wild with Xcode 13.2.1; see #668 + command = mock.MagicMock() + command.subprocess.check_output.return_value = '\n'.join([ + "objc[86306]: Class AMSupportURLConnectionDelegate is implemented in both /usr/lib/libauthinstall.dylib (0x20d17ab90) and /Library/Apple/System/Library/PrivateFrameworks/MobileDevice.framework/Versions/A/MobileDevice (0x1084b82c8). One of the two will be used. Which one is undefined." # noqa: E501 + "objc[86306]: Class AMSupportURLSession is implemented in both /usr/lib/libauthinstall.dylib (0x20d17abe0) and /Library/Apple/System/Library/PrivateFrameworks/MobileDevice.framework/Versions/A/MobileDevice (0x1084b8318). One of the two will be used. Which one is undefined.", # noqa: E501 + "Xcode 13.2.1", + "Build version 13C100", + ]) + + # Check passes without an error. + ensure_xcode_is_installed(command, xcode_location=xcode, min_version=(11, 1)) + + # xcodebuild was invoked + command.subprocess.check_output.assert_called_once_with( + ['xcodebuild', '-version'], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + + # No warning generated. + out = capsys.readouterr().out + assert "WARNING" not in out + + @pytest.mark.parametrize( 'min_version, version', [
Launch failed on ARM Mac **Describe the bug** The application cannot be opened for an unexpected reason **To Reproduce** Steps to reproduce the behavior: 1. Tutorial 1 2. Tutorial 2 3. Tutorial 3 **Expected behavior** Works as described in the tutorial. **Screenshots** ``` (beeware-venv) tony@x helloworld % briefcase run [helloworld] Starting app... The application cannot be opened for an unexpected reason, error=Error Domain=RBSRequestErrorDomain Code=5 "Launch failed." UserInfo={NSLocalizedFailureReason=Launch failed., NSUnderlyingError=0x60000361eb50 {Error Domain=NSPOSIXErrorDomain Code=153 "Unknown error: 153" UserInfo={NSLocalizedDescription=Launchd job spawn failed}}} Unable to start app helloworld. ``` **Environment:** - Operating System: MacOS Monterey 12.1 (Apple M1 Pro) - Python version: 3.8.9 - Software versions: - Briefcase: 0.3.5 - Toga: 0.3.0.dev20 **Additional context** Followed tutorials exactly. Ran code in ~/Downloads folder. ``` python3 -d Python 3.8.9 (default, Oct 26 2021, 07:25:53) [Clang 13.0.0 (clang-1300.0.29.30)] on darwin ```
0.0
9d51ca01bf46e5689d941ca7c06c5034c724908e
[ "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_extra_output" ]
[ "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_not_installed", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_not_installed_hardcoded_path", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_exists_but_command_line_tools_selected", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_exists_but_corrupted", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_no_minimum_version", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version0-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version1-11.2.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version2-11.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version3-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version4-8.2.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version5-8.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version6-11.2.5]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version7-11.3.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version8-12.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version9-11.2.5]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version10-11.3.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version11-12.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version12-11.2.5]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version13-11.3.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version14-12.0.0]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version15-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version16-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version17-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version18-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version19-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_success[min_version20-11.2]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version0-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version1-11.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version2-11.2.1]", 
"tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version3-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version4-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_installed_with_minimum_version_failure[min_version5-8.2.1]", "tests/integrations/xcode/test_ensure_xcode_is_installed.py::test_unexpected_version_output" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-04-13 06:26:38+00:00
bsd-3-clause
1,339
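The core of this record's fix is that the version line is located anywhere in the `xcodebuild -version` output rather than assumed to be the first line. A sketch of just that parsing step — the helper name and return convention are mine; the line scan and zero-padding follow the diff:

```python
def parse_xcode_version(output: str):
    """Return a padded version tuple from `xcodebuild -version` output."""
    # Find the line that actually reads "Xcode X.Y.Z", skipping any
    # unrelated noise (such as the objc class-collision warnings seen
    # in the wild) that may precede it.
    version_lines = [line for line in output.split("\n") if line.startswith("Xcode ")]
    if not version_lines:
        return None
    # Zero-pad so that e.g. "Xcode 13" still compares cleanly against
    # a (major, minor) minimum.
    return tuple(int(v) for v in version_lines[0].split(" ")[1].split(".")) + (0, 0)

output = (
    "objc[86306]: Class AMSupportURLSession is implemented twice ...\n"
    "Xcode 13.2.1\n"
    "Build version 13C100\n"
)
assert parse_xcode_version(output) == (13, 2, 1, 0, 0)
assert parse_xcode_version(output) >= (11, 1)
```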
beeware__briefcase-771
diff --git a/changes/749.bugfix.rst b/changes/749.bugfix.rst new file mode 100644 index 00000000..8f2f21f6 --- /dev/null +++ b/changes/749.bugfix.rst @@ -0,0 +1,1 @@ +Windows MSI projects are now able to support files with non-ASCII filenames. diff --git a/src/briefcase/platforms/windows/msi.py b/src/briefcase/platforms/windows/msi.py index 42a7d2d9..0023f89d 100644 --- a/src/briefcase/platforms/windows/msi.py +++ b/src/briefcase/platforms/windows/msi.py @@ -208,6 +208,8 @@ class WindowsMSIPackageCommand(WindowsMSIMixin, PackageCommand): "WixUtilExtension", "-ext", "WixUIExtension", + "-loc", + "unicode.wxl", "-o", self.distribution_path(app, packaging_format="msi"), f"{app.app_name}.wixobj",
beeware/briefcase
2127f6e226212a7e33cf3d2095e4ac175f6f378b
diff --git a/tests/platforms/windows/msi/test_package.py b/tests/platforms/windows/msi/test_package.py index 882d27a1..ed4725e7 100644 --- a/tests/platforms/windows/msi/test_package.py +++ b/tests/platforms/windows/msi/test_package.py @@ -70,6 +70,8 @@ def test_package_msi(package_command, first_app_config, tmp_path): "WixUtilExtension", "-ext", "WixUIExtension", + "-loc", + "unicode.wxl", "-o", tmp_path / "windows" / "First App-0.0.1.msi", "first-app.wixobj",
Windows packaging fails on non-Latin filenames Reported by user `@betterer` on Discord. If a project contains a non-Latin file name, the WiX compilation process fails, even though the app itself can run. This includes if the app doesn't use the file at all. The mere existence of a non-Latin filename by itself is enough to break Windows MSI packaging. **To Reproduce** Steps to reproduce the behavior: 1. Create a new Hello World app 2. Duplicate `app.py` as `app_副本.py` 3. Run `briefcase package` 4. See error ``` ... [dialogs] Building MSI... Compiling application manifest... Compiling application installer... dialogs.wxs dialogs-manifest.wxs Linking application installer... Z:\projects\beeware\toga\examples\dialogs\windows\msi\Dialog Demo\dialogs-manifest.wxs(19) : error LGHT0311 : A string was provided with characters that are not available in the specified database code page '1252'. Either change these characters to ones that exist in the database's code page, or update the database's code page by modifying one of the following attributes: Product/@Codepage, Module/@Codepage, Patch/@Codepage, PatchCreation/@Codepage, or WixLocalization/@Codepage. Unable to link app dialogs. ``` **Expected behavior** i18n filenames should not break the packaging process. **Screenshots** If applicable, add screenshots to help explain your problem. **Environment:** - Operating System: Windows - Python version: all - Software versions: - Briefcase: 0.3.7 **Additional context** The error message contains a candidate fix - adding a codepage declaration to the Product declaration. However, we need to be sure that this doesn't cause the filename to be corrupted by the packaging process.
0.0
2127f6e226212a7e33cf3d2095e4ac175f6f378b
[ "tests/platforms/windows/msi/test_package.py::test_package_msi" ]
[]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
2022-06-23 07:27:04+00:00
bsd-3-clause
1,340
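The failure class behind this record is reproducible without WiX at all: Windows-1252 simply cannot encode the characters in such filenames. A quick illustration, using the filename from the reproduction steps above:

```python
filename = "app_副本.py"
try:
    filename.encode("cp1252")
except UnicodeEncodeError as exc:
    # The same class of failure light.exe hits when its database
    # codepage (1252) can't represent a filename; the fix points the
    # linker at a Unicode-capable localization file via -loc unicode.wxl.
    print(f"cp1252 cannot encode {filename!r}: {exc}")
```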
beeware__briefcase-775
diff --git a/changes/382.feature.rst b/changes/382.feature.rst new file mode 100644 index 00000000..3cc4e55e --- /dev/null +++ b/changes/382.feature.rst @@ -0,0 +1,1 @@ +Windows MSI installers can now be configured to ask the user whether they want a per-user or per-machine install. diff --git a/changes/688.bugfix.rst b/changes/688.bugfix.rst new file mode 100644 index 00000000..a7f2db90 --- /dev/null +++ b/changes/688.bugfix.rst @@ -0,0 +1,1 @@ +Windows MSI installers now install in ``Program Files``, rather than ``Program Files (x86)``. diff --git a/changes/774.feature.rst b/changes/774.feature.rst new file mode 100644 index 00000000..246c2e4b --- /dev/null +++ b/changes/774.feature.rst @@ -0,0 +1,1 @@ +Temporary docker containers are now cleaned up after use. The wording of Docker progress messages has also been improved. diff --git a/docs/reference/platforms/windows/msi.rst b/docs/reference/platforms/windows/msi.rst index 870611e6..d767d4ae 100644 --- a/docs/reference/platforms/windows/msi.rst +++ b/docs/reference/platforms/windows/msi.rst @@ -43,7 +43,8 @@ installer; however, they are installed once and shared between all users on a computer. If ``true`` the installer will attempt to install the app as a per-machine app, -available to all users. Defaults to a per-user install. +available to all users. If ``false``, the installer will install as a per-user +app. If undefined the installer will ask the user for their preference. ``version_triple`` ------------------ diff --git a/src/briefcase/integrations/docker.py b/src/briefcase/integrations/docker.py index a05cd19b..90a921f5 100644 --- a/src/briefcase/integrations/docker.py +++ b/src/briefcase/integrations/docker.py @@ -202,14 +202,15 @@ class Docker: def prepare(self): try: self.command.logger.info( - "Building Docker container image...", prefix=self.app.app_name + "Building Docker container image...", + prefix=self.app.app_name, ) try: system_requires = " ".join(self.app.system_requires) except AttributeError: system_requires = "" - with self.command.input.wait_bar("Building container..."): + with self.command.input.wait_bar("Building Docker image..."): self._subprocess.run( [ "docker", @@ -237,23 +238,24 @@ class Docker: ) except subprocess.CalledProcessError as e: raise BriefcaseCommandError( - f"Error building Docker container for {self.app.app_name}." + f"Error building Docker image for {self.app.app_name}." ) from e def run(self, args, env=None, **kwargs): - """Run a process inside the Docker container.""" - # Set up the `docker run` invocation in interactive mode, - # with volume mounts for the platform and .briefcase directories. - # The :z suffix allows SELinux to modify the host mount; it is ignored - # on non-SELinux platforms. + """Run a process inside a Docker container.""" + # Set up the `docker run` with volume mounts for the platform & + # .briefcase directories and to delete the temporary container + # after running the command. + # The :z suffix allows SELinux to modify the host mount; it is + # ignored on non-SELinux platforms. docker_args = [ "docker", "run", - "--tty", "--volume", f"{self.command.platform_path}:/app:z", "--volume", f"{self.command.dot_briefcase_path}:/home/brutus/.briefcase:z", + "--rm", ] # If any environment variables have been defined, pass them in @@ -262,10 +264,10 @@ class Docker: for key, value in env.items(): docker_args.extend(["--env", f"{key}={value}"]) - # ... then the image name. + # ... 
then the image name to create the temporary container with docker_args.append(self.command.docker_image_tag(self.app)) - # ... then add all the arguments + # ... then add the command (and its arguments) to run in the container for arg in args: arg = str(arg) if arg == sys.executable: diff --git a/src/briefcase/platforms/windows/msi.py b/src/briefcase/platforms/windows/msi.py index 0023f89d..204a48f7 100644 --- a/src/briefcase/platforms/windows/msi.py +++ b/src/briefcase/platforms/windows/msi.py @@ -77,13 +77,17 @@ class WindowsMSICreateCommand(WindowsMSIMixin, CreateCommand): try: install_scope = "perMachine" if app.system_installer else "perUser" except AttributeError: - # system_installer not defined in config; default to perUser install. - install_scope = "perUser" + # system_installer not defined in config; default to asking the user + install_scope = None return { "version_triple": version_triple, "guid": str(guid), "install_scope": install_scope, + # Template forward compatibility flags + # 2022-06-29: #775 added the need to pass for -arch 64 to candle.exe; + # Briefcase v0.3.8 didn't use that flag. + "_use_arch64": True, } def install_app_support_package(self, app: BaseConfig): @@ -187,6 +191,8 @@ class WindowsMSIPackageCommand(WindowsMSIMixin, PackageCommand): "WixUtilExtension", "-ext", "WixUIExtension", + "-arch", + "x64", "-dSourceDir=src", f"{app.app_name}.wxs", f"{app.app_name}-manifest.wxs",
beeware/briefcase
8432bb8203f601835fb04abb6f96e145a6241fc0
diff --git a/tests/integrations/docker/test_Docker__run.py b/tests/integrations/docker/test_Docker__run.py index 3405acd5..253fff60 100644 --- a/tests/integrations/docker/test_Docker__run.py +++ b/tests/integrations/docker/test_Docker__run.py @@ -16,11 +16,11 @@ def test_simple_call(mock_docker, tmp_path, capsys): [ "docker", "run", - "--tty", "--volume", f"{tmp_path / 'platform'}:/app:z", "--volume", f"{tmp_path / '.briefcase'}:/home/brutus/.briefcase:z", + "--rm", "briefcase/com.example.myapp:py3.X", "hello", "world", @@ -40,11 +40,11 @@ def test_simple_call_with_arg(mock_docker, tmp_path, capsys): [ "docker", "run", - "--tty", "--volume", f"{tmp_path / 'platform'}:/app:z", "--volume", f"{tmp_path / '.briefcase'}:/home/brutus/.briefcase:z", + "--rm", "briefcase/com.example.myapp:py3.X", "hello", "world", @@ -64,11 +64,11 @@ def test_simple_call_with_path_arg(mock_docker, tmp_path, capsys): [ "docker", "run", - "--tty", "--volume", f"{tmp_path / 'platform'}:/app:z", "--volume", f"{tmp_path / '.briefcase'}:/home/brutus/.briefcase:z", + "--rm", "briefcase/com.example.myapp:py3.X", "hello", os.fsdecode(tmp_path / "location"), @@ -95,11 +95,11 @@ def test_simple_call_with_sys_executable_arg( [ "docker", "run", - "--tty", "--volume", f"{tmp_path / 'platform'}:/app:z", "--volume", f"{tmp_path / '.briefcase'}:/home/brutus/.briefcase:z", + "--rm", "briefcase/com.example.myapp:py3.X", "hello", "python3.X", @@ -124,11 +124,11 @@ def test_simple_verbose_call(mock_docker, tmp_path, capsys): [ "docker", "run", - "--tty", "--volume", f"{tmp_path / 'platform'}:/app:z", "--volume", f"{tmp_path / '.briefcase'}:/home/brutus/.briefcase:z", + "--rm", "briefcase/com.example.myapp:py3.X", "hello", "world", @@ -139,9 +139,10 @@ def test_simple_verbose_call(mock_docker, tmp_path, capsys): assert capsys.readouterr().out == ( "\n" ">>> Running Command:\n" - ">>> docker run --tty " + ">>> docker run " f"--volume {tmp_path / 'platform'}:/app:z " f"--volume {tmp_path / '.briefcase'}:/home/brutus/.briefcase:z " + "--rm " "briefcase/com.example.myapp:py3.X " "hello world\n" ">>> Return code: 3\n" diff --git a/tests/platforms/linux/appimage/test_build.py b/tests/platforms/linux/appimage/test_build.py index b469d16c..022d6857 100644 --- a/tests/platforms/linux/appimage/test_build.py +++ b/tests/platforms/linux/appimage/test_build.py @@ -216,11 +216,11 @@ def test_build_appimage_with_docker(build_command, first_app, tmp_path): [ "docker", "run", - "--tty", "--volume", f"{build_command.platform_path}:/app:z", "--volume", f"{build_command.dot_briefcase_path}:/home/brutus/.briefcase:z", + "--rm", "--env", "VERSION=0.0.1", f"briefcase/com.example.first-app:py3.{sys.version_info.minor}", diff --git a/tests/platforms/windows/msi/test_create.py b/tests/platforms/windows/msi/test_create.py index c01e00b1..9bbb09ed 100644 --- a/tests/platforms/windows/msi/test_create.py +++ b/tests/platforms/windows/msi/test_create.py @@ -83,7 +83,12 @@ def test_default_install_scope(first_app_config, tmp_path): context = command.output_format_template_context(first_app_config) - assert context["install_scope"] == "perUser" + assert context == { + "guid": "d666a4f1-c7b7-52cc-888a-3a35a7cc97e5", + "version_triple": "0.0.1", + "install_scope": None, + "_use_arch64": True, + } def test_per_machine_install_scope(first_app_config, tmp_path): @@ -93,7 +98,12 @@ def test_per_machine_install_scope(first_app_config, tmp_path): context = command.output_format_template_context(first_app_config) - assert context["install_scope"] == "perMachine" + assert 
context == { + "guid": "d666a4f1-c7b7-52cc-888a-3a35a7cc97e5", + "version_triple": "0.0.1", + "install_scope": "perMachine", + "_use_arch64": True, + } def test_per_user_install_scope(first_app_config, tmp_path): @@ -103,4 +113,9 @@ def test_per_user_install_scope(first_app_config, tmp_path): context = command.output_format_template_context(first_app_config) - assert context["install_scope"] == "perUser" + assert context == { + "guid": "d666a4f1-c7b7-52cc-888a-3a35a7cc97e5", + "version_triple": "0.0.1", + "install_scope": "perUser", + "_use_arch64": True, + } diff --git a/tests/platforms/windows/msi/test_package.py b/tests/platforms/windows/msi/test_package.py index ed4725e7..52c136fe 100644 --- a/tests/platforms/windows/msi/test_package.py +++ b/tests/platforms/windows/msi/test_package.py @@ -54,6 +54,8 @@ def test_package_msi(package_command, first_app_config, tmp_path): "WixUtilExtension", "-ext", "WixUIExtension", + "-arch", + "x64", "-dSourceDir=src", "first-app.wxs", "first-app-manifest.wxs",
User installable MSI packages? The Mu Editor MSI file I managed to produce in the context of #381 requires administrative privileges for installation. It would be very nice if `briefcase` had the option to produce user installable MSI files (assuming there is such a thing -- I suppose/hope so, given that lots of software is user installable, nowadays). If not, that could be considered a user-experience regression with respect to Mu users... Which of user-install/system-install should be the default is a secondary thing for now. Thoughts?
0.0
8432bb8203f601835fb04abb6f96e145a6241fc0
[ "tests/integrations/docker/test_Docker__run.py::test_simple_call", "tests/integrations/docker/test_Docker__run.py::test_simple_call_with_arg", "tests/integrations/docker/test_Docker__run.py::test_simple_call_with_path_arg", "tests/integrations/docker/test_Docker__run.py::test_simple_call_with_sys_executable_arg", "tests/integrations/docker/test_Docker__run.py::test_simple_verbose_call", "tests/platforms/linux/appimage/test_build.py::test_build_appimage_with_docker", "tests/platforms/windows/msi/test_create.py::test_default_install_scope", "tests/platforms/windows/msi/test_create.py::test_per_machine_install_scope", "tests/platforms/windows/msi/test_create.py::test_per_user_install_scope", "tests/platforms/windows/msi/test_package.py::test_package_msi" ]
[ "tests/platforms/linux/appimage/test_build.py::test_verify_tools_wrong_platform", "tests/platforms/linux/appimage/test_build.py::test_verify_tools_download_failure", "tests/platforms/linux/appimage/test_build.py::test_build_appimage", "tests/platforms/linux/appimage/test_build.py::test_build_failure", "tests/platforms/windows/msi/test_create.py::test_version_triple[1-1.0.0]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2-1.2.0]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3.4-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3a4-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3b5-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3rc6-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3.dev7-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_version_triple[1.2.3.post8-1.2.3]", "tests/platforms/windows/msi/test_create.py::test_explicit_version_triple", "tests/platforms/windows/msi/test_create.py::test_guid", "tests/platforms/windows/msi/test_create.py::test_explicit_guid", "tests/platforms/windows/msi/test_create.py::test_support_package_url" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-06-28 08:06:47+00:00
bsd-3-clause
1,341
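A sketch of the `docker run` argument construction after this record's change. The paths and image tag below are placeholder values; the flag ordering (volume mounts, then `--rm` so the temporary container is deleted on exit, then environment variables, image, and command) follows the diff and the record's tests:

```python
def docker_run_args(platform_path, briefcase_path, image_tag, args, env=None):
    docker_args = [
        "docker", "run",
        # The :z suffix lets SELinux modify the host mount; it is
        # ignored on non-SELinux platforms.
        "--volume", f"{platform_path}:/app:z",
        "--volume", f"{briefcase_path}:/home/brutus/.briefcase:z",
        # Remove the temporary container once the command finishes.
        "--rm",
    ]
    for key, value in (env or {}).items():
        docker_args.extend(["--env", f"{key}={value}"])
    docker_args.append(image_tag)
    docker_args.extend(str(arg) for arg in args)
    return docker_args

print(docker_run_args("/tmp/platform", "/tmp/.briefcase",
                      "briefcase/com.example.app:py3.X",
                      ["hello", "world"], env={"VERSION": "0.0.1"}))
```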
beeware__briefcase-844
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bddbba12..c0ad7731 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -131,27 +131,30 @@ jobs: run: | tox -e py - verify-desktop: - name: Desktop app verification - needs: smoke + verify-apps: + name: App verification + needs: platform-compat strategy: max-parallel: 4 matrix: - platform: ['macos-latest', 'windows-latest', 'ubuntu-18.04'] + os_name: ['macOS', 'windows', 'linux'] framework: ['toga', 'pyside2', 'pyside6', 'ppb'] include: - - platform: macos-latest + - os_name: macOS + platform: macos-latest briefcase-data-dir: ~/Library/Caches/org.beeware.briefcase pip-cache-dir: ~/Library/Caches/pip docker-cache-dir: ~/Library/Containers/com.docker.docker/Data/vms/0/ - - platform: windows-latest + - os_name: windows + platform: windows-latest briefcase-data-dir: ~\AppData\Local\BeeWare\briefcase\Cache pip-cache-dir: ~\AppData\Local\pip\Cache docker-cache-dir: C:\ProgramData\DockerDesktop - - platform: ubuntu-18.04 + - os_name: linux + # Need to use at least 22.04 to get the bugfix in flatpak for handling spaces in filenames. + platform: ubuntu-22.04 briefcase-data-dir: ~/.cache/briefcase pip-cache-dir: ~/.cache/pip - briefcase-args: --no-docker # cache action cannot cache docker images (actions/cache#31) # docker-cache-dir: /var/lib/docker runs-on: ${{ matrix.platform }} @@ -170,16 +173,57 @@ jobs: uses: actions/[email protected] with: python-version: ${{ env.python_version }} - - name: Install dependencies - if: matrix.platform == 'ubuntu-18.04' + - name: Install system dependencies + if: matrix.platform == 'ubuntu-22.04' run: | - sudo apt-get update -y - sudo apt-get install -y python3-gi python3-gi-cairo gir1.2-gtk-3.0 python3-dev libgirepository1.0-dev libcairo2-dev pkg-config + sudo apt-get install -y flatpak flatpak-builder - name: Install dependencies run: | pip install --upgrade pip pip install --upgrade setuptools - pip install tox - - name: Test + pip install . 
+ - name: Create App + run: | + cd tests/apps + cat verify-${{ matrix.framework }}.config | briefcase new + - name: Build App + run: | + cd tests/apps/verify-${{ matrix.framework }} + briefcase create + briefcase build + briefcase package --adhoc-sign + - name: Build Xcode project + if: matrix.os_name == 'macOS' + run: | + cd tests/apps/verify-${{ matrix.framework }} + briefcase create ${{ matrix.os_name }} Xcode + briefcase build ${{ matrix.os_name }} Xcode + briefcase package ${{ matrix.os_name }} Xcode --adhoc-sign + - name: Build Visual Studio project + if: matrix.os_name == 'windows' + run: | + cd tests/apps/verify-${{ matrix.framework }} + briefcase create ${{ matrix.os_name }} VisualStudio + briefcase build ${{ matrix.os_name }} VisualStudio + briefcase package ${{ matrix.os_name }} VisualStudio --adhoc-sign + - name: Build Flatpak project + if: matrix.os_name == 'linux' && matrix.framework == 'toga' + run: | + cd tests/apps/verify-${{ matrix.framework }} + briefcase create ${{ matrix.os_name }} flatpak + briefcase build ${{ matrix.os_name }} flatpak + briefcase package ${{ matrix.os_name }} flatpak --adhoc-sign + - name: Build Android App + if: matrix.framework == 'toga' + run: | + cd tests/apps/verify-${{ matrix.framework }} + briefcase create android + briefcase build android + briefcase package android --adhoc-sign + - name: Build iOS App + if: matrix.platform == 'macos-latest' && matrix.framework == 'toga' run: | - tox -e verify-${{ matrix.framework }} -- ${{ matrix.briefcase-args }} + cd tests/apps/verify-${{ matrix.framework }} + briefcase create iOS + briefcase build iOS -d "iPhone SE (2nd generation)" + briefcase package iOS --adhoc-sign diff --git a/changes/841.bugfix.rst b/changes/841.bugfix.rst new file mode 100644 index 00000000..f05efe07 --- /dev/null +++ b/changes/841.bugfix.rst @@ -0,0 +1,1 @@ +When verifying the existence of the Android emulator, Briefcase now looks for the actual binary, not the folder that contains the binary. This was causing false positives on some Android SDK setups. diff --git a/changes/844.misc.rst b/changes/844.misc.rst new file mode 100644 index 00000000..7516c98d --- /dev/null +++ b/changes/844.misc.rst @@ -0,0 +1,1 @@ +Linux tests are now run in Docker on CI; iOS, Android, Linux Flatpak, macOS Xcode, and Windows VisualStudio are tested. diff --git a/src/briefcase/integrations/android_sdk.py b/src/briefcase/integrations/android_sdk.py index ecc7193a..48dfe94d 100644 --- a/src/briefcase/integrations/android_sdk.py +++ b/src/briefcase/integrations/android_sdk.py @@ -406,7 +406,7 @@ connection. # might be missing. (self.root_path / "platforms").mkdir(exist_ok=True) - if (self.root_path / "emulator").exists(): + if (self.emulator_path).exists(): self.command.logger.debug("Android emulator is already installed.") return diff --git a/tox.ini b/tox.ini index e10d2c16..8682bb42 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. 
[tox] -envlist = towncrier-check,docs,package,py{37,38,39,310,311},verify-{toga,pyside2,pyside6,ppb} +envlist = towncrier-check,docs,package,py{37,38,39,310,311} skip_missing_interpreters = true [testenv] @@ -55,17 +55,3 @@ passenv = TWINE_PASSWORD commands = python -m twine upload dist/* - -[testenv:verify-{toga,pyside2,pyside6,ppb}] -setenv = PYTHONPATH = {toxinidir}/src -changedir = {toxinidir}/tests/apps -deps = -allowlist_externals = - sh - rm -commands = - rm -rf {envname} - sh -c 'cat {envname}.config | briefcase new' - sh -c 'cd {envname} && briefcase create {posargs}' - sh -c 'cd {envname} && briefcase build {posargs}' - sh -c 'cd {envname} && briefcase package --adhoc-sign {posargs}'
beeware/briefcase
48a0f0b94560030f75986cd6e0749060af672784
diff --git a/tests/integrations/android_sdk/ADB/test_command.py b/tests/integrations/android_sdk/ADB/test_command.py index 07fdb370..cc5250ed 100644 --- a/tests/integrations/android_sdk/ADB/test_command.py +++ b/tests/integrations/android_sdk/ADB/test_command.py @@ -1,5 +1,6 @@ import os import subprocess +import sys from pathlib import Path import pytest @@ -18,7 +19,12 @@ def test_simple_command(mock_sdk, tmp_path): # Check that adb was invoked with the expected commands mock_sdk.command.subprocess.check_output.assert_called_once_with( [ - os.fsdecode(tmp_path / "sdk" / "platform-tools" / "adb"), + os.fsdecode( + tmp_path + / "sdk" + / "platform-tools" + / f"adb{'.exe' if sys.platform == 'win32' else ''}" + ), "-s", "exampleDevice", "example", @@ -63,7 +69,12 @@ def test_error_handling(mock_sdk, tmp_path, name, exception): # Check that adb was invoked as expected mock_sdk.command.subprocess.check_output.assert_called_once_with( [ - os.fsdecode(tmp_path / "sdk" / "platform-tools" / "adb"), + os.fsdecode( + tmp_path + / "sdk" + / "platform-tools" + / f"adb{'.exe' if sys.platform == 'win32' else ''}" + ), "-s", "exampleDevice", "example", diff --git a/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py b/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py index 610a8647..6315f404 100644 --- a/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py +++ b/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py @@ -1,16 +1,29 @@ import os import subprocess +import sys import pytest from briefcase.exceptions import BriefcaseCommandError +from ....utils import create_file + + +def create_emulator(root_path): + # Create `emulator` within `root_path`. + if sys.platform == "win32": + emulator_bin = "emulator.exe" + else: + emulator_bin = "emulator" + + create_file(root_path / "emulator" / emulator_bin, "The Emulator", chmod=0o755) + def test_succeeds_immediately_if_emulator_installed(mock_sdk): """`verify_emulator()` exits early if the emulator exists in its root_path.""" # Create `emulator` within `root_path`. - (mock_sdk.root_path / "emulator").mkdir(parents=True) + create_emulator(mock_sdk.root_path) # Also create the platforms folder (mock_sdk.root_path / "platforms").mkdir(parents=True) @@ -28,7 +41,7 @@ def test_succeeds_immediately_if_emulator_installed(mock_sdk): def test_creates_platforms_folder(mock_sdk): """If the platforms folder doesn't exist, it is created.""" # Create `emulator` within `root_path`. - (mock_sdk.root_path / "emulator").mkdir(parents=True) + create_emulator(mock_sdk.root_path) # Verify the emulator. This should create the missing platforms folder. mock_sdk.verify_emulator() @@ -59,6 +72,28 @@ def test_installs_android_emulator(mock_sdk): ) +def test_partial_android_emulator_install(mock_sdk): + """If the Android emulator is only partially installed, it's not + detected.""" + # Create the emulator *directory*, but not the actual binary. 
+ (mock_sdk.root_path / "emulator").mkdir(parents=True) + + mock_sdk.verify_emulator() + + # Platforms folder now exists + assert (mock_sdk.root_path / "platforms").exists() + + mock_sdk.command.subprocess.run.assert_called_once_with( + [ + os.fsdecode(mock_sdk.sdkmanager_path), + "platform-tools", + "emulator", + ], + env=mock_sdk.env, + check=True, + ) + + def test_install_problems_are_reported(mock_sdk): """If the sdkmanager fails to properly install the Android emulator, an exception is raised.""" diff --git a/tests/integrations/android_sdk/conftest.py b/tests/integrations/android_sdk/conftest.py index 1010a8a6..8845e05e 100644 --- a/tests/integrations/android_sdk/conftest.py +++ b/tests/integrations/android_sdk/conftest.py @@ -1,3 +1,4 @@ +import platform from pathlib import Path from unittest.mock import MagicMock @@ -17,9 +18,8 @@ def mock_sdk(tmp_path): command.input = DummyConsole() command.logger = Log(verbosity=1) - # For default test purposes, assume we're on macOS x86_64 - command.host_os = "Darwin" - command.host_arch = "x86_64" + command.host_arch = platform.machine() + command.host_os = platform.system() # Mock an empty environment command.os.environ = {} diff --git a/tests/utils.py b/tests/utils.py index 0295996a..165c13e1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,3 +1,4 @@ +import os import zipfile from unittest.mock import MagicMock @@ -31,7 +32,7 @@ class FsPathMock(MagicMock): return MagicMock(**kw) -def create_file(filepath, content, mode="w"): +def create_file(filepath, content, mode="w", chmod=None): """A test utility to create a file with known content. Ensures that the directory for the file exists, and writes a file with @@ -48,6 +49,9 @@ def create_file(filepath, content, mode="w"): with filepath.open(mode) as f: f.write(content) + if chmod: + os.chmod(filepath, chmod) + return filepath
Modify build image of AppImage template to use Ubuntu 20.04 According to GitHub, by Dec 1 2022 they will [formally deprecate the Ubuntu-18.04 Actions runner](https://github.com/actions/runner-images/issues/6002). This puts Briefcase in an interesting situation. On the one hand Briefcase as used by end users is (mostly) unaffected - we encourage the use of Docker for building AppImages, and the Ubuntu 18.04 image will continue to exist. However, as a project, *we* have a problem. Due to historical issues using Docker in CI, we run our CI in `--no-docker` mode - which means that in December, we'll no longer be able to run CI for Linux. **Describe the solution you'd like** Update the template and documentation to use Ubuntu 20.04 as the base image. This is something we've done before ([transitioning from 16.04 to 18.04 as the base image](https://github.com/beeware/briefcase-linux-appimage-template/commit/a172088d46f86b0a95799e8f03bc04149741c765)). Interestingly, this was almost 2 years ago to the day, which matches the 2 year LTS release cadence of Ubuntu. In addition to continued CI support, this has the added benefit that we'll have access to more recent packages in the base image. **Describe alternatives you've considered** 1. Remove AppImages from our testing setup, but otherwise do nothing. Ubuntu 18.04 AppImage images will continue to work; they'll just be untested. 2. Modify CI to use Docker, but continue to use 18.04 as the base image. 3. Deprecate the AppImage backend in favour of Flatpak. **Additional context** This may have some overlap with #824 - a switch of this magnitude requires some sort of control from a user perspective.
0.0
48a0f0b94560030f75986cd6e0749060af672784
[ "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_partial_android_emulator_install" ]
[ "tests/integrations/android_sdk/ADB/test_command.py::test_simple_command", "tests/integrations/android_sdk/ADB/test_command.py::test_error_handling[device-not-found-InvalidDeviceError]", "tests/integrations/android_sdk/ADB/test_command.py::test_error_handling[arbitrary-adb-error-unknown-command-CalledProcessError]", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_succeeds_immediately_if_emulator_installed", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_creates_platforms_folder", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_installs_android_emulator", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_install_problems_are_reported" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-08-24 03:15:23+00:00
bsd-3-clause
1,342
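The emulator fix in this record reduces to checking for the executable rather than its containing directory. A self-contained sketch: the `emulator`/`emulator.exe` naming is taken from the record's test helper, and `emulator_installed` is a name invented here, not briefcase's API:

```python
import sys
from pathlib import Path

def emulator_installed(sdk_root: Path) -> bool:
    # Look for the actual executable: a partially-installed SDK can leave
    # an "emulator" directory containing only installer metadata, which a
    # plain directory check misreads as a completed install.
    emulator_bin = "emulator.exe" if sys.platform == "win32" else "emulator"
    return (sdk_root / "emulator" / emulator_bin).exists()

assert emulator_installed(Path("/nonexistent-sdk")) is False
```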
beeware__briefcase-845
diff --git a/changes/837.bugfix.rst b/changes/837.bugfix.rst new file mode 100644 index 00000000..5473bb5f --- /dev/null +++ b/changes/837.bugfix.rst @@ -0,0 +1,1 @@ +The RCEdit plugin can now be upgraded. diff --git a/changes/841.bugfix.rst b/changes/841.bugfix.rst new file mode 100644 index 00000000..f05efe07 --- /dev/null +++ b/changes/841.bugfix.rst @@ -0,0 +1,1 @@ +When verifying the existence of the Android emulator, Briefcase now looks for the actual binary, not the folder that contains the binary. This was causing false positives on some Android SDK setups. diff --git a/changes/842.feature.rst b/changes/842.feature.rst new file mode 100644 index 00000000..21b07dad --- /dev/null +++ b/changes/842.feature.rst @@ -0,0 +1,1 @@ +Log messages can now be captured on iOS if they originate from a dynamically loaded module. diff --git a/src/briefcase/integrations/android_sdk.py b/src/briefcase/integrations/android_sdk.py index ecc7193a..48dfe94d 100644 --- a/src/briefcase/integrations/android_sdk.py +++ b/src/briefcase/integrations/android_sdk.py @@ -406,7 +406,7 @@ connection. # might be missing. (self.root_path / "platforms").mkdir(exist_ok=True) - if (self.root_path / "emulator").exists(): + if (self.emulator_path).exists(): self.command.logger.debug("Android emulator is already installed.") return diff --git a/src/briefcase/integrations/rcedit.py b/src/briefcase/integrations/rcedit.py index 375f7320..16886a25 100644 --- a/src/briefcase/integrations/rcedit.py +++ b/src/briefcase/integrations/rcedit.py @@ -3,6 +3,7 @@ from briefcase.exceptions import MissingToolError class RCEdit: name = "rcedit" + full_name = "RCEdit" def __init__(self, command): self.command = command diff --git a/src/briefcase/platforms/iOS/xcode.py b/src/briefcase/platforms/iOS/xcode.py index cb248810..a1bdca11 100644 --- a/src/briefcase/platforms/iOS/xcode.py +++ b/src/briefcase/platforms/iOS/xcode.py @@ -351,6 +351,19 @@ class iOSXcodeRunCommand(iOSXcodeMixin, RunCommand): ) from e # Start log stream for the app. + # The following sets up a log stream filter that looks for: + # 1. a log sender that matches that app binary; or, + # 2. a log sender that is a Python extension module, + # and a process that matches the app binary. + # Case (1) works when the standard library is statically linked, + # and for native NSLog() calls in the bootstrap binary + # Case (2) works when the standard library is dynamically linked, + # and ctypes (which handles the NSLog integration) is an + # extension module. + # It's not enough to filter on *just* the processImagePath, + # as the process will generate lots of system-level messages. + # We can't filter on *just* the senderImagePath, because other + # apps will generate log messages that would be caught by the filter. simulator_log_popen = self.subprocess.Popen( [ "xcrun", @@ -362,7 +375,9 @@ class iOSXcodeRunCommand(iOSXcodeMixin, RunCommand): "--style", "compact", "--predicate", - f'senderImagePath ENDSWITH "/{app.formal_name}"', + f'senderImagePath ENDSWITH "/{app.formal_name}"' + f' OR (processImagePath ENDSWITH "/{app.formal_name}"' + ' AND senderImagePath ENDSWITH "-iphonesimulator.so")', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, diff --git a/src/briefcase/platforms/macOS/__init__.py b/src/briefcase/platforms/macOS/__init__.py index 6c9530d7..1f90a3c5 100644 --- a/src/briefcase/platforms/macOS/__init__.py +++ b/src/briefcase/platforms/macOS/__init__.py @@ -47,6 +47,10 @@ class macOSRunMixin: # 2. a log sender of libffi, and a process that matches the app binary.
# Case (1) works for pre-Python 3.9 static linked binaries. # Case (2) works for Python 3.9+ dynamic linked binaries. + # It's not enough to filter on *just* the processImagePath, + # as the process will generate lots of system-level messages. + # We can't filter on *just* the senderImagePath, because other + # apps will generate log messages that would be caught by the filter. sender = os.fsdecode( self.binary_path(app) / "Contents" / "MacOS" / app.formal_name )
beeware/briefcase
8aabd32f0c6bdb2b4860c63984b359d9203a184d
diff --git a/tests/integrations/android_sdk/ADB/test_command.py b/tests/integrations/android_sdk/ADB/test_command.py index 07fdb370..cc5250ed 100644 --- a/tests/integrations/android_sdk/ADB/test_command.py +++ b/tests/integrations/android_sdk/ADB/test_command.py @@ -1,5 +1,6 @@ import os import subprocess +import sys from pathlib import Path import pytest @@ -18,7 +19,12 @@ def test_simple_command(mock_sdk, tmp_path): # Check that adb was invoked with the expected commands mock_sdk.command.subprocess.check_output.assert_called_once_with( [ - os.fsdecode(tmp_path / "sdk" / "platform-tools" / "adb"), + os.fsdecode( + tmp_path + / "sdk" + / "platform-tools" + / f"adb{'.exe' if sys.platform == 'win32' else ''}" + ), "-s", "exampleDevice", "example", @@ -63,7 +69,12 @@ def test_error_handling(mock_sdk, tmp_path, name, exception): # Check that adb was invoked as expected mock_sdk.command.subprocess.check_output.assert_called_once_with( [ - os.fsdecode(tmp_path / "sdk" / "platform-tools" / "adb"), + os.fsdecode( + tmp_path + / "sdk" + / "platform-tools" + / f"adb{'.exe' if sys.platform == 'win32' else ''}" + ), "-s", "exampleDevice", "example", diff --git a/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py b/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py index 610a8647..6315f404 100644 --- a/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py +++ b/tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py @@ -1,16 +1,29 @@ import os import subprocess +import sys import pytest from briefcase.exceptions import BriefcaseCommandError +from ....utils import create_file + + +def create_emulator(root_path): + # Create `emulator` within `root_path`. + if sys.platform == "win32": + emulator_bin = "emulator.exe" + else: + emulator_bin = "emulator" + + create_file(root_path / "emulator" / emulator_bin, "The Emulator", chmod=0o755) + def test_succeeds_immediately_if_emulator_installed(mock_sdk): """`verify_emulator()` exits early if the emulator exists in its root_path.""" # Create `emulator` within `root_path`. - (mock_sdk.root_path / "emulator").mkdir(parents=True) + create_emulator(mock_sdk.root_path) # Also create the platforms folder (mock_sdk.root_path / "platforms").mkdir(parents=True) @@ -28,7 +41,7 @@ def test_succeeds_immediately_if_emulator_installed(mock_sdk): def test_creates_platforms_folder(mock_sdk): """If the platforms folder doesn't exist, it is created.""" # Create `emulator` within `root_path`. - (mock_sdk.root_path / "emulator").mkdir(parents=True) + create_emulator(mock_sdk.root_path) # Verify the emulator. This should create the missing platforms folder. mock_sdk.verify_emulator() @@ -59,6 +72,28 @@ def test_installs_android_emulator(mock_sdk): ) +def test_partial_android_emulator_install(mock_sdk): + """If the Android emulator is only partially installed, it's not + detected.""" + # Create the emulator *directory*, but not the actual binary. 
+ (mock_sdk.root_path / "emulator").mkdir(parents=True) + + mock_sdk.verify_emulator() + + # Platforms folder now exists + assert (mock_sdk.root_path / "platforms").exists() + + mock_sdk.command.subprocess.run.assert_called_once_with( + [ + os.fsdecode(mock_sdk.sdkmanager_path), + "platform-tools", + "emulator", + ], + env=mock_sdk.env, + check=True, + ) + + def test_install_problems_are_reported(mock_sdk): """If the sdkmanager fails to properly install the Android emulator, an exception is raised.""" diff --git a/tests/integrations/android_sdk/conftest.py b/tests/integrations/android_sdk/conftest.py index 1010a8a6..8845e05e 100644 --- a/tests/integrations/android_sdk/conftest.py +++ b/tests/integrations/android_sdk/conftest.py @@ -1,3 +1,4 @@ +import platform from pathlib import Path from unittest.mock import MagicMock @@ -17,9 +18,8 @@ def mock_sdk(tmp_path): command.input = DummyConsole() command.logger = Log(verbosity=1) - # For default test purposes, assume we're on macOS x86_64 - command.host_os = "Darwin" - command.host_arch = "x86_64" + command.host_arch = platform.machine() + command.host_os = platform.system() # Mock an empty environment command.os.environ = {} diff --git a/tests/platforms/iOS/xcode/test_run.py b/tests/platforms/iOS/xcode/test_run.py index 7302e9a6..ad7e1e31 100644 --- a/tests/platforms/iOS/xcode/test_run.py +++ b/tests/platforms/iOS/xcode/test_run.py @@ -95,7 +95,9 @@ def test_run_app_simulator_booted(first_app_config, tmp_path): "--style", "compact", "--predicate", - 'senderImagePath ENDSWITH "/First App"', + 'senderImagePath ENDSWITH "/First App"' + ' OR (processImagePath ENDSWITH "/First App"' + ' AND senderImagePath ENDSWITH "-iphonesimulator.so")', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -199,7 +201,9 @@ def test_run_app_simulator_shut_down(first_app_config, tmp_path): "--style", "compact", "--predicate", - 'senderImagePath ENDSWITH "/First App"', + 'senderImagePath ENDSWITH "/First App"' + ' OR (processImagePath ENDSWITH "/First App"' + ' AND senderImagePath ENDSWITH "-iphonesimulator.so")', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -316,7 +320,9 @@ def test_run_app_simulator_shutting_down(first_app_config, tmp_path): "--style", "compact", "--predicate", - 'senderImagePath ENDSWITH "/First App"', + 'senderImagePath ENDSWITH "/First App"' + ' OR (processImagePath ENDSWITH "/First App"' + ' AND senderImagePath ENDSWITH "-iphonesimulator.so")', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -669,7 +675,9 @@ def test_run_app_simulator_launch_failure(first_app_config, tmp_path): "--style", "compact", "--predicate", - 'senderImagePath ENDSWITH "/First App"', + 'senderImagePath ENDSWITH "/First App"' + ' OR (processImagePath ENDSWITH "/First App"' + ' AND senderImagePath ENDSWITH "-iphonesimulator.so")', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, diff --git a/tests/utils.py b/tests/utils.py index 0295996a..165c13e1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,3 +1,4 @@ +import os import zipfile from unittest.mock import MagicMock @@ -31,7 +32,7 @@ class FsPathMock(MagicMock): return MagicMock(**kw) -def create_file(filepath, content, mode="w"): +def create_file(filepath, content, mode="w", chmod=None): """A test utility to create a file with known content. Ensures that the directory for the file exists, and writes a file with @@ -48,6 +49,9 @@ def create_file(filepath, content, mode="w"): with filepath.open(mode) as f: f.write(content) + if chmod: + os.chmod(filepath, chmod) + return filepath
Android emulator check can return a false positive See the log file below at 17:38:46: [briefcase.2022_08_23-17_38_49.run.log](https://github.com/beeware/briefcase/files/9401696/briefcase.2022_08_23-17_38_49.run.log) The emulator is not in the list of packages shown by sdkmanager. The user reported that the "emulator" directory contained nothing except the file `.installer/.installData`, with the following content: ``` #Mon Aug 22 06:11:28 CST 2022 class=com.android.repository.impl.installer.BasicInstaller path=C:\Users\Administrator\AppData\Local\BeeWare\briefcase\Cache\tools\android_sdk\.temp\PackageOperation01 ``` However, Briefcase incorrectly says the emulator is already installed, because the "emulator" directory exists. This eventually led to Briefcase crashing while trying to run `emulator -list-avds`. I don't know how this SDK configuration arose, but Briefcase could recover from it by checking for the emulator executable (`self.emulator_path`) rather than just the containing directory. This is consistent with the way it already checks for the sdkmanager executable. **Environment:** - Operating System: Windows 10 - Python version: 3.8 - Software versions: - Briefcase: 0.3.9
0.0
8aabd32f0c6bdb2b4860c63984b359d9203a184d
[ "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_partial_android_emulator_install", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_booted", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_shut_down", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_shutting_down", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_launch_failure" ]
[ "tests/integrations/android_sdk/ADB/test_command.py::test_simple_command", "tests/integrations/android_sdk/ADB/test_command.py::test_error_handling[device-not-found-InvalidDeviceError]", "tests/integrations/android_sdk/ADB/test_command.py::test_error_handling[arbitrary-adb-error-unknown-command-CalledProcessError]", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_succeeds_immediately_if_emulator_installed", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_creates_platforms_folder", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_installs_android_emulator", "tests/integrations/android_sdk/AndroidSDK/test_verify_emulator.py::test_install_problems_are_reported", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_boot_failure", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_open_failure", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_uninstall_failure", "tests/platforms/iOS/xcode/test_run.py::test_run_app_simulator_install_failure" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-08-24 03:35:30+00:00
bsd-3-clause
1,343
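The new log-stream predicate in this record can be sketched as a standalone helper; the string is interpolated exactly as in the diff, and the assertion matches the expectation in the record's test patch:

```python
def ios_log_predicate(formal_name: str) -> str:
    # Match log messages sent by the app binary itself, OR by a Python
    # extension module (*-iphonesimulator.so) running inside the app's
    # process -- the case that was previously dropped.
    return (
        f'senderImagePath ENDSWITH "/{formal_name}"'
        f' OR (processImagePath ENDSWITH "/{formal_name}"'
        ' AND senderImagePath ENDSWITH "-iphonesimulator.so")'
    )

assert ios_log_predicate("First App") == (
    'senderImagePath ENDSWITH "/First App"'
    ' OR (processImagePath ENDSWITH "/First App"'
    ' AND senderImagePath ENDSWITH "-iphonesimulator.so")'
)
```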
belm0__perf-timer-4
diff --git a/CHANGELOG.md b/CHANGELOG.md index f01fb66..3c73103 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Release history +## perf-timer (pending) +### Fixed +- handle absence of `time.thread_timer()` gracefully. This timer, which is the + default used by `ThreadPerfTimer`, may not be available in some OS X + environments. + ## perf-timer 0.2.1 (2020-11-09) ### Fixed - employ `atexit()` to robustly log results even when `__del__` finalizers are diff --git a/src/perf_timer/_impl.py b/src/perf_timer/_impl.py index e135af1..f8e842a 100644 --- a/src/perf_timer/_impl.py +++ b/src/perf_timer/_impl.py @@ -7,7 +7,12 @@ from weakref import WeakSet from contextvars import ContextVar from inspect import iscoroutinefunction from multiprocessing import Lock -from time import perf_counter, thread_time +from time import perf_counter +try: + from time import thread_time +except ImportError: + # thread_time is not available in some OS X environments + thread_time = None from perf_timer._histogram import ApproximateHistogram @@ -62,7 +67,8 @@ class _PerfTimerBase(_BetterContextDecorator): observer=None, quantiles=None): """ :param name: string used to annotate the timer output - :param time_fn: optional function which returns the current time + :param time_fn: optional function which returns the current time. + (A None value will raise NotImplementedError.) :param log_fn: optional function which records the output string :param observer: mixin class to observe and summarize samples (AverageObserver|StdDevObserver|HistogramObserver, default StdDevObserver) @@ -70,6 +76,8 @@ class _PerfTimerBase(_BetterContextDecorator): Values must be in range [0..1] and monotonically increasing. (default: (0.5, 0.9, 0.98)) """ + if not time_fn: + raise NotImplementedError self.name = name self._time_fn = time_fn self._log_fn = log_fn @@ -258,7 +266,11 @@ class PerfTimer(_PerfTimerBase, metaclass=_MixinMeta): class ThreadPerfTimer(_ObservationLock, PerfTimer): - """Variant of PerfTimer which measures CPU time of the current thread""" + """Variant of PerfTimer which measures CPU time of the current thread + + (Implemented with time.thread_time by default, which may not be available + in some OS X environments.) + """ def __init__(self, name, time_fn=thread_time, **kwargs): super().__init__(name, time_fn=time_fn, **kwargs)
belm0/perf-timer
9d7dc00f9ffe1ed436215835bdd8a8fa0d52e54b
diff --git a/tests/test_perf_timer.py b/tests/test_perf_timer.py index 09aeda1..6024dba 100644 --- a/tests/test_perf_timer.py +++ b/tests/test_perf_timer.py @@ -117,6 +117,11 @@ def test_perf_timer_type(): assert type(PerfTimer('foo')) is type(PerfTimer('bar')) +def test_perf_timer_not_implemented(): + with pytest.raises(NotImplementedError): + PerfTimer('foo', time_fn=None) + + @patch.object(PerfTimer, '_report_once') def test_perf_timer_atexit_and_del(_report_once): # atexit and del each cause 1 call to _report_once()
`time.thread_time` is not available on some OS X builds It appears `time.thread_time` is not available on macOS (https://bugs.python.org/issue32093) and thus trying to use this package will raise `ImportError` 😢 ```python-traceback --------------------------------------------------------------------------- ImportError Traceback (most recent call last) <ipython-input-277-0f19c673cd74> in <module> ----> 1 from perf_timer import PerfTimer, HistogramObserver ~/opt/anaconda3/envs/py37/lib/python3.7/site-packages/perf_timer/__init__.py in <module> ----> 1 from ._impl import (PerfTimer, ThreadPerfTimer, AverageObserver, 2 StdDevObserver, HistogramObserver, measure_overhead) 3 try: 4 from ._trio import trio_perf_counter, TrioPerfTimer 5 except ImportError: ~/opt/anaconda3/envs/py37/lib/python3.7/site-packages/perf_timer/_impl.py in <module> 8 from inspect import iscoroutinefunction 9 from multiprocessing import Lock ---> 10 from time import perf_counter, thread_time 11 12 from perf_timer._histogram import ApproximateHistogram ImportError: cannot import name 'thread_time' from 'time' (unknown location) ```
0.0
9d7dc00f9ffe1ed436215835bdd8a8fa0d52e54b
[ "tests/test_perf_timer.py::test_perf_timer_not_implemented" ]
[ "tests/test_perf_timer.py::test_perf_timer", "tests/test_perf_timer.py::test_perf_timer_decorator", "tests/test_perf_timer.py::test_perf_timer_one_run", "tests/test_perf_timer.py::test_perf_timer_non_reentrant", "tests/test_perf_timer.py::test_thread_perf_timer_lock", "tests/test_perf_timer.py::test_perf_timer_type", "tests/test_perf_timer.py::test_perf_timer_atexit_and_del", "tests/test_perf_timer.py::test_perf_timer_atexit_is_weak", "tests/test_perf_timer.py::test_perf_timer_report", "tests/test_perf_timer.py::test_measure_overhead" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-03-02 11:38:54+00:00
mit
1,344
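The fix in the perf-timer record above rests on two pieces: a guarded import that degrades `thread_time` to `None`, and a constructor that raises `NotImplementedError` only when the missing clock is actually requested. A minimal standalone sketch of that pattern (the `make_timer` helper is illustrative, not part of perf-timer's API):

```python
from time import perf_counter

try:
    from time import thread_time
except ImportError:
    # thread_time may be missing in some OS X environments (bpo-32093)
    thread_time = None


def make_timer(time_fn=thread_time):
    # Fail at construction time, not import time, when no thread clock exists.
    if not time_fn:
        raise NotImplementedError("no per-thread CPU clock on this platform")
    return time_fn


timer = make_timer(perf_counter)  # perf_counter is always available
print(timer())
```

Deferring the failure this way keeps the package importable everywhere, while only `ThreadPerfTimer` (whose default clock is `thread_time`) raises on affected platforms.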
benbusby__whoogle-search-1087
diff --git a/app/utils/results.py b/app/utils/results.py index 42654e9..c78f866 100644 --- a/app/utils/results.py +++ b/app/utils/results.py @@ -12,7 +12,7 @@ import re import warnings SKIP_ARGS = ['ref_src', 'utm'] -SKIP_PREFIX = ['//www.', '//mobile.', '//m.', 'www.', 'mobile.', 'm.'] +SKIP_PREFIX = ['//www.', '//mobile.', '//m.'] GOOG_STATIC = 'www.gstatic.com' G_M_LOGO_URL = 'https://www.gstatic.com/m/images/icons/googleg.gif' GOOG_IMG = '/images/branding/searchlogo/1x/googlelogo' @@ -152,11 +152,12 @@ def get_first_link(soup: BeautifulSoup) -> str: return '' -def get_site_alt(link: str) -> str: +def get_site_alt(link: str, site_alts: dict = SITE_ALTS) -> str: """Returns an alternative to a particular site, if one is configured Args: - link: A string result URL to check against the SITE_ALTS map + link: A string result URL to check against the site_alts map + site_alts: A map of site alternatives to replace with. defaults to SITE_ALTS Returns: str: An updated (or ignored) result link @@ -178,9 +179,9 @@ def get_site_alt(link: str) -> str: # "https://medium.com/..." should match, but "philomedium.com" should not) hostcomp = f'{parsed_link.scheme}://{hostname}' - for site_key in SITE_ALTS.keys(): + for site_key in site_alts.keys(): site_alt = f'{parsed_link.scheme}://{site_key}' - if not hostname or site_alt not in hostcomp or not SITE_ALTS[site_key]: + if not hostname or site_alt not in hostcomp or not site_alts[site_key]: continue # Wikipedia -> Wikiless replacements require the subdomain (if it's @@ -193,9 +194,8 @@ def get_site_alt(link: str) -> str: elif 'medium' in hostname and len(subdomain) > 0: hostname = f'{subdomain}.{hostname}' - parsed_alt = urlparse.urlparse(SITE_ALTS[site_key]) - link = link.replace(hostname, SITE_ALTS[site_key]) + params - + parsed_alt = urlparse.urlparse(site_alts[site_key]) + link = link.replace(hostname, site_alts[site_key]) + params # If a scheme is specified in the alternative, this results in a # replaced link that looks like "https://http://altservice.tld". # In this case, we can remove the original scheme from the result @@ -205,9 +205,12 @@ def get_site_alt(link: str) -> str: for prefix in SKIP_PREFIX: if parsed_alt.scheme: - link = link.replace(prefix, '') + # If a scheme is specified, remove everything before the + # first occurence of it + link = f'{parsed_alt.scheme}{link.split(parsed_alt.scheme, 1)[-1]}' else: - link = link.replace(prefix, '//') + # Otherwise, replace the first occurrence of the prefix + link = link.replace(prefix, '//', 1) break return link diff --git a/setup.cfg b/setup.cfg index 01bdec7..6e61f45 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,6 +27,7 @@ install_requires= python-dotenv requests stem + validators waitress [options.extras_require]
benbusby/whoogle-search
cdf0b50284e7843c26119ed7ac949256369207a1
diff --git a/test/test_results.py b/test/test_results.py index 63ae159..64caacd 100644 --- a/test/test_results.py +++ b/test/test_results.py @@ -2,6 +2,7 @@ from bs4 import BeautifulSoup from app.filter import Filter from app.models.config import Config from app.models.endpoint import Endpoint +from app.utils import results from app.utils.session import generate_key from datetime import datetime from dateutil.parser import ParserError, parse @@ -136,3 +137,22 @@ def test_leading_slash_search(client): continue assert link['href'].startswith(f'{Endpoint.search}') + + +def test_site_alt_prefix_skip(): + # Ensure prefixes are skipped correctly for site alts + + # default silte_alts (farside.link) + assert results.get_site_alt(link = 'https://www.reddit.com') == 'https://farside.link/libreddit' + assert results.get_site_alt(link = 'https://www.twitter.com') == 'https://farside.link/nitter' + assert results.get_site_alt(link = 'https://www.youtube.com') == 'https://farside.link/invidious' + + test_site_alts = { + 'reddit.com': 'reddit.endswithmobile.domain', + 'twitter.com': 'https://twitter.endswithm.domain', + 'youtube.com': 'http://yt.endswithwww.domain', + } + # Domains with part of SKIP_PREFIX in them + assert results.get_site_alt(link = 'https://www.reddit.com', site_alts = test_site_alts) == 'https://reddit.endswithmobile.domain' + assert results.get_site_alt(link = 'https://www.twitter.com', site_alts = test_site_alts) == 'https://twitter.endswithm.domain' + assert results.get_site_alt(link = 'https://www.youtube.com', site_alts = test_site_alts) == 'http://yt.endswithwww.domain'
validators package is missing from dependencies **Describe the bug** When installing the app into a dedicated venv (e.g. using `pipx`), it fails to start up, producing an error instead. **To Reproduce** Steps to reproduce the behavior: 1. [Install the app using `pipx`](https://github.com/benbusby/whoogle-search/#pipx) 2. Run `whoogle-search` in a terminal 3. See error ``` Traceback (most recent call last): File "/home/lex/.local/bin/whoogle-search", line 5, in <module> from app.routes import run_app File "/home/lex/.local/pipx/venvs/whoogle-search/lib/python3.11/site-packages/app/__init__.py", line 182, in <module> from app import routes # noqa ^^^^^^^^^^^^^^^^^^^^^^ File "/home/lex/.local/pipx/venvs/whoogle-search/lib/python3.11/site-packages/app/routes.py", line 10, in <module> import validators ModuleNotFoundError: No module named 'validators' ``` (Also happens when running directly from a temporary install using `pipx run`) **Deployment Method** pipx **Version of Whoogle Search** Latest build from GitHub **Desktop (please complete the following information):** - OS: Gentoo Linux - Browser N/A - Version N/A **Additional context** [Installing missing dependency into the venv](https://pypa.github.io/pipx/docs/#pipx-inject) fixes the error (though this method doesn't seem to be applicable to `pipx run`).
0.0
cdf0b50284e7843c26119ed7ac949256369207a1
[ "test/test_results.py::test_site_alt_prefix_skip" ]
[ "test/test_results.py::test_get_results", "test/test_results.py::test_post_results", "test/test_results.py::test_translate_search", "test/test_results.py::test_block_results", "test/test_results.py::test_view_my_ip", "test/test_results.py::test_recent_results", "test/test_results.py::test_leading_slash_search" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-10-23 14:56:12+00:00
mit
1,345
benbusby__whoogle-search-1092
diff --git a/app/utils/results.py b/app/utils/results.py index 42654e9..c78f866 100644 --- a/app/utils/results.py +++ b/app/utils/results.py @@ -12,7 +12,7 @@ import re import warnings SKIP_ARGS = ['ref_src', 'utm'] -SKIP_PREFIX = ['//www.', '//mobile.', '//m.', 'www.', 'mobile.', 'm.'] +SKIP_PREFIX = ['//www.', '//mobile.', '//m.'] GOOG_STATIC = 'www.gstatic.com' G_M_LOGO_URL = 'https://www.gstatic.com/m/images/icons/googleg.gif' GOOG_IMG = '/images/branding/searchlogo/1x/googlelogo' @@ -152,11 +152,12 @@ def get_first_link(soup: BeautifulSoup) -> str: return '' -def get_site_alt(link: str) -> str: +def get_site_alt(link: str, site_alts: dict = SITE_ALTS) -> str: """Returns an alternative to a particular site, if one is configured Args: - link: A string result URL to check against the SITE_ALTS map + link: A string result URL to check against the site_alts map + site_alts: A map of site alternatives to replace with. defaults to SITE_ALTS Returns: str: An updated (or ignored) result link @@ -178,9 +179,9 @@ def get_site_alt(link: str) -> str: # "https://medium.com/..." should match, but "philomedium.com" should not) hostcomp = f'{parsed_link.scheme}://{hostname}' - for site_key in SITE_ALTS.keys(): + for site_key in site_alts.keys(): site_alt = f'{parsed_link.scheme}://{site_key}' - if not hostname or site_alt not in hostcomp or not SITE_ALTS[site_key]: + if not hostname or site_alt not in hostcomp or not site_alts[site_key]: continue # Wikipedia -> Wikiless replacements require the subdomain (if it's @@ -193,9 +194,8 @@ def get_site_alt(link: str) -> str: elif 'medium' in hostname and len(subdomain) > 0: hostname = f'{subdomain}.{hostname}' - parsed_alt = urlparse.urlparse(SITE_ALTS[site_key]) - link = link.replace(hostname, SITE_ALTS[site_key]) + params - + parsed_alt = urlparse.urlparse(site_alts[site_key]) + link = link.replace(hostname, site_alts[site_key]) + params # If a scheme is specified in the alternative, this results in a # replaced link that looks like "https://http://altservice.tld". # In this case, we can remove the original scheme from the result @@ -205,9 +205,12 @@ def get_site_alt(link: str) -> str: for prefix in SKIP_PREFIX: if parsed_alt.scheme: - link = link.replace(prefix, '') + # If a scheme is specified, remove everything before the + # first occurence of it + link = f'{parsed_alt.scheme}{link.split(parsed_alt.scheme, 1)[-1]}' else: - link = link.replace(prefix, '//') + # Otherwise, replace the first occurrence of the prefix + link = link.replace(prefix, '//', 1) break return link
benbusby/whoogle-search
cdf0b50284e7843c26119ed7ac949256369207a1
diff --git a/test/test_results.py b/test/test_results.py index 63ae159..64caacd 100644 --- a/test/test_results.py +++ b/test/test_results.py @@ -2,6 +2,7 @@ from bs4 import BeautifulSoup from app.filter import Filter from app.models.config import Config from app.models.endpoint import Endpoint +from app.utils import results from app.utils.session import generate_key from datetime import datetime from dateutil.parser import ParserError, parse @@ -136,3 +137,22 @@ def test_leading_slash_search(client): continue assert link['href'].startswith(f'{Endpoint.search}') + + +def test_site_alt_prefix_skip(): + # Ensure prefixes are skipped correctly for site alts + + # default silte_alts (farside.link) + assert results.get_site_alt(link = 'https://www.reddit.com') == 'https://farside.link/libreddit' + assert results.get_site_alt(link = 'https://www.twitter.com') == 'https://farside.link/nitter' + assert results.get_site_alt(link = 'https://www.youtube.com') == 'https://farside.link/invidious' + + test_site_alts = { + 'reddit.com': 'reddit.endswithmobile.domain', + 'twitter.com': 'https://twitter.endswithm.domain', + 'youtube.com': 'http://yt.endswithwww.domain', + } + # Domains with part of SKIP_PREFIX in them + assert results.get_site_alt(link = 'https://www.reddit.com', site_alts = test_site_alts) == 'https://reddit.endswithmobile.domain' + assert results.get_site_alt(link = 'https://www.twitter.com', site_alts = test_site_alts) == 'https://twitter.endswithm.domain' + assert results.get_site_alt(link = 'https://www.youtube.com', site_alts = test_site_alts) == 'http://yt.endswithwww.domain'
[BUG] Broken site alts for some URLs due to bad skip_prefix logic **Describe the bug** When using site_alts with certain patterns such as "endswithm.net", the replaced URL is incorrect; e.g., reddit links are replaced with "endswith//net" instead of "endswithm.net" **To Reproduce** Steps to reproduce the behavior: 1. Set `WHOOGLE_ALT_RD=endswithm.net` 2. Do a search for reddit topics 3. See broken alt URLs **Deployment Method** - [ ] Heroku (one-click deploy) - [x] Docker - [ ] `run` executable - [ ] pip/pipx - [ ] Other: [describe setup] **Version of Whoogle Search** - [x] Latest build from [source] (i.e. GitHub, Docker Hub, pip, etc) - [ ] Version [version number] - [ ] Not sure **Desktop (please complete the following information):** - OS: Windows - Browser: Chrome - Version: 118.0.5993.89
0.0
cdf0b50284e7843c26119ed7ac949256369207a1
[ "test/test_results.py::test_site_alt_prefix_skip" ]
[ "test/test_results.py::test_get_results", "test/test_results.py::test_post_results", "test/test_results.py::test_translate_search", "test/test_results.py::test_block_results", "test/test_results.py::test_view_my_ip", "test/test_results.py::test_recent_results", "test/test_results.py::test_leading_slash_search" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-10-25 06:04:59+00:00
mit
1,346
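The two whoogle records above share one core fix: the old `SKIP_PREFIX` list contained bare prefixes (`'www.'`, `'mobile.'`, `'m.'`) and the code replaced *every* occurrence, so an alternative domain that merely contains one of those substrings got mangled. A toy reproduction with plain strings (the real `get_site_alt` also handles schemes via `urlparse`):

```python
SKIP_PREFIX_OLD = ['//www.', '//mobile.', '//m.', 'www.', 'mobile.', 'm.']
alt_link = 'https://reddit.endswithm.net/r/foo'

# old behaviour: the bare 'm.' prefix matches inside the alt hostname
for prefix in SKIP_PREFIX_OLD:
    if prefix in alt_link:
        print(alt_link.replace(prefix, '//'))  # https://reddit.endswith//net/r/foo
        break

# patched behaviour: protocol-anchored prefixes only, replaced at most once
SKIP_PREFIX_NEW = ['//www.', '//mobile.', '//m.']
fixed = alt_link
for prefix in SKIP_PREFIX_NEW:
    fixed = fixed.replace(prefix, '//', 1)
print(fixed)  # unchanged: no prefix occurs at the start of the host
```

This reproduces the `endswith//net` corruption from the 1092 report and shows why dropping the bare prefixes and passing `count=1` to `str.replace` fixes it.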
benjamincorcoran__sasdocs-16
diff --git a/sasdocs/objects.py b/sasdocs/objects.py index 122120d..8b954ba 100644 --- a/sasdocs/objects.py +++ b/sasdocs/objects.py @@ -718,8 +718,8 @@ dataLine = dataObj.sep_by(spc) datastep = ps.seq( outputs = (ps.regex(r'data', flags=re.IGNORECASE) + spc) >> dataLine << col, - header = ps.regex(r'.*?(?=set|merge)', flags=reFlags), - inputs = (opspc + ps.regex(r'set|merge',flags=re.IGNORECASE) + opspc) >> dataLine << col, + header = (ps.regex(r'.*?(?=set|merge)', flags=reFlags)).optional(), + inputs = ((opspc + ps.regex(r'set|merge',flags=re.IGNORECASE) + opspc) >> dataLine << col).optional(), body = ps.regex(r'.*?(?=run)', flags=reFlags), _run = run + opspc + col ).combine_dict(dataStep)
benjamincorcoran/sasdocs
82a9be818d4dac358d726054e0c3a15fe64de200
diff --git a/tests/test_objects.py b/tests/test_objects.py index ece6658..8e46ebf 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -51,7 +51,8 @@ def test_dataLine_parse(case, expected): assert dataLine.parse(case) == expected testcases = [ - ("data test.test lib2.test(where=(ax=b) rename=(a=b)); format a; set test; a = 1; b = 2; output; run;", dataStep(outputs=[dataObject(library=['test'], dataset=['test'], options=None), dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')])], header=' format a; ', inputs=[dataObject(library=None, dataset=['test'], options=None)], body=' a = 1; b = 2; output; ')) + ("data test.test lib2.test(where=(ax=b) rename=(a=b)); format a; set test; a = 1; b = 2; output; run;", dataStep(outputs=[dataObject(library=['test'], dataset=['test'], options=None), dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')])], header=' format a; ', inputs=[dataObject(library=None, dataset=['test'], options=None)], body=' a = 1; b = 2; output; ')), + ("data out; input a; datalines; 1; run;", dataStep(outputs=[dataObject(library=None,dataset=['out'],options=None)],inputs=None,header=None,body=' input a; datalines; 1; ')) ] @pytest.mark.parametrize("case,expected", testcases)
datasteps without a set or merge statement # Issue Currently only datasteps with a `set` or `merge` statement are parsed. However, this does not account for `datalines`, `cards`, or `infile` statements. ```sas data a; input a; datalines; 1 ; run; ``` This should produce a `dataStep` object with a single output of `dataObject: a`.
0.0
82a9be818d4dac358d726054e0c3a15fe64de200
[ "tests/test_objects.py::test_dataStep_parse[data" ]
[ "tests/test_objects.py::test_sasname_parse[test-expected0]", "tests/test_objects.py::test_sasname_parse[&test-expected1]", "tests/test_objects.py::test_sasname_parse[&test.-expected2]", "tests/test_objects.py::test_sasname_parse[&&test&test.-expected3]", "tests/test_objects.py::test_sasname_parse[ab&test-expected4]", "tests/test_objects.py::test_sasname_parse[ab&test.-expected5]", "tests/test_objects.py::test_sasname_parse[ab&test.ab-expected6]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test-expected7]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test.-expected8]", "tests/test_objects.py::test_sasname_parse[ab&test.abab&test.ab-expected9]", "tests/test_objects.py::test_dataObject_parse[lib.test-expected0]", "tests/test_objects.py::test_dataObject_parse[&test.test-expected1]", "tests/test_objects.py::test_dataObject_parse[lib.&test.-expected2]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.-expected3]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test-expected4]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab-expected5]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab&test-expected6]", "tests/test_objects.py::test_dataObject_parse[li&lib.b.ab&test.ab&test.-expected7]", "tests/test_objects.py::test_dataObject_parse[ab&lib.&lib.aa.ab&test.abab&test.ab-expected8]", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1))-expected0]", "tests/test_objects.py::test_dataLine_parse[test", "tests/test_objects.py::test_procedure_parse[proc", "tests/test_objects.py::test_libname_parse[libname", "tests/test_objects.py::test_include_parse[*Comment;-expected0]", "tests/test_objects.py::test_include_parse[/*Comment*/-expected1]", "tests/test_objects.py::test_create_table_parse[create", "tests/test_objects.py::test_sql_parse[proc", "tests/test_objects.py::test_macroVariableDefinition_parse[%let", "tests/test_objects.py::test_macroargument_parse[a-expected0]", "tests/test_objects.py::test_macroargument_parse[a=1-expected1]", "tests/test_objects.py::test_macroargument_parse[a", "tests/test_objects.py::test_macroargument_parse[a/*Docs*/-expected5]", "tests/test_objects.py::test_macroargument_parse[a=1/*Docs*/-expected6]", "tests/test_objects.py::test_macroargumentLine_parse[(a,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1/*Doc", "tests/test_objects.py::test_macro_parse[%macro", "tests/test_objects.py::test_macro_about_parse[%macro", "tests/test_objects.py::test_macro_children_parse[%macro", "tests/test_objects.py::test_force_partial_parse[\\nlibname", "tests/test_objects.py::test_force_partial_marco_parse[\\nlibname" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-01-01 13:16:13+00:00
mit
1,347
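The sasdocs-16 patch above works by marking the `header` and `inputs` components of the datastep grammar `.optional()`. A toy parsy grammar (not the project's real `datastep` parser) showing the effect:

```python
import re
import parsy as ps

word = ps.regex(r"\w+")
datastep = ps.seq(
    outputs=ps.regex(r"data\s+", flags=re.I) >> word << ps.string(";"),
    # without .optional(), the whole sequence fails when no set/merge exists
    inputs=(ps.regex(r"\s*(?:set|merge)\s+", flags=re.I) >> word
            << ps.string(";")).optional(),
    body=ps.regex(r".*?(?=run)", flags=re.I | re.S) << ps.regex(r"run\s*;"),
)

print(datastep.parse("data out; input a; datalines; 1; run;"))
# {'outputs': 'out', 'inputs': None, 'body': ' input a; datalines; 1; '}
```

With `inputs` optional, a `datalines`/`cards`-style step parses with `inputs=None` instead of failing outright.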
benjamincorcoran__sasdocs-32
diff --git a/sasdocs/objects.py b/sasdocs/objects.py index 24ea09f..3958f48 100644 --- a/sasdocs/objects.py +++ b/sasdocs/objects.py @@ -52,7 +52,7 @@ def rebuild_macros(objs, i=0): while i < len(objs): obj = objs[i] if len(output) > 0 and type(output[0]) == macroStart and type(obj) == macroEnd: - return (macro(name=output[0].name, arguments=output[0].arguments, contents=output[1:]), i) + return (macro(name=output[0].name, arguments=output[0].arguments, options=output[0].options, contents=output[1:]), i) elif type(obj) != macroStart or (type(obj) == macroStart and len(output)==0) : output.append(obj) else: @@ -546,6 +546,7 @@ class macroStart(baseSASObject): """ name = attr.ib() arguments = attr.ib() + options = attr.ib(default=None) @attr.s class macroEnd(baseSASObject): @@ -625,6 +626,7 @@ class macro(baseSASObject): name = attr.ib() arguments = attr.ib() contents = attr.ib(repr=False) + options = attr.ib(default=None) def __attrs_post_init__(self): self.contents = [obj for obj in self.contents if obj != '\n'] @@ -678,6 +680,7 @@ lb = ps.string('(') rb = ps.string(')') star = ps.string('*') cmm = ps.string(',') +fs = ps.string('/') # Multiline comment entry and exit points @@ -721,7 +724,7 @@ mcvDef = ps.seq( # e.g. where=(1=1) datalineArg = ps.seq( option = sasName << (opspc + eq + opspc), - setting = lb + ps.regex(r'[^)]*') + rb + setting = lb + ps.regex(r'[^)]*',flags=reFlags) + rb ).combine_dict(dataArg) # datalineArg: Argument in dataline sasName = sasName sasName sasName... @@ -731,9 +734,13 @@ datalineArgNB = ps.seq( setting = ps.regex(r'.*?(?=\s+\w+\s*=)|.*?(?=\))') ).combine_dict(dataArg) +datalineArgPt = ps.seq( + option = sasName << (opspc + eq + opspc), + setting = opspc + qte >> fpth << opspc + qte +).combine_dict(dataArg) # datalineOptions: Seperate multiple datalineArgs by spaces -datalineOptions = lb >> (datalineArg|datalineArgNB|sasName).sep_by(spc) << rb +datalineOptions = lb >> (datalineArg|datalineArgPt|datalineArgNB|sasName).sep_by(spc) << rb # dataObj: Abstracted data object exists as three components: @@ -831,7 +838,9 @@ mcroargline = lb + opspc >> mcroarg.sep_by(opspc+cmm+opspc) << opspc + rb mcroStart = ps.seq( name = ps.regex(r'%macro',flags=re.IGNORECASE) + spc + opspc >> sasName, - arguments = (opspc >> mcroargline).optional() << opspc + col + arguments = (opspc >> mcroargline).optional(), + options = (opspc + fs + opspc >> (datalineArg|datalineArgPt|sasName).sep_by(spc)).optional(), + _col = opspc + col ).combine_dict(macroStart) mcroEnd = (ps.regex(r'%mend.*?;',flags=re.IGNORECASE)).map(macroEnd)
benjamincorcoran/sasdocs
e03ad9abc48ba3714ca252297d88b58e63112635
diff --git a/tests/test_objects.py b/tests/test_objects.py index c71f223..352ad42 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -167,6 +167,19 @@ testcases = [ def test_macroargumentLine_parse(case, expected): assert mcroargline.parse(case) == expected + +testcases = [ + ('%macro test;', macroStart(name=['test'], arguments=None, options=None)), + ('%macro test /des="Description";', macroStart(name=['test'], arguments=None, options=[dataArg(option=['des'],setting='Description')])), + ('%macro test /strict des="Description";', macroStart(name=['test'], arguments=None, options=[['strict'], dataArg(option=['des'],setting='Description')])) + +] [email protected]("case,expected", testcases) +def test_macro_start_parse(case, expected): + assert mcroStart.parse(case) == expected + + + testcases = [ ('%macro test; %mend;', macro(name=['test'], arguments=None, contents='')), ('%macro test(a, b, c); %mend;', macro(name=['test'], arguments=[macroargument(arg=['a'],default=None,doc=None), macroargument(arg=['b'],default=None,doc=None), macroargument(arg=['c'],default=None,doc=None)], contents='')), @@ -190,6 +203,17 @@ def test_macro_about_parse(case, expected): macro = force_partial_parse(fullprogram,case)[0] assert macro.about == expected +testcases = [ + ('%macro test; /*This is the test macro*/ %mend;', None), + ('%macro test /strict; /*This is the test macro*/\n/*This is the second line*/%mend;', [['strict']]), + ('%macro test /strict des="Description"; data a; set b; run; /*This is the test macro*/ %mend;', [['strict'], dataArg(option=['des'],setting='Description')]), +] + [email protected]("case,expected", testcases) +def test_macro_options_parse(case, expected): + macro = force_partial_parse(fullprogram,case)[0] + assert macro.options == expected + testcases = [ ('%macro test; data a; set b; run; %mend;', [dataStep(outputs=[dataObject(library=None, dataset=['a'], options=None)], header=' ', inputs=[dataObject(library=None, dataset=['b'], options=None)], body=' ')]), ('%macro test(a=1/*Doc A*/,b/*Doc B*/); data a; set b; run; %mend;', [dataStep(outputs=[dataObject(library=None, dataset=['a'], options=None)], header=' ', inputs=[dataObject(library='work', dataset=['b'])], body=' ')]),
Macro options # Issue The parser fails when a macro is defined with options, e.g. ```sas %macro test /des='Hello' secure; %mend; ```
0.0
e03ad9abc48ba3714ca252297d88b58e63112635
[ "tests/test_objects.py::test_sasname_parse[test-expected0]", "tests/test_objects.py::test_sasname_parse[&test-expected1]", "tests/test_objects.py::test_sasname_parse[&test.-expected2]", "tests/test_objects.py::test_sasname_parse[&&test&test.-expected3]", "tests/test_objects.py::test_sasname_parse[ab&test-expected4]", "tests/test_objects.py::test_sasname_parse[ab&test.-expected5]", "tests/test_objects.py::test_sasname_parse[ab&test.ab-expected6]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test-expected7]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test.-expected8]", "tests/test_objects.py::test_sasname_parse[ab&test.abab&test.ab-expected9]", "tests/test_objects.py::test_dataObject_parse[lib.test-expected0]", "tests/test_objects.py::test_dataObject_parse[&test.test-expected1]", "tests/test_objects.py::test_dataObject_parse[lib.&test.-expected2]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.-expected3]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test-expected4]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab-expected5]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab&test-expected6]", "tests/test_objects.py::test_dataObject_parse[li&lib.b.ab&test.ab&test.-expected7]", "tests/test_objects.py::test_dataObject_parse[ab&lib.&lib.aa.ab&test.abab&test.ab-expected8]", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1))-expected0]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a)-expected1]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1)", "tests/test_objects.py::test_dataLine_parse[test", "tests/test_objects.py::test_dataStep_parse[data", "tests/test_objects.py::test_procedure_parse[proc", "tests/test_objects.py::test_libname_parse[libname", "tests/test_objects.py::test_include_parse[*Comment;-expected0]", "tests/test_objects.py::test_include_parse[/*Comment*/-expected1]", "tests/test_objects.py::test_create_table_parse[create", "tests/test_objects.py::test_sql_parse[proc", "tests/test_objects.py::test_macroVariableDefinition_parse[%let", "tests/test_objects.py::test_macroargument_parse[a-expected0]", "tests/test_objects.py::test_macroargument_parse[a=1-expected1]", "tests/test_objects.py::test_macroargument_parse[a", "tests/test_objects.py::test_macroargument_parse[a/*Docs*/-expected5]", "tests/test_objects.py::test_macroargument_parse[a=1/*Docs*/-expected6]", "tests/test_objects.py::test_macroargumentLine_parse[(a,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1/*Doc", "tests/test_objects.py::test_macro_start_parse[%macro", "tests/test_objects.py::test_macro_parse[%macro", "tests/test_objects.py::test_macro_about_parse[%macro", "tests/test_objects.py::test_macro_options_parse[%macro", "tests/test_objects.py::test_macro_children_parse[%macro", "tests/test_objects.py::test_force_partial_parse[\\nlibname", "tests/test_objects.py::test_force_partial_marco_parse[\\nlibname", "tests/test_objects.py::test_force_partial_incomplete_marco_parse[\\nlibname" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-01-08 11:25:53+00:00
mit
1,348
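Beyond the parser changes, the sasdocs-32 patch above appends an `options` attribute to the `macroStart` and `macro` attrs classes. The placement is not accidental: in attrs, a mandatory attribute may not follow one with a default, so adding the new field last with `default=None` both satisfies attrs and keeps existing positional construction working. A minimal sketch:

```python
import attr

@attr.s
class macroStart:
    name = attr.ib()
    arguments = attr.ib()
    options = attr.ib(default=None)  # new field must come after mandatory ones

old_style = macroStart(['test'], None)                # still valid
new_style = macroStart(['test'], None, [['strict']])
print(old_style.options, new_style.options)           # None [['strict']]
```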
benjamincorcoran__sasdocs-37
diff --git a/sasdocs/objects.py b/sasdocs/objects.py index 3958f48..0ec2506 100644 --- a/sasdocs/objects.py +++ b/sasdocs/objects.py @@ -650,6 +650,33 @@ class macro(baseSASObject): self.shortDesc = re.sub(r'\s+',' ',self.shortDesc) [email protected] +class macroCall(baseSASObject): + """ + Abstracted python class for SAS macro call. + + This class parses a SAS macro call. + + .. code-block:: sas + + %runMacro; + + /*and*/ + + %runMacro(arg1=A); + + + Attributes + ---------- + name : str + Name of the marco + arguments : list, optional + List of macroarguments parsed from the macro defintion + """ + name = attr.ib() + arguments = attr.ib() + + # Parsy Objects # Define reFlags as ignorecase and dotall to capture new lines reFlags = re.IGNORECASE|re.DOTALL @@ -845,6 +872,12 @@ mcroStart = ps.seq( mcroEnd = (ps.regex(r'%mend.*?;',flags=re.IGNORECASE)).map(macroEnd) +mcroCall = ps.seq( + name = ps.regex(r'%') >> sasName, + arguments = (opspc >> mcroargline).optional(), + _col = opspc + col +).combine_dict(macroCall) + # fullprogram: multiple SAS objects including macros -fullprogram = (nl|mcvDef|cmnt|datastep|proc|sql|lbnm|icld|mcroStart|mcroEnd).optional() +fullprogram = (nl|mcvDef|cmnt|datastep|proc|sql|lbnm|icld|mcroStart|mcroEnd|mcroCall).optional()
benjamincorcoran/sasdocs
3a145da6510e24d1a5c52d40f3e17f98e3f42282
diff --git a/tests/test_objects.py b/tests/test_objects.py index 352ad42..03e1f5f 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -224,6 +224,18 @@ def test_macro_children_parse(case, expected): assert force_partial_parse(fullprogram, case)[0].contents == expected +testcases = [ + (r'%runMacro;',macroCall(name=['runMacro'], arguments=None)), + (r"%runMacro(A=B, C=D);",macroCall(name=['runMacro'], arguments=[macroargument(arg=['A'],default=["B"],doc=None),macroargument(arg=['C'],default=["D"],doc=None)])) +] + [email protected]("case,expected", testcases) +def test_macro_call_parse(case, expected): + assert mcroCall.parse(case) == expected + + + + testcases = [(""" libname a "path/to/folder"; %let a = 1;
Parse calls to macros # Issue Calls to macros are not parsed, and so drop the overall parsed percentage for the sasProgram. Care must be taken not to capture macro language as macro calls. ```sas /*This*/ %runMacro; /*Not this*/ %put; ```
0.0
3a145da6510e24d1a5c52d40f3e17f98e3f42282
[ "tests/test_objects.py::test_sasname_parse[test-expected0]", "tests/test_objects.py::test_sasname_parse[&test-expected1]", "tests/test_objects.py::test_sasname_parse[&test.-expected2]", "tests/test_objects.py::test_sasname_parse[&&test&test.-expected3]", "tests/test_objects.py::test_sasname_parse[ab&test-expected4]", "tests/test_objects.py::test_sasname_parse[ab&test.-expected5]", "tests/test_objects.py::test_sasname_parse[ab&test.ab-expected6]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test-expected7]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test.-expected8]", "tests/test_objects.py::test_sasname_parse[ab&test.abab&test.ab-expected9]", "tests/test_objects.py::test_dataObject_parse[lib.test-expected0]", "tests/test_objects.py::test_dataObject_parse[&test.test-expected1]", "tests/test_objects.py::test_dataObject_parse[lib.&test.-expected2]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.-expected3]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test-expected4]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab-expected5]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab&test-expected6]", "tests/test_objects.py::test_dataObject_parse[li&lib.b.ab&test.ab&test.-expected7]", "tests/test_objects.py::test_dataObject_parse[ab&lib.&lib.aa.ab&test.abab&test.ab-expected8]", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1))-expected0]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a)-expected1]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1)", "tests/test_objects.py::test_dataLine_parse[test", "tests/test_objects.py::test_dataStep_parse[data", "tests/test_objects.py::test_procedure_parse[proc", "tests/test_objects.py::test_libname_parse[libname", "tests/test_objects.py::test_include_parse[*Comment;-expected0]", "tests/test_objects.py::test_include_parse[/*Comment*/-expected1]", "tests/test_objects.py::test_create_table_parse[create", "tests/test_objects.py::test_sql_parse[proc", "tests/test_objects.py::test_macroVariableDefinition_parse[%let", "tests/test_objects.py::test_macroargument_parse[a-expected0]", "tests/test_objects.py::test_macroargument_parse[a=1-expected1]", "tests/test_objects.py::test_macroargument_parse[a", "tests/test_objects.py::test_macroargument_parse[a/*Docs*/-expected5]", "tests/test_objects.py::test_macroargument_parse[a=1/*Docs*/-expected6]", "tests/test_objects.py::test_macroargumentLine_parse[(a,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1/*Doc", "tests/test_objects.py::test_macro_start_parse[%macro", "tests/test_objects.py::test_macro_parse[%macro", "tests/test_objects.py::test_macro_about_parse[%macro", "tests/test_objects.py::test_macro_options_parse[%macro", "tests/test_objects.py::test_macro_children_parse[%macro", "tests/test_objects.py::test_macro_call_parse[%runMacro;-expected0]", "tests/test_objects.py::test_macro_call_parse[%runMacro(A=B,", "tests/test_objects.py::test_force_partial_parse[\\nlibname", "tests/test_objects.py::test_force_partial_marco_parse[\\nlibname", "tests/test_objects.py::test_force_partial_incomplete_marco_parse[\\nlibname" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-01-12 17:23:08+00:00
mit
1,349
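In the sasdocs-37 patch above, the new generic `mcroCall` parser is added as the *last* alternative in `fullprogram`, so dedicated parsers such as `mcvDef` (for `%let`) consume known macro-language statements before the catch-all call parser sees them. A toy sketch of that ordering (not the project's real grammar):

```python
import re
import parsy as ps

let_stmt = ps.regex(r"%let\s+\w+\s*=\s*\w+\s*;", flags=re.I).result("LET")
call = (ps.string("%") >> ps.regex(r"\w+") << ps.regex(r"\s*;")).map(
    lambda name: ("CALL", name))

statement = let_stmt | call  # order matters: specific alternatives first

print(statement.parse("%let a = 1;"))  # LET
print(statement.parse("%runMacro;"))   # ('CALL', 'runMacro')
```

A bare `%put;` would still match the generic call parser here, which is exactly the caveat the issue raises — in the real grammar, everything with a dedicated parser is tried first.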
benjamincorcoran__sasdocs-41
diff --git a/sasdocs/objects.py b/sasdocs/objects.py index 0ec2506..4536ab3 100644 --- a/sasdocs/objects.py +++ b/sasdocs/objects.py @@ -406,6 +406,7 @@ class dataStep(baseSASObject): inputs = attr.ib() header = attr.ib(repr=False, default=None) body = attr.ib(repr=False, default=None) + options = attr.ib(default=None) @attr.s class procedure(baseSASObject): @@ -738,7 +739,7 @@ cmnt = (inlinecmnt|multicmnt).map(comment) # Complex SAS Objects # sasName: Any named object in SAS, can contain macrovariable as part of name -sasName = (wrd|mcv).many() +sasName = (wrd|mcv).at_least(1) # Marcovariable definition: mcvDef = ps.seq( @@ -758,7 +759,7 @@ datalineArg = ps.seq( # e.g. keep=A B C datalineArgNB = ps.seq( option = sasName << (opspc + eq + opspc), - setting = ps.regex(r'.*?(?=\s+\w+\s*=)|.*?(?=\))') + setting = ps.regex(r'.*?(?=\s+\w+\s*=)|.*?(?=\))|.*?(?=;)') ).combine_dict(dataArg) datalineArgPt = ps.seq( @@ -777,7 +778,7 @@ datalineOptions = lb >> (datalineArg|datalineArgPt|datalineArgNB|sasName).sep_by dataObj = ps.seq( library = (sasName << dot).optional(), - dataset = dot >> sasName | sasName, + dataset = (dot >> sasName) | sasName, options = datalineOptions.optional() ).combine_dict(dataObject) @@ -792,7 +793,9 @@ dataLine = dataObj.sep_by(spc) # terminating run is thrown away datastep = ps.seq( - outputs = (ps.regex(r'data', flags=re.IGNORECASE) + spc) >> dataLine << col, + outputs = (ps.regex(r'data', flags=re.IGNORECASE) + spc) >> dataLine, + options = (opspc + fs + opspc >> (datalineArg|datalineArgPt|datalineArgNB|sasName).sep_by(spc)).optional(), + _col = opspc + col, header = (ps.regex(r'(?:(?!run).)*(?=set|merge)', flags=reFlags)).optional(), inputs = ((opspc + ps.regex(r'set|merge',flags=re.IGNORECASE) + opspc) >> dataLine << col).optional(), body = ps.regex(r'.*?(?=run)', flags=reFlags),
benjamincorcoran/sasdocs
a272abbed1f04bcdc7aa680dabf24462074c3acd
diff --git a/tests/test_objects.py b/tests/test_objects.py index 03e1f5f..2eeba96 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -58,9 +58,11 @@ def test_dataLine_parse(case, expected): testcases = [ ("data test.test lib2.test(where=(ax=b) rename=(a=b)); format a; set test; a = 1; b = 2; output; run;", dataStep(outputs=[dataObject(library=['test'], dataset=['test'], options=None), dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')])], header=' format a; ', inputs=[dataObject(library=None, dataset=['test'], options=None)], body=' a = 1; b = 2; output; ')), - ("data out; input a; datalines; 1; run;", dataStep(outputs=[dataObject(library=None,dataset=['out'],options=None)],inputs=None,header=None,body=' input a; datalines; 1; ')) + ("data out; input a; datalines; 1; run;", dataStep(outputs=[dataObject(library=None,dataset=['out'],options=None)],inputs=None,header=None,body=' input a; datalines; 1; ')), + ("data out /view=out; set in; run;", dataStep(options=[dataArg(option=['view'], setting='out')], outputs=[dataObject(library=None, dataset=['out'], options=None)], inputs=[dataObject(library=None, dataset=['in'], options=None)], header=" ", body=" ")) ] + @pytest.mark.parametrize("case,expected", testcases) def test_dataStep_parse(case, expected): assert datastep.parse(case) == expected
/ options for datasteps and procedures # Issue Parser currently fails when presented with a datastep or procedure with explicitly defined options. ```sas data test / view=testvew; set test; run; ```
0.0
a272abbed1f04bcdc7aa680dabf24462074c3acd
[ "tests/test_objects.py::test_sasname_parse[test-expected0]", "tests/test_objects.py::test_sasname_parse[&test-expected1]", "tests/test_objects.py::test_sasname_parse[&test.-expected2]", "tests/test_objects.py::test_sasname_parse[&&test&test.-expected3]", "tests/test_objects.py::test_sasname_parse[ab&test-expected4]", "tests/test_objects.py::test_sasname_parse[ab&test.-expected5]", "tests/test_objects.py::test_sasname_parse[ab&test.ab-expected6]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test-expected7]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test.-expected8]", "tests/test_objects.py::test_sasname_parse[ab&test.abab&test.ab-expected9]", "tests/test_objects.py::test_dataObject_parse[lib.test-expected0]", "tests/test_objects.py::test_dataObject_parse[&test.test-expected1]", "tests/test_objects.py::test_dataObject_parse[lib.&test.-expected2]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.-expected3]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test-expected4]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab-expected5]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab&test-expected6]", "tests/test_objects.py::test_dataObject_parse[li&lib.b.ab&test.ab&test.-expected7]", "tests/test_objects.py::test_dataObject_parse[ab&lib.&lib.aa.ab&test.abab&test.ab-expected8]", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1))-expected0]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a)-expected1]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1)", "tests/test_objects.py::test_dataLine_parse[test", "tests/test_objects.py::test_dataStep_parse[data", "tests/test_objects.py::test_procedure_parse[proc", "tests/test_objects.py::test_libname_parse[libname", "tests/test_objects.py::test_include_parse[*Comment;-expected0]", "tests/test_objects.py::test_include_parse[/*Comment*/-expected1]", "tests/test_objects.py::test_create_table_parse[create", "tests/test_objects.py::test_sql_parse[proc", "tests/test_objects.py::test_macroVariableDefinition_parse[%let", "tests/test_objects.py::test_macroargument_parse[a-expected0]", "tests/test_objects.py::test_macroargument_parse[a=1-expected1]", "tests/test_objects.py::test_macroargument_parse[a", "tests/test_objects.py::test_macroargument_parse[a/*Docs*/-expected5]", "tests/test_objects.py::test_macroargument_parse[a=1/*Docs*/-expected6]", "tests/test_objects.py::test_macroargumentLine_parse[(a,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1/*Doc", "tests/test_objects.py::test_macro_start_parse[%macro", "tests/test_objects.py::test_macro_parse[%macro", "tests/test_objects.py::test_macro_about_parse[%macro", "tests/test_objects.py::test_macro_options_parse[%macro", "tests/test_objects.py::test_macro_children_parse[%macro", "tests/test_objects.py::test_macro_call_parse[%runMacro;-expected0]", "tests/test_objects.py::test_macro_call_parse[%runMacro(A=B,", "tests/test_objects.py::test_force_partial_parse[\\nlibname", "tests/test_objects.py::test_force_partial_marco_parse[\\nlibname", "tests/test_objects.py::test_force_partial_incomplete_marco_parse[\\nlibname" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-01-15 11:49:44+00:00
mit
1,350
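Alongside the `/` options support, the sasdocs-41 patch above changes `sasName` from `(wrd|mcv).many()` to `.at_least(1)`. In parsy, `.many()` succeeds on zero matches, so a 'name' parser could match the empty string and send alternation down the wrong branch. A two-print illustration:

```python
import parsy as ps

word = ps.regex(r"[A-Za-z_]\w*")
print(word.many().parse(""))         # [] -- succeeds even on empty input
print(word.at_least(1).parse("ab"))  # ['ab']
# word.at_least(1).parse("") raises a ParseError, as a name parser should
```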
benjamincorcoran__sasdocs-44
diff --git a/sasdocs/objects.py b/sasdocs/objects.py index 0271f3c..59edb38 100644 --- a/sasdocs/objects.py +++ b/sasdocs/objects.py @@ -812,7 +812,7 @@ proc = ps.seq( _h1 = ps.regex(r'.*?(?=data)', flags=reFlags), inputs = (ps.regex(r'data', flags=re.IGNORECASE) + opspc + eq + opspc) >> dataObj, _h2 = ps.regex(r'.*?(?=out\s*=)', flags=reFlags).optional(), - outputs = ((ps.regex(r'out', flags=re.IGNORECASE) + opspc + eq + opspc) >> dataObj).optional(), + outputs = ((ps.regex(r'out', flags=re.IGNORECASE) + opspc + eq + opspc) >> dataObj).sep_by(ps.regex(r'(?:(?!run|quit).)*?(?=out\s*=)', flags=reFlags)).optional(), _h3 = ps.regex(r'.*?(?=run|quit)', flags=reFlags), _run = (run|qt) + opspc + col ).combine_dict(procedure)
benjamincorcoran/sasdocs
05464364b3e886c021728970af5662ecc896888a
diff --git a/tests/test_objects.py b/tests/test_objects.py index 2eeba96..1b82569 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -69,7 +69,9 @@ def test_dataStep_parse(case, expected): testcases = [ - ("proc summary data=lib2.test(where=(ax=b) rename=(a=b)); by x; output out=lib3.test2; run;", procedure(outputs=dataObject(library=['lib3'], dataset=['test2'], options=None), inputs=dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')]), type='summary')) + ("proc summary data=lib2.test(where=(ax=b) rename=(a=b)); by x; output out=lib3.test2; run;", procedure(outputs=dataObject(library=['lib3'], dataset=['test2'], options=None), inputs=dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')]), type='summary')), + ("proc summary data=lib2.test(where=(ax=b) rename=(a=b)); by x; output out=lib3.test2; out=lib4.test2; run;", procedure(outputs=[dataObject(library=['lib3'], dataset=['test2'], options=None), dataObject(library=['lib4'], dataset=['test2'], options=None)], inputs=dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')]), type='summary')), + ("proc summary data=lib2.test(where=(ax=b) rename=(a=b)); by x; output out=lib3.test2 sum=; out=lib4.test2 n=; run;", procedure(outputs=[dataObject(library=['lib3'], dataset=['test2'], options=None), dataObject(library=['lib4'], dataset=['test2'], options=None)], inputs=dataObject(library=['lib2'], dataset=['test'], options=[dataArg(option=['where'], setting='(ax=b)'), dataArg(option=['rename'], setting='(a=b)')]), type='summary')) ] @pytest.mark.parametrize("case,expected", testcases)
Multiple outputs in procedures # Issue The current parser does not capture multiple outputs in procedures. ```sas proc summary data=test; class a; var b; output out=sum sum=; output out=cnt n=; run; ```
0.0
05464364b3e886c021728970af5662ecc896888a
[ "tests/test_objects.py::test_procedure_parse[proc" ]
[ "tests/test_objects.py::test_sasname_parse[test-expected0]", "tests/test_objects.py::test_sasname_parse[&test-expected1]", "tests/test_objects.py::test_sasname_parse[&test.-expected2]", "tests/test_objects.py::test_sasname_parse[&&test&test.-expected3]", "tests/test_objects.py::test_sasname_parse[ab&test-expected4]", "tests/test_objects.py::test_sasname_parse[ab&test.-expected5]", "tests/test_objects.py::test_sasname_parse[ab&test.ab-expected6]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test-expected7]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test.-expected8]", "tests/test_objects.py::test_sasname_parse[ab&test.abab&test.ab-expected9]", "tests/test_objects.py::test_dataObject_parse[lib.test-expected0]", "tests/test_objects.py::test_dataObject_parse[&test.test-expected1]", "tests/test_objects.py::test_dataObject_parse[lib.&test.-expected2]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.-expected3]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test-expected4]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab-expected5]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab&test-expected6]", "tests/test_objects.py::test_dataObject_parse[li&lib.b.ab&test.ab&test.-expected7]", "tests/test_objects.py::test_dataObject_parse[ab&lib.&lib.aa.ab&test.abab&test.ab-expected8]", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1))-expected0]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a)-expected1]", "tests/test_objects.py::test_dataLineOption_parse[(drop=a", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1)", "tests/test_objects.py::test_dataLine_parse[test", "tests/test_objects.py::test_dataStep_parse[data", "tests/test_objects.py::test_libname_parse[libname", "tests/test_objects.py::test_include_parse[*Comment;-expected0]", "tests/test_objects.py::test_include_parse[/*Comment*/-expected1]", "tests/test_objects.py::test_create_table_parse[create", "tests/test_objects.py::test_sql_parse[proc", "tests/test_objects.py::test_macroVariableDefinition_parse[%let", "tests/test_objects.py::test_macroargument_parse[a-expected0]", "tests/test_objects.py::test_macroargument_parse[a=1-expected1]", "tests/test_objects.py::test_macroargument_parse[a", "tests/test_objects.py::test_macroargument_parse[a/*Docs*/-expected5]", "tests/test_objects.py::test_macroargument_parse[a=1/*Docs*/-expected6]", "tests/test_objects.py::test_macroargumentLine_parse[(a,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1/*Doc", "tests/test_objects.py::test_macro_start_parse[%macro", "tests/test_objects.py::test_macro_parse[%macro", "tests/test_objects.py::test_macro_about_parse[%macro", "tests/test_objects.py::test_macro_options_parse[%macro", "tests/test_objects.py::test_macro_children_parse[%macro", "tests/test_objects.py::test_macro_call_parse[%runMacro;-expected0]", "tests/test_objects.py::test_macro_call_parse[%runMacro(A=B,", "tests/test_objects.py::test_force_partial_parse[\\nlibname", "tests/test_objects.py::test_force_partial_marco_parse[\\nlibname", "tests/test_objects.py::test_force_partial_incomplete_marco_parse[\\nlibname" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2020-01-19 16:25:49+00:00
mit
1,351
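The sasdocs-44 patch above replaces the single `out=` capture with a `sep_by`, where the 'separator' is a lazy regex that skips ahead to the next `out=` without crossing a `run`/`quit`. A reduced sketch of the idea (toy regexes, not the project's full `proc` parser):

```python
import re
import parsy as ps

out = ps.regex(r"out\s*=\s*", flags=re.I) >> ps.regex(r"[\w.]+")
sep = ps.regex(r"(?:(?!run|quit).)*?(?=out\s*=)", flags=re.I | re.S)

outputs, rest = out.sep_by(sep).parse_partial(
    "out=lib3.test2 sum=; out=lib4.test2 n=; run;")
print(outputs)  # ['lib3.test2', 'lib4.test2']
```

The tempered `(?:(?!run|quit).)*?` is what stops the separator from running past the end of the procedure.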
benjamincorcoran__sasdocs-5
diff --git a/sasdocs/objects.py b/sasdocs/objects.py index b625e6e..5208195 100644 --- a/sasdocs/objects.py +++ b/sasdocs/objects.py @@ -507,6 +507,19 @@ class macro: def __attrs_post_init__(self): self.contents = [obj for obj in self.contents if obj != '\n'] + about = [] + for obj in self.contents: + if type(obj).__name__ == 'comment': + about.append(obj) + else: + break + if len(about) == 0: + self.about = 'No docstring found.' + self.documented = False + else: + self.about = '\n'.join([comment.text for comment in about]) + self.documented = True + # Parsy Objects
benjamincorcoran/sasdocs
e89745837421fd33469de903650aaf881110d891
diff --git a/tests/test_objects.py b/tests/test_objects.py index 7249a1f..515899d 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -149,10 +149,20 @@ testcases = [ ] @pytest.mark.parametrize("case,expected", testcases) -def test_macro_parse(case, expected): - +def test_macro_about_parse(case, expected): assert force_partial_parse(fullprogram, case) == [expected] +testcases = [ + ('%macro test; /*This is the test macro*/ %mend;', 'This is the test macro'), + ('%macro test; /*This is the test macro*/\n/*This is the second line*/%mend;', 'This is the test macro\nThis is the second line'), + ('%macro test; data a; set b; run; /*This is the test macro*/ %mend;', 'No docstring found.'), +] + [email protected]("case,expected", testcases) +def test_macro_parse(case, expected): + macro = force_partial_parse(fullprogram,case)[0] + assert macro.about == expected + testcases = [ ('%macro test; data a; set b; run; %mend;', [dataStep(outputs=[dataObject(library=None, dataset=['a'], options=None)], header=' ', inputs=[dataObject(library=None, dataset=['b'], options=None)], body=' ')]), ('%macro test(a=1/*Doc A*/,b/*Doc B*/); data a; set b; run; %mend;', [dataStep(outputs=[dataObject(library=None, dataset=['a'], options=None)], header=' ', inputs=[dataObject(library='work', dataset=['b'])], body=' ')]),
Give Macro object an about attribute # Issue The Macro object needs an `about` attribute capturing the documentation of the macro. This should be the first set of comments *inside* the macro. The comments need to be collapsed into a single string object.
0.0
e89745837421fd33469de903650aaf881110d891
[ "tests/test_objects.py::test_macro_parse[%macro" ]
[ "tests/test_objects.py::test_sasname_parse[test-expected0]", "tests/test_objects.py::test_sasname_parse[&test-expected1]", "tests/test_objects.py::test_sasname_parse[&test.-expected2]", "tests/test_objects.py::test_sasname_parse[&&test&test.-expected3]", "tests/test_objects.py::test_sasname_parse[ab&test-expected4]", "tests/test_objects.py::test_sasname_parse[ab&test.-expected5]", "tests/test_objects.py::test_sasname_parse[ab&test.ab-expected6]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test-expected7]", "tests/test_objects.py::test_sasname_parse[ab&test.ab&test.-expected8]", "tests/test_objects.py::test_sasname_parse[ab&test.abab&test.ab-expected9]", "tests/test_objects.py::test_dataObject_parse[lib.test-expected0]", "tests/test_objects.py::test_dataObject_parse[&test.test-expected1]", "tests/test_objects.py::test_dataObject_parse[lib.&test.-expected2]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.-expected3]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test-expected4]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab-expected5]", "tests/test_objects.py::test_dataObject_parse[lib.ab&test.ab&test-expected6]", "tests/test_objects.py::test_dataObject_parse[li&lib.b.ab&test.ab&test.-expected7]", "tests/test_objects.py::test_dataObject_parse[ab&lib.&lib.aa.ab&test.abab&test.ab-expected8]", "tests/test_objects.py::test_dataLineOption_parse[(where=(1=1))-expected0]", "tests/test_objects.py::test_dataLine_parse[test", "tests/test_objects.py::test_dataStep_parse[data", "tests/test_objects.py::test_procedure_parse[proc", "tests/test_objects.py::test_libname_parse[libname", "tests/test_objects.py::test_include_parse[*Comment;-expected0]", "tests/test_objects.py::test_include_parse[/*Comment*/-expected1]", "tests/test_objects.py::test_macroVariableDefinition_parse[%let", "tests/test_objects.py::test_macroargument_parse[a-expected0]", "tests/test_objects.py::test_macroargument_parse[a=1-expected1]", "tests/test_objects.py::test_macroargument_parse[a", "tests/test_objects.py::test_macroargument_parse[a/*Docs*/-expected5]", "tests/test_objects.py::test_macroargument_parse[a=1/*Docs*/-expected6]", "tests/test_objects.py::test_macroargumentLine_parse[(a,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1,", "tests/test_objects.py::test_macroargumentLine_parse[(a=1/*Doc", "tests/test_objects.py::test_macro_about_parse[%macro", "tests/test_objects.py::test_macro_children_parse[%macro", "tests/test_objects.py::test_force_partial_parse[\\nlibname", "tests/test_objects.py::test_force_partial_marco_parse[\\nlibname" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-12-28 13:42:09+00:00
mit
1,352
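The sasdocs-5 patch above computes `about` inside `__attrs_post_init__` by collecting the leading run of comments. A compact standalone version of the same logic (using `isinstance` where the patch compares type names; `comment` is a stand-in for the project's class):

```python
import attr

@attr.s
class comment:
    text = attr.ib()

@attr.s
class macro:
    contents = attr.ib()

    def __attrs_post_init__(self):
        docs = []
        for obj in self.contents:      # only *leading* comments count as docs
            if isinstance(obj, comment):
                docs.append(obj.text)
            else:
                break
        self.documented = bool(docs)
        self.about = "\n".join(docs) if docs else "No docstring found."

m = macro([comment("This is the test macro"), "a data step"])
print(m.about, m.documented)  # This is the test macro True
```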
benmaier__binpacking-12
diff --git a/binpacking/to_constant_bin_number.py b/binpacking/to_constant_bin_number.py index dee0746..d189a3e 100644 --- a/binpacking/to_constant_bin_number.py +++ b/binpacking/to_constant_bin_number.py @@ -15,7 +15,7 @@ def csv_to_constant_bin_number(filepath,weight_column,N_bin,has_header=False,del save_csvs(bins,filepath,header,delim=delim,quotechar=quotechar) -def to_constant_bin_number(d,N_bin,weight_pos=None,lower_bound=None,upper_bound=None): +def to_constant_bin_number(d,N_bin,weight_pos=None,key=None,lower_bound=None,upper_bound=None): ''' Distributes a list of weights, a dictionary of weights or a list of tuples containing weights to a fixed number of bins while trying to keep the weight distribution constant. @@ -27,6 +27,7 @@ def to_constant_bin_number(d,N_bin,weight_pos=None,lower_bound=None,upper_bound= optional: ~~~ weight_pos: int -- if d is a list of tuples, this integer number gives the position of the weight in a tuple + ~~~ key: function -- if d is a list, this key functions grabs the weight for an item ~~~ lower_bound: weights under this bound are not considered ~~~ upper_bound: weights exceeding this bound are not considered ''' @@ -44,17 +45,21 @@ def to_constant_bin_number(d,N_bin,weight_pos=None,lower_bound=None,upper_bound= raise Exception("lower_bound is greater or equal to upper_bound") isdict = isinstance(d,dict) - is_tuple_list = not isdict and hasattr(d[0],'__len__') - if is_tuple_list: + if isinstance(d, list) and hasattr(d[0], '__len__'): if weight_pos is not None: - - new_dict = { i: tup for i,tup in enumerate(d) } - d = { i: tup[weight_pos] for i,tup in enumerate(d) } - isdict = True - else: - raise Exception("no weight axis provided for tuple list") - + key = lambda x: x[weight_pos] + if key is None: + raise ValueError("Must provide weight_pos or key for tuple list") + + if isinstance(d, list) and key: + new_dict = {i: val for i, val in enumerate(d)} + print(new_dict) + d = {i: key(val) for i, val in enumerate(d)} + isdict = True + is_tuple_list = True + else: + is_tuple_list = False if isdict: diff --git a/binpacking/to_constant_volume.py b/binpacking/to_constant_volume.py index 8b6002a..aa8c7b1 100644 --- a/binpacking/to_constant_volume.py +++ b/binpacking/to_constant_volume.py @@ -15,7 +15,7 @@ def csv_to_constant_volume(filepath,weight_column,V_max,has_header=False,delim=' save_csvs(bins,filepath,header,delim=delim,quotechar=quotechar) -def to_constant_volume(d,V_max,weight_pos=None,lower_bound=None,upper_bound=None): +def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_bound=None): ''' Distributes a list of weights, a dictionary of weights or a list of tuples containing weights to a minimal number of bins which have a fixed volume. @@ -27,6 +27,7 @@ def to_constant_volume(d,V_max,weight_pos=None,lower_bound=None,upper_bound=None optional: ~~~ weight_pos: int -- if d is a list of tuples, this integer number gives the position of the weight in a tuple + ~~~ key: function -- if d is a list, this key functions grabs the weight for an item ~~~ lower_bound: weights under this bound are not considered ~~~ upper_bound: weights exceeding this bound are not considered ''' @@ -44,16 +45,21 @@ def to_constant_volume(d,V_max,weight_pos=None,lower_bound=None,upper_bound=None raise Exception("lower_bound is greater or equal to upper_bound") isdict = isinstance(d,dict) - is_tuple_list = (not isdict) and (hasattr(d[0],'__len__')) - if is_tuple_list: + if isinstance(d, list) and hasattr(d[0], '__len__'): if weight_pos is not None: - - new_dict = { i: tup for i,tup in enumerate(d) } - d = { i: tup[weight_pos] for i,tup in enumerate(d) } - isdict = True - else: - raise Exception("no weight axis provided for tuple list") + key = lambda x: x[weight_pos] + if key is None: + raise ValueError("Must provide weight_pos or key for tuple list") + + if isinstance(d, list) and key: + new_dict = {i: val for i, val in enumerate(d)} + print(new_dict) + d = {i: key(val) for i, val in enumerate(d)} + isdict = True + is_tuple_list = True + else: + is_tuple_list = False if isdict:
benmaier/binpacking
6f02918376c2aadabdb0adc2e854bfadb2f45bba
diff --git a/binpacking/tests/constant_bin_number.py b/binpacking/tests/constant_bin_number.py index 298f6c8..19ca943 100644 --- a/binpacking/tests/constant_bin_number.py +++ b/binpacking/tests/constant_bin_number.py @@ -5,3 +5,28 @@ def test_only_zero_weights(): values = [0, 0, 0] bins = to_constant_bin_number(values, 4) assert bins == [[0, 0, 0], [], [], []] + +def test_weight_pos(): + values = [ + [1, 'x'], + [2, 'y'], + [1, 'z'], + ] + bins = to_constant_bin_number(values, 2, weight_pos=0) + for bin_ in bins: + for item in bin_: + assert isinstance(item[0], int) + assert isinstance(item[1], str) + +def test_key_func(): + values = [ + {'x': 'a', 'y': 1}, + {'x': 'b', 'y': 5}, + {'x': 'b', 'y': 3}, + ] + bins = to_constant_bin_number(values, 2, key=lambda x: x['y']) + + for bin_ in bins: + for item in bin_: + assert 'x' in item + assert 'y' in item \ No newline at end of file diff --git a/binpacking/tests/constant_volume.py b/binpacking/tests/constant_volume.py index d549bac..4026f53 100644 --- a/binpacking/tests/constant_volume.py +++ b/binpacking/tests/constant_volume.py @@ -4,4 +4,29 @@ from binpacking.to_constant_volume import to_constant_volume, csv_to_constant_vo def test_exact_fit(): values = [1, 2, 1] bins = to_constant_volume(values, 2) - assert len(bins) == 2 \ No newline at end of file + assert len(bins) == 2 + +def test_weight_pos(): + values = [ + [1, 'x'], + [2, 'y'], + [1, 'z'], + ] + bins = to_constant_volume(values, 2, weight_pos=0) + for bin_ in bins: + for item in bin_: + assert isinstance(item[0], int) + assert isinstance(item[1], str) + +def test_key_func(): + values = [ + {'x': 'a', 'y': 1}, + {'x': 'b', 'y': 5}, + {'x': 'b', 'y': 3}, + ] + bins = to_constant_volume(values, 2, key=lambda x: x['y']) + + for bin_ in bins: + for item in bin_: + assert 'x' in item + assert 'y' in item \ No newline at end of file
Lambda key for lists It would improve usability if we could pass in a lambda to retrieve the weight. The usage would be something like ``` a = [{'name': 'foo', 'weight': 30}, {'name': 'bar', 'weight': 10}] binpacking.to_constant_volume(a, 30, key=lambda x: x['weight']) ``` This would also eliminate the need for a weight_pos variable, since it can then be written as ``` a = [('foo', 30), ('bar', 10)] binpacking.to_constant_volume(a, 30, key=lambda x: x[1]) ```
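A minimal sketch of the dispatch the patch above implements: `weight_pos` is normalized into a `key` callable, so both call styles reduce to one weight-extraction path. The helper name `get_weight_key` is hypothetical, used only for illustration.

```python
# Sketch, assuming the semantics requested in the issue: normalize
# weight_pos into a key callable so a single code path extracts weights.
def get_weight_key(weight_pos=None, key=None):
    if weight_pos is not None:
        return lambda item: item[weight_pos]
    if key is None:
        raise ValueError("Must provide weight_pos or key for tuple list")
    return key

a = [{'name': 'foo', 'weight': 30}, {'name': 'bar', 'weight': 10}]
weight_of = get_weight_key(key=lambda x: x['weight'])
print([weight_of(item) for item in a])  # [30, 10]

b = [('foo', 30), ('bar', 10)]
weight_of = get_weight_key(weight_pos=1)
print([weight_of(item) for item in b])  # [30, 10]
```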
0.0
6f02918376c2aadabdb0adc2e854bfadb2f45bba
[ "binpacking/tests/constant_bin_number.py::test_key_func", "binpacking/tests/constant_volume.py::test_key_func" ]
[ "binpacking/tests/constant_bin_number.py::test_only_zero_weights", "binpacking/tests/constant_bin_number.py::test_weight_pos", "binpacking/tests/constant_volume.py::test_exact_fit", "binpacking/tests/constant_volume.py::test_weight_pos" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-04-27 18:37:48+00:00
mit
1,353
benmaier__binpacking-16
diff --git a/.gitignore b/.gitignore index b48773d..8a9883a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,4 +13,10 @@ # Python egg metadata, regenerated from source files by setuptools. /*.egg-info +# Python virtual environment +.venv + +# JetBrains project +.idea + .DS_Store diff --git a/binpacking/to_constant_volume.py b/binpacking/to_constant_volume.py index 7f0171d..46e3cfa 100644 --- a/binpacking/to_constant_volume.py +++ b/binpacking/to_constant_volume.py @@ -20,11 +20,11 @@ def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_b Distributes a list of weights, a dictionary of weights or a list of tuples containing weights to a minimal number of bins which have a fixed volume. INPUT: - --- d: list containing weights, + --- d: list containing weights, OR dictionary where each (key,value)-pair carries the weight as value, - OR list of tuples where one entry in the tuple is the weight. The position of + OR list of tuples where one entry in the tuple is the weight. The position of this weight has to be given in optional variable weight_pos - + optional: ~~~ weight_pos: int -- if d is a list of tuples, this integer number gives the position of the weight in a tuple ~~~ key: function -- if d is a list, this key functions grabs the weight for an item @@ -51,7 +51,7 @@ def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_b key = lambda x: x[weight_pos] if key is None: raise ValueError("Must provide weight_pos or key for tuple list") - + if isinstance(d, list) and key: new_dict = {i: val for i, val in enumerate(d)} d = {i: key(val) for i, val in enumerate(d)} @@ -93,7 +93,7 @@ def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_b #iterate through the weight list, starting with heaviest for item,weight in enumerate(weights): - + if isdict: key = keys[item] @@ -108,7 +108,10 @@ def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_b b = candidate_bins[candidate_index] #if this weight doesn't fit in any existent bin - else: + elif item > 0: + # note! if this is the very first item then there is already an + # empty bin open so we don't need to open another one. + # open a new bin b = len(weight_sum) weight_sum = np.append(weight_sum, 0.) @@ -117,14 +120,18 @@ def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_b else: bins.append([]) - #put it in + # if we are at the very first item, use the empty bin already open + else: + b = 0 + + #put it in if isdict: bins[b][key] = weight else: bins[b].append(weight) #increase weight sum of the bin and continue with - #next item + #next item weight_sum[b] += weight if not is_tuple_list: @@ -137,7 +144,7 @@ def to_constant_volume(d,V_max,weight_pos=None,key=None,lower_bound=None,upper_b new_bins[b].append(new_dict[key]) return new_bins - + if __name__=="__main__": a = np.random.power(0.01,size=10000)
benmaier/binpacking
89166e3574d25068278c5f7d82b279fbba3bcc4b
diff --git a/binpacking/tests/constant_volume.py b/binpacking/tests/constant_volume.py index 4026f53..7b63ccd 100644 --- a/binpacking/tests/constant_volume.py +++ b/binpacking/tests/constant_volume.py @@ -29,4 +29,9 @@ def test_key_func(): for bin_ in bins: for item in bin_: assert 'x' in item - assert 'y' in item \ No newline at end of file + assert 'y' in item + +def test_no_fit(): + values = [42, 24] + bins = to_constant_volume(values, 20) + assert bins == [[42], [24]]
Additional empty bin returned when all values exceed bin size When using `to_constant_volume`, if all given values exceed the bin size, then an additional empty bin is returned. Examples: ``` # input 1 values = [24, 42] bins = to_constant_volume(values, 20) # output 1 bins = [[], [42], [24]] # input 2 values = {"a": 24, "b": 42} bins = to_constant_volume(values, 20) # output 2 bins = [{}, {"b": 42}, {"a": 24}] ```
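A minimal sketch of the fix idea in the patch above: the packer starts with one empty bin already open, so when the very first weight fits nowhere it should reuse that bin instead of opening another one. This is a simplified stand-in, not the library's actual implementation.

```python
# Sketch: reuse the initial empty bin for the very first item instead of
# always opening a new bin when a weight does not fit anywhere.
def pack(values, v_max):
    values = sorted(values, reverse=True)
    bins = [[]]          # one empty bin is open from the start
    sums = [0.0]
    for weight in values:
        fits = [i for i, s in enumerate(sums) if s + weight <= v_max]
        if fits:
            b = min(fits, key=lambda i: sums[i])
        elif not bins[0]:
            b = 0        # very first item: fill the already-open empty bin
        else:
            bins.append([])
            sums.append(0.0)
            b = len(bins) - 1
        bins[b].append(weight)
        sums[b] += weight
    return bins

print(pack([24, 42], 20))  # [[42], [24]] -- no leading empty bin
```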
0.0
89166e3574d25068278c5f7d82b279fbba3bcc4b
[ "binpacking/tests/constant_volume.py::test_no_fit" ]
[ "binpacking/tests/constant_volume.py::test_exact_fit", "binpacking/tests/constant_volume.py::test_weight_pos", "binpacking/tests/constant_volume.py::test_key_func" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-05-14 17:00:03+00:00
mit
1,354
benmoran56__esper-81
diff --git a/esper/__init__.py b/esper/__init__.py index c67b5dd..5ac17a7 100644 --- a/esper/__init__.py +++ b/esper/__init__.py @@ -215,6 +215,9 @@ class World: entity = self._next_entity_id + if entity not in self._entities: + self._entities[entity] = {} + for component_instance in components: component_type = type(component_instance) @@ -224,9 +227,6 @@ class World: self._components[component_type].add(entity) - if entity not in self._entities: - self._entities[entity] = {} - self._entities[entity][component_type] = component_instance self.clear_cache() @@ -314,9 +314,6 @@ class World: self._components[component_type].add(entity) - if entity not in self._entities: - self._entities[entity] = {} - self._entities[entity][component_type] = component_instance self.clear_cache()
benmoran56/esper
c413eccd6eae12556d0fbad48298f259b6c7ea7b
diff --git a/tests/test_world.py b/tests/test_world.py index 8af332f..126102c 100644 --- a/tests/test_world.py +++ b/tests/test_world.py @@ -39,6 +39,11 @@ def test_create_entity_with_components(world): assert world.has_component(entity2, ComponentB) is True +def test_adding_component_to_not_existing_entity_raises_error(world): + with pytest.raises(KeyError): + world.add_component(123, ComponentA()) + + def test_create_entity_and_add_components(world): entity1 = world.create_entity() world.add_component(entity1, ComponentA()) @@ -59,18 +64,17 @@ def test_delete_entity(world): world.add_component(entity1, ComponentC()) entity2 = world.create_entity() world.add_component(entity2, ComponentD()) - entity3 = world.create_entity() - world.add_component(entity3, ComponentE()) - entity4 = world.create_entity() + entity_with_component = world.create_entity() + world.add_component(entity_with_component, ComponentE()) + empty_entity = world.create_entity() - assert entity3 == 3 - world.delete_entity(entity3, immediate=True) + assert entity_with_component == 3 + world.delete_entity(entity_with_component, immediate=True) with pytest.raises(KeyError): - world.components_for_entity(entity3) + world.components_for_entity(entity_with_component) with pytest.raises(KeyError): world.delete_entity(999, immediate=True) - with pytest.raises(KeyError): - world.delete_entity(entity4, immediate=True) + world.delete_entity(empty_entity, immediate=True) def test_component_for_entity(world): @@ -256,17 +260,22 @@ def test_cache_results(world): assert len(list(query for query in world.get_components(ComponentB, ComponentC))) == 1 -def test_entity_exists(world): - dead_entity = world.create_entity(ComponentB()) - world.delete_entity(dead_entity) - empty_entity = world.create_entity() - existent_entity = world.create_entity(ComponentA()) - future_entity = existent_entity + 1 +class TestEntityExists: + def test_dead_entity(self, world): + dead_entity = world.create_entity(ComponentB()) + world.delete_entity(dead_entity) + assert not world.entity_exists(dead_entity) + + def test_not_created_entity(self, world): + assert not world.entity_exists(123) + + def test_empty_entity(self, world): + empty_entity = world.create_entity() + assert world.entity_exists(empty_entity) - assert world.entity_exists(existent_entity) - assert not world.entity_exists(dead_entity) - assert not world.entity_exists(empty_entity) - assert not world.entity_exists(future_entity) + def test_entity_with_component(self, world): + entity_with_component = world.create_entity(ComponentA()) + assert world.entity_exists(entity_with_component) def test_event_dispatch_no_handlers():
`World().create_entity()` does not create entity if no components are given **Describe the bug** As a factory method, I assume it always creates an entity, but that does not happen in the edge case where no components are given. **To Reproduce** ```python3 import esper world = esper.World() entity = world.create_entity() assert world.entity_exists(entity) # raises AssertionError ``` ```python3 import esper world = esper.World() entity = world.create_entity() assert not world.entity_exists(entity) # not existing entity entity_2 = world.create_entity() assert entity_2 == 2 # counter increments even though the 1st entity seems not to exist ``` **Expected behavior** Imo the proper behaviour is to be able to create an "empty" entity and provide it with components later, kind of like the builder pattern. Plus, I've found that `add_component()` has logic for creating a missing entity, which is most probably a result of this bug. Following the Single Responsibility Principle, that should not be part of the `add_component()` method imo. See the example below: ```python3 import esper world = esper.World() made_up_entity = 123 world.add_component(made_up_entity, None) # Imo should raise KeyError because of the missing entity world.entity_exists(made_up_entity) # works just fine and should not ``` **Development environment:** - Python 3.11 - Esper 2.4 **Fix proposal** To fix this, it is simple enough to pull https://github.com/benmoran56/esper/blob/master/esper/__init__.py#L227 ```python3 if entity not in self._entities: self._entities[entity] = {} ``` out of the for loop in the `create_entity()` method. Consider removing entity creation from the `add_component()` method. Tell me what you think about it; I can implement the change and unit tests later on.
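A minimal sketch of the proposed fix, using a pared-down stand-in for esper's `World` (not the real class): registering the entity dict before the component loop means an entity with zero components still exists.

```python
# Sketch of the fix: create the entity's dict entry before iterating over
# components, so a component-less entity is still registered.
class World:
    def __init__(self):
        self._entities = {}
        self._next_entity_id = 0

    def create_entity(self, *components):
        self._next_entity_id += 1
        entity = self._next_entity_id
        self._entities[entity] = {}          # moved out of the loop
        for component in components:
            self._entities[entity][type(component)] = component
        return entity

    def entity_exists(self, entity):
        return entity in self._entities

world = World()
entity = world.create_entity()
assert world.entity_exists(entity)           # passes with the fix
```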
0.0
c413eccd6eae12556d0fbad48298f259b6c7ea7b
[ "tests/test_world.py::test_adding_component_to_not_existing_entity_raises_error", "tests/test_world.py::test_delete_entity", "tests/test_world.py::TestEntityExists::test_empty_entity" ]
[ "tests/test_world.py::test_world_instantiation", "tests/test_world.py::test_create_entity", "tests/test_world.py::test_create_entity_with_components", "tests/test_world.py::test_create_entity_and_add_components", "tests/test_world.py::test_create_entity_and_add_components_with_alias", "tests/test_world.py::test_component_for_entity", "tests/test_world.py::test_components_for_entity", "tests/test_world.py::test_has_component", "tests/test_world.py::test_has_components", "tests/test_world.py::test_get_component", "tests/test_world.py::test_get_two_components", "tests/test_world.py::test_get_three_components", "tests/test_world.py::test_try_component", "tests/test_world.py::test_try_components", "tests/test_world.py::test_clear_database", "tests/test_world.py::test_add_processor", "tests/test_world.py::test_remove_processor", "tests/test_world.py::test_get_processor", "tests/test_world.py::test_processor_args", "tests/test_world.py::test_processor_kwargs", "tests/test_world.py::test_clear_cache", "tests/test_world.py::test_cache_results", "tests/test_world.py::TestEntityExists::test_dead_entity", "tests/test_world.py::TestEntityExists::test_not_created_entity", "tests/test_world.py::TestEntityExists::test_entity_with_component", "tests/test_world.py::test_event_dispatch_no_handlers", "tests/test_world.py::test_event_dispatch_one_arg", "tests/test_world.py::test_event_dispatch_two_args", "tests/test_world.py::test_event_dispatch_incorrect_args", "tests/test_world.py::test_set_methoad_as_handler_in_init", "tests/test_world.py::test_set_instance_methoad_as_handler" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-04-20 11:26:18+00:00
mit
1,355
benmoran56__esper-83
diff --git a/esper/__init__.py b/esper/__init__.py index 5ac17a7..1c843e7 100644 --- a/esper/__init__.py +++ b/esper/__init__.py @@ -1,7 +1,9 @@ +import inspect as _inspect import time as _time from types import MethodType as _MethodType +from typing import cast as _cast from typing import Iterable as _Iterable from typing import List as _List from typing import Optional as _Optional @@ -317,10 +319,10 @@ class World: self._entities[entity][component_type] = component_instance self.clear_cache() - def remove_component(self, entity: int, component_type: _Type[_C]) -> int: + def remove_component(self, entity: int, component_type: _Type[_C]) -> _C: """Remove a Component instance from an Entity, by type. - A Component instance can be removed by providing its type. + A Component instance can only be removed by providing its type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. @@ -332,13 +334,8 @@ class World: if not self._components[component_type]: del self._components[component_type] - del self._entities[entity][component_type] - - if not self._entities[entity]: - del self._entities[entity] - self.clear_cache() - return entity + return self._entities[entity].pop(component_type) def _get_component(self, component_type: _Type[_C]) -> _Iterable[_Tuple[int, _C]]: entity_db = self._entities
benmoran56/esper
b9aee53bada68d6da73fbca3a6c5114f98620278
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index b01379b..b9db2ce 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -11,10 +11,10 @@ jobs: strategy: matrix: os: [ 'ubuntu-latest', 'macos-latest', 'windows-latest' ] - python-version: [ '3.8', '3.9', '3.10', 'pypy-3.7' ] + python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12-dev', 'pypy-3.7' ] steps: - name: Python ${{ matrix.python-version }} ${{ matrix.os }} - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: diff --git a/tests/test_world.py b/tests/test_world.py index 126102c..db99d96 100644 --- a/tests/test_world.py +++ b/tests/test_world.py @@ -278,6 +278,55 @@ class TestEntityExists: assert world.entity_exists(entity_with_component) +class TestRemoveComponent: + def test_remove_from_not_existing_entity_raises_key_error(self, world): + with pytest.raises(KeyError): + world.remove_component(123, ComponentA) + + def test_remove_not_existing_component_raises_key_error(self, world): + entity = world.create_entity(ComponentB()) + + with pytest.raises(KeyError): + world.remove_component(entity, ComponentA) + + def test_remove_component_with_object_raises_key_error(self, populated_world): + entity = 2 + component = ComponentD() + + assert populated_world.has_component(entity, type(component)) + with pytest.raises(KeyError): + populated_world.remove_component(entity, component) + + def test_remove_component_returns_removed_instance(self, world): + component = ComponentA() + entity = world.create_entity(component) + + result = world.remove_component(entity, type(component)) + + assert result is component + + def test_remove_last_component_leaves_empty_entity(self, world): + entity = world.create_entity() + world.add_component(entity, ComponentA()) + + world.remove_component(entity, ComponentA) + + assert not world.has_component(entity, ComponentA) + assert world.entity_exists(entity) + + def test_removing_one_component_leaves_other_intact(self, world): + component_a = ComponentA() + component_b = ComponentB() + component_c = ComponentC() + entity = world.create_entity(component_a, component_b, component_c) + + world.remove_component(entity, ComponentB) + + assert world.component_for_entity(entity, ComponentA) is component_a + assert not world.has_component(entity, ComponentB) + assert world.component_for_entity(entity, ComponentC) is component_c + + def test_event_dispatch_no_handlers(): esper.dispatch_event("foo") esper.dispatch_event("foo", 1)
`remove_component()` method removes entity if no more components left **Describe the bug** Per the Single Responsibility Principle, removing a component should do just that, but it removes the now-empty entity as well. As I'm creating tests for my own project, I need to populate the entity with a dummy component to prevent this when testing the logic for removing another component. **To Reproduce** ```python3 import esper class Component: ... world = esper.World() entity = world.create_entity(Component()) assert world.entity_exists(entity=entity) world.remove_component(entity=entity, component_type=Component) assert world.entity_exists(entity=entity) # AssertionError ``` **Expected behavior** I assume that removing components should not delete the entity, as adding and removing components should be independent of entity existence. An entity should be deleted by `delete_entity()` only, not under the hood, so the programmer has full control over it. **Development environment:** - Python 3.11 - Esper 2.4 **Additional context** I will create unit tests and a fix if this is approved for implementation.
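A minimal sketch of the expected behaviour, again with a pared-down stand-in for esper's `World`: `remove_component()` pops and returns the component instance, and the now-empty entity survives.

```python
# Sketch: pop and return the component, leaving the (possibly now empty)
# entity in place; a missing entity or component raises KeyError.
class Component: ...

class World:
    def __init__(self):
        self._entities = {1: {}}

    def add_component(self, entity, component):
        self._entities[entity][type(component)] = component

    def remove_component(self, entity, component_type):
        return self._entities[entity].pop(component_type)

    def entity_exists(self, entity):
        return entity in self._entities

world = World()
world.add_component(1, Component())
removed = world.remove_component(1, Component)
assert isinstance(removed, Component)
assert world.entity_exists(1)   # entity survives with no components
```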
0.0
b9aee53bada68d6da73fbca3a6c5114f98620278
[ "tests/test_world.py::TestRemoveComponent::test_remove_component_returns_removed_instance", "tests/test_world.py::TestRemoveComponent::test_remove_last_component_leaves_empty_entity" ]
[ "tests/test_world.py::test_world_instantiation", "tests/test_world.py::test_create_entity", "tests/test_world.py::test_create_entity_with_components", "tests/test_world.py::test_adding_component_to_not_existing_entity_raises_error", "tests/test_world.py::test_create_entity_and_add_components", "tests/test_world.py::test_create_entity_and_add_components_with_alias", "tests/test_world.py::test_delete_entity", "tests/test_world.py::test_component_for_entity", "tests/test_world.py::test_components_for_entity", "tests/test_world.py::test_has_component", "tests/test_world.py::test_has_components", "tests/test_world.py::test_get_component", "tests/test_world.py::test_get_two_components", "tests/test_world.py::test_get_three_components", "tests/test_world.py::test_try_component", "tests/test_world.py::test_try_components", "tests/test_world.py::test_clear_database", "tests/test_world.py::test_add_processor", "tests/test_world.py::test_remove_processor", "tests/test_world.py::test_get_processor", "tests/test_world.py::test_processor_args", "tests/test_world.py::test_processor_kwargs", "tests/test_world.py::test_clear_cache", "tests/test_world.py::test_cache_results", "tests/test_world.py::TestEntityExists::test_dead_entity", "tests/test_world.py::TestEntityExists::test_not_created_entity", "tests/test_world.py::TestEntityExists::test_empty_entity", "tests/test_world.py::TestEntityExists::test_entity_with_component", "tests/test_world.py::TestRemoveComponent::test_remove_from_not_existing_entity_raises_key_error", "tests/test_world.py::TestRemoveComponent::test_remove_not_existing_component_raises_key_error", "tests/test_world.py::TestRemoveComponent::test_remove_component_with_object_raises_key_error", "tests/test_world.py::TestRemoveComponent::test_removing_one_component_leaves_other_intact", "tests/test_world.py::test_event_dispatch_no_handlers", "tests/test_world.py::test_event_dispatch_one_arg", "tests/test_world.py::test_event_dispatch_two_args", "tests/test_world.py::test_event_dispatch_incorrect_args", "tests/test_world.py::test_set_methoad_as_handler_in_init", "tests/test_world.py::test_set_instance_methoad_as_handler" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-05-05 19:22:48+00:00
mit
1,356
berkerpeksag__astor-103
diff --git a/.travis.yml b/.travis.yml index df42c87..f789743 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ python: - 3.5 - 3.6 - pypy - - pypy3.3-5.2-alpha1 + - pypy3.5 - 3.7-dev matrix: allow_failures: diff --git a/AUTHORS b/AUTHORS index 39d96d5..9949ccc 100644 --- a/AUTHORS +++ b/AUTHORS @@ -12,3 +12,4 @@ And with some modifications based on Armin's code: * Zack M. Davis <[email protected]> * Ryan Gonzalez <[email protected]> * Lenny Truong <[email protected]> +* Radomír Bosák <[email protected]> diff --git a/astor/code_gen.py b/astor/code_gen.py index 47d6acc..c5c1ad6 100644 --- a/astor/code_gen.py +++ b/astor/code_gen.py @@ -580,6 +580,9 @@ class SourceGenerator(ExplicitNodeVisitor): index = len(result) recurse(node) + + # Flush trailing newlines (so that they are part of mystr) + self.write('') mystr = ''.join(result[index:]) del result[index:] self.colinfo = res_index, str_index # Put it back like we found it diff --git a/docs/changelog.rst b/docs/changelog.rst index 781f39d..0faff36 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -22,6 +22,16 @@ New features .. _`Issue 86`: https://github.com/berkerpeksag/astor/issues/86 +Bug fixes +~~~~~~~~~ + +* Fixed a bug where newlines would be inserted to a wrong place during + printing f-strings with trailing newlines. + (Reported by Felix Yan and contributed by Radomír Bosák in + `Issue 89`_.) + +.. _`Issue 89`: https://github.com/berkerpeksag/astor/issues/89 + 0.6.2 - 2017-11-11 ------------------ diff --git a/tox.ini b/tox.ini index 5149f5c..e364485 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26, py27, py33, py34, py35, py36, pypy, pypy3.3-5.2-alpha1 +envlist = py26, py27, py33, py34, py35, py36, pypy, pypy3.5 skipsdist = True [testenv]
berkerpeksag/astor
b47718fa095e456c064d3d222f296fccfe36266b
diff --git a/tests/test_code_gen.py b/tests/test_code_gen.py index 0638d9a..1a80445 100644 --- a/tests/test_code_gen.py +++ b/tests/test_code_gen.py @@ -476,6 +476,12 @@ class CodegenTestCase(unittest.TestCase, Comparisons): ''' self.assertAstRoundtrips(source) + def test_fstring_trailing_newline(self): + source = ''' + x = f"""{host}\n\t{port}\n""" + ''' + self.assertSrcRoundtripsGtVer(source, (3, 6)) + if __name__ == '__main__': unittest.main()
Test failure in Python 3.6.3 Looks like a test-only failure, though. ``` ====================================================================== FAIL: test_convert_stdlib (tests.test_rtrip.RtripTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "/build/python-astor/src/astor-0.6/tests/test_rtrip.py", line 24, in test_convert_stdlib self.assertEqual(result, []) AssertionError: Lists differ: ['/usr/lib/python3.6/test/test_fstring.py'[34 chars].py'] != [] First list contains 2 additional elements. First extra element 0: '/usr/lib/python3.6/test/test_fstring.py' + [] - ['/usr/lib/python3.6/test/test_fstring.py', - '/usr/lib/python3.6/idlelib/grep.py'] ```
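A minimal reproduction sketch, assuming Python 3.6+ with astor installed: round-trip the f-string from the test patch above and check where the trailing `\n` escapes land in the regenerated source.

```python
# Sketch: round-trip an f-string with trailing newline escapes. Before the
# fix, buffered newlines could be flushed into the wrong place.
import ast
import astor

source = 'x = f"""{host}\\n\\t{port}\\n"""\n'
regenerated = astor.to_source(ast.parse(source))
print(regenerated)
ast.parse(regenerated)  # the regenerated source should still be valid
```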
0.0
b47718fa095e456c064d3d222f296fccfe36266b
[ "tests/test_code_gen.py::CodegenTestCase::test_fstring_trailing_newline" ]
[ "tests/test_code_gen.py::CodegenTestCase::test_async_comprehension", "tests/test_code_gen.py::CodegenTestCase::test_async_def_with_for", "tests/test_code_gen.py::CodegenTestCase::test_class_definition_with_starbases_and_kwargs", "tests/test_code_gen.py::CodegenTestCase::test_compile_types", "tests/test_code_gen.py::CodegenTestCase::test_double_await", "tests/test_code_gen.py::CodegenTestCase::test_elif", "tests/test_code_gen.py::CodegenTestCase::test_fstrings", "tests/test_code_gen.py::CodegenTestCase::test_imports", "tests/test_code_gen.py::CodegenTestCase::test_matrix_multiplication", "tests/test_code_gen.py::CodegenTestCase::test_non_string_leakage", "tests/test_code_gen.py::CodegenTestCase::test_output_formatting", "tests/test_code_gen.py::CodegenTestCase::test_pass_arguments_node", "tests/test_code_gen.py::CodegenTestCase::test_with" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-05-04 10:53:55+00:00
bsd-3-clause
1,357
best-doctor__flake8-annotations-complexity-14
diff --git a/.gitignore b/.gitignore index 894a44c..ac3dec0 100644 --- a/.gitignore +++ b/.gitignore @@ -102,3 +102,6 @@ venv.bak/ # mypy .mypy_cache/ + +# IDE +.idea diff --git a/README.md b/README.md index 7b2f875..0832f82 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ $ flake8 --max-annotations-complexity=1 test.py test.py:4:14: TAE002 too complex annotation (2 > 1) ``` -Tested on Python 3.5.0 and flake8 3.7.4. +Tested on Python 3.6, 3.7, 3.8 and flake8 3.7.8. ## Contributing diff --git a/flake8_annotations_complexity/ast_helpres.py b/flake8_annotations_complexity/ast_helpres.py index f4bdc83..24a7e2a 100644 --- a/flake8_annotations_complexity/ast_helpres.py +++ b/flake8_annotations_complexity/ast_helpres.py @@ -18,7 +18,27 @@ def get_annotation_complexity(annotation_node, default_complexity: int = 1) -> i return default_complexity -def validate_annotations_in_ast_node(node, max_annotations_complexity) -> List[Tuple[Any, int]]: +def get_annotation_len(annotation_node) -> int: + if isinstance(annotation_node, ast.Str): + try: + annotation_node = ast.parse(annotation_node.s).body[0].value # type: ignore + except (SyntaxError, IndexError): + return 0 + if isinstance(annotation_node, ast.Subscript): + try: + if sys.version_info >= (3, 9): + return len(annotation_node.slice.elts) # type: ignore + return len(annotation_node.slice.value.elts) # type: ignore + except AttributeError: + return 0 + return 0 + + +def validate_annotations_in_ast_node( + node, + max_annotations_complexity, + max_annotations_len, +) -> List[Tuple[Any, str]]: too_difficult_annotations = [] func_defs = [ f for f in ast.walk(node) @@ -35,6 +55,12 @@ def validate_annotations_in_ast_node(node, max_annotations_complexity) -> List[T if complexity > max_annotations_complexity: too_difficult_annotations.append(( annotation, - complexity, + 'TAE002 too complex annotation ({0} > {1})'.format(complexity, max_annotations_complexity), + )) + annotation_len = get_annotation_len(annotation) + if annotation_len > 7: + too_difficult_annotations.append(( + annotation, + 'TAE003 too long annotation ({0} > {1})'.format(annotation_len, max_annotations_len), )) return too_difficult_annotations diff --git a/flake8_annotations_complexity/checker.py b/flake8_annotations_complexity/checker.py index 45c88ba..6f1344b 100644 --- a/flake8_annotations_complexity/checker.py +++ b/flake8_annotations_complexity/checker.py @@ -11,13 +11,16 @@ class AnnotationsComplexityChecker: max_annotations_complexity = None default_max_annotations_complexity = 3 - _error_message_template = 'TAE002 too complex annotation ({0} > {1})' + max_annotations_len = None + default_max_annotations_len = 7 def __init__(self, tree, filename: str): self.filename = filename self.tree = tree if AnnotationsComplexityChecker.max_annotations_complexity is None: AnnotationsComplexityChecker.max_annotations_complexity = self.default_max_annotations_complexity + if AnnotationsComplexityChecker.max_annotations_len is None: + AnnotationsComplexityChecker.max_annotations_len = self.default_max_annotations_len @classmethod def add_options(cls, parser) -> None: @@ -27,6 +30,12 @@ class AnnotationsComplexityChecker: parse_from_config=True, default=cls.default_max_annotations_complexity, ) + parser.add_option( + '--max-annotations-len', + type=int, + parse_from_config=True, + default=cls.default_max_annotations_len, + ) @classmethod def parse_options(cls, options) -> None: @@ -36,12 +45,13 @@ class AnnotationsComplexityChecker: too_difficult_annotations = 
validate_annotations_in_ast_node( self.tree, self.max_annotations_complexity, + self.max_annotations_len, ) - for annotation, complexity in too_difficult_annotations: + for annotation, error_msg in too_difficult_annotations: yield ( annotation.lineno, annotation.col_offset, - self._error_message_template.format(complexity, self.max_annotations_complexity), + error_msg, type(self), )
best-doctor/flake8-annotations-complexity
6d85e18b127c280469118ed4a460a45c204b0b0e
diff --git a/tests/test_annotations_complexity.py b/tests/test_annotations_complexity.py index b7253e1..294f0bc 100644 --- a/tests/test_annotations_complexity.py +++ b/tests/test_annotations_complexity.py @@ -67,3 +67,8 @@ def test_pep_585_compliance(): assert len(errors) == 11 errors = run_validator_for_test_file('pep_585.py', max_annotations_complexity=2) assert len(errors) == 2 + + +def test_validates_too_long_annotations(): + errors = run_validator_for_test_file('too_long_annotation.py') + assert len(errors) == 4 diff --git a/tests/test_files/too_long_annotation.py b/tests/test_files/too_long_annotation.py new file mode 100644 index 0000000..bb7c1d2 --- /dev/null +++ b/tests/test_files/too_long_annotation.py @@ -0,0 +1,7 @@ +from typing import Any, Dict, List, Optional, Tuple + +foo: Tuple[str, str, str, int, List, Any, str, Dict, int] = tuple() + +bar: 'Tuple[str, str, str, int, List, Any, str, Dict, int]' = tuple() + +egg: Tuple[str, str, str, int, List, Any, List[int], Optional[Dict[str, int]]] = tuple()
Forbid flat, but long annotations This is not nested, but also not okay: `Tuple[str, str, str, int, List, Any, str, Dict, int]`.
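A minimal sketch of the measurement such a check needs, assuming only the stdlib `ast` module: parse the annotation string and count the top-level elements of the subscript, roughly what the `get_annotation_len` helper in the patch above does.

```python
# Sketch: count top-level elements of a subscripted annotation.
import ast
import sys

def annotation_len(annotation: str) -> int:
    node = ast.parse(annotation).body[0].value
    if not isinstance(node, ast.Subscript):
        return 0
    # Python 3.9+ stores the subscript directly; older versions wrap it
    # in an ast.Index node.
    sl = node.slice if sys.version_info >= (3, 9) else node.slice.value
    return len(sl.elts) if isinstance(sl, ast.Tuple) else 1

print(annotation_len("Tuple[str, str, str, int, List, Any, str, Dict, int]"))  # 9
```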
0.0
6d85e18b127c280469118ed4a460a45c204b0b0e
[ "tests/test_annotations_complexity.py::test_validates_too_long_annotations" ]
[ "tests/test_annotations_complexity.py::test_always_ok_for_empty_file", "tests/test_annotations_complexity.py::test_ok_for_unannotated_file", "tests/test_annotations_complexity.py::test_ok_for_dynamic_annotations_file", "tests/test_annotations_complexity.py::test_ok_for_string_annotations_file", "tests/test_annotations_complexity.py::test_validates_annotations_complexity_for_annassigments", "tests/test_annotations_complexity.py::test_ok_for_empty_tuple", "tests/test_annotations_complexity.py::test_not_raises_errors_for_weird_annotations", "tests/test_annotations_complexity.py::test_ok_for_empty_string", "tests/test_annotations_complexity.py::test_pep_585_compliance" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-09-23 23:11:17+00:00
mit
1,358
betamaxpy__betamax-170
diff --git a/src/betamax/cassette/interaction.py b/src/betamax/cassette/interaction.py index f545bdb..799a8d5 100644 --- a/src/betamax/cassette/interaction.py +++ b/src/betamax/cassette/interaction.py @@ -69,6 +69,8 @@ class Interaction(object): self.replace(*placeholder.unpack(serializing)) def replace_in_headers(self, text_to_replace, placeholder): + if text_to_replace == '': + return for obj in ('request', 'response'): headers = self.data[obj]['headers'] for k, v in list(headers.items()): @@ -79,6 +81,8 @@ class Interaction(object): headers[k] = v.replace(text_to_replace, placeholder) def replace_in_body(self, text_to_replace, placeholder): + if text_to_replace == '': + return for obj in ('request', 'response'): body = self.data[obj]['body'] old_style = hasattr(body, 'replace') @@ -93,6 +97,8 @@ class Interaction(object): self.data[obj]['body']['string'] = body def replace_in_uri(self, text_to_replace, placeholder): + if text_to_replace == '': + return for (obj, key) in (('request', 'uri'), ('response', 'url')): uri = self.data[obj][key] if text_to_replace in uri:
betamaxpy/betamax
316eb978a7e8d1c318a5ab8eb7230763c18b6c3b
diff --git a/tests/unit/test_cassette.py b/tests/unit/test_cassette.py index edd83d8..01ae406 100644 --- a/tests/unit/test_cassette.py +++ b/tests/unit/test_cassette.py @@ -443,6 +443,7 @@ class TestInteraction(unittest.TestCase): self.interaction.replace('secret_value', '<SECRET_VALUE>') self.interaction.replace('foo', '<FOO>') self.interaction.replace('http://example.com', '<EXAMPLE_URI>') + self.interaction.replace('', '<IF_FAIL_THIS_INSERTS_BEFORE_AND_AFTER_EACH_CHARACTER') header = ( self.interaction.data['request']['headers']['Authorization'][0])
A blank string used for placeholder replacement can cause memory issue **Version: betamax-0.8.0** If you define a placeholder as described in the following, it can lead to memory issues, depending on how many blank strings you're trying to replace. ```python username = '' config.define_cassette_placeholder('<USERNAME>', username) ``` For example, if I have the original string `'WOW'` and perform the `.replace()` function, it will result in the following. ```python >>> 'WOW'.replace('','<USERNAME>') '<USERNAME>W<USERNAME>O<USERNAME>W<USERNAME>' ``` If you have multiple placeholders with a blank string, this compounds and can cause a memory issue. I experienced this issue in `src.betamax.cassette.interaction.py::Interaction::replace_in_uri`, but I would think this is applicable to all functions that use `str.replace()`, and it looks like the issue exists in the most current version of betamax too. I can submit a PR to check for blank strings prior to calling `str.replace()`, but figured I'd bring it up in an issue first.
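A minimal sketch of the guard the reporter suggests (the helper name `safe_replace` is hypothetical): short-circuit before calling `str.replace()` when the text to replace is empty, which is exactly what the patch above adds to each `replace_in_*` method.

```python
# Sketch: skip the replacement entirely for an empty search string, since
# ''.replace inserts the placeholder before and after every character.
def safe_replace(text: str, text_to_replace: str, placeholder: str) -> str:
    if text_to_replace == '':
        return text
    return text.replace(text_to_replace, placeholder)

assert 'WOW'.replace('', '<USERNAME>') == '<USERNAME>W<USERNAME>O<USERNAME>W<USERNAME>'
assert safe_replace('WOW', '', '<USERNAME>') == 'WOW'
```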
0.0
316eb978a7e8d1c318a5ab8eb7230763c18b6c3b
[ "tests/unit/test_cassette.py::TestInteraction::test_replace" ]
[ "tests/unit/test_cassette.py::TestSerialization::test_add_urllib3_response", "tests/unit/test_cassette.py::TestSerialization::test_deserialize_prepared_request", "tests/unit/test_cassette.py::TestSerialization::test_deserialize_response_new", "tests/unit/test_cassette.py::TestSerialization::test_deserialize_response_old", "tests/unit/test_cassette.py::TestSerialization::test_from_list_handles_non_lists", "tests/unit/test_cassette.py::TestSerialization::test_from_list_returns_an_element", "tests/unit/test_cassette.py::TestSerialization::test_serialize_prepared_request", "tests/unit/test_cassette.py::TestSerialization::test_serialize_response", "tests/unit/test_cassette.py::test_cassette_initialization", "tests/unit/test_cassette.py::TestCassette::test_earliest_recorded_date", "tests/unit/test_cassette.py::TestCassette::test_eject", "tests/unit/test_cassette.py::TestCassette::test_find_match", "tests/unit/test_cassette.py::TestCassette::test_find_match__missing_matcher", "tests/unit/test_cassette.py::TestCassette::test_find_match_new_episodes_with_existing_unused_interactions", "tests/unit/test_cassette.py::TestCassette::test_find_match_new_episodes_with_no_unused_interactions", "tests/unit/test_cassette.py::TestCassette::test_holds_interactions", "tests/unit/test_cassette.py::TestCassette::test_serialize_interaction", "tests/unit/test_cassette.py::TestInteraction::test_as_response", "tests/unit/test_cassette.py::TestInteraction::test_as_response_returns_new_instance", "tests/unit/test_cassette.py::TestInteraction::test_deserialized_response", "tests/unit/test_cassette.py::TestInteraction::test_match", "tests/unit/test_cassette.py::TestInteraction::test_replace_in_body", "tests/unit/test_cassette.py::TestInteraction::test_replace_in_headers", "tests/unit/test_cassette.py::TestInteraction::test_replace_in_uri", "tests/unit/test_cassette.py::TestMockHTTPResponse::test_is_Message", "tests/unit/test_cassette.py::TestMockHTTPResponse::test_isclosed" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-07-15 13:19:40+00:00
apache-2.0
1,359
betamaxpy__betamax-179
diff --git a/AUTHORS.rst b/AUTHORS.rst index 8bbdd19..078debf 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -19,3 +19,4 @@ Contributors - Marc Abramowitz (@msabramo) - Bryce Boe <[email protected]> (@bboe) - Alex Richard-Hoyling <@arhoyling) +- Joey RH <[email protected]> (@jarhill0) diff --git a/docs/source/configuring.rst b/docs/source/configuring.rst index 93e70e4..361bec6 100644 --- a/docs/source/configuring.rst +++ b/docs/source/configuring.rst @@ -241,6 +241,27 @@ have our hook, we need merely register it like so: And we no longer need to worry about leaking sensitive data. +In addition to the ``before_record`` and ``before_playback`` hooks, +version 0.9.0 of Betamax adds :meth:`.after_start` and :meth:`.before_stop` +hooks. These two hooks both will pass the current +:class:`~betamax.cassette.cassette.Cassette` to the callback function provided. +Register these hooks like so: + +.. code-block:: python + + def hook(cassette): + if cassette.is_recording(): + print("This cassette is recording!") + + # Either + config.after_start(callback=hook) + # Or + config.before_stop(callback=hook) + +These hooks are useful for performing configuration actions external to Betamax +at the time Betamax is invoked, such as setting up correct authentication to +an API so that the recording will not encounter any errors. + Setting default serializer `````````````````````````` diff --git a/src/betamax/configure.py b/src/betamax/configure.py index 1eca77c..b065eff 100644 --- a/src/betamax/configure.py +++ b/src/betamax/configure.py @@ -1,8 +1,9 @@ +from collections import defaultdict + from .cassette import Cassette class Configuration(object): - """This object acts as a proxy to configure different parts of Betamax. You should only ever encounter this object when configuring the library as @@ -20,6 +21,7 @@ class Configuration(object): """ CASSETTE_LIBRARY_DIR = 'vcr/cassettes' + recording_hooks = defaultdict(list) def __enter__(self): return self @@ -33,6 +35,26 @@ class Configuration(object): else: super(Configuration, self).__setattr__(prop, value) + def after_start(self, callback=None): + """Register a function to call after Betamax is started. + + Example usage: + + .. code-block:: python + + def on_betamax_start(cassette): + if cassette.is_recording(): + print("Setting up authentication...") + + with Betamax.configure() as config: + config.cassette_load(callback=on_cassette_load) + + :param callable callback: + The function which accepts a cassette and might mutate + it before returning. + """ + self.recording_hooks['after_start'].append(callback) + def before_playback(self, tag=None, callback=None): """Register a function to call before playing back an interaction. @@ -79,6 +101,26 @@ class Configuration(object): """ Cassette.hooks['before_record'].append(callback) + def before_stop(self, callback=None): + """Register a function to call before Betamax stops. + + Example usage: + + .. code-block:: python + + def on_betamax_stop(cassette): + if not cassette.is_recording(): + print("Playback completed.") + + with Betamax.configure() as config: + config.cassette_eject(callback=on_betamax_stop) + + :param callable callback: + The function which accepts a cassette and might mutate + it before returning. 
+ """ + self.recording_hooks['before_stop'].append(callback) + @property def cassette_library_dir(self): """Retrieve and set the directory to store the cassettes in.""" diff --git a/src/betamax/recorder.py b/src/betamax/recorder.py index 2a4b069..d89b8bd 100644 --- a/src/betamax/recorder.py +++ b/src/betamax/recorder.py @@ -123,10 +123,13 @@ class Betamax(object): """Start recording or replaying interactions.""" for k in self.http_adapters: self.session.mount(k, self.betamax_adapter) + dispatch_hooks('after_start', self.betamax_adapter.cassette) # ■ def stop(self): """Stop recording or replaying interactions.""" + dispatch_hooks('before_stop', self.betamax_adapter.cassette) + # No need to keep the cassette in memory any longer. self.betamax_adapter.eject_cassette() # On exit, we no longer wish to use our adapter and we want the @@ -166,3 +169,10 @@ class Betamax(object): raise ValueError('Cassette must have a valid name and may not be' ' None.') return self + + +def dispatch_hooks(hook_name, *args): + """Dispatch registered hooks.""" + hooks = Configuration.recording_hooks[hook_name] + for hook in hooks: + hook(*args)
betamaxpy/betamax
4009d74d0642fbc42a49ef2f27d16387711192e4
diff --git a/tests/integration/test_hooks.py b/tests/integration/test_hooks.py index 8378e5d..4cfa7f8 100644 --- a/tests/integration/test_hooks.py +++ b/tests/integration/test_hooks.py @@ -17,12 +17,60 @@ def preplayback_hook(interaction, cassette): interaction.data['response']['headers']['Betamax-Fake-Header'] = 'temp' +class Counter(object): + def __init__(self): + self.value = 0 + + def increment(self, cassette): + self.value += 1 + + class TestHooks(helper.IntegrationHelper): def tearDown(self): super(TestHooks, self).tearDown() # Clear out the hooks + betamax.configure.Configuration.recording_hooks.pop('after_start', None) betamax.cassette.Cassette.hooks.pop('before_record', None) betamax.cassette.Cassette.hooks.pop('before_playback', None) + betamax.configure.Configuration.recording_hooks.pop('before_stop', None) + + def test_post_start_hook(self): + start_count = Counter() + with betamax.Betamax.configure() as config: + config.after_start(callback=start_count.increment) + + recorder = betamax.Betamax(self.session) + + assert start_count.value == 0 + with recorder.use_cassette('after_start_hook'): + assert start_count.value == 1 + self.cassette_path = recorder.current_cassette.cassette_path + self.session.get('https://httpbin.org/get') + + assert start_count.value == 1 + with recorder.use_cassette('after_start_hook', record='none'): + assert start_count.value == 2 + self.session.get('https://httpbin.org/get') + assert start_count.value == 2 + + def test_pre_stop_hook(self): + stop_count = Counter() + with betamax.Betamax.configure() as config: + config.before_stop(callback=stop_count.increment) + + recorder = betamax.Betamax(self.session) + + assert stop_count.value == 0 + with recorder.use_cassette('before_stop_hook'): + self.cassette_path = recorder.current_cassette.cassette_path + self.session.get('https://httpbin.org/get') + assert stop_count.value == 0 + assert stop_count.value == 1 + + with recorder.use_cassette('before_stop_hook', record='none'): + self.session.get('https://httpbin.org/get') + assert stop_count.value == 1 + assert stop_count.value == 2 def test_prerecord_hook(self): with betamax.Betamax.configure() as config: diff --git a/tests/unit/test_configure.py b/tests/unit/test_configure.py index c39de63..4f00b93 100644 --- a/tests/unit/test_configure.py +++ b/tests/unit/test_configure.py @@ -4,6 +4,7 @@ import unittest from betamax.configure import Configuration from betamax.cassette import Cassette +from betamax.recorder import Betamax class TestConfiguration(unittest.TestCase): @@ -14,6 +15,7 @@ class TestConfiguration(unittest.TestCase): self.cassette_dir = Configuration.CASSETTE_LIBRARY_DIR def tearDown(self): + Configuration.recording_hooks = collections.defaultdict(list) Cassette.default_cassette_options = self.cassette_options Cassette.hooks = collections.defaultdict(list) Configuration.CASSETTE_LIBRARY_DIR = self.cassette_dir @@ -43,6 +45,14 @@ class TestConfiguration(unittest.TestCase): assert placeholders[0]['placeholder'] == '<TEST>' assert placeholders[0]['replace'] == 'test' + def test_registers_post_start_hooks(self): + c = Configuration() + assert Configuration.recording_hooks['after_start'] == [] + c.after_start(callback=lambda: None) + assert Configuration.recording_hooks['after_start'] != [] + assert len(Configuration.recording_hooks['after_start']) == 1 + assert callable(Configuration.recording_hooks['after_start'][0]) + def test_registers_pre_record_hooks(self): c = Configuration() assert Cassette.hooks['before_record'] == [] @@ -58,3 +68,11 @@ 
class TestConfiguration(unittest.TestCase): assert Cassette.hooks['before_playback'] != [] assert len(Cassette.hooks['before_playback']) == 1 assert callable(Cassette.hooks['before_playback'][0]) + + def test_registers_pre_stop_hooks(self): + c = Configuration() + assert Configuration.recording_hooks['before_stop'] == [] + c.before_stop(callback=lambda: None) + assert Configuration.recording_hooks['before_stop'] != [] + assert len(Configuration.recording_hooks['before_stop']) == 1 + assert callable(Configuration.recording_hooks['before_stop'][0])
Interest in additional hooks? A recently merged change in [PRAW](/praw-dev/praw) (which uses Betamax for its integration tests) hacks in an "after init" hook: ```python def add_init_hook(original_init): """Wrap an __init__ method to also call some hooks.""" @wraps(original_init) def wrapper(self, *args, **kwargs): original_init(self, *args, **kwargs) dispatch_hooks("after_init", self) return wrapper Cassette.__init__ = add_init_hook(Cassette.__init__) def init_hook(cassette): if cassette.is_recording(): pytest.set_up_record() # dynamically defined in __init__.py Cassette.hooks["after_init"].append(init_hook) ``` [source code in PRAW repo](https://github.com/praw-dev/praw/blob/edf9650a415bf484b3d6d886dca65517db141ac5/tests/conftest.py#L93-L104) For PRAW's tests, this hook is useful because it triggers at the time `.use_cassette()` is called, which signifies that network requests will happen soon but have not started yet. This hook allows us to modify the test environment slightly before any requests are made, based on whether a certain cassette has been recorded or not. Rather than having to monkeypatch Betamax, it would be better for PRAW if this hook existed natively in Betamax. **Would this be a welcome feature in Betamax?** If so, I'll be happy to write the feature.
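With the `after_start` hook added in the patch above, this PRAW-style setup no longer needs the monkeypatch. A usage sketch (the cassette directory, cassette name, and URL are placeholders):

```python
# Sketch: register an after_start hook via the new configuration API.
import betamax
import requests

def on_start(cassette):
    if cassette.is_recording():
        print("Setting up the test environment before any requests...")

with betamax.Betamax.configure() as config:
    config.cassette_library_dir = 'cassettes'  # directory must already exist
    config.after_start(callback=on_start)

session = requests.Session()
recorder = betamax.Betamax(session)
with recorder.use_cassette('example'):  # on_start fires here, pre-request
    session.get('https://httpbin.org/get')
```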
0.0
4009d74d0642fbc42a49ef2f27d16387711192e4
[ "tests/integration/test_hooks.py::TestHooks::test_post_start_hook", "tests/integration/test_hooks.py::TestHooks::test_pre_stop_hook", "tests/integration/test_hooks.py::TestHooks::test_preplayback_hook", "tests/integration/test_hooks.py::TestHooks::test_prerecord_hook", "tests/integration/test_hooks.py::TestHooks::test_prerecord_ignoring_hook", "tests/unit/test_configure.py::TestConfiguration::test_registers_post_start_hooks", "tests/unit/test_configure.py::TestConfiguration::test_registers_pre_stop_hooks" ]
[ "tests/unit/test_configure.py::TestConfiguration::test_acts_as_pass_through", "tests/unit/test_configure.py::TestConfiguration::test_allows_registration_of_placeholders", "tests/unit/test_configure.py::TestConfiguration::test_is_a_context_manager", "tests/unit/test_configure.py::TestConfiguration::test_registers_pre_playback_hooks", "tests/unit/test_configure.py::TestConfiguration::test_registers_pre_record_hooks", "tests/unit/test_configure.py::TestConfiguration::test_sets_cassette_library" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-01-10 22:33:11+00:00
apache-2.0
1,360
betodealmeida__shillelagh-276
diff --git a/ARCHITECTURE.rst b/ARCHITECTURE.rst index b0e43b2..a0d7e45 100644 --- a/ARCHITECTURE.rst +++ b/ARCHITECTURE.rst @@ -158,7 +158,7 @@ There's another layer of type conversion. SQLite has limited support for types, .. code-block:: python cursor.execute( - "SELECT event_time FROM sometable WHERE event_time > %s", + "SELECT event_time FROM sometable WHERE event_time > %s", (datetime.datetime.now() - datetime.timdelta(days=7),), ) diff --git a/README.rst b/README.rst index e1bab88..cf0d5c2 100644 --- a/README.rst +++ b/README.rst @@ -81,20 +81,20 @@ You can even run ``INSERT``/``DELETE``/``UPDATE`` queries against the spreadshee Queries like this are supported by `adapters <https://shillelagh.readthedocs.io/en/latest/adapters.html>`_. Currently Shillelagh has the following adapters: -============ ============ ========================================================================== ===================================================================================================== - Name Type URI pattern Example URI -============ ============ ========================================================================== ===================================================================================================== - CSV File ``/path/to/file.csv`` ``/home/user/sample_data.csv`` - Datasette API ``http(s)://*`` ``https://global-power-plants.datasettes.com/global-power-plants/global-power-plants`` - GitHub API ``https://api.github.com/repos/${owner}/{$repo}/pulls`` ``https://api.github.com/repos/apache/superset/pulls`` - GSheets API ``https://docs.google.com/spreadsheets/d/${id}/edit#gid=${sheet_id}`` ``https://docs.google.com/spreadsheets/d/1LcWZMsdCl92g7nA-D6qGRqg1T5TiHyuKJUY1u9XAnsk/edit#gid=0`` - HTML table API ``http(s)://*`` ``https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population`` - Pandas In memory Any variable name (local or global) ``my_df`` - S3 API ``s3://bucket/path/to/file`` ``s3://shillelagh/sample_data.csv`` - Socrata API ``https://${domain}/resource/${dataset-id}.json`` ``https://data.cdc.gov/resource/unsk-b7fc.json`` - System API ``system://${resource}`` ``system://cpu?interval=2`` - WeatherAPI API ``https://api.weatherapi.com/v1/history.json?key=${key}&q=${location}`` ``https://api.weatherapi.com/v1/history.json?key=XXX&q=London`` -============ ============ ========================================================================== ===================================================================================================== +============ ============ ========================================================================== ===================================================================================================== + Name Type URI pattern Example URI +============ ============ ========================================================================== ===================================================================================================== + CSV File ``/path/to/file.csv`` ``/home/user/sample_data.csv`` + Datasette API ``http(s)://*`` ``https://global-power-plants.datasettes.com/global-power-plants/global-power-plants`` + GitHub API ``https://api.github.com/repos/${owner}/{$repo}/pulls`` ``https://api.github.com/repos/apache/superset/pulls`` + GSheets API ``https://docs.google.com/spreadsheets/d/${id}/edit#gid=${sheet_id}`` ``https://docs.google.com/spreadsheets/d/1LcWZMsdCl92g7nA-D6qGRqg1T5TiHyuKJUY1u9XAnsk/edit#gid=0`` + HTML table API ``http(s)://*`` 
``https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population`` + Pandas In memory Any variable name (local or global) ``my_df`` + S3 API ``s3://bucket/path/to/file`` ``s3://shillelagh/sample_data.csv`` + Socrata API ``https://${domain}/resource/${dataset-id}.json`` ``https://data.cdc.gov/resource/unsk-b7fc.json`` + System API ``system://${resource}`` ``system://cpu?interval=2`` + WeatherAPI API ``https://api.weatherapi.com/v1/history.json?key=${key}&q=${location}`` ``https://api.weatherapi.com/v1/history.json?key=XXX&q=London`` +============ ============ ========================================================================== ===================================================================================================== There are also 3rd-party adapters: @@ -134,7 +134,7 @@ You also need to install optional dependencies, depending on the adapter you wan $ pip install 'shillelagh[datasetteapi]' # for Datasette $ pip install 'shillelagh[githubapi]' # for GitHub $ pip install 'shillelagh[gsheetsapi]' # for GSheets - $ pip install 'shillelagh[htmltableapi]' # for HTML tables + $ pip install 'shillelagh[htmltableapi]' # for HTML tables $ pip install 'shillelagh[pandasmemory]' # for Pandas in memory $ pip install 'shillelagh[s3selectapi]' # for S3 files $ pip install 'shillelagh[socrataapi]' # for Socrata API diff --git a/src/shillelagh/adapters/api/github.py b/src/shillelagh/adapters/api/github.py index 6574d6f..fb787c5 100644 --- a/src/shillelagh/adapters/api/github.py +++ b/src/shillelagh/adapters/api/github.py @@ -11,7 +11,7 @@ from jsonpath import JSONPath from shillelagh.adapters.base import Adapter from shillelagh.exceptions import ProgrammingError -from shillelagh.fields import Boolean, Field, Integer, ISODateTime, String +from shillelagh.fields import Boolean, Field, Integer, String, StringDateTime from shillelagh.filters import Equal, Filter from shillelagh.typing import RequestedOrder, Row @@ -58,10 +58,10 @@ TABLES: Dict[str, Dict[str, List[Column]]] = { Column("username", "user.login", String()), Column("draft", "draft", Boolean()), Column("head", "head.ref", String(filters=[Equal])), # head.label? 
- Column("created_at", "created_at", ISODateTime()), - Column("updated_at", "updated_at", ISODateTime()), - Column("closed_at", "closed_at", ISODateTime()), - Column("merged_at", "merged_at", ISODateTime()), + Column("created_at", "created_at", StringDateTime()), + Column("updated_at", "updated_at", StringDateTime()), + Column("closed_at", "closed_at", StringDateTime()), + Column("merged_at", "merged_at", StringDateTime()), ], }, } diff --git a/src/shillelagh/adapters/api/socrata.py b/src/shillelagh/adapters/api/socrata.py index 8121a21..2c892da 100644 --- a/src/shillelagh/adapters/api/socrata.py +++ b/src/shillelagh/adapters/api/socrata.py @@ -15,7 +15,7 @@ from typing_extensions import TypedDict from shillelagh.adapters.base import Adapter from shillelagh.exceptions import ImpossibleFilterError, ProgrammingError -from shillelagh.fields import Field, ISODate, Order, String +from shillelagh.fields import Field, Order, String, StringDate from shillelagh.filters import Equal, Filter, IsNotNull, IsNull, Like, NotEqual, Range from shillelagh.lib import SimpleCostModel, build_sql from shillelagh.typing import RequestedOrder, Row @@ -69,7 +69,7 @@ class Number(Field[str, float]): type_map: Dict[str, Tuple[Type[Field], List[Type[Filter]]]] = { - "calendar_date": (ISODate, [Range, Equal, NotEqual, IsNull, IsNotNull]), + "calendar_date": (StringDate, [Range, Equal, NotEqual, IsNull, IsNotNull]), "number": (Number, [Range, Equal, NotEqual, IsNull, IsNotNull]), "text": (String, [Range, Equal, NotEqual, Like, IsNull, IsNotNull]), } diff --git a/src/shillelagh/fields.py b/src/shillelagh/fields.py index 5f1e847..0741cd2 100644 --- a/src/shillelagh/fields.py +++ b/src/shillelagh/fields.py @@ -361,11 +361,11 @@ class ISODate(Field[str, datetime.date]): return None try: - date = dateutil.parser.parse(value) - except dateutil.parser.ParserError: + date = datetime.date.fromisoformat(value) + except ValueError: return None - return date.date() + return date def format(self, value: Optional[datetime.date]) -> Optional[str]: if value is None: @@ -378,6 +378,23 @@ class ISODate(Field[str, datetime.date]): return f"'{value}'" +class StringDate(ISODate): + """ + A more permissive date format. + """ + + def parse(self, value: Optional[str]) -> Optional[datetime.date]: + if value is None: + return None + + try: + date = dateutil.parser.parse(value) + except dateutil.parser.ParserError: + return None + + return date.date() + + class Time(Field[datetime.time, datetime.time]): """ A time of the day. @@ -413,11 +430,11 @@ class ISOTime(Field[str, datetime.time]): return None try: - timestamp = dateutil.parser.parse(value) - except dateutil.parser.ParserError: + timestamp = datetime.time.fromisoformat(value) + except ValueError: return None - time = timestamp.time() + time = timestamp # timezone is not preserved return time.replace(tzinfo=timestamp.tzinfo) @@ -433,6 +450,26 @@ class ISOTime(Field[str, datetime.time]): return f"'{value}'" +class StringTime(ISOTime): + """ + A more permissive time format. + """ + + def parse(self, value: Optional[str]) -> Optional[datetime.time]: + if value is None: + return None + + try: + timestamp = dateutil.parser.parse(value) + except dateutil.parser.ParserError: + return None + + time = timestamp.time() + + # timezone is not preserved + return time.replace(tzinfo=timestamp.tzinfo) + + class DateTime(Field[datetime.datetime, datetime.datetime]): """ A timestamp. 
@@ -469,8 +506,8 @@ class ISODateTime(Field[str, datetime.datetime]): return None try: - timestamp = dateutil.parser.parse(value) - except dateutil.parser.ParserError: + timestamp = datetime.datetime.fromisoformat(value) + except ValueError: return None # if the timestamp has a timezone change it to UTC, so that @@ -497,6 +534,28 @@ class ISODateTime(Field[str, datetime.datetime]): return f"'{value}'" +class StringDateTime(ISODateTime): + """ + A more permissive datetime format. + """ + + def parse(self, value: Optional[str]) -> Optional[datetime.datetime]: + if value is None: + return None + + try: + timestamp = dateutil.parser.parse(value) + except dateutil.parser.ParserError: + return None + + # if the timestamp has a timezone change it to UTC, so that + # timestamps in different timezones can be compared as strings + if timestamp.tzinfo is not None: + timestamp = timestamp.astimezone(datetime.timezone.utc) + + return timestamp + + class StringDuration(Field[str, datetime.timedelta]): """ A duration.
betodealmeida/shillelagh
22b080458e74ab0f15744e4fef107879a14cd71b
diff --git a/tests/fields_test.py b/tests/fields_test.py index 96a724e..babb701 100644 --- a/tests/fields_test.py +++ b/tests/fields_test.py @@ -22,7 +22,10 @@ from shillelagh.fields import ( String, StringBlob, StringBoolean, + StringDate, + StringDateTime, StringDuration, + StringTime, Time, ) from shillelagh.filters import Equal @@ -102,11 +105,6 @@ def test_isodate() -> None: Test ``ISODate``. """ assert ISODate().parse("2020-01-01") == datetime.date(2020, 1, 1) - assert ISODate().parse("2020-01-01T00:00+00:00") == datetime.date( - 2020, - 1, - 1, - ) assert ISODate().parse(None) is None assert ISODate().parse("invalid") is None assert ISODate().format(datetime.date(2020, 1, 1)) == "2020-01-01" @@ -115,6 +113,20 @@ def test_isodate() -> None: assert ISODate().quote(None) == "NULL" +def test_string_date() -> None: + """ + Test ``StringDate``. + """ + assert StringDate().parse("2020-01-01") == datetime.date(2020, 1, 1) + assert StringDate().parse("2020-01-01T00:00+00:00") == datetime.date( + 2020, + 1, + 1, + ) + assert StringDate().parse(None) is None + assert StringDate().parse("invalid") is None + + def test_time() -> None: """ Test ``Time``. @@ -164,6 +176,23 @@ def test_iso_time() -> None: assert ISOTime().quote(None) == "NULL" +def test_string_time() -> None: + """ + Test ``StringTime``. + """ + assert StringTime().parse("12:00+00:00") == datetime.time( + 12, + 0, + tzinfo=datetime.timezone.utc, + ) + assert StringTime().parse("12:00") == datetime.time( + 12, + 0, + ) + assert StringTime().parse(None) is None + assert StringTime().parse("invalid") is None + + def test_datetime() -> None: """ Test ``DateTime``. @@ -228,6 +257,40 @@ def test_iso_datetime() -> None: assert ISODateTime().quote(None) == "NULL" +def test_string_datetime() -> None: + """ + Test ``StringDateTime``. + """ + assert StringDateTime().parse("2020-01-01T12:00+00:00") == datetime.datetime( + 2020, + 1, + 1, + 12, + 0, + 0, + tzinfo=datetime.timezone.utc, + ) + assert StringDateTime().parse("2020-01-01T12:00Z") == datetime.datetime( + 2020, + 1, + 1, + 12, + 0, + 0, + tzinfo=datetime.timezone.utc, + ) + assert StringDateTime().parse("2020-01-01T12:00") == datetime.datetime( + 2020, + 1, + 1, + 12, + 0, + 0, + ) + assert StringDateTime().parse(None) is None + assert StringDateTime().parse("invalid") is None + + def test_boolean() -> None: """ Test ``Boolean``.
ISODate parse can use std library Currently `ISODate`'s `parse` uses `dateutil`: https://github.com/betodealmeida/shillelagh/blob/7afaf13ec822f8c56895a8aec6ad77a7de2ea600/src/shillelagh/fields.py#L323-L328 However, as of Python 3.7, [`date.fromisoformat`](https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat) should work and be both faster and less permissive (it doesn't try to guess the format).
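A minimal sketch (editorial illustration, not part of the issue) of the difference the reporter points at: `dateutil` guesses the input format, while the stdlib `date.fromisoformat` accepts only the strict ISO form.

```python
import datetime

import dateutil.parser

# dateutil guesses the format, so loosely formatted inputs still parse...
print(dateutil.parser.parse("Jan 1, 2020").date())  # 2020-01-01

# ...while the stdlib parser accepts only the strict YYYY-MM-DD form.
print(datetime.date.fromisoformat("2020-01-01"))    # 2020-01-01
try:
    datetime.date.fromisoformat("Jan 1, 2020")
except ValueError:
    print("rejected: not an ISO 8601 date")
```

This is exactly the split the patch above makes: `ISODate` switches to the strict stdlib parser, and the permissive `dateutil` behavior moves into the new `StringDate` subclass.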
0.0
22b080458e74ab0f15744e4fef107879a14cd71b
[ "tests/fields_test.py::test_comparison", "tests/fields_test.py::test_integer", "tests/fields_test.py::test_float", "tests/fields_test.py::test_string", "tests/fields_test.py::test_date", "tests/fields_test.py::test_isodate", "tests/fields_test.py::test_string_date", "tests/fields_test.py::test_time", "tests/fields_test.py::test_iso_time", "tests/fields_test.py::test_string_time", "tests/fields_test.py::test_datetime", "tests/fields_test.py::test_iso_datetime", "tests/fields_test.py::test_string_datetime", "tests/fields_test.py::test_boolean", "tests/fields_test.py::test_int_boolean", "tests/fields_test.py::test_string_boolean", "tests/fields_test.py::test_blob", "tests/fields_test.py::test_string_blob", "tests/fields_test.py::test_type_code", "tests/fields_test.py::test_string_duration" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-24 21:38:53+00:00
mit
1,361
betodealmeida__shillelagh-340
diff --git a/docs/adapters.rst b/docs/adapters.rst index 7f7b29f..431085b 100644 --- a/docs/adapters.rst +++ b/docs/adapters.rst @@ -51,7 +51,7 @@ For domain wide access you need to create a service account. Make sure that the connection = connect( ":memory:", adapter_kwargs={ - "gsheetaspi": { + "gsheetsapi": { # "service_account_file": "/path/to/credentials.json", "service_account_info": { "type": "service_account", diff --git a/src/shillelagh/backends/apsw/db.py b/src/shillelagh/backends/apsw/db.py index bf915c1..91cfb53 100644 --- a/src/shillelagh/backends/apsw/db.py +++ b/src/shillelagh/backends/apsw/db.py @@ -39,7 +39,12 @@ from shillelagh.exceptions import ( # nopycln: import; pylint: disable=redefine Warning, ) from shillelagh.fields import Blob, Field -from shillelagh.lib import combine_args_kwargs, escape, find_adapter, serialize +from shillelagh.lib import ( + combine_args_kwargs, + escape_identifier, + find_adapter, + serialize, +) from shillelagh.types import ( BINARY, DATETIME, @@ -290,7 +295,7 @@ class Cursor: # pylint: disable=too-many-instance-attributes f"'{serialize(arg)}'" for arg in combine_args_kwargs(adapter, *args, **kwargs) ) - table_name = escape(uri) + table_name = escape_identifier(uri) self._cursor.execute( f'CREATE VIRTUAL TABLE "{table_name}" USING {adapter.__name__}({formatted_args})', ) diff --git a/src/shillelagh/lib.py b/src/shillelagh/lib.py index 684af28..e36b254 100644 --- a/src/shillelagh/lib.py +++ b/src/shillelagh/lib.py @@ -221,16 +221,26 @@ def update_order( return current_order -def escape(value: str) -> str: +def escape_string(value: str) -> str: """Escape single quotes.""" return value.replace("'", "''") -def unescape(value: str) -> str: +def unescape_string(value: str) -> str: """Unescape single quotes.""" return value.replace("''", "'") +def escape_identifier(value: str) -> str: + """Escape double quotes.""" + return value.replace('"', '""') + + +def unescape_identifier(value: str) -> str: + """Unescape double quotes.""" + return value.replace('""', '"') + + def serialize(value: Any) -> str: """ Serialize adapter arguments. @@ -247,7 +257,7 @@ def serialize(value: Any) -> str: "numbers) are passed as arguments to adapters.", ) from ex - return escape(base64.b64encode(serialized).decode()) + return escape_string(base64.b64encode(serialized).decode()) def deserialize(value: str) -> Any: @@ -257,7 +267,7 @@ def deserialize(value: str) -> Any: This function is used by the SQLite backend, in order to deserialize the virtual table definition and instantiate an adapter. """ - return marshal.loads(base64.b64decode(unescape(value).encode())) + return marshal.loads(base64.b64decode(unescape_string(value).encode())) def build_sql( # pylint: disable=too-many-locals, too-many-arguments, too-many-branches
betodealmeida/shillelagh
e379e8ac0b3dc2f45217be5084b01934c8a489d3
diff --git a/tests/lib_test.py b/tests/lib_test.py index da4a3e6..1433c7a 100644 --- a/tests/lib_test.py +++ b/tests/lib_test.py @@ -26,13 +26,15 @@ from shillelagh.lib import ( build_sql, combine_args_kwargs, deserialize, - escape, + escape_identifier, + escape_string, filter_data, find_adapter, is_not_null, is_null, serialize, - unescape, + unescape_identifier, + unescape_string, update_order, ) from shillelagh.typing import RequestedOrder @@ -272,20 +274,44 @@ def test_build_sql_impossible() -> None: build_sql(columns, {"a": Impossible()}, []) -def test_escape() -> None: +def test_escape_string() -> None: """ - Test ``escape``. + Test ``escape_string``. """ - assert escape("1") == "1" - assert escape("O'Malley's") == "O''Malley''s" + assert escape_string("1") == "1" + assert escape_string("O'Malley's") == "O''Malley''s" -def test_unescape() -> None: +def test_unescape_string() -> None: """ - Test ``unescape``. + Test ``unescape_string``. """ - assert unescape("1") == "1" - assert unescape("O''Malley''s") == "O'Malley's" + assert unescape_string("1") == "1" + assert unescape_string("O''Malley''s") == "O'Malley's" + + +def test_escape_identifier() -> None: + """ + Test ``escape_identifier``. + """ + assert escape_identifier("1") == "1" + assert escape_identifier("O'Malley's") == "O'Malley's" + assert ( + escape_identifier('a dove called: "Who? who? who?"') + == 'a dove called: ""Who? who? who?""' + ) + + +def test_unescape_identifier() -> None: + """ + Test ``unescape_identifier``. + """ + assert unescape_identifier("1") == "1" + assert unescape_identifier("O''Malley''s") == "O''Malley''s" + assert ( + unescape_identifier('a dove called: ""Who? who? who?""') + == 'a dove called: "Who? who? who?"' + ) def test_serialize() -> None:
Virtual table names are incorrectly escaped **Describe the bug** When virtual tables are added/created, they are defined as:
```
table_name = escape(uri)
self._cursor.execute(
    f'CREATE VIRTUAL TABLE "{table_name}" USING {adapter.__name__}({formatted_args})',
)
```
see: https://github.com/betodealmeida/shillelagh/blob/e379e8ac0b3dc2f45217be5084b01934c8a489d3/src/shillelagh/backends/apsw/db.py#L294 In particular, the table name is put in double quotes. The escape method applied beforehand replaces _single_ quotes rather than _double_ quotes:
```
def escape(value: str) -> str:
    """Escape single quotes."""
    return value.replace("'", "''")
```
see: https://github.com/betodealmeida/shillelagh/blob/e379e8ac0b3dc2f45217be5084b01934c8a489d3/src/shillelagh/lib.py#L224 **Expected behavior** As the table name is quoted in double quotes, double quotes should be escaped in the table name. There are multiple ways to fix this: 1. Switch `escape` to replace double quotes. Likely not desired, as the method seems to be used in other places. 2. Quote the table name in single quotes when creating it. Would this break anything existing? 3. Escape the double quotes in the table name ad hoc (likely the best option if the impact of 2 can't be assessed). Happy to send a pull request.
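A minimal sketch of the fix the patch implements (the function names match the patch; the table name below is hypothetical): doubling double quotes keeps the identifier safely inside its quoting, whereas single-quote escaping leaves the double quotes free to break out of it.

```python
def escape_string(value: str) -> str:
    """Escape single quotes (for string literals inside '...')."""
    return value.replace("'", "''")


def escape_identifier(value: str) -> str:
    """Escape double quotes (for identifiers inside "...")."""
    return value.replace('"', '""')


name = 'a "weird" table'  # hypothetical table name

# Wrong: the double quotes survive and break out of the quoted identifier.
print(f'CREATE VIRTUAL TABLE "{escape_string(name)}" USING adapter(...)')

# Right: doubled double quotes stay inside the quoted identifier.
print(f'CREATE VIRTUAL TABLE "{escape_identifier(name)}" USING adapter(...)')
```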
0.0
e379e8ac0b3dc2f45217be5084b01934c8a489d3
[ "tests/lib_test.py::test_row_id_manager_empty_range", "tests/lib_test.py::test_row_id_manager", "tests/lib_test.py::test_analyze", "tests/lib_test.py::test_update_order", "tests/lib_test.py::test_update_order_none", "tests/lib_test.py::test_build_sql", "tests/lib_test.py::test_build_sql_with_aliases", "tests/lib_test.py::test_build_sql_with_map", "tests/lib_test.py::test_build_sql_impossible", "tests/lib_test.py::test_escape_string", "tests/lib_test.py::test_unescape_string", "tests/lib_test.py::test_escape_identifier", "tests/lib_test.py::test_unescape_identifier", "tests/lib_test.py::test_serialize", "tests/lib_test.py::test_deserialize", "tests/lib_test.py::test_combine_args_kwargs", "tests/lib_test.py::test_filter_data", "tests/lib_test.py::test_find_adapter", "tests/lib_test.py::test_is_null", "tests/lib_test.py::test_is_not_null", "tests/lib_test.py::test_apply_limit_and_offset" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-03-08 09:44:36+00:00
mit
1,362
bids-standard__pybids-1018
diff --git a/bids/layout/config/bids.json b/bids/layout/config/bids.json index bad844ad..d1db778a 100644 --- a/bids/layout/config/bids.json +++ b/bids/layout/config/bids.json @@ -156,10 +156,10 @@ "sub-{subject}[/ses-{session}]/{datatype<fmap>|fmap}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_flip-{flip}][_inv-{inv}][_part-{part<mag|phase|real|imag>}]_{suffix<TB1AFI|TB1TFL|TB1RFM|RB1COR>}{extension<.nii|.nii.gz|.json>|.nii.gz}", "sub-{subject}[/ses-{session}]/{datatype<fmap>|fmap}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_run-{run}][_echo-{echo}]_flip-{flip}_inv-{inv}[_part-{part<mag|phase|real|imag>}]_{suffix<TB1SRGE>}{extension<.nii|.nii.gz|.json>|.nii.gz}", "sub-{subject}[/ses-{session}]/{datatype<fmap>|fmap}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_run-{run}]_{suffix<TB1map|RB1map>}{extension<.nii|.nii.gz|.json>|.nii.gz}", - "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}][_task-{task}][trc-{tracer}][_rec-{reconstruction}][_run-{run}]_{suffix<pet>}{extension<.nii|.nii.gz|.json>|.nii.gz}", - "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}][_task-{task}][trc-{tracer}][_rec-{reconstruction}][_run-{run}]_recording-{recording}_{suffix<blood>}{extension<.tsv|.json>}", - "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}]_task-{task}[trc-{tracer}][_rec-{reconstruction}][_run-{run}]_{suffix<events>}{extension<.tsv|.json>}", - "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}][_task-{task}][trc-{tracer}][_rec-{reconstruction}][_run-{run}][_recording-{recording}]_{suffix<physio|stim>}{extension<.tsv.gz|.json>}", + "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}][_task-{task}][_trc-{tracer}][_rec-{reconstruction}][_run-{run}]_{suffix<pet>}{extension<.nii|.nii.gz|.json>|.nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}][_task-{task}][_trc-{tracer}][_rec-{reconstruction}][_run-{run}]_recording-{recording}_{suffix<blood>}{extension<.tsv|.json>}", + "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}]_task-{task}[_trc-{tracer}][_rec-{reconstruction}][_run-{run}]_{suffix<events>}{extension<.tsv|.json>}", + "sub-{subject}[/ses-{session}]/{datatype<pet>|pet}/sub-{subject}[_ses-{session}][_task-{task}][_trc-{tracer}][_rec-{reconstruction}][_run-{run}][_recording-{recording}]_{suffix<physio|stim>}{extension<.tsv.gz|.json>}", "sub-{subject}[/ses-{session}]/{datatype<perf>|perf}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_rec-{reconstruction}][_dir-{direction}][_run-{run}]_{suffix<asl|m0scan>}{extension<.nii|.nii.gz|.json>|.nii.gz}", "sub-{subject}[/ses-{session}]/{datatype<perf>|perf}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_rec-{reconstruction}][_dir-{direction}][_run-{run}]_{suffix<aslcontext>}{extension<.tsv|.json>}", "sub-{subject}[/ses-{session}]/{datatype<perf>|perf}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}]_{suffix<asllabeling>}{extension<.jpg>}",
bids-standard/pybids
bf15918ade4c1241a8c1187292cd8a114e21e75a
diff --git a/bids/layout/tests/test_path_building.py b/bids/layout/tests/test_path_building.py index f67444b5..27c2042e 100644 --- a/bids/layout/tests/test_path_building.py +++ b/bids/layout/tests/test_path_building.py @@ -13,6 +13,15 @@ def layout(): data_dir = join(get_test_data_path(), '7t_trt') return BIDSLayout(data_dir) +def test_path_building_pet(layout): + """regression test for https://github.com/bids-standard/pybids/issues/1017 + + TODO: remove if https://github.com/bids-standard/bids-examples/issues/394 is fixed + as this test will then be covered + by test_path_building_in_raw_scope + + """ + layout.build_path({"subject": "123", "tracer": "18F", "suffix": "pet"}) def test_bold_construction(layout): ents = dict(subject='01', run=1, task='rest', suffix='bold')
BIDSValidationError When Using build_path with tracer **Description:** I'm encountering a `BIDSValidationError` when using the `build_path` method from the `pybids` library (version 0.15.5) to generate BIDS-compliant paths using specific entities. The error message suggests that the built path is not a valid BIDS filename, even though the provided entity values seem spec-compliant. **Reproduction Steps:** 1. Import the `pybids` library and create a `BIDSLayout` instance. 2. Attempt to use the `build_path` method with a dictionary containing certain entities. **Expected Behavior:** I expected the `build_path` method to generate a BIDS-compliant path based on the provided entities. **Actual Behavior:** When using the `build_path` method with entities like `"subject"`, `"tracer"`, and `"suffix"`, a `BIDSValidationError` exception is raised with the message that the built path is not a valid BIDS filename. The specific path construction is the issue. **Additional Information:** - The `pybids` version I'm using: 0.15.5 - Python version: 3.10.6 - Example code snippet: ```python from bids import BIDSLayout BIDSLayout('/path/to/bids_dataset').build_path({"subject": "123", "tracer": "18F", "suffix": "pet"}) ``` - Error: ```python bids.exceptions.BIDSValidationError: Built path sub-123/pet/sub-123trc-18F_pet.nii.gz is not a valid BIDS filename. Please make sure all provided entity values are spec-compliant. ``` As you can see, the filename is missing an underscore before the `trc`. I hope this is an easy fix. I tried manipulating `bids/layout/config/bids.json`, but couldn't figure out if or how that would be the solution. I've double-checked the entity values, and they seem to be spec-compliant. However, the error indicates that the generated path is invalid according to BIDS filename conventions. Is there a specific order or formatting that I need to follow when using certain entities in the build_path method? Thank you so much for your help in resolving this issue.
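A toy reduction of the template bug (the `render` helper below is hypothetical, not pybids code): the optional tracer segment in the path pattern lacked its leading underscore, so the subject label and `trc-18F` fuse together exactly as in the reported error.

```python
broken = "sub-{subject}[trc-{tracer}]_pet.nii.gz"  # missing "_" before trc
fixed = "sub-{subject}[_trc-{tracer}]_pet.nii.gz"  # what the patch restores

def render(pattern: str, **entities) -> str:
    """Toy expansion of one bracketed optional segment (hypothetical)."""
    out = pattern
    for key, value in entities.items():
        out = out.replace("[_trc-{%s}]" % key, "_trc-%s" % value)
        out = out.replace("[trc-{%s}]" % key, "trc-%s" % value)
        out = out.replace("{%s}" % key, str(value))
    return out

print(render(broken, subject="123", tracer="18F"))  # sub-123trc-18F_pet.nii.gz
print(render(fixed, subject="123", tracer="18F"))   # sub-123_trc-18F_pet.nii.gz
```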
0.0
bf15918ade4c1241a8c1187292cd8a114e21e75a
[ "bids/layout/tests/test_path_building.py::test_path_building_pet" ]
[ "bids/layout/tests/test_path_building.py::test_bold_construction", "bids/layout/tests/test_path_building.py::test_invalid_file_construction", "bids/layout/tests/test_path_building.py::test_failed_file_construction", "bids/layout/tests/test_path_building.py::test_insufficient_entities[True-True]", "bids/layout/tests/test_path_building.py::test_insufficient_entities[True-False]", "bids/layout/tests/test_path_building.py::test_insufficient_entities[False-True]", "bids/layout/tests/test_path_building.py::test_insufficient_entities[False-False]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-08-21 15:13:02+00:00
mit
1,363
bids-standard__pybids-492
diff --git a/bids/layout/layout.py b/bids/layout/layout.py index 32cf3c03..b00336ea 100644 --- a/bids/layout/layout.py +++ b/bids/layout/layout.py @@ -381,6 +381,25 @@ class BIDSLayout(object): layouts = [l for l in collect_layouts(self) if l._in_scope(scope)] return list(set(layouts)) + def _sanitize_query_dtypes(self, entities): + """ Automatically convert entity query values to correct dtypes. """ + entities = entities.copy() + names = list(entities.keys()) + ents = {e.name: e for e in + self.session.query(Entity) + .filter(Entity.name.in_(names)).all()} + # Fail silently because the DB may still know how to reconcile + # type differences. + for name, val in entities.items(): + try: + if isinstance(val, (list, tuple)): + entities[name] = [ents[name]._astype(v) for v in val] + else: + entities[name] = ents[name]._astype(val) + except: + pass + return entities + @property def entities(self): """Get the entities.""" @@ -786,6 +805,9 @@ class BIDSLayout(object): if filters: query = query.join(BIDSFile.tags) regex = kwargs.get('regex_search', False) + + filters = self._sanitize_query_dtypes(filters) + for name, val in filters.items(): if isinstance(val, (list, tuple)) and len(val) == 1: val = val[0]
bids-standard/pybids
15aa08fb2b146e3fcc62f359ea4a4d810d38591a
diff --git a/bids/layout/tests/test_layout.py b/bids/layout/tests/test_layout.py index b1f5238c..1faa7132 100644 --- a/bids/layout/tests/test_layout.py +++ b/bids/layout/tests/test_layout.py @@ -2,16 +2,20 @@ functionality should go in the grabbit package. """ import os +import re +import tempfile +from os.path import join, abspath, basename, dirname + +import numpy as np import pytest + import bids -import re from bids.layout import BIDSLayout, parse_file_entities, add_config_paths from bids.layout.models import (BIDSFile, BIDSImageFile, Entity, Config, FileAssociation) -from os.path import join, abspath, basename, dirname from bids.tests import get_test_data_path from bids.utils import natural_sort -import tempfile + def test_layout_init(layout_7t_trt): @@ -557,4 +561,13 @@ def test_indexing_tag_conflict(): layout = BIDSLayout(data_dir) print(exc.value.message) assert exc.value.message.startswith("Conflicting values found") - assert 'run' in exc.value.message \ No newline at end of file + assert 'run' in exc.value.message + + +def test_get_with_wrong_dtypes(layout_7t_trt): + ''' Test automatic dtype sanitization. ''' + l = layout_7t_trt + assert (l.get(run=1) == l.get(run='1') == l.get(run=np.int64(1)) == + l.get(run=[1, '15'])) + assert not l.get(run='not_numeric') + assert l.get(session=1) == l.get(session='1') \ No newline at end of file
layout.get doesn't find any entities when passed a numpy.int64 value for run When doing a `layout.get()`, if the type of the run argument is np.int64, it fails to find any entities. Here's a minimal reproduction using pybids test data: ``` In [1]: from os.path import join ...: from bids.analysis import Analysis ...: from bids.analysis.analysis import ContrastInfo, DesignMatrixInfo ...: from bids.layout import BIDSLayout ...: from bids.tests import get_test_data_path ...: import numpy as np ...: ...: def analysis(): ...: layout_path = join(get_test_data_path(), 'ds005') ...: layout = BIDSLayout(layout_path) ...: json_file = join(layout_path, 'models', 'ds-005_type-test_model.json') ...: analysis = Analysis(layout, json_file) ...: analysis.setup(scan_length=480, subject=['01', '02']) ...: return analysis ...: foo = analysis() In [2]: layout_path = join(get_test_data_path(), 'ds005') ...: layout = BIDSLayout(layout_path) ...: layout.get_runs() Out[2]: [1, 2, 3] In [3]: layout.get(suffix="bold",subject='01', run=1) Out[3]: [<BIDSImageFile filename='/gpfs/gsfs11/users/MBDU/midla/notebooks/code/pybids/bids/tests/data/ds005/sub-01/func/sub-01_task-mixedgamblestask_run-01_bold.nii.gz'>] In [4]: layout.get(suffix="bold",subject='01', run=np.int64(1)) ...: Out[4]: [] In [5]: # Now just to show that pybids itself is generating numpy.int64 run values, here's the output from get design matrix In [6]: for _, _, ents in foo.steps[0].get_design_matrix(): ...: break ...: ents Out[6]: {'task': 'mixedgamblestask', 'run': 1, 'suffix': 'bold', 'datatype': 'func', 'subject': '01'} In [7]: type(ents['run']) Out[7]: numpy.int64 In [8]: layout.get(**ents) Out[8]: [] In [9]: ents['run']=1 ...: layout.get(**ents) Out[9]: [<BIDSImageFile filename='/gpfs/gsfs11/users/MBDU/midla/notebooks/code/pybids/bids/tests/data/ds005/sub-01/func/sub-01_task-mixedgamblestask_run-01_bold.nii.gz'>] ``` And before anyone suggests that the solution is to avoid passing int64s as run numbers, notice that `get_design_matrix` returns an int64. I've reproduced the above results with both 0.9.2 and 0.9.3.
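A hedged sketch of the approach the patch takes (the `entity_dtypes` table below is illustrative, not the real pybids schema): cast each query value to the entity's declared dtype before filtering, so `1`, `'1'` and `np.int64(1)` all match the same rows, and failures are swallowed so the database can still try to reconcile the type.

```python
import numpy as np

entity_dtypes = {"run": int, "subject": str}  # illustrative only

def sanitize_query_dtypes(filters: dict) -> dict:
    """Cast query values to each entity's declared dtype, failing silently."""
    out = {}
    for name, value in filters.items():
        caster = entity_dtypes.get(name)
        try:
            if isinstance(value, (list, tuple)):
                out[name] = [caster(v) for v in value]
            else:
                out[name] = caster(value)
        except (TypeError, ValueError):
            out[name] = value  # let the backend try to reconcile the type
    return out

print(sanitize_query_dtypes({"run": np.int64(1), "subject": 1}))
# {'run': 1, 'subject': '1'}
```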
0.0
15aa08fb2b146e3fcc62f359ea4a4d810d38591a
[ "bids/layout/tests/test_layout.py::test_get_with_wrong_dtypes" ]
[ "bids/layout/tests/test_layout.py::test_layout_init", "bids/layout/tests/test_layout.py::test_layout_repr", "bids/layout/tests/test_layout.py::test_load_description", "bids/layout/tests/test_layout.py::test_get_file", "bids/layout/tests/test_layout.py::test_get_metadata", "bids/layout/tests/test_layout.py::test_get_metadata2", "bids/layout/tests/test_layout.py::test_get_metadata3", "bids/layout/tests/test_layout.py::test_get_metadata4", "bids/layout/tests/test_layout.py::test_get_metadata_meg", "bids/layout/tests/test_layout.py::test_get_metadata5", "bids/layout/tests/test_layout.py::test_get_metadata_via_bidsfile", "bids/layout/tests/test_layout.py::test_get_with_bad_target", "bids/layout/tests/test_layout.py::test_get_bvals_bvecs", "bids/layout/tests/test_layout.py::test_get_subjects", "bids/layout/tests/test_layout.py::test_get_fieldmap", "bids/layout/tests/test_layout.py::test_get_fieldmap2", "bids/layout/tests/test_layout.py::test_bids_json", "bids/layout/tests/test_layout.py::test_get_return_type_dir", "bids/layout/tests/test_layout.py::test_get_val_none", "bids/layout/tests/test_layout.py::test_get_return_sorted", "bids/layout/tests/test_layout.py::test_force_index", "bids/layout/tests/test_layout.py::test_nested_include_exclude", "bids/layout/tests/test_layout.py::test_nested_include_exclude_with_regex", "bids/layout/tests/test_layout.py::test_layout_with_derivs", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs", "bids/layout/tests/test_layout.py::test_query_derivatives", "bids/layout/tests/test_layout.py::test_restricted_words_in_path", "bids/layout/tests/test_layout.py::test_derivative_getters", "bids/layout/tests/test_layout.py::test_get_tr", "bids/layout/tests/test_layout.py::test_parse_file_entities", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout", "bids/layout/tests/test_layout.py::test_deriv_indexing", "bids/layout/tests/test_layout.py::test_add_config_paths", "bids/layout/tests/test_layout.py::test_layout_in_scope", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope", "bids/layout/tests/test_layout.py::test_get_dataset_description", "bids/layout/tests/test_layout.py::test_indexed_file_associations", "bids/layout/tests/test_layout.py::test_layout_save", "bids/layout/tests/test_layout.py::test_indexing_tag_conflict" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-09-11 17:01:09+00:00
mit
1,364
bids-standard__pybids-574
diff --git a/Makefile b/Makefile index 5f61023c..78028c3e 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ .PHONY: doc tutorial travis_tests travis_tests: - pytest -n 2 -v --cov bids --cov-config .coveragerc --cov-report xml:cov.xml bids + pytest --doctest-modules -n 2 -v --cov bids --cov-config .coveragerc --cov-report xml:cov.xml bids tutorial: jupyter nbconvert --execute examples/pybids_tutorial.ipynb diff --git a/bids/layout/config/bids.json b/bids/layout/config/bids.json index 52a573a8..fd299f06 100644 --- a/bids/layout/config/bids.json +++ b/bids/layout/config/bids.json @@ -80,18 +80,18 @@ ], "default_path_patterns": [ - "sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]_{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.{extension<nii|nii.gz|json>|nii.gz}", - "sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_mod-{modality}]_{suffix<defacemask>}.{extension<nii|nii.gz|json>|nii.gz}", - "sub-{subject}[/ses-{session}]/func/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}]_{suffix<bold|cbv|phase|sbref>}.{extension<nii|nii.gz|json>|nii.gz}", - "sub-{subject}[/ses-{session}]/dwi/sub-{subject}[_ses-{session}][_acq-{acquisition}]_{suffix<dwi>}.{extension<bval|bvec|json|nii.gz|nii>|nii.gz}", - "sub-{subject}[/ses-{session}]/fmap/sub-{subject}[_ses-{session}][_acq-{acquisition}][_dir-{direction}][_run-{run}]_{fmap<phasediff|magnitude[1-2]|phase[1-2]|fieldmap>}.{extension<nii|nii.gz|json>|nii.gz}", - "sub-{subject}[/ses-{session}]/fmap/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}]_dir-{direction}[_run-{run}]_{fmap<epi>}.{extension<nii|nii.gz|json>|nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<anat>|anat}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]_{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.{extension<nii|nii.gz|json>|nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<anat>|anat}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_mod-{modality}]_{suffix<defacemask>}.{extension<nii|nii.gz|json>|nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<func>|func}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}]_{suffix<bold|cbv|phase|sbref>}.{extension<nii|nii.gz|json>|nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<dwi>|dwi}/sub-{subject}[_ses-{session}][_acq-{acquisition}]_{suffix<dwi>}.{extension<bval|bvec|json|nii.gz|nii>|nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<fmap>|fmap}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_dir-{direction}][_run-{run}]_{fmap<phasediff|magnitude[12]|phase[12]|fieldmap>}.{extension<nii|nii.gz|json>|nii.gz}", + "sub-{subject}[/ses-{session}]/{datatype<fmap>|fmap}/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}]_dir-{direction}[_run-{run}]_{fmap<epi>}.{extension<nii|nii.gz|json>|nii.gz}", "sub-{subject}[/ses-{session}]/[{datatype<func|meg|beh>|func}/]sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_recording-{recording}]_{suffix<events>}.{extension<tsv|json>|tsv}", 
"sub-{subject}[/ses-{session}]/[{datatype<func|beh>|func}/]sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_recording-{recording}]_{suffix<physio|stim>}.{extension<tsv.gz|json>|tsv.gz}", - "sub-{subject}[/ses-{session}]/meg/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_run-{run}][_proc-{proc}]_{suffix<meg>}.{extension}", - "sub-{subject}[/ses-{session}]/meg/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_run-{run}][_proc-{proc}]_{suffix<channels>}.{extension<tsv|json>|tsv}", - "sub-{subject}[/ses-{session}]/meg/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}]_{suffix<coordsystem>}.{extension<json>|json}", - "sub-{subject}[/ses-{session}]/meg/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}]_{suffix<photo>}.{extension<jpg>|jpg}", + "sub-{subject}[/ses-{session}]/{datatype<meg>|meg}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_run-{run}][_proc-{proc}]_{suffix<meg>}.{extension}", + "sub-{subject}[/ses-{session}]/{datatype<meg>|meg}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}][_run-{run}][_proc-{proc}]_{suffix<channels>}.{extension<tsv|json>|tsv}", + "sub-{subject}[/ses-{session}]/{datatype<meg>|meg}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}]_{suffix<coordsystem>}.{extension<json>|json}", + "sub-{subject}[/ses-{session}]/{datatype<meg>|meg}/sub-{subject}[_ses-{session}]_task-{task}[_acq-{acquisition}]_{suffix<photo>}.{extension<jpg>|jpg}", "[acq-{acquisition}_][ce-{ceagent}_][rec-{reconstruction}_]{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.{extension<json>|json}", "[acq-{acquisition}_][ce-{ceagent}_][rec-{reconstruction}_][mod-{modality}_]{suffix<defacemask>}.{extension<json>|json}", "task-{task}[_acq-{acquisition}][_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}][_run-{run}][_echo-{echo}]_{suffix<bold|cbv|phase|sbref>}.{extension<json>|json}", diff --git a/bids/layout/writing.py b/bids/layout/writing.py index e762ab2a..e3b8b853 100644 --- a/bids/layout/writing.py +++ b/bids/layout/writing.py @@ -6,57 +6,14 @@ import warnings import os import re import sys +from string import Formatter +from itertools import product from ..utils import splitext, listify from os.path import join, dirname, exists, islink, isabs, isdir -__all__ = ['replace_entities', 'build_path', 'write_contents_to_file'] +__all__ = ['build_path', 'write_contents_to_file'] - -def replace_entities(entities, pattern): - """ - Replaces all entity names in a given pattern with the corresponding - values provided by entities. - - Parameters - ---------- - entities : dict - A dictionary mapping entity names to entity values. - pattern : str - A path pattern that contains entity names denoted - by curly braces. Optional portions denoted by square braces. - For example: 'sub-{subject}/[var-{name}/]{id}.csv' - Accepted entity values, using regex matching, denoted within angle - brackets. - For example: 'sub-{subject<01|02>}/{task}.csv' - - Returns - ------- - A new string with the entity values inserted where entity names - were denoted in the provided pattern. 
- """ - entities = entities.copy() # make a local copy, since dicts are mutable - ents = re.findall(r'{(.*?)\}', pattern) - new_path = pattern - for ent in ents: - match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent) - if match is None: - return None - name, valid, default = match.groups() - default = default[1:] if default is not None else default - - if name in entities and valid is not None: - ent_val = str(entities[name]) - if not re.match(valid[1:-1], ent_val): - if default is None: - return None - entities[name] = default - - ent_val = entities.get(name, default) - if ent_val is None: - return None - new_path = new_path.replace('{%s}' % ent, str(ent_val)) - - return new_path +_PATTERN_FIND = re.compile(r'({([\w\d]*?)(?:<([^>]+)>)?(?:\|((?:\.?[\w])+))?\})') def build_path(entities, path_patterns, strict=False): @@ -66,9 +23,13 @@ def build_path(entities, path_patterns, strict=False): Parameters ---------- - entities : dict + entities : :obj:`dict` A dictionary mapping entity names to entity values. - path_patterns : str or list + Entities with ``None`` or empty-string value will be removed. + Otherwise, entities will be cast to string values, therefore + if any format is expected (e.g., zero-padded integers), the + value should be formatted. + path_patterns : :obj:`str` or :obj:`list` One or more filename patterns to write the file to. Entities should be represented by the name surrounded by curly braces. Optional portions of the patterns @@ -78,36 +39,143 @@ def build_path(entities, path_patterns, strict=False): the pipe operator. E.g., (e.g., {type<image>|bold} would only match the pattern if the entity 'type' was passed and its value is "image", otherwise the default value "bold" will be used). - Example 1: 'sub-{subject}/[var-{name}/]{id}.csv' - Result 2: 'sub-01/var-SES/1045.csv' - strict : bool + strict : :obj:`bool` If True, all passed entities must be matched inside a pattern in order to be a valid match. If False, extra entities will be ignored so long as all mandatory entities are found. Returns ------- - A constructed path for this file based on the provided patterns. + A constructed path for this file based on the provided patterns, or + ``None`` if no path was built given the combination of entities and patterns. + + Examples + -------- + >>> entities = { + ... 'extension': 'nii', + ... 'space': 'MNI', + ... 'subject': '001', + ... 'suffix': 'inplaneT2', + ... } + >>> patterns = ['sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}]' + ... '[_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]_' + ... '{suffix<T[12]w|T1rho|T[12]map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|' + ... 'inplaneT[12]|angio>}.{extension<nii|nii.gz|json>|nii.gz}', + ... 'sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}]' + ... '[_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]' + ... '[_space-{space}][_desc-{desc}]_{suffix<T1w|T2w|T1rho|T1map|T2map|' + ... 'T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.' + ... 
'{extension<nii|nii.gz|json>|nii.gz}'] + >>> build_path(entities, patterns) + 'sub-001/anat/sub-001_inplaneT2.nii' + + >>> build_path(entities, patterns, strict=True) + 'sub-001/anat/sub-001_space-MNI_inplaneT2.nii' + + >>> entities['space'] = None + >>> build_path(entities, patterns, strict=True) + 'sub-001/anat/sub-001_inplaneT2.nii' + + >>> # If some entity is set to None, they are dropped + >>> entities['extension'] = None + >>> build_path(entities, patterns, strict=True) + 'sub-001/anat/sub-001_inplaneT2.nii.gz' + + >>> # If some entity is set to empty-string, they are dropped + >>> entities['extension'] = '' + >>> build_path(entities, patterns, strict=True) + 'sub-001/anat/sub-001_inplaneT2.nii.gz' + + >>> # If some selector is not in the pattern, skip it... + >>> entities['datatype'] = 'anat' + >>> build_path(entities, patterns) + 'sub-001/anat/sub-001_inplaneT2.nii.gz' + + >>> # ... unless the pattern should be strictly matched + >>> entities['datatype'] = 'anat' + >>> build_path(entities, patterns, strict=True) is None + True + + >>> # If the value of an entity is not valid, do not match the pattern + >>> entities['suffix'] = 'bold' + >>> build_path(entities, patterns) is None + True + + >>> entities = { + ... 'extension': 'bvec', + ... 'subject': '001', + ... } + >>> patterns = ( + ... "sub-{subject}[/ses-{session}]/{datatype|dwi}/sub-{subject}[_ses-{session}]" + ... "[_acq-{acquisition}]_{suffix|dwi}.{extension<bval|bvec|json|nii.gz|nii>|nii.gz}" + ... ) + >>> build_path(entities, patterns, strict=True) + 'sub-001/dwi/sub-001_dwi.bvec' + """ path_patterns = listify(path_patterns) + # One less source of confusion + if 'extension' in entities and entities['extension'] is not None: + entities['extension'] = entities['extension'].lstrip('.') + + # Drop None and empty-strings, keep zeros + entities = {k: v for k, v in entities.items() if v or v == 0} + # Loop over available patherns, return first one that matches all for pattern in path_patterns: + entities_matched = list(_PATTERN_FIND.findall(pattern)) + defined = [e[1] for e in entities_matched] + # If strict, all entities must be contained in the pattern if strict: - defined = re.findall(r'{(.*?)(?:<[^>]+>)?\}', pattern) if set(entities.keys()) - set(defined): continue + # Iterate through the provided path patterns new_path = pattern - optional_patterns = re.findall(r'\[(.*?)\]', pattern) - # First build from optional patterns if possible - for optional_pattern in optional_patterns: - optional_chunk = replace_entities(entities, optional_pattern) or '' - new_path = new_path.replace('[%s]' % optional_pattern, - optional_chunk) - # Replace remaining entities - new_path = replace_entities(entities, new_path) + + # Expand options within valid values and + # check whether entities provided have acceptable value + tmp_entities = entities.copy() # Do not modify the original query + for fmt, name, valid, defval in entities_matched: + valid_expanded = [v for val in valid.split('|') if val + for v in _expand_options(val)] + if valid_expanded and defval and defval not in valid_expanded: + warnings.warn( + 'Pattern "%s" is inconsistent as it defines an invalid default value.' 
% fmt + ) + if ( + valid_expanded + and name in entities + and entities[name] not in valid_expanded + ): + continue + + if defval and name not in tmp_entities: + tmp_entities[name] = defval + + # At this point, valid & default values are checked & set - simplify pattern + new_path = new_path.replace(fmt, '{%s}' % name) + + optional_patterns = re.findall(r'(\[.*?\])', new_path) + # Optional patterns with selector are cast to mandatory or removed + for op in optional_patterns: + for ent_name in {k for k, v in entities.items() if v is not None}: + if ('{%s}' % ent_name) in op: + new_path = new_path.replace(op, op[1:-1]) + continue + + # Surviving optional patterns are removed + new_path = new_path.replace(op, '') + + # Replace entities + fields = {pat[1] for pat in Formatter().parse(new_path) + if pat[1] and not pat[1].isdigit()} + if fields - set(tmp_entities.keys()): + continue + + new_path = new_path.format(**tmp_entities) if new_path: return new_path @@ -191,3 +259,24 @@ def write_contents_to_file(path, contents=None, link_to=None, f.write(contents) else: raise ValueError('One of contents or link_to must be provided.') + + +def _expand_options(value): + """ + Expand optional substrings of valid entity values. + + Examples + -------- + >>> _expand_options('[Jj]son[12]') + ['Json1', 'Json2', 'json1', 'json2'] + + >>> _expand_options('json') + ['json'] + + """ + expand_patterns = re.findall(r'\[(.*?)\]', value) + if not expand_patterns: + return [value] + + value = re.sub(r'\[(.*?)\]', '%s', value) + return [value % _r for _r in product(*expand_patterns)] diff --git a/bids/reports/report.py b/bids/reports/report.py index 9f297276..78f19c9e 100644 --- a/bids/reports/report.py +++ b/bids/reports/report.py @@ -50,7 +50,7 @@ class BIDSReport(object): self.config = config def generate(self, **kwargs): - """Generate the methods section. + r"""Generate the methods section. Parameters ---------- @@ -77,7 +77,12 @@ class BIDSReport(object): >>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic')) >>> report = BIDSReport(layout) >>> counter = report.generate(session='01') - >>> counter.most_common()[0][0] + Number of patterns detected: 1 + Remember to double-check everything and to replace <deg> with a degree symbol. + + >>> counter.most_common()[0][0] # doctest: +ELLIPSIS + 'For session 01:\n\tMR data were...' + """ descriptions = [] diff --git a/bids/variables/io.py b/bids/variables/io.py index 8a3ab176..b41ae25a 100644 --- a/bids/variables/io.py +++ b/bids/variables/io.py @@ -56,7 +56,7 @@ def load_variables(layout, types=None, levels=None, skip_empty=True, Examples -------- - >>> load_variables(layout, ['events', 'physio'], subject='01') + >>> load_variables(layout, ['events', 'physio'], subject='01') # doctest: +SKIP # returns all variables stored in _events.tsv and _physio.tsv.gz files # for runs that belong to subject with id '01'. """ diff --git a/doc/api.rst b/doc/api.rst index a6c63861..4d5c1e01 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -20,6 +20,7 @@ API Reference bids.layout.parse_file_entities bids.layout.add_config_paths bids.layout.index.BIDSLayoutIndexer + bids.layout.writing .. currentmodule:: bids
bids-standard/pybids
ce25158070557ec67af98b41ebefbaa05515febf
diff --git a/bids/layout/tests/test_path_building.py b/bids/layout/tests/test_path_building.py index 8457c95b..601c5276 100644 --- a/bids/layout/tests/test_path_building.py +++ b/bids/layout/tests/test_path_building.py @@ -22,7 +22,7 @@ def test_invalid_file_construction(layout): ents = dict(subject='01', run=1, task='resting-state', suffix='bold') with pytest.raises(ValueError): layout.build_path(ents) - + target = "sub-01/func/sub-01_task-resting-state_run-1_bold.nii.gz" assert layout.build_path(ents, validate=False) == target @@ -30,4 +30,4 @@ def test_invalid_file_construction(layout): def test_failed_file_construction(layout): ents = dict(subject='01', fakekey='foobar') with pytest.raises(ValueError): - layout.build_path(ents) + layout.build_path(ents, strict=True) diff --git a/bids/layout/tests/test_writing.py b/bids/layout/tests/test_writing.py index 6883afbc..099be470 100644 --- a/bids/layout/tests/test_writing.py +++ b/bids/layout/tests/test_writing.py @@ -3,7 +3,7 @@ import os import shutil from os.path import join, exists, islink, dirname -from bids.layout.writing import build_path +from bids.layout.writing import build_path, _PATTERN_FIND from bids.tests import get_test_data_path from bids import BIDSLayout from bids.layout.models import BIDSFile, Entity, Tag, Base @@ -61,6 +61,42 @@ def layout(tmp_bids): class TestWritableFile: + def test_parse_pattern_re(self): + """Unit tests on the strict entity pattern finder regex.""" + assert _PATTERN_FIND.findall('{extension<nii|nii.gz|json>|nii.gz}') == [ + ('{extension<nii|nii.gz|json>|nii.gz}', 'extension', 'nii|nii.gz|json', 'nii.gz') + ] + assert _PATTERN_FIND.findall('{extension<json|jsld>|json}') == [ + ('{extension<json|jsld>|json}', 'extension', 'json|jsld', 'json') + ] + assert _PATTERN_FIND.findall('{task<func|rest>}/r-{run}.nii.gz') == [ + ('{task<func|rest>}', 'task', 'func|rest', ''), + ('{run}', 'run', '', '') + ] + + pattern = """\ +sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]\ +[_space-{space}]_{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.\ +{extension<nii|nii.gz|json>|nii.gz}""" + assert sorted(_PATTERN_FIND.findall(pattern)) == [ + ('{acquisition}', 'acquisition', '', ''), + ('{ceagent}', 'ceagent', '', ''), + ('{extension<nii|nii.gz|json>|nii.gz}', 'extension', 'nii|nii.gz|json', 'nii.gz'), + ('{reconstruction}', 'reconstruction', '', ''), + ('{session}', 'session', '', ''), + ('{session}', 'session', '', ''), + ('{space}', 'space', '', ''), + ('{subject}', 'subject', '', ''), + ('{subject}', 'subject', '', ''), + ( + '{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|' + 'PD|PDT2|inplaneT[12]|angio>}', + 'suffix', + 'T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio', + '' + ) + ] + def test_build_path(self, writable_file): # Single simple pattern @@ -112,11 +148,37 @@ class TestWritableFile: assert build_path({'run': 3}, pats) == 'ses-A/r-3.nii.gz' # Pattern with both valid and default values - pats = ['ses-{session<A|B|C>|D}/r-{run}.nii.gz'] - assert build_path({'session': 1, 'run': 3}, pats) == 'ses-D/r-3.nii.gz' - pats = ['ses-{session<A|B|C>|D}/r-{run}.nii.gz'] + pats = ['ses-{session<A|B|C|D>|D}/r-{run}.nii.gz'] + assert build_path({'run': 3}, pats) == 'ses-D/r-3.nii.gz' + pats = ['ses-{session<A|B|C|D>|D}/r-{run}.nii.gz'] assert build_path({'session': 'B', 'run': 3}, pats) == 'ses-B/r-3.nii.gz' + # Test extensions with dot and warning is issued + pats = 
['ses-{session<A|B|C>|D}/r-{run}.{extension}'] + with pytest.warns(UserWarning) as record: + assert build_path({'session': 'B', 'run': 3, 'extension': '.nii'}, + pats) == 'ses-B/r-3.nii' + assert "defines an invalid default value" in record[0].message.args[0] + + # Test expansion of optional characters + pats = ['ses-{session<[ABCD]>|D}/r-{run}.{extension}'] + assert build_path({'session': 'B', 'run': 3, 'extension': '.nii'}, + pats) == 'ses-B/r-3.nii' + + # Test default-only patterns are correctly overriden by setting entity + entities = { + 'subject': '01', + 'extension': 'bvec', + 'suffix': 'T1rho', + } + pats = ( + "sub-{subject}[/ses-{session}]/{datatype|dwi}/sub-{subject}[_ses-{session}]" + "[_acq-{acquisition}]_{suffix|dwi}.{extension<bval|bvec|json|nii.gz|nii>|nii.gz}" + ) + assert build_path(entities, pats) == 'sub-01/dwi/sub-01_T1rho.bvec' + assert build_path(entities, pats, strict=True) == 'sub-01/dwi/sub-01_T1rho.bvec' + + def test_strict_build_path(self): # Test with strict matching--should fail
`build_path` chooses the first pattern that matches, even when a better match exists It looks like `BIDSLayout.build_path()` goes with the first path pattern it finds for which all required entities are given. I ran into a situation where this was blocking access to a better pattern later in the list. A minimal example: ``` from bids.layout import BIDSLayout data=BIDSLayout('/path/to/my/BIDS/data') entities = {'subject':'001', 'suffix':'T1w', 'extension':'.nii.gz', 'space':'MNI' } patterns = ['sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]_{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.{extension<nii|nii.gz|json>|nii.gz}', 'sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}][_space-{space}][_desc-{desc}]_{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.{extension<nii|nii.gz|json>|nii.gz}'] print(data.build_path(entities, patterns, validate=False)) print(data.build_path(entities, patterns[1:], validate=False)) ``` outputs the following: ``` sub-001/anat/sub-001_T1w.nii.gz sub-001/anat/sub-001_space-MNI_T1w.nii.gz ``` Including the first pattern causes the 2nd to never be reached, even though it is better: `patterns[1]` makes use of the 'space' entity which is absent in `patterns[0]`. Users can work around this by giving the exact patterns they want and no others, but it would be nice if pybids tried to find a pattern which matched all given entities, or failing that, as many as possible.
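A toy illustration of the reported behavior (not pybids code; `first_match` is a hypothetical stand-in for `build_path`): with a first-match strategy, the later pattern that uses more of the provided entities is never reached, which is why dropping `patterns[0]` changes the result.

```python
import string

entities = {"subject": "001", "space": "MNI"}

patterns = [
    "sub-{subject}_T1w.nii.gz",                # matches first, ignores 'space'
    "sub-{subject}_space-{space}_T1w.nii.gz",  # better match, never reached
]

def first_match(entities: dict, patterns: list) -> str:
    """Return the first pattern whose fields are all provided (toy logic)."""
    for pattern in patterns:
        fields = {f for _, f, _, _ in string.Formatter().parse(pattern) if f}
        if fields <= set(entities):
            return pattern.format(**entities)

print(first_match(entities, patterns))      # sub-001_T1w.nii.gz
print(first_match(entities, patterns[1:]))  # sub-001_space-MNI_T1w.nii.gz
```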
0.0
ce25158070557ec67af98b41ebefbaa05515febf
[ "bids/layout/tests/test_path_building.py::test_bold_construction", "bids/layout/tests/test_path_building.py::test_invalid_file_construction", "bids/layout/tests/test_path_building.py::test_failed_file_construction", "bids/layout/tests/test_writing.py::TestWritableFile::test_parse_pattern_re", "bids/layout/tests/test_writing.py::TestWritableFile::test_strict_build_path", "bids/layout/tests/test_writing.py::TestWritableLayout::test_write_files", "bids/layout/tests/test_writing.py::TestWritableLayout::test_write_contents_to_file", "bids/layout/tests/test_writing.py::TestWritableLayout::test_write_contents_to_file_defaults", "bids/layout/tests/test_writing.py::TestWritableLayout::test_build_file_from_layout" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-01-11 03:08:35+00:00
mit
1,365
bids-standard__pybids-610
diff --git a/bids/layout/layout.py b/bids/layout/layout.py index e1acd6bd..5adba334 100644 --- a/bids/layout/layout.py +++ b/bids/layout/layout.py @@ -11,6 +11,7 @@ import warnings import sqlite3 import enum from pathlib import Path +import difflib import sqlalchemy as sa from sqlalchemy.orm import joinedload @@ -848,7 +849,7 @@ class BIDSLayout(object): return data.reset_index() def get(self, return_type='object', target=None, scope='all', - regex_search=False, absolute_paths=None, drop_invalid_filters=True, + regex_search=False, absolute_paths=None, invalid_filters='error', **filters): """Retrieve files and/or metadata from the current Layout. @@ -882,6 +883,14 @@ class BIDSLayout(object): to report either absolute or relative (to the top of the dataset) paths. If None, will fall back on the value specified at BIDSLayout initialization. + invalid_filters (str): Controls behavior when named filters are + encountered that don't exist in the database (e.g., in the case of + a typo like subbject='0.1'). Valid values: + 'error' (default): Raise an explicit error. + 'drop': Silently drop invalid filters (equivalent to not having + passed them as arguments in the first place). + 'allow': Include the invalid filters in the query, resulting + in no results being returned. filters : dict Any optional key/values to filter the entities on. Keys are entity names, values are regexes to filter on. For @@ -909,15 +918,25 @@ class BIDSLayout(object): filters['extension'] = [x.lstrip('.') if isinstance(x, str) else x for x in exts] - if drop_invalid_filters: - invalid_filters = set(filters.keys()) - set(entities.keys()) - if invalid_filters: - for inv_filt in invalid_filters: - filters.pop(inv_filt) + if invalid_filters != 'allow': + bad_filters = set(filters.keys()) - set(entities.keys()) + if bad_filters: + if invalid_filters == 'drop': + for bad_filt in bad_filters: + filters.pop(bad_filt) + elif invalid_filters == 'error': + first_bad = list(bad_filters)[0] + msg = "'{}' is not a recognized entity. ".format(first_bad) + ents = list(entities.keys()) + suggestions = difflib.get_close_matches(first_bad, ents) + if suggestions: + msg += "Did you mean {}? ".format(suggestions) + raise ValueError(msg + "If you're sure you want to impose " + "this constraint, set " + "invalid_filters='allow'.") # Provide some suggestions if target is specified and invalid. if target is not None and target not in entities: - import difflib potential = list(entities.keys()) suggestions = difflib.get_close_matches(target, potential) if suggestions:
bids-standard/pybids
c4d1f1f46c758e9f9b005eea2ccd6096459985cb
diff --git a/bids/layout/tests/test_layout.py b/bids/layout/tests/test_layout.py index 229634d9..e2a39375 100644 --- a/bids/layout/tests/test_layout.py +++ b/bids/layout/tests/test_layout.py @@ -40,7 +40,8 @@ def test_index_metadata(index_metadata, query, result): if not index_metadata and query is not None: indexer = BIDSLayoutIndexer(layout) indexer.index_metadata(**query) - sample_file = layout.get(task='rest', extension='nii.gz', acq='fullbrain')[0] + sample_file = layout.get(task='rest', extension='nii.gz', + acquisition='fullbrain')[0] metadata = sample_file.get_metadata() assert metadata.get('RepetitionTime') == result @@ -267,25 +268,25 @@ def test_get_return_type_dir(layout_7t_trt, layout_7t_trt_relpath): @pytest.mark.parametrize("acq", [None, Query.NONE]) def test_get_val_none(layout_7t_trt, acq): - t1w_files = layout_7t_trt.get(subject='01', ses='1', suffix='T1w') + t1w_files = layout_7t_trt.get(subject='01', session='1', suffix='T1w') assert len(t1w_files) == 1 assert 'acq' not in t1w_files[0].path t1w_files = layout_7t_trt.get( - subject='01', ses='1', suffix='T1w', acquisition=acq) + subject='01', session='1', suffix='T1w', acquisition=acq) assert len(t1w_files) == 1 bold_files = layout_7t_trt.get( - subject='01', ses='1', suffix='bold', acquisition=acq) + subject='01', session='1', suffix='bold', acquisition=acq) assert len(bold_files) == 0 def test_get_val_enum_any(layout_7t_trt): t1w_files = layout_7t_trt.get( - subject='01', ses='1', suffix='T1w', acquisition=Query.ANY, + subject='01', session='1', suffix='T1w', acquisition=Query.ANY, extension=Query.ANY) assert not t1w_files - bold_files = layout_7t_trt.get(subject='01', ses='1', run=1, suffix='bold', - acquisition=Query.ANY) - assert len(bold_files) == 3 + bold_files = layout_7t_trt.get(subject='01', session='1', run=1, + suffix='bold', acquisition=Query.ANY) + assert len(bold_files) == 2 def test_get_return_sorted(layout_7t_trt): @@ -669,6 +670,26 @@ def test_get_with_regex_search_bad_dtype(layout_7t_trt): # Two runs (1 per session) for each of subjects '10' and '01' assert len(results) == 4 + +def test_get_with_invalid_filters(layout_ds005): + l = layout_ds005 + # Raise error with suggestions + with pytest.raises(ValueError, match='session'): + l.get(subject='12', ses=True, invalid_filters='error') + with pytest.raises(ValueError, match='session'): + l.get(subject='12', ses=True) + # Silently drop amazing + res_without = l.get(subject='12', suffix='bold') + res_drop = l.get(subject='12', suffix='bold', amazing='!!!', + invalid_filters='drop') + assert res_without == res_drop + assert len(res_drop) == 3 + # Retain amazing, producing empty set + allow_res = l.get(subject='12', amazing=True, invalid_filters='allow') + assert allow_res == [] + + + def test_load_layout(layout_synthetic_nodb, db_dir): db_path = str(db_dir / 'tmp_db') layout_synthetic_nodb.save(db_path)
get silently ignores unknown keyword arguments (in my case `ses`) I was "inside" one of the tests and decided to get a sample file. To my surprise, I got multiple:
```
431     def test_derivative_getters():
432         synth_path = join(get_test_data_path(), 'synthetic')
433         bare_layout = BIDSLayout(synth_path, derivatives=False)
434         full_layout = BIDSLayout(synth_path, derivatives=True)
435         import pdb; pdb.set_trace()
436  ->     assert bare_layout.get_spaces() == []
(Pdb) from pprint import pprint
(Pdb) pprint(bare_layout.get(subject='05', ses='02', suffix='T1w'))
[<BIDSImageFile filename='/home/yoh/proj/bids/pybids/bids/tests/data/synthetic/sub-05/ses-01/anat/sub-05_ses-01_T1w.nii'>,
 <BIDSImageFile filename='/home/yoh/proj/bids/pybids/bids/tests/data/synthetic/sub-05/ses-01/anat/sub-05_ses-01_T1w.nii.gz'>,
 <BIDSImageFile filename='/home/yoh/proj/bids/pybids/bids/tests/data/synthetic/sub-05/ses-02/anat/sub-05_ses-02_T1w.nii'>,
 <BIDSImageFile filename='/home/yoh/proj/bids/pybids/bids/tests/data/synthetic/sub-05/ses-02/anat/sub-05_ses-02_T1w.nii.gz'>]
(Pdb) bids.__version__
'0.10.2+12.g8b5ca0e.dirty'
```
It does select correctly when I specify `session` instead of `ses`. I would expect pybids to at least complain that what I specified as `ses` was not understood. Typos, API changes, etc. could silently lead to incorrect results, so imho it warrants AT LEAST a warning (I feel we had some discussion like that before; sorry if I am repeating myself ;))
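A minimal sketch of the behavior the patch introduces (`known_entities` below is illustrative): unknown filter names now raise a `ValueError` with a `difflib`-based suggestion by default, instead of being dropped silently, and `invalid_filters='drop'` restores the old behavior explicitly.

```python
import difflib

known_entities = ["subject", "session", "run", "suffix"]  # illustrative only

def check_filters(filters: dict, invalid_filters: str = "error") -> dict:
    """Reject or drop filter names that are not recognized entities."""
    bad = set(filters) - set(known_entities)
    if bad and invalid_filters == "error":
        first = sorted(bad)[0]
        hint = difflib.get_close_matches(first, known_entities)
        raise ValueError(f"'{first}' is not a recognized entity. "
                         f"Did you mean {hint}?")
    if bad and invalid_filters == "drop":
        return {k: v for k, v in filters.items() if k not in bad}
    return filters

try:
    check_filters({"subject": "05", "ses": "02"})
except ValueError as exc:
    print(exc)  # 'ses' is not a recognized entity. Did you mean ['session']?
```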
0.0
c4d1f1f46c758e9f9b005eea2ccd6096459985cb
[ "bids/layout/tests/test_layout.py::test_get_with_invalid_filters" ]
[ "bids/layout/tests/test_layout.py::test_layout_init", "bids/layout/tests/test_layout.py::test_index_metadata[True-None-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-None-None]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query2-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query3-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query4-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query5-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query6-3.0]", "bids/layout/tests/test_layout.py::test_layout_repr", "bids/layout/tests/test_layout.py::test_load_description", "bids/layout/tests/test_layout.py::test_get_file", "bids/layout/tests/test_layout.py::test_get_metadata", "bids/layout/tests/test_layout.py::test_get_metadata2", "bids/layout/tests/test_layout.py::test_get_metadata3", "bids/layout/tests/test_layout.py::test_get_metadata4", "bids/layout/tests/test_layout.py::test_get_metadata_meg", "bids/layout/tests/test_layout.py::test_get_metadata5", "bids/layout/tests/test_layout.py::test_get_metadata_via_bidsfile", "bids/layout/tests/test_layout.py::test_get_metadata_error", "bids/layout/tests/test_layout.py::test_get_with_bad_target", "bids/layout/tests/test_layout.py::test_get_bvals_bvecs", "bids/layout/tests/test_layout.py::test_get_subjects", "bids/layout/tests/test_layout.py::test_get_fieldmap", "bids/layout/tests/test_layout.py::test_get_fieldmap2", "bids/layout/tests/test_layout.py::test_bids_json", "bids/layout/tests/test_layout.py::test_get_val_none[None]", "bids/layout/tests/test_layout.py::test_get_val_none[Query.NONE]", "bids/layout/tests/test_layout.py::test_get_val_enum_any", "bids/layout/tests/test_layout.py::test_get_return_sorted", "bids/layout/tests/test_layout.py::test_ignore_files", "bids/layout/tests/test_layout.py::test_force_index", "bids/layout/tests/test_layout.py::test_nested_include_exclude", "bids/layout/tests/test_layout.py::test_nested_include_exclude_with_regex", "bids/layout/tests/test_layout.py::test_layout_with_derivs", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs[None]", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope[None]", "bids/layout/tests/test_layout.py::test_get_dataset_description[None]", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs[bidsdb0]", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope[bidsdb0]", "bids/layout/tests/test_layout.py::test_get_dataset_description[bidsdb0]", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs[bidsdb1]", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope[bidsdb1]", "bids/layout/tests/test_layout.py::test_get_dataset_description[bidsdb1]", "bids/layout/tests/test_layout.py::test_query_derivatives", "bids/layout/tests/test_layout.py::test_restricted_words_in_path", "bids/layout/tests/test_layout.py::test_derivative_getters", "bids/layout/tests/test_layout.py::test_get_tr", "bids/layout/tests/test_layout.py::test_parse_file_entities", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout[None]", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout[bidsdb-synth0]", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout[bidsdb-synth1]", "bids/layout/tests/test_layout.py::test_deriv_indexing", "bids/layout/tests/test_layout.py::test_add_config_paths", "bids/layout/tests/test_layout.py::test_layout_in_scope", 
"bids/layout/tests/test_layout.py::test_indexed_file_associations", "bids/layout/tests/test_layout.py::test_layout_save", "bids/layout/tests/test_layout.py::test_indexing_tag_conflict", "bids/layout/tests/test_layout.py::test_get_with_wrong_dtypes", "bids/layout/tests/test_layout.py::test_get_with_regex_search", "bids/layout/tests/test_layout.py::test_get_with_regex_search_bad_dtype", "bids/layout/tests/test_layout.py::test_load_layout" ]
{ "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-05-12 21:59:57+00:00
mit
1,366
bids-standard__pybids-611
diff --git a/bids/layout/models.py b/bids/layout/models.py index 766edd14..cd39426f 100644 --- a/bids/layout/models.py +++ b/bids/layout/models.py @@ -309,7 +309,7 @@ class BIDSDataFile(BIDSFile): } def get_df(self, include_timing=True, adjust_onset=False, - enforce_dtypes=True): + enforce_dtypes=True, **pd_args): """Return the contents of a tsv file as a pandas DataFrame. Parameters @@ -326,6 +326,8 @@ class BIDSDataFile(BIDSFile): If True, enforces the data types defined in the BIDS spec on core columns (e.g., subject_id and session_id must be represented as strings). + pd_args : dict + Optional keyword arguments to pass onto pd.read_csv(). Returns ------- @@ -347,8 +349,10 @@ class BIDSDataFile(BIDSFile): # TODO: memoize this for efficiency. (Note: caching is insufficient, # because the dtype enforcement will break if we ignore the value of # enforce_dtypes). + suffix = self.entities['suffix'] + header = None if suffix in {'physio', 'stim'} else 'infer' self.data = pd.read_csv(self.path, sep='\t', na_values='n/a', - dtype=dtype) + dtype=dtype, header=header, **pd_args) data = self.data.copy()
bids-standard/pybids
5fab1dee770babffa7aef668b24cb59a90ba37ef
diff --git a/bids/layout/tests/test_models.py b/bids/layout/tests/test_models.py index b6a15e73..770999fd 100644 --- a/bids/layout/tests/test_models.py +++ b/bids/layout/tests/test_models.py @@ -176,7 +176,7 @@ def test_bidsfile_get_df_from_tsv_gz(layout_synthetic): df1 = bf.get_df() df2 = bf.get_df(include_timing=True) assert df1.equals(df2) - assert df1.shape == (1599, 3) + assert df1.shape == (1600, 3) assert set(df1.columns) == {'onset', 'respiratory', 'cardiac'} assert df1.iloc[0, 0] == 0. assert df1.iloc[1, 0] - df1.iloc[0, 0] == 0.1 @@ -190,6 +190,7 @@ def test_bidsfile_get_df_from_tsv_gz(layout_synthetic): def test_bidsdatafile_enforces_dtype(layout_synthetic): bf = layout_synthetic.get(suffix='participants', extension='tsv')[0] df = bf.get_df(enforce_dtypes=False) + assert df.shape[0] == 5 assert df.loc[:, 'subject_id'].dtype == int assert df.loc[:, 'subject_id'][0] == 1 df = bf.get_df(enforce_dtypes=True)
First line omitted when using get_df() method of BIDSDataFile object Hi, The method `get_df()` of the BIDSDataFile object omits the first line of the data file. This issue arises when calling the Pandas function `read_csv()`, as header is set to 0 if no names are passed: ``` self.data = pd.read_csv(self.path, sep='\t', na_values='n/a', dtype=dtype) ``` This behaviour can be problematic when working with physiological recording data, since those files don't have a header line, so the first observation is always lost. This could be solved by modifying the piece of code above as follows: ``` if self.entities['suffix'] in ('physio', 'stim'): self.data = pd.read_csv(self.path, sep='\t', na_values='n/a', header=None, dtype=dtype) else: self.data = pd.read_csv(self.path, sep='\t', na_values='n/a', dtype=dtype) ``` Does this modification make sense?
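For context, a minimal sketch of the core idea of the merged fix shown in the patch above: header handling must depend on the BIDS suffix, because `physio` and `stim` TSV files carry no header row. The `read_bids_tsv` helper and its arguments are illustrative stand-ins for the `BIDSDataFile` internals, not pybids API.

```python
import pandas as pd

def read_bids_tsv(path, suffix):
    # physio/stim recordings have no column-name row, so pandas must not
    # consume the first data row as a header; all other TSVs keep 'infer'.
    header = None if suffix in {'physio', 'stim'} else 'infer'
    return pd.read_csv(path, sep='\t', na_values='n/a', header=header)
```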
0.0
5fab1dee770babffa7aef668b24cb59a90ba37ef
[ "bids/layout/tests/test_models.py::test_bidsfile_get_df_from_tsv_gz[None]", "bids/layout/tests/test_models.py::test_bidsfile_get_df_from_tsv_gz[bidsdb-synth0]", "bids/layout/tests/test_models.py::test_bidsfile_get_df_from_tsv_gz[bidsdb-synth1]" ]
[ "bids/layout/tests/test_models.py::test_entity_initialization", "bids/layout/tests/test_models.py::test_entity_init_all_args", "bids/layout/tests/test_models.py::test_entity_init_with_bad_dtype", "bids/layout/tests/test_models.py::test_entity_matches", "bids/layout/tests/test_models.py::test_entity_deepcopy", "bids/layout/tests/test_models.py::test_file_associations", "bids/layout/tests/test_models.py::test_tag_init", "bids/layout/tests/test_models.py::test_tag_dtype", "bids/layout/tests/test_models.py::test_entity_add_file", "bids/layout/tests/test_models.py::test_config_init_with_args", "bids/layout/tests/test_models.py::test_bidsdatafile_enforces_dtype[None]", "bids/layout/tests/test_models.py::test_bidsjsonfile[None]", "bids/layout/tests/test_models.py::test_bidsfile_get_metadata[None]", "bids/layout/tests/test_models.py::test_bidsfile_get_entities[None]", "bids/layout/tests/test_models.py::test_bidsdatafile_enforces_dtype[bidsdb-synth0]", "bids/layout/tests/test_models.py::test_bidsjsonfile[bidsdb-synth0]", "bids/layout/tests/test_models.py::test_bidsfile_get_metadata[bidsdb-synth0]", "bids/layout/tests/test_models.py::test_bidsfile_get_entities[bidsdb-synth0]", "bids/layout/tests/test_models.py::test_bidsdatafile_enforces_dtype[bidsdb-synth1]", "bids/layout/tests/test_models.py::test_bidsjsonfile[bidsdb-synth1]", "bids/layout/tests/test_models.py::test_bidsfile_get_metadata[bidsdb-synth1]", "bids/layout/tests/test_models.py::test_bidsfile_get_entities[bidsdb-synth1]", "bids/layout/tests/test_models.py::test_bidsimagefile_get_image", "bids/layout/tests/test_models.py::test_bidsfile_fspath" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-05-12 22:22:36+00:00
mit
1,367
bids-standard__pybids-613
diff --git a/bids/layout/layout.py b/bids/layout/layout.py index b0d331d0..68b15648 100644 --- a/bids/layout/layout.py +++ b/bids/layout/layout.py @@ -19,7 +19,7 @@ from bids_validator import BIDSValidator from ..utils import listify, natural_sort, make_bidsfile from ..external import inflect -from .writing import build_path, write_contents_to_file +from .writing import build_path, write_to_file from .models import (Base, Config, BIDSFile, Entity, Tag) from .index import BIDSLayoutIndexer from .utils import BIDSMetadata @@ -1584,12 +1584,11 @@ class BIDSLayout(object): f.copy(path_patterns, symbolic_link=symbolic_links, root=self.root, conflicts=conflicts) - def write_contents_to_file(self, entities, path_patterns=None, - contents=None, link_to=None, - content_mode='text', conflicts='fail', - strict=False, validate=True): - """Write arbitrary data to a file defined by the passed entities and - path patterns. + def write_to_file(self, entities, path_patterns=None, + contents=None, link_to=None, copy_from=None, + content_mode='text', conflicts='fail', + strict=False, validate=True): + """Write data to a file defined by the passed entities and patterns. Parameters ---------- @@ -1632,9 +1631,9 @@ class BIDSLayout(object): "the passed entities given available path " "patterns.") - write_contents_to_file(path, contents=contents, link_to=link_to, - content_mode=content_mode, conflicts=conflicts, - root=self.root) + write_to_file(path, contents=contents, link_to=link_to, + copy_from=copy_from, content_mode=content_mode, + conflicts=conflicts, root=self.root) class Query(enum.Enum): diff --git a/bids/layout/models.py b/bids/layout/models.py index cd39426f..e483d45c 100644 --- a/bids/layout/models.py +++ b/bids/layout/models.py @@ -12,7 +12,7 @@ import json from copy import deepcopy from itertools import chain -from .writing import build_path, write_contents_to_file +from .writing import build_path, write_to_file from ..config import get_option from .utils import BIDSMetadata @@ -284,17 +284,13 @@ class BIDSFile(Base): raise ValueError("Target filename to copy/symlink (%s) doesn't " "exist." 
% path) + kwargs = dict(path=new_filename, root=root, conflicts=conflicts) if symbolic_link: - contents = None - link_to = path + kwargs['link_to'] = path else: - with open(path, 'r') as f: - contents = f.read() - link_to = None + kwargs['copy_from'] = path - write_contents_to_file(new_filename, contents=contents, - link_to=link_to, content_mode='text', root=root, - conflicts=conflicts) + write_to_file(**kwargs) class BIDSDataFile(BIDSFile): diff --git a/bids/layout/writing.py b/bids/layout/writing.py index 14534a6a..46935c49 100644 --- a/bids/layout/writing.py +++ b/bids/layout/writing.py @@ -6,12 +6,13 @@ import warnings import os import re import sys +import shutil from string import Formatter from itertools import product from ..utils import splitext, listify from os.path import join, dirname, exists, islink, isabs, isdir -__all__ = ['build_path', 'write_contents_to_file'] +__all__ = ['build_path', 'write_to_file'] _PATTERN_FIND = re.compile(r'({([\w\d]*?)(?:<([^>]+)>)?(?:\|((?:\.?[\w])+))?\})') @@ -199,11 +200,10 @@ def build_path(entities, path_patterns, strict=False): return None -def write_contents_to_file(path, contents=None, link_to=None, - content_mode='text', root=None, conflicts='fail'): +def write_to_file(path, contents=None, link_to=None, copy_from=None, + content_mode='text', root=None, conflicts='fail'): """ - Uses provided filename patterns to write contents to a new path, given - a corresponding entity map. + Writes provided contents to a new path, or copies from an old path. Parameters ---------- @@ -214,8 +214,11 @@ def write_contents_to_file(path, contents=None, link_to=None, to the new path. link_to : str Optional path with which to create a symbolic link to. - Used as an alternative to and takes priority over the contents + Used as an alternative to, and takes priority over, the contents argument. + copy_from : str + Optional filename to copy to new location. Used an alternative to, and + takes priority over, the contents argument. content_mode : {'text', 'binary'} Either 'text' or 'binary' to indicate the writing mode for the new file. Only relevant if contents is provided. @@ -267,14 +270,19 @@ def write_contents_to_file(path, contents=None, link_to=None, if not exists(dirname(path)): os.makedirs(dirname(path)) - if link_to: + if link_to is not None: os.symlink(link_to, path) + elif copy_from is not None: + if not exists(copy_from): + raise ValueError("Source file '{}' does not exist.".format(copy_from)) + shutil.copy(copy_from, path) + elif contents: mode = 'wb' if content_mode == 'binary' else 'w' with open(path, mode) as f: f.write(contents) else: - raise ValueError('One of contents or link_to must be provided.') + raise ValueError('One of contents, copy_from or link_to must be provided.') def _expand_options(value):
bids-standard/pybids
e7def429f161cc2cab7b9615905fefb039733a2f
diff --git a/bids/layout/tests/test_writing.py b/bids/layout/tests/test_writing.py index 20e4d3fc..476b59df 100644 --- a/bids/layout/tests/test_writing.py +++ b/bids/layout/tests/test_writing.py @@ -304,11 +304,11 @@ class TestWritableLayout: layout.copy_files(path_patterns=pat, conflicts='overwrite') assert exists(example_file) - def test_write_contents_to_file(self, tmp_bids, layout): + def test_write_to_file(self, tmp_bids, layout): contents = 'test' entities = {'subject': 'Bob', 'session': '01'} pat = join('sub-{subject}/ses-{session}/desc.txt') - layout.write_contents_to_file(entities, path_patterns=pat, + layout.write_to_file(entities, path_patterns=pat, contents=contents, validate=False) target = join(str(tmp_bids), 'bids', 'sub-Bob/ses-01/desc.txt') assert exists(target) @@ -317,12 +317,12 @@ class TestWritableLayout: assert written == contents assert target not in layout.files - def test_write_contents_to_file_defaults(self, tmp_bids, layout): + def test_write_to_file_defaults(self, tmp_bids, layout): contents = 'test' entities = {'subject': 'Bob', 'session': '01', 'run': '1', 'suffix': 'bold', 'task': 'test', 'acquisition': 'test', 'bval': 0} - layout.write_contents_to_file(entities, contents=contents) + layout.write_to_file(entities, contents=contents) target = join(str(tmp_bids), 'bids', 'sub-Bob', 'ses-01', 'func', 'sub-Bob_ses-01_task-test_acq-test_run-1_bold.nii.gz') assert exists(target)
BIDSFile.copy() does not work on binary files I am running the following code as a test before I put it in a script. When I run it, I get the following error: ``` x = BIDSLayout("/path/to/bids/directory", derivatives=True) x.get()[2].copy(x.get()[2].filename, False, "/path/to/derivatives/directory", "overwrite") Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/aaron/.local/lib/python3.6/site-packages/bids/layout/models.py", line 268, in copy contents = f.read() File "/usr/lib/python3.6/codecs.py", line 321, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte ``` Is there something wrong with my image files? I can view them just fine in FSL. How would I check?
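The traceback above comes from opening a gzipped NIfTI file in text mode: byte 0x8b is the second byte of the gzip magic number, which is not valid UTF-8. A minimal sketch of the direction the patch takes, copying bytes instead of decoding text; the helper name and paths are illustrative:

```python
import os
import shutil

def copy_bids_file(src, dst, symbolic_link=False):
    if symbolic_link:
        os.symlink(src, dst)   # link to the original instead of duplicating
    else:
        shutil.copy(src, dst)  # byte-for-byte copy, never decodes contents
```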
0.0
e7def429f161cc2cab7b9615905fefb039733a2f
[ "bids/layout/tests/test_writing.py::TestWritableLayout::test_write_to_file", "bids/layout/tests/test_writing.py::TestWritableLayout::test_write_to_file_defaults" ]
[ "bids/layout/tests/test_writing.py::TestWritableFile::test_parse_pattern_re", "bids/layout/tests/test_writing.py::TestWritableFile::test_strict_build_path", "bids/layout/tests/test_writing.py::TestWritableLayout::test_write_files", "bids/layout/tests/test_writing.py::TestWritableLayout::test_build_file_from_layout" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-05-13 00:15:49+00:00
mit
1,368
bids-standard__pybids-615
diff --git a/bids/exceptions.py b/bids/exceptions.py new file mode 100644 index 00000000..7c599ea6 --- /dev/null +++ b/bids/exceptions.py @@ -0,0 +1,37 @@ +""" Exceptions. + +Exceptions relating to problems with BIDS itself, should carry BIDS in their +name. All exceptions should subclass from PyBIDSError +""" + + +class PyBIDSError(Exception): + """ Base class. Typically for mix-in.""" + + +class ConfigError(ValueError, PyBIDSError): + """ Problems with config file. """ + + +class NoMatchError(ValueError, PyBIDSError): + """ No match found where it is required. """ + + +class BIDSEntityError(AttributeError, PyBIDSError): + """ An unknown entity. """ + + +class TargetError(ValueError, PyBIDSError): + """ An unknown target. """ + + +class BIDSValidationError(ValueError, PyBIDSError): + """ An invalid BIDS dataset. """ + + +class BIDSDerivativesValidationError(BIDSValidationError): + """ An invalid BIDS derivative dataset. """ + + +class BIDSConflictingValuesError(BIDSValidationError): + """ A value conflict (e.g. in filename and side-car .json) """ diff --git a/bids/layout/index.py b/bids/layout/index.py index ee41acda..e181b463 100644 --- a/bids/layout/index.py +++ b/bids/layout/index.py @@ -6,6 +6,7 @@ from collections import defaultdict from bids_validator import BIDSValidator from .models import Config, Entity, Tag, FileAssociation from ..utils import listify, make_bidsfile +from ..exceptions import BIDSConflictingValuesError def _extract_entities(bidsfile, entities): @@ -367,8 +368,9 @@ class BIDSLayoutIndexer(object): "filename {} (value='{}') versus its JSON sidecar " "(value='{}'). Please reconcile this discrepancy." ) - raise ValueError(msg.format(md_key, bf.path, file_val, - md_val)) + raise BIDSConflictingValuesError( + msg.format(md_key, bf.path, file_val, + md_val)) continue if md_key not in all_entities: all_entities[md_key] = Entity(md_key, is_metadata=True) diff --git a/bids/layout/layout.py b/bids/layout/layout.py index f0ef306c..de9777ea 100644 --- a/bids/layout/layout.py +++ b/bids/layout/layout.py @@ -24,6 +24,14 @@ from .models import (Base, Config, BIDSFile, Entity, Tag) from .index import BIDSLayoutIndexer from .utils import BIDSMetadata from .. import config as cf +from ..exceptions import ( + BIDSDerivativesValidationError, + BIDSEntityError, + BIDSValidationError, + ConfigError, + NoMatchError, + TargetError, +) try: from os.path import commonpath @@ -122,10 +130,10 @@ def add_config_paths(**kwargs): """ for k, path in kwargs.items(): if not os.path.exists(path): - raise ValueError( + raise ConfigError( 'Configuration file "{}" does not exist'.format(k)) if k in cf.get_option('config_paths'): - raise ValueError('Configuration {!r} already exists'.format(k)) + raise ConfigError('Configuration {!r} already exists'.format(k)) kwargs.update(**cf.get_option('config_paths')) cf.set_option('config_paths', kwargs) @@ -307,7 +315,7 @@ class BIDSLayout(object): if sing in entities: ent_name = sing else: - raise AttributeError( + raise BIDSEntityError( "'get_{}' can't be called because '{}' isn't a " "recognized entity name.".format(ent_name, ent_name)) return partial(self.get, return_type='id', target=ent_name) @@ -480,7 +488,7 @@ class BIDSLayout(object): target = os.path.join(self.root, 'dataset_description.json') if not os.path.exists(target): if self.validate: - raise ValueError( + raise BIDSValidationError( "'dataset_description.json' is missing from project root." " Every valid BIDS dataset must have this file." 
"\nExample contents of 'dataset_description.json': \n%s" % @@ -494,9 +502,9 @@ class BIDSLayout(object): if self.validate: for k in MANDATORY_BIDS_FIELDS: if k not in self.description: - raise ValueError("Mandatory %r field missing from " - "'dataset_description.json'." - "\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k]) + raise BIDSValidationError("Mandatory %r field missing from " + "'dataset_description.json'." + "\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k]) ) def _validate_force_index(self): @@ -805,13 +813,15 @@ class BIDSLayout(object): pipeline_name = description.get( 'PipelineDescription', {}).get('Name') if pipeline_name is None: - raise ValueError("Every valid BIDS-derivatives dataset must " + raise BIDSDerivativesValidationError( + "Every valid BIDS-derivatives dataset must " "have a PipelineDescription.Name field set " "inside 'dataset_description.json'. " "\nExample: %s" % MANDATORY_DERIVATIVES_FIELDS['PipelineDescription.Name']) if pipeline_name in self.derivatives: - raise ValueError("Pipeline name '%s' has already been added " + raise BIDSDerivativesValidationError( + "Pipeline name '%s' has already been added " "to this BIDSLayout. Every added pipeline " "must have a unique name!") # Default config and sources values @@ -968,7 +978,7 @@ class BIDSLayout(object): message = "Did you mean one of: {}?".format(suggestions) else: message = "Valid targets are: {}".format(potential) - raise ValueError(("Unknown target '{}'. " + message) + raise TargetError(("Unknown target '{}'. " + message) .format(target)) results = [] @@ -992,7 +1002,7 @@ class BIDSLayout(object): elif return_type in ['id', 'dir']: if target is None: - raise ValueError('If return_type is "id" or "dir", a valid ' + raise TargetError('If return_type is "id" or "dir", a valid ' 'target entity must also be specified.') results = [x for x in results if target in x.entities] @@ -1254,7 +1264,7 @@ class BIDSLayout(object): if not filters.get('suffix'): f = self.get_file(path) if 'suffix' not in f.entities: - raise ValueError( + raise BIDSValidationError( "File '%s' does not have a valid suffix, most " "likely because it is not a valid BIDS file." % path ) @@ -1424,7 +1434,7 @@ class BIDSLayout(object): images = self.get(extension=['nii', 'nii.gz'], scope=scope, **filters) if not images: - raise ValueError("No functional images that match criteria found.") + raise NoMatchError("No functional images that match criteria found.") all_trs = set() for img in images: @@ -1432,7 +1442,7 @@ class BIDSLayout(object): all_trs.add(round(float(md['RepetitionTime']), 5)) if len(all_trs) > 1: - raise ValueError("Unique TR cannot be found given filters {!r}" + raise NoMatchError("Unique TR cannot be found given filters {!r}" .format(filters)) return all_trs.pop() @@ -1515,7 +1525,8 @@ class BIDSLayout(object): to_check = os.path.join(os.path.sep, built) if validate and not BIDSValidator().is_bids(to_check): - raise ValueError("Built path {} is not a valid BIDS filename. " + raise BIDSValidationError( + "Built path {} is not a valid BIDS filename. " "Please make sure all provided entity values are " "spec-compliant.".format(built))
bids-standard/pybids
5ea4087c2999a6412a603b8ca7a3a0211efd25ca
diff --git a/bids/layout/tests/test_layout.py b/bids/layout/tests/test_layout.py index e2a39375..20fb5af5 100644 --- a/bids/layout/tests/test_layout.py +++ b/bids/layout/tests/test_layout.py @@ -18,6 +18,13 @@ from bids.layout.utils import BIDSMetadata from bids.tests import get_test_data_path from bids.utils import natural_sort +from bids.exceptions import ( + BIDSValidationError, + ConfigError, + NoMatchError, + TargetError, +) + def test_layout_init(layout_7t_trt): assert isinstance(layout_7t_trt.files, dict) @@ -194,11 +201,11 @@ def test_get_metadata_error(layout_7t_trt): def test_get_with_bad_target(layout_7t_trt): - with pytest.raises(ValueError) as exc: + with pytest.raises(TargetError) as exc: layout_7t_trt.get(target='unicorn') msg = exc.value.message assert 'subject' in msg and 'reconstruction' in msg and 'proc' in msg - with pytest.raises(ValueError) as exc: + with pytest.raises(TargetError) as exc: layout_7t_trt.get(target='sub') msg = exc.value.message assert 'subject' in msg and 'reconstruction' not in msg @@ -440,11 +447,11 @@ def test_derivative_getters(): def test_get_tr(layout_7t_trt): # Bad subject, should fail - with pytest.raises(ValueError) as exc: + with pytest.raises(NoMatchError) as exc: layout_7t_trt.get_tr(subject="zzz") assert exc.value.message.startswith("No functional images") # There are multiple tasks with different TRs, so this should fail - with pytest.raises(ValueError) as exc: + with pytest.raises(NoMatchError) as exc: layout_7t_trt.get_tr(subject=['01', '02']) assert exc.value.message.startswith("Unique TR") # This should work @@ -543,10 +550,10 @@ def test_deriv_indexing(): def test_add_config_paths(): bids_dir = dirname(bids.__file__) bids_json = os.path.join(bids_dir, 'layout', 'config', 'bids.json') - with pytest.raises(ValueError) as exc: + with pytest.raises(ConfigError) as exc: add_config_paths(test_config1='nonexistentpath.json') assert str(exc.value).startswith('Configuration file') - with pytest.raises(ValueError) as exc: + with pytest.raises(ConfigError) as exc: add_config_paths(bids=bids_json) assert str(exc.value).startswith("Configuration 'bids' already") add_config_paths(dummy=bids_json) @@ -625,7 +632,7 @@ def test_layout_save(tmp_path, layout_7t_trt): def test_indexing_tag_conflict(): data_dir = join(get_test_data_path(), 'ds005_conflict') - with pytest.raises(ValueError) as exc: + with pytest.raises(BIDSValidationError) as exc: layout = BIDSLayout(data_dir) print(exc.value.message) assert exc.value.message.startswith("Conflicting values found")
Should pybids be more lenient, or at least get a dedicated exception like NonCompliantBIDSError? At the moment, pybids immediately throws a generic ValueError if a contained derivatives dataset is not fully compliant: ```shell /tmp > datalad install https://github.com/OpenNeuroDatasets/ds001868 && cd ds001868 [INFO ] Cloning https://github.com/OpenNeuroDatasets/ds001868 [1 other candidates] into '/tmp/ds001868' [INFO ] Remote origin not usable by git-annex; setting annex-ignore [INFO ] access to 1 dataset sibling s3-PRIVATE not auto-enabled, enable with: | datalad siblings -d "/tmp/ds001868" enable -s s3-PRIVATE install(ok): /tmp/ds001868 (dataset) /tmp/ds001868 > python -c 'from bids import BIDSLayout; b=BIDSLayout(".", derivatives=True)' Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/yoh/proj/bids/pybids/bids/layout/layout.py", line 227, in __init__ index_metadata=index_metadata) File "/home/yoh/proj/bids/pybids/bids/layout/layout.py", line 555, in add_derivatives raise ValueError("Every valid BIDS-derivatives dataset must " ValueError: Every valid BIDS-derivatives dataset must have a PipelineDescription.Name field set inside dataset_description.json. ``` But I wonder: - whether this should be a warning and not an error (optionally or not) - whether there should be a dedicated exception (e.g. `NonCompliantBIDSError(ValueError)` and `NonCompliantDerivativesBIDSError(NonCompliantBIDSError)`) thrown here instead of a generic ValueError, which makes such cases fragile to handle programmatically
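A hypothetical caller-side sketch of what the dedicated exceptions in the patch enable: a non-compliant derivatives dataset can be downgraded to a warning without also swallowing unrelated `ValueError`s. The fallback strategy shown here is an assumption for illustration, not part of the patch:

```python
import warnings

from bids import BIDSLayout
from bids.exceptions import BIDSDerivativesValidationError

try:
    layout = BIDSLayout('.', derivatives=True)
except BIDSDerivativesValidationError as err:
    # downgrade the compliance failure; other ValueErrors still propagate
    warnings.warn("Skipping non-compliant derivatives: {}".format(err))
    layout = BIDSLayout('.', derivatives=False)
```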
0.0
5ea4087c2999a6412a603b8ca7a3a0211efd25ca
[ "bids/layout/tests/test_layout.py::test_layout_init", "bids/layout/tests/test_layout.py::test_index_metadata[True-None-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-None-None]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query2-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query3-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query4-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query5-3.0]", "bids/layout/tests/test_layout.py::test_index_metadata[False-query6-3.0]", "bids/layout/tests/test_layout.py::test_layout_repr", "bids/layout/tests/test_layout.py::test_load_description", "bids/layout/tests/test_layout.py::test_get_file", "bids/layout/tests/test_layout.py::test_get_metadata", "bids/layout/tests/test_layout.py::test_get_metadata2", "bids/layout/tests/test_layout.py::test_get_metadata3", "bids/layout/tests/test_layout.py::test_get_metadata4", "bids/layout/tests/test_layout.py::test_get_metadata_meg", "bids/layout/tests/test_layout.py::test_get_metadata5", "bids/layout/tests/test_layout.py::test_get_metadata_via_bidsfile", "bids/layout/tests/test_layout.py::test_get_metadata_error", "bids/layout/tests/test_layout.py::test_get_with_bad_target", "bids/layout/tests/test_layout.py::test_get_bvals_bvecs", "bids/layout/tests/test_layout.py::test_get_subjects", "bids/layout/tests/test_layout.py::test_get_fieldmap", "bids/layout/tests/test_layout.py::test_get_fieldmap2", "bids/layout/tests/test_layout.py::test_bids_json", "bids/layout/tests/test_layout.py::test_get_val_none[None]", "bids/layout/tests/test_layout.py::test_get_val_none[Query.NONE]", "bids/layout/tests/test_layout.py::test_get_val_enum_any", "bids/layout/tests/test_layout.py::test_get_return_sorted", "bids/layout/tests/test_layout.py::test_ignore_files", "bids/layout/tests/test_layout.py::test_force_index", "bids/layout/tests/test_layout.py::test_nested_include_exclude", "bids/layout/tests/test_layout.py::test_nested_include_exclude_with_regex", "bids/layout/tests/test_layout.py::test_layout_with_derivs", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs[None]", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope[None]", "bids/layout/tests/test_layout.py::test_get_dataset_description[None]", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs[bidsdb0]", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope[bidsdb0]", "bids/layout/tests/test_layout.py::test_get_dataset_description[bidsdb0]", "bids/layout/tests/test_layout.py::test_layout_with_multi_derivs[bidsdb1]", "bids/layout/tests/test_layout.py::test_get_layouts_in_scope[bidsdb1]", "bids/layout/tests/test_layout.py::test_get_dataset_description[bidsdb1]", "bids/layout/tests/test_layout.py::test_query_derivatives", "bids/layout/tests/test_layout.py::test_restricted_words_in_path", "bids/layout/tests/test_layout.py::test_derivative_getters", "bids/layout/tests/test_layout.py::test_get_tr", "bids/layout/tests/test_layout.py::test_parse_file_entities", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout[None]", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout[bidsdb-synth0]", "bids/layout/tests/test_layout.py::test_parse_file_entities_from_layout[bidsdb-synth1]", "bids/layout/tests/test_layout.py::test_deriv_indexing", "bids/layout/tests/test_layout.py::test_add_config_paths", "bids/layout/tests/test_layout.py::test_layout_in_scope", 
"bids/layout/tests/test_layout.py::test_indexed_file_associations", "bids/layout/tests/test_layout.py::test_layout_save", "bids/layout/tests/test_layout.py::test_indexing_tag_conflict", "bids/layout/tests/test_layout.py::test_get_with_wrong_dtypes", "bids/layout/tests/test_layout.py::test_get_with_regex_search", "bids/layout/tests/test_layout.py::test_get_with_regex_search_bad_dtype", "bids/layout/tests/test_layout.py::test_get_with_invalid_filters", "bids/layout/tests/test_layout.py::test_load_layout" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-05-13 05:04:01+00:00
mit
1,369
bids-standard__pybids-836
diff --git a/bids/modeling/transformations/base.py b/bids/modeling/transformations/base.py index 3c87f69f..85d5d1d2 100644 --- a/bids/modeling/transformations/base.py +++ b/bids/modeling/transformations/base.py @@ -91,6 +91,11 @@ class Transformation(metaclass=ABCMeta): # be passed through as-is even if categorical. _allow_categorical = None + # Boolean indicating whether to treat each key word argument as a one-to-one + # mapping with each variable or to treat the key word argument as applying to + # every input variable. + _sync_kwargs = True + def __new__(cls, collection, variables, *args, **kwargs): t = super(Transformation, cls).__new__(cls) t._setup(collection, variables, *args, **kwargs) @@ -117,7 +122,11 @@ class Transformation(metaclass=ABCMeta): # 'variables' kwargs[arg_spec.args[2 + i]] = arg_val - self.kwargs = kwargs + # listify kwargs if synced + if self._sync_kwargs: + self.kwargs = {k: listify(v) for k, v in kwargs.items()} + else: + self.kwargs = kwargs # Expand any detected variable group names or wild cards self._expand_variable_groups() @@ -255,20 +264,22 @@ class Transformation(metaclass=ABCMeta): if not self._loopable: variables = [variables] + i_kwargs = kwargs for i, col in enumerate(variables): - + if self._sync_kwargs: + i_kwargs = {k: v[i] for k, v in kwargs.items()} # If we still have a list, pass all variables in one block if isinstance(col, (list, tuple)): - result = self._transform(data, **kwargs) + result = self._transform(data, **i_kwargs) if self._return_type not in ['none', None]: col = col[0].clone(data=result, name=self.output[0]) # Otherwise loop over variables individually else: if self._groupable and self.groupby is not None: result = col.apply(self._transform, groupby=self.groupby, - **kwargs) + **i_kwargs) else: - result = self._transform(data[i], **kwargs) + result = self._transform(data[i], **i_kwargs) if self._return_type in ['none', None]: continue diff --git a/bids/modeling/transformations/compute.py b/bids/modeling/transformations/compute.py index 71877c06..a2b1c0dd 100644 --- a/bids/modeling/transformations/compute.py +++ b/bids/modeling/transformations/compute.py @@ -192,6 +192,7 @@ class Sum(Transformation): _groupable = False _aligned_required = True _output_required = True + _sync_kwargs = False def _transform(self, data, weights=None): data = pd.concat(data, axis=1, sort=True) diff --git a/bids/modeling/transformations/munge.py b/bids/modeling/transformations/munge.py index ea0e1e28..5cc312c8 100644 --- a/bids/modeling/transformations/munge.py +++ b/bids/modeling/transformations/munge.py @@ -299,6 +299,7 @@ class Split(Transformation): _return_type = 'variable' _allow_categorical = ('by',) _densify = ('variables', 'by') + _sync_kwargs = False def _transform(self, var, by):
bids-standard/pybids
37dc6eadfdcfbbf61ad930710fb703a6b48cd49c
diff --git a/bids/modeling/tests/test_transformations.py b/bids/modeling/tests/test_transformations.py index 91b15506..4415fd6e 100644 --- a/bids/modeling/tests/test_transformations.py +++ b/bids/modeling/tests/test_transformations.py @@ -344,6 +344,20 @@ def test_assign(collection): assert np.array_equal(t2.duration, pg.duration) +def test_assign_multiple(collection): + # test kwarg distribution + transform.Assign(collection, ['RT', 'respcat'], target=['gain', 'loss'], + input_attr=['amplitude', 'amplitude'], target_attr=['duration', 'amplitude'], + output=['gain_rt', 'loss_cat']) + rt = collection['RT'] + gain_rt = collection['gain_rt'] + loss_cat = collection['loss_cat'] + rc = collection['respcat'] + + assert np.array_equal(gain_rt.duration, rt.values.values) + assert np.array_equal(loss_cat.values.values, rc.values.values) + + def test_copy(collection): transform.Copy(collection, 'RT', output='RT_copy') assert 'RT_copy' in collection.variables.keys()
handling kwargs in loopable Transformations? AFAICT the following is a valid Transformation: ``` { "Name": "Assign", "Input": ["response_time", "response_time", "response_time"], "Target": ["pumps", "control_pumps", "cash"], "Output": ["pumps_rt", "control_pumps_rt", "cash_rt"], "InputAttr": ["value", "value", "value"], "TargetAttr": ["duration", "duration", "duration"] } ``` According to [the transformer specification](https://docs.google.com/document/d/1uxN6vPWbC7ciAx2XWtT5Y-lBrdckZKpPdNUNpwRxHoU/edit#), as long as "Target", "Output", "InputAttr", and "TargetAttr" are the same length, the transformer should perform a one-to-one mapping of variables. Instead, I get the following traceback: ``` Traceback (most recent call last): File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/nipype/interfaces/base/core.py", line 398, in run runtime = self._run_interface(runtime) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/fitlins/interfaces/bids.py", line 250, in _run_interface self._results['all_specs'] = self._load_graph(runtime, graph) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/fitlins/interfaces/bids.py", line 258, in _load_graph specs = node.run(inputs, group_by=node.group_by, **filters) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/statsmodels.py", line 464, in run node_output = BIDSStatsModelsNodeOutput( File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/statsmodels.py", line 581, in __init__ dfs = self._collections_to_dfs(collections) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/statsmodels.py", line 657, in _collections_to_dfs coll = transformer.transform(coll.clone(), transformations['instructions']) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/base.py", line 465, in transform func(collection, cols, **kwargs) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/base.py", line 97, in __new__ return t.transform() File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/base.py", line 271, in transform result = self._transform(data[i], **kwargs) File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/munge.py", line 39, in _transform target = self.collection.variables[target].clone() TypeError: unhashable type: 'list' ```
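The core of the fix above is to listify every keyword argument and hand element `i` of each list to the `i`-th input variable when the transformation loops (the patch does this inside `Transformation._setup` and `transform`). A standalone sketch with illustrative names:

```python
def distribute_kwargs(variables, **kwargs):
    # broadcast scalars so every kwarg becomes a list of len(variables),
    # then pair element i of each list with variable i
    synced = {k: v if isinstance(v, list) else [v] * len(variables)
              for k, v in kwargs.items()}
    for i, var in enumerate(variables):
        yield var, {k: v[i] for k, v in synced.items()}

for var, kw in distribute_kwargs(['RT', 'respcat'],
                                 target=['gain', 'loss'],
                                 target_attr=['duration', 'amplitude']):
    print(var, kw)
# RT {'target': 'gain', 'target_attr': 'duration'}
# respcat {'target': 'loss', 'target_attr': 'amplitude'}
```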
0.0
37dc6eadfdcfbbf61ad930710fb703a6b48cd49c
[ "bids/modeling/tests/test_transformations.py::test_assign_multiple" ]
[ "bids/modeling/tests/test_transformations.py::test_convolve", "bids/modeling/tests/test_transformations.py::test_convolve_impulse", "bids/modeling/tests/test_transformations.py::test_rename", "bids/modeling/tests/test_transformations.py::test_product", "bids/modeling/tests/test_transformations.py::test_sum", "bids/modeling/tests/test_transformations.py::test_scale", "bids/modeling/tests/test_transformations.py::test_demean", "bids/modeling/tests/test_transformations.py::test_split", "bids/modeling/tests/test_transformations.py::test_resample_dense", "bids/modeling/tests/test_transformations.py::test_threshold", "bids/modeling/tests/test_transformations.py::test_assign", "bids/modeling/tests/test_transformations.py::test_copy", "bids/modeling/tests/test_transformations.py::test_expand_variable_names", "bids/modeling/tests/test_transformations.py::test_factor", "bids/modeling/tests/test_transformations.py::test_filter", "bids/modeling/tests/test_transformations.py::test_replace", "bids/modeling/tests/test_transformations.py::test_select", "bids/modeling/tests/test_transformations.py::test_delete", "bids/modeling/tests/test_transformations.py::test_and", "bids/modeling/tests/test_transformations.py::test_or", "bids/modeling/tests/test_transformations.py::test_not", "bids/modeling/tests/test_transformations.py::test_dropna", "bids/modeling/tests/test_transformations.py::test_group", "bids/modeling/tests/test_transformations.py::test_resample", "bids/modeling/tests/test_transformations.py::test_Lag" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-04-11 21:17:24+00:00
mit
1,370
bids-standard__pybids-997
diff --git a/.zenodo.json b/.zenodo.json index 8083d1d5..890e0bcb 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -43,7 +43,9 @@ "orcid": "0000-0002-1535-9767" }, { - "name": "Papadopoulos Orfanos, Dimitri" + "affiliation": "CEA", + "name": "Papadopoulos Orfanos, Dimitri", + "orcid": "0000-0002-1242-8990" }, { "affiliation": "University of Texas at Austin", diff --git a/bids/layout/config/bids.json b/bids/layout/config/bids.json index dada4b1c..bad844ad 100644 --- a/bids/layout/config/bids.json +++ b/bids/layout/config/bids.json @@ -95,7 +95,7 @@ }, { "name": "suffix", - "pattern": "[._]*([a-zA-Z0-9]*?)\\.[^/\\\\]+$" + "pattern": "(?:^|[_/\\\\])([a-zA-Z0-9]+)\\.[^/\\\\]+$" }, { "name": "scans", @@ -111,7 +111,7 @@ }, { "name": "extension", - "pattern": "[._]*[a-zA-Z0-9]*?(\\.[^/\\\\]+)$" + "pattern": "[^./\\\\](\\.[^/\\\\]+)$" } ], "default_path_patterns": [
bids-standard/pybids
0388abb4a4e07b8ea775154c940cb6cb485238f8
diff --git a/bids/layout/tests/test_utils.py b/bids/layout/tests/test_utils.py index 1023fb76..26f1511b 100644 --- a/bids/layout/tests/test_utils.py +++ b/bids/layout/tests/test_utils.py @@ -48,6 +48,19 @@ def test_parse_file_entities(mock_config): assert target == parse_file_entities(filename, entities=entities) [email protected]( + "filename, target", + [ + ('/path/to/sub-01.ext', {'subject': '01', 'extension': '.ext'}), + ('/path/to/stub.ext', {'suffix': 'stub', 'extension': '.ext'}), + ('/path/to/.dotfile', {}), + ('/path/to/stub', {}), + ] +) +def test_parse_degenerate_files(mock_config, filename, target): + assert parse_file_entities(filename, config='bids') == target + + def test_add_config_paths(): bids_dir = os.path.dirname(bids.__file__) bids_json = os.path.join(bids_dir, 'layout', 'config', 'bids.json')
Bug/Ambiguous parsing when extension but no suffix present Found a bit of buggy parsing when fuzzing inputs on another project. For the following dataset: ``` sub-0 sub-0.ext ``` The sole file gets parsed with the following entities: ```py {'extension': '.ext', 'subject': '0', 'suffix': '0'} ``` Clearly `0` should not be attached both to `subject` and `suffix`. My inclination is to add a negative lookbehind to the `suffix` regex preventing alphanums preceded by a `-` from being matched: `(?<!-)[._]*([a-zA-Z0-9]*?)\.[^\/\\]$`. This should generally prevent it from snatching the values from other entities. However, I'm not sure if bids *requires* files with extensions to also have suffixes? The regexes in the config file seem to suggest this is so (the `suffix` and `extension` regexes are identical, with only the capture group location moved). But even if so, pybids needs to do *something* with the above file, and ignoring it as invalid seems harsh. Also not sure how this situation will change if official bids spec files are used.
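The patch resolves this differently than the proposed lookbehind: the new `suffix` pattern requires the token to be preceded by `_`, a path separator, or the start of the string, and the new `extension` pattern requires a non-dot character before the extension. A quick check with plain `re` (pybids applies these patterns through its entity config, so this is only a demonstration):

```python
import re

SUFFIX = re.compile(r'(?:^|[_/\\])([a-zA-Z0-9]+)\.[^/\\]+$')
EXTENSION = re.compile(r'[^./\\](\.[^/\\]+)$')

for name in ['/path/to/sub-01.ext', '/path/to/stub.ext', '/path/to/.dotfile']:
    suffix = SUFFIX.search(name)
    ext = EXTENSION.search(name)
    print(name, suffix and suffix.group(1), ext and ext.group(1))
# sub-01.ext -> suffix None, extension '.ext' (no more '01' double-match)
# stub.ext   -> suffix 'stub', extension '.ext'
# .dotfile   -> suffix None, extension None
```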
0.0
0388abb4a4e07b8ea775154c940cb6cb485238f8
[ "bids/layout/tests/test_utils.py::test_parse_degenerate_files[/path/to/sub-01.ext-target0]", "bids/layout/tests/test_utils.py::test_parse_degenerate_files[/path/to/.dotfile-target2]" ]
[ "bids/layout/tests/test_utils.py::test_bidsmetadata_class", "bids/layout/tests/test_utils.py::test_parse_file_entities", "bids/layout/tests/test_utils.py::test_parse_degenerate_files[/path/to/stub.ext-target1]", "bids/layout/tests/test_utils.py::test_parse_degenerate_files[/path/to/stub-target3]", "bids/layout/tests/test_utils.py::test_add_config_paths" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-04-30 15:11:45+00:00
mit
1,371
bids-standard__pybv-17
diff --git a/pybv/io.py b/pybv/io.py index f89288a..1989c0e 100644 --- a/pybv/io.py +++ b/pybv/io.py @@ -16,15 +16,15 @@ from . import __version__ # ascii as future formats supported_formats = { - 'binary_float32' : 'IEEE_FLOAT_32', # noqa: E203 - 'binary_int16' : 'INT_16', # noqa: E203 + 'binary_float32' : ('IEEE_FLOAT_32', np.float32), # noqa: E203 + 'binary_int16' : ('INT_16', np.int16), # noqa: E203 } -supported_orients = set(['multiplexed']) +supported_orients = {'multiplexed'} def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, - events=None, resolution=1e-7): + events=None, resolution=1e-7, scale_data=True): """Write raw data to BrainVision format. Parameters @@ -46,12 +46,19 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, The first column is the index of each event (corresponding to the "time" dimension of the data array). The second column is a number associated with the "type" of event. - resolution : float + resolution : float | ndarray The resolution **in volts** in which you'd like the data to be stored. By default, this will be 1e-7, or .1 microvolts. Since data is stored in microvolts, the data will be multiplied by the inverse of this factor, and all decimals will be cut off after this. So, this number controls the amount of round-trip resolution you want. + This can be either a single float for all channels or an array with + nchan elements. + scale_data : bool + Boolean indicating if the data is in volts and should be scaled to + `resolution` (True), or if the data is already in the previously + specified target resolution and should be left as-is (False). + This is mostly useful if you have int16 data with a custom resolution. """ # Create output file names/paths if not op.isdir(folder_out): @@ -70,12 +77,14 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, if events.shape[1] != 2: raise ValueError(ev_err) - if len(data) != len(ch_names): + nchan = len(ch_names) + + if len(data) != nchan: raise ValueError("Number of channels in data ({}) does " "not match number of channel names ({})" .format(len(data), len(ch_names))) - if len(set(ch_names)) != len(ch_names): + if len(set(ch_names)) != nchan: raise ValueError("Channel names must be unique," " found a repeated name.") @@ -83,14 +92,40 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, raise ValueError("sfreq must be one of (float | int)") sfreq = float(sfreq) - if not isinstance(resolution, (int, float)): - raise ValueError("Resolution should be a (small) float") + resolution = np.atleast_1d(resolution) + if not np.issubdtype(resolution.dtype, np.number): + raise ValueError("Resolution should be numeric, is {}".format(resolution.dtype)) + + if resolution.shape != (1,) and resolution.shape != (nchan,): + raise ValueError("Resolution should be one or n_chan floats") # Write output files _write_vmrk_file(vmrk_fname, eeg_fname, events) _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, resolution=resolution) - _write_bveeg_file(eeg_fname, data, resolution=resolution) + _write_bveeg_file(eeg_fname, data, resolution=resolution, + scale_data=scale_data) + + +def _chk_fmt(fmt): + """Check that the format string is valid, return BVEF / numpy datatypes.""" + if fmt not in supported_formats: + errmsg = ('Data format {} not supported.'.format(format) + + 'Currently supported formats are: ' + + ', '.join(supported_formats)) + raise ValueError(errmsg) + return supported_formats[fmt] + + +def _chk_multiplexed(orientation): + 
"""Validate an orientation, return if it is multiplexed or not.""" + orientation = orientation.lower() + if orientation not in supported_orients: + errmsg = ('Orientation {} not supported.'.format(orientation) + + 'Currently supported orientations are: ' + + ', '.join(supported_orients)) + raise ValueError(errmsg) + return orientation == 'multiplexed' def _write_vmrk_file(vmrk_fname, eeg_fname, events): @@ -124,23 +159,25 @@ def _write_vmrk_file(vmrk_fname, eeg_fname, events): .format(ii, tformat.format(i_val), i_ix), file=fout) +def _optimize_channel_unit(resolution): + """Calculate an optimal channel scaling factor and unit.""" + exp = np.log10(resolution) + if exp <= -8: + return resolution / 1e-9, 'nV' + elif exp <= -2: + return resolution / 1e-6, 'µV' + else: + return resolution, 'V' + + def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, orientation='multiplexed', format='binary_float32', resolution=1e-7): """Write BrainvVision header file.""" fmt = format.lower() + bvfmt, _ = _chk_fmt(format) - if orientation.lower() not in supported_orients: - errmsg = ('Orientation {} not supported.'.format(orientation) + - 'Currently supported orientations are: ' + - ', '.join(supported_orients)) - raise ValueError(errmsg) - - if fmt not in supported_formats: - errmsg = ('Data format {} not supported.'.format(format) + - 'Currently supported formats are: ' + - ', '.join(supported_formats)) - raise ValueError(errmsg) + multiplexed = _chk_multiplexed(orientation) with codecs.open(vhdr_fname, 'w', encoding='utf-8') as fout: print(r'Brain Vision Data Exchange Header File Version 1.0', file=fout) # noqa: E501 @@ -151,10 +188,10 @@ def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, print(r'DataFile={}'.format(op.basename(eeg_fname)), file=fout) # noqa: E501 print(r'MarkerFile={}'.format(op.basename(vmrk_fname)), file=fout) # noqa: E501 - if 'binary' in format.lower(): + if fmt.startswith('binary'): print(r'DataFormat=BINARY', file=fout) - if 'multiplexed' == orientation.lower(): + if multiplexed: print(r'; DataOrientation: MULTIPLEXED=ch1,pt1, ch2,pt1 ...', file=fout) # noqa: E501 print(r'DataOrientation=MULTIPLEXED', file=fout) @@ -163,50 +200,45 @@ def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, print(r'SamplingInterval={}'.format(int(1e6 / sfreq)), file=fout) # noqa: E501 print(r'', file=fout) - if 'binary' in format.lower(): + if fmt.startswith('binary'): print(r'[Binary Infos]', file=fout) - print(r'BinaryFormat={}'.format(supported_formats[format]), file=fout) # noqa: E501 + print(r'BinaryFormat={}'.format(bvfmt), file=fout) # noqa: E501 print(r'', file=fout) print(r'[Channel Infos]', file=fout) print(r'; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,', file=fout) # noqa: E501 - print(r'; <Resolution in microvolts>,<Future extensions..', file=fout) # noqa: E501 + print(r'; <Resolution in "unit">,<unit>,Future extensions…', file=fout) print(r'; Fields are delimited by commas, some fields might be omitted (empty).', file=fout) # noqa: E501 print(r'; Commas in channel names are coded as "\1".', file=fout) - resolution_in_microv = resolution / 1e-6 - for ii, ch in enumerate(ch_names, start=1): - print(r'Ch{}={},,{:0.1f}' - .format(ii, ch, resolution_in_microv), file=fout) + nchan = len(ch_names) + # broadcast to nchan elements if necessary + resolutions = resolution * np.ones((nchan,)) + + for i in range(nchan): + resolution, unit = _optimize_channel_unit(resolutions[i]) + 
print(r'Ch{}={},,{:0.1f},{}' + .format(i + 1, ch_names[i], resolution, unit), file=fout) print(r'', file=fout) print(r'[Comment]', file=fout) print(r'', file=fout) def _write_bveeg_file(eeg_fname, data, orientation='multiplexed', - format='binary_float32', resolution=1e-7): + format='binary_float32', resolution=1e-7, + scale_data=True): """Write BrainVision data file.""" fmt = format.lower() - if orientation.lower() not in supported_orients: - errmsg = ('Orientation {} not supported.'.format(orientation) + - 'Currently supported orientations are: ' + - ', '.join(supported_orients)) - raise ValueError(errmsg) - - if fmt not in supported_formats: - errmsg = ('Data format {} not supported.'.format(format) + - 'Currently supported formats are: ' + - ', '.join(supported_formats)) - raise ValueError(errmsg) + multiplexed = _chk_multiplexed(orientation) + _, dtype = _chk_fmt(fmt) - if fmt[:len('binary')] == 'binary': - dtype = np.dtype(format.lower()[len('binary') + 1:]) - else: + if not fmt.startswith('binary'): errmsg = 'Cannot map data format {} to NumPy dtype'.format(format) raise ValueError(errmsg) # Invert the resolution so that we know how much to scale our data scaling_factor = 1 / resolution - data = data * scaling_factor + if scale_data: + data = data * np.atleast_2d(scaling_factor).T data.astype(dtype=dtype).ravel(order='F').tofile(eeg_fname)
bids-standard/pybv
f1c84c062b8f1e3596d63b331c5d205d5b9c109e
diff --git a/pybv/tests/test_bv_writer.py b/pybv/tests/test_bv_writer.py index 0e81f33..4362674 100644 --- a/pybv/tests/test_bv_writer.py +++ b/pybv/tests/test_bv_writer.py @@ -80,7 +80,8 @@ def test_bv_writer_oi_cycle(): tmpdir = _mktmpdir() # Write, then read the data to BV format - write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events) + write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, + resolution=np.power(10., -np.arange(10))) annot = mne.read_annotations(op.join(tmpdir, fname + '.vmrk')) raw_written = mne.io.read_raw_brainvision(op.join(tmpdir, fname + '.vhdr'), preload=True, stim_channel=False) @@ -105,3 +106,12 @@ def test_bv_writer_oi_cycle(): assert ch_names == raw_written.ch_names rmtree(tmpdir) + + +def test_scale_data(): + """Test that scale_data=False keeps the data untouched.""" + tmpdir = _mktmpdir() + write_brainvision(data, sfreq, ch_names, fname, tmpdir, scale_data=False) + data_written = np.fromfile(tmpdir + '/' + fname + '.eeg', dtype=np.float32) + assert_allclose(data_written, data.T.flatten()) + rmtree(tmpdir)
Support channel-specific scaling factors The BrainVision format allows each channel to have its own scaling factor, e.g. ``` ; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,<Resolution in "Unit">,<Unit>, Future extensions.. Ch1=Fp1,,0.2,µV Ch2=Fp2,,6.5,nV ``` This is really useful with int16 as the data type, since it maximizes each channel's range without impairing precision too much (and it's also what BrainVision Analyzer does). The only required (backwards-compatible) change to the interface would be in the `resolution` parameter, which could accept either a `float` array with `nchan` elements or a single `float` that gets promoted automatically.
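A condensed sketch of the promotion logic the patch adds to `write_brainvision`: a scalar resolution is broadcast to all channels, while an array must provide exactly one value per channel. The helper name is illustrative; in the patch this logic lives inline:

```python
import numpy as np

def broadcast_resolution(resolution, nchan):
    resolution = np.atleast_1d(resolution)
    if resolution.shape not in [(1,), (nchan,)]:
        raise ValueError("Resolution should be one or n_chan floats")
    return resolution * np.ones((nchan,))

print(broadcast_resolution(1e-7, 4))            # scalar promoted to 4 channels
print(broadcast_resolution([2e-7, 6.5e-9], 2))  # per-channel resolutions kept
```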
0.0
f1c84c062b8f1e3596d63b331c5d205d5b9c109e
[ "pybv/tests/test_bv_writer.py::test_scale_data" ]
[ "pybv/tests/test_bv_writer.py::test_bv_writer_events", "pybv/tests/test_bv_writer.py::test_bv_bad_format" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-04-27 10:54:30+00:00
bsd-3-clause
1,372
bids-standard__pybv-26
diff --git a/README.rst b/README.rst index b9ab195..248c3b0 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ - - .. image:: https://circleci.com/gh/bids-standard/pybv.svg?style=svg :target: https://circleci.com/gh/bids-standard/pybv :alt: CircleCI @@ -8,6 +6,9 @@ :target: https://codecov.io/gh/bids-standard/pybv :alt: codecov +.. image:: https://badge.fury.io/py/pybv.svg + :target: https://badge.fury.io/py/pybv + .. image:: https://pepy.tech/badge/pybv :target: https://pepy.tech/project/pybv :alt: Downloads @@ -111,8 +112,7 @@ Here is an example of the MNE code required to read in BrainVision data: import mne # Import the BrainVision data into an MNE Raw object - raw = mne.io.read_raw_brainvision('tmp/test.vhdr', preload=True, - stim_channel=False) + raw = mne.io.read_raw_brainvision('tmp/test.vhdr', preload=True) # Read in the event information as MNE annotations annot = mne.read_annotations('tmp/test.vmrk') diff --git a/docs/changelog.rst b/docs/changelog.rst index ad42263..4e671ea 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -14,14 +14,27 @@ Here we list a changelog of pybv. current ======= + +Changelog +~~~~~~~~~ Add binary format parameter to public API by `Tristan Stenner`_ (`#22 <https://github.com/bids-standard/pybv/pull/22>`_) +Bug +~~~ +fix bug with events that only have integer codes of length less than 3, by `Stefan Appelhoff`_ (`#26 <https://github.com/bids-standard/pybv/pull/26>`_) + 0.0.2 ===== + +Changelog +~~~~~~~~~ Support channel-specific scaling factors by `Tristan Stenner`_ (`#17 <https://github.com/bids-standard/pybv/pull/17>`_) 0.0.1 ===== + +Changelog +~~~~~~~~~ Initial import from palday's philistine package and removing dependency on MNE-Python, by `Chris Holdgraf`_ and `Stefan Appelhoff`_ .. _Chris Holdgraf: https://bids.berkeley.edu/people/chris-holdgraf diff --git a/pybv/__init__.py b/pybv/__init__.py index 1a07b00..1da4f72 100644 --- a/pybv/__init__.py +++ b/pybv/__init__.py @@ -8,6 +8,6 @@ # # License: BSD (3-clause) -__version__ = '0.1.0.dev0' +__version__ = '0.3.dev0' from .io import (write_brainvision, ) # noqa: F401 diff --git a/pybv/io.py b/pybv/io.py index f084169..ba6615f 100644 --- a/pybv/io.py +++ b/pybv/io.py @@ -156,8 +156,13 @@ def _write_vmrk_file(vmrk_fname, eeg_fname, events): if events is None or len(events) == 0: return - # Handle events + # Handle events: We write all of them as "Stimulus" events for now. + # This is a string staring with "S" and followed by an integer of + # minimum length 3, padded with "space" if the integer is < length 3. + # For example "S 1", "S 23", "S345" + # XXX: see https://github.com/bids-standard/pybv/issues/24#issuecomment-512746677 # noqa: E501 twidth = int(np.ceil(np.log10(np.max(events[:, 1])))) + twidth = twidth if twidth > 3 else 3 tformat = 'S{:>' + str(twidth) + '}' for ii, irow in enumerate(range(len(events)), start=2): diff --git a/requirements.txt b/requirements.txt index e8fdf42..301052e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ numpy -mne==0.17 +mne>0.17 pytest pytest-cov coverage
bids-standard/pybv
cf8aa9d6904149d68ad9d9500315db5e785664a2
diff --git a/pybv/tests/test_bv_writer.py b/pybv/tests/test_bv_writer.py index 877d843..b5ff9e9 100644 --- a/pybv/tests/test_bv_writer.py +++ b/pybv/tests/test_bv_writer.py @@ -82,8 +82,8 @@ def test_bv_writer_oi_cycle(): # Write, then read the data to BV format write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, resolution=np.power(10., -np.arange(10))) - raw_written = mne.io.read_raw_brainvision(op.join(tmpdir, fname + '.vhdr'), - preload=True, stim_channel=False) + vhdr_fname = op.join(tmpdir, fname + '.vhdr') + raw_written = mne.io.read_raw_brainvision(vhdr_fname, preload=True) # Delete the first annotation because it's just marking a new segment raw_written.annotations.delete(0) # Convert our annotations to events
Update to MNE 0.18 and prepare for MNE 0.19 Our tests fail with MNE 0.18: ```Shell ___________________________ test_bv_writer_oi_cycle ____________________________ def test_bv_writer_oi_cycle(): """Test that a write-read cycle produces identical data.""" tmpdir = _mktmpdir() # Write, then read the data to BV format write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, resolution=np.power(10., -np.arange(10))) raw_written = mne.io.read_raw_brainvision(op.join(tmpdir, fname + '.vhdr'), preload=True, stim_channel=False) # Delete the first annotation because it's just marking a new segment raw_written.annotations.delete(0) # Convert our annotations to events events_written, event_id = mne.events_from_annotations(raw_written) # sfreq assert sfreq == raw_written.info['sfreq'] # Event timing should be within one index of originals assert_allclose(events[:, 0], events_written[:, 0], 1) > assert_array_equal(events[:, 1], events_written[:, 2]) E AssertionError: E Arrays are not equal E E Mismatch: 100% E Max absolute difference: 10000 E Max relative difference: 0.99990001 E x: array([1, 1, 2, 2]) E y: array([10001, 10001, 10002, 10002]) pybv/tests/test_bv_writer.py:97: AssertionError ``` and in MNE 0.19.dev0, the stim_channel will be gone: https://github.com/mne-tools/mne-python/pull/6348/files --> which makes this test fail as well: ```Shell =================================== FAILURES =================================== ___________________________ test_bv_writer_oi_cycle ____________________________ def test_bv_writer_oi_cycle(): """Test that a write-read cycle produces identical data.""" tmpdir = _mktmpdir() # Write, then read the data to BV format write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, resolution=np.power(10., -np.arange(10))) raw_written = mne.io.read_raw_brainvision(op.join(tmpdir, fname + '.vhdr'), > preload=True, stim_channel=False) E TypeError: read_raw_brainvision() got an unexpected keyword argument 'stim_channel' pybv/tests/test_bv_writer.py:86: TypeError ```
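Both failures trace back to the reader API: MNE 0.19 drops the `stim_channel` argument entirely, and under 0.18 events must come from annotations. The pattern the updated test and README use, shown here with a placeholder path:

```python
import mne

raw = mne.io.read_raw_brainvision('tmp/test.vhdr', preload=True)
raw.annotations.delete(0)  # drop the initial "New Segment" marker
events, event_id = mne.events_from_annotations(raw)
```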
0.0
cf8aa9d6904149d68ad9d9500315db5e785664a2
[ "pybv/tests/test_bv_writer.py::test_scale_data", "pybv/tests/test_bv_writer.py::test_bv_bad_format", "pybv/tests/test_bv_writer.py::test_bv_writer_events", "pybv/tests/test_bv_writer.py::test_bv_writer_oi_cycle" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-07-18 09:20:48+00:00
bsd-3-clause
1,373
bids-standard__pybv-29
diff --git a/docs/changelog.rst b/docs/changelog.rst index 4e671ea..48ace17 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -17,10 +17,12 @@ current Changelog ~~~~~~~~~ +Add measurement date parameter to public API, by `Stefan Appelhoff`_ (`#29 <https://github.com/bids-standard/pybv/pull/29>`_) Add binary format parameter to public API by `Tristan Stenner`_ (`#22 <https://github.com/bids-standard/pybv/pull/22>`_) Bug ~~~ +fix bug with events indexing. VMRK events are now correctly written with 1-based indexing, by `Stefan Appelhoff`_ (`#29 <https://github.com/bids-standard/pybv/pull/29>`_) fix bug with events that only have integer codes of length less than 3, by `Stefan Appelhoff`_ (`#26 <https://github.com/bids-standard/pybv/pull/26>`_) 0.0.2 diff --git a/pybv/io.py b/pybv/io.py index ba6615f..ff9a9ae 100644 --- a/pybv/io.py +++ b/pybv/io.py @@ -11,9 +11,11 @@ import codecs import os import os.path as op +import datetime + import numpy as np -from . import __version__ +from pybv import __version__ # ascii as future formats supported_formats = { @@ -26,7 +28,7 @@ supported_orients = {'multiplexed'} def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, events=None, resolution=1e-7, scale_data=True, - fmt='binary_float32'): + fmt='binary_float32', meas_date=None): """Write raw data to BrainVision format. Parameters @@ -45,9 +47,9 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, The folder where output files will be saved. events : ndarray, shape (n_events, 2) Events to write in the marker file. This array has two columns. - The first column is the index of each event (corresponding to the - "time" dimension of the data array). The second column is a number - associated with the "type" of event. + The first column is the zero-based index of each event (corresponding + to the "time" dimension of the data array). The second column is a + number associated with the "type" of event. resolution : float | ndarray The resolution **in volts** in which you'd like the data to be stored. By default, this will be 1e-7, or .1 microvolts. Since data is stored @@ -64,6 +66,11 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, fmt : str Binary format the data should be written as. Valid choices are 'binary_float32' (default) and 'binary_int16'. + meas_date : datetime.datetime | str | None + The measurement date of the data specified as a datetime.datetime + object. Alternatively, can be a string in the format: + "YYYYMMDDhhmmssuuuuuu". "u" stands for microseconds. If None, defaults + to '00000000000000000000'. """ # Create output file names/paths if not op.isdir(folder_out): @@ -106,8 +113,22 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, _chk_fmt(fmt) + # measurement date + if not isinstance(meas_date, (str, datetime.datetime, type(None))): + raise ValueError('`meas_date` must be of type str, datetime.datetime, ' + 'or None but is of type ' + '"{}"'.format(type(meas_date))) + elif meas_date is None: + meas_date = '00000000000000000000' + elif isinstance(meas_date, datetime.datetime): + meas_date = meas_date.strftime('%Y%m%d%H%M%S%f') + elif not (meas_date.isdigit() and len(meas_date) == 20): + raise ValueError('Got a str for `meas_date`, but it was not formatted ' + 'as expected. 
Please supply a str in the format: ' + '"YYYYMMDDhhmmssuuuuuu".') + # Write output files - _write_vmrk_file(vmrk_fname, eeg_fname, events) + _write_vmrk_file(vmrk_fname, eeg_fname, events, meas_date) _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, orientation='multiplexed', format=fmt, resolution=resolution) @@ -136,7 +157,7 @@ def _chk_multiplexed(orientation): return orientation == 'multiplexed' -def _write_vmrk_file(vmrk_fname, eeg_fname, events): +def _write_vmrk_file(vmrk_fname, eeg_fname, events, meas_date): """Write BrainvVision marker file.""" with codecs.open(vmrk_fname, 'w', encoding='utf-8') as fout: print(r'Brain Vision Data Exchange Marker File, Version 1.0', file=fout) # noqa: E501 @@ -149,9 +170,10 @@ def _write_vmrk_file(vmrk_fname, eeg_fname, events): print(r'[Marker Infos]', file=fout) print(r'; Each entry: Mk<Marker number>=<Type>,<Description>,<Position in data points>,', file=fout) # noqa: E501 print(r'; <Size in data points>, <Channel number (0 = marker is related to all channels)>', file=fout) # noqa: E501 + print(r'; <Date (YYYYMMDDhhmmssuuuuuu)>', file=fout) print(r'; Fields are delimited by commas, some fields might be omitted (empty).', file=fout) # noqa: E501 print(r'; Commas in type or description text are coded as "\1".', file=fout) # noqa: E501 - print(r'Mk1=New Segment,,1,1,0,0', file=fout) + print(r'Mk1=New Segment,,1,1,0,{}'.format(meas_date), file=fout) if events is None or len(events) == 0: return @@ -165,11 +187,12 @@ def _write_vmrk_file(vmrk_fname, eeg_fname, events): twidth = twidth if twidth > 3 else 3 tformat = 'S{:>' + str(twidth) + '}' - for ii, irow in enumerate(range(len(events)), start=2): - i_ix = events[irow, 0] + for marker_number, irow in enumerate(range(len(events)), start=2): + i_ix = events[irow, 0] + 1 # BrainVision uses 1-based indexing i_val = events[irow, 1] print(r'Mk{}=Stimulus,{},{},1,0' - .format(ii, tformat.format(i_val), i_ix), file=fout) + .format(marker_number, tformat.format(i_val), i_ix), + file=fout) def _optimize_channel_unit(resolution):
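A hedged usage sketch for the `meas_date` parameter introduced in this patch (not repository code; the data, channel names, and output folder are invented):

```python
import datetime

import numpy as np
from pybv import write_brainvision

data = np.random.randn(2, 1000)  # 2 channels, 1 s at 1000 Hz, in volts
write_brainvision(data, 1000, ['ch1', 'ch2'], 'example', './bv_out',
                  meas_date=datetime.datetime(2000, 1, 1, 12, 0, 0, 0))
# equivalently, as a 20-digit string: meas_date='20000101120000000000'
```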
bids-standard/pybv
ae5ab65ab2819a3cb1e3643e631b1bba5ba198fe
diff --git a/pybv/tests/test_bv_writer.py b/pybv/tests/test_bv_writer.py index b5ff9e9..222029b 100644 --- a/pybv/tests/test_bv_writer.py +++ b/pybv/tests/test_bv_writer.py @@ -11,6 +11,8 @@ import os import os.path as op from shutil import rmtree from tempfile import mkdtemp +from time import gmtime +from datetime import datetime import pytest @@ -75,13 +77,31 @@ def test_bv_bad_format(): rmtree(tmpdir) -def test_bv_writer_oi_cycle(): [email protected]("meas_date,match", + [(1, '`meas_date` must be of type str, datetime'), + ('', 'Got a str for `meas_date`, but it was'), + ('1973', 'Got a str for `meas_date`, but it was')]) +def test_bad_meas_date(meas_date, match): + """Test that bad measurement dates raise errors.""" + tmpdir = _mktmpdir() + with pytest.raises(ValueError, match=match): + write_brainvision(data, sfreq, ch_names, fname, tmpdir, + meas_date=meas_date) + + rmtree(tmpdir) + + [email protected]("meas_date", + [('20000101120000000000'), + (datetime(2000, 1, 1, 12, 0, 0, 0))]) +def test_bv_writer_oi_cycle(meas_date): """Test that a write-read cycle produces identical data.""" tmpdir = _mktmpdir() # Write, then read the data to BV format write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, - resolution=np.power(10., -np.arange(10))) + resolution=np.power(10., -np.arange(10)), + meas_date=meas_date) vhdr_fname = op.join(tmpdir, fname + '.vhdr') raw_written = mne.io.read_raw_brainvision(vhdr_fname, preload=True) # Delete the first annotation because it's just marking a new segment @@ -92,8 +112,8 @@ def test_bv_writer_oi_cycle(): # sfreq assert sfreq == raw_written.info['sfreq'] - # Event timing should be within one index of originals - assert_allclose(events[:, 0], events_written[:, 0], 1) + # Event timing should be exactly the same + assert_array_equal(events[:, 0], events_written[:, 0]) assert_array_equal(events[:, 1], events_written[:, 2]) # Should be 2 unique event types assert len(event_id) == 2 @@ -104,6 +124,11 @@ def test_bv_writer_oi_cycle(): # channels assert ch_names == raw_written.ch_names + # measurement date, we do not test microsecs + unix_seconds = raw_written.info['meas_date'][0] + time_str = ('{:04}{:02}{:02}{:02}{:02}{:02}'.format(*gmtime(unix_seconds))) + assert time_str == '20000101120000' # 1st of Jan, 2000 at 12:00 and 0 secs + rmtree(tmpdir)
potential indexing issue Could it be that we have an indexing issue in our code? BrainVision uses 1-based indexing for writing the events, whereas in Python and MNE-Python, we use 0-based indexing. Now imagine a user passing `events`: https://github.com/bids-standard/pybv/blob/cf8aa9d6904149d68ad9d9500315db5e785664a2/pybv/io.py#L46-L50 These events will most likely be 0-indexed. However, later down in the code, when we write a VMRK file, we translate these indices one-to-one, disregarding that BrainVision uses 1-based indexing. See: https://github.com/bids-standard/pybv/blob/cf8aa9d6904149d68ad9d9500315db5e785664a2/pybv/io.py#L163-L167 Later on, in the tests, we actually see this issue: https://github.com/bids-standard/pybv/blob/cf8aa9d6904149d68ad9d9500315db5e785664a2/pybv/tests/test_bv_writer.py#L95-L96 (note the comment). I may be wrong ... but it seems to me we should handle the indexing better. cc @tstenner @choldgraf
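To make the off-by-one concrete, a minimal sketch (not repository code) of how a 0-based event sample index should land in the 1-based `Mk` lines of a VMRK file:

```python
import numpy as np

# events follow the pybv convention: column 0 is the 0-based sample index
events = np.array([[0, 1], [999, 2]])

for marker_number, (i_ix, i_val) in enumerate(events, start=2):
    # VMRK positions are 1-based, so the written position is i_ix + 1
    print('Mk{}=Stimulus,S{:>3},{},1,0'.format(marker_number, i_val, i_ix + 1))
# prints: Mk2=Stimulus,S  1,1,1,0
#         Mk3=Stimulus,S  2,1000,1,0
```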
0.0
ae5ab65ab2819a3cb1e3643e631b1bba5ba198fe
[ "pybv/tests/test_bv_writer.py::test_bad_meas_date[1-`meas_date`", "pybv/tests/test_bv_writer.py::test_bad_meas_date[-Got", "pybv/tests/test_bv_writer.py::test_bad_meas_date[1973-Got" ]
[ "pybv/tests/test_bv_writer.py::test_bv_writer_events", "pybv/tests/test_bv_writer.py::test_bv_bad_format", "pybv/tests/test_bv_writer.py::test_scale_data" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-07-21 13:08:12+00:00
bsd-3-clause
1,374
bids-standard__pybv-39
diff --git a/README.rst b/README.rst index cae8c4c..3e38712 100644 --- a/README.rst +++ b/README.rst @@ -95,7 +95,7 @@ collection of BrainVision files on disk. # for further parameters see our API documentation in the docs write_brainvision(data, sfreq, ch_names, fname, tmpdir, events, - resolution=1e-6, fmt='binary_float32') + resolution=1e-6, unit='µV', fmt='binary_float32') Reading BrainVision files ------------------------- diff --git a/docs/changelog.rst b/docs/changelog.rst index 13e6bc9..0042da9 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -15,6 +15,10 @@ Here we list a changelog of pybv. Current ======= +Changelog +~~~~~~~~~ +- Add ``unit`` parameter for exporting signals in a specific unit (V, mV, µV or uV, nV) by `Clemens Brunner`_ (`#39 <https://github.com/bids-standard/pybv/pull/39>`_) + 0.2.0 ===== diff --git a/pybv/io.py b/pybv/io.py index 23f7815..3dc1366 100644 --- a/pybv/io.py +++ b/pybv/io.py @@ -18,7 +18,7 @@ import numpy as np from pybv import __version__ -# ascii as future formats +# ASCII as future formats supported_formats = { 'binary_float32' : ('IEEE_FLOAT_32', np.float32), # noqa: E203 'binary_int16' : ('INT_16', np.int16), # noqa: E203 @@ -28,15 +28,15 @@ supported_orients = {'multiplexed'} def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, - events=None, resolution=1e-7, scale_data=True, + events=None, resolution=1e-7, unit='µV', scale_data=True, fmt='binary_float32', meas_date=None): """Write raw data to BrainVision format. Parameters ---------- data : ndarray, shape (n_channels, n_times) - The raw data to export. Data is assumed to be in - **volts**. The data will be stored in **microvolts**. + The raw data to export. Data is assumed to be in **volts** and will be + stored as specified by `unit`. sfreq : int | float The sampling frequency of the data ch_names : list of strings, shape (n_channels,) @@ -54,12 +54,13 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, third column specifies the length of each event (default 1 sample). resolution : float | ndarray The resolution **in volts** in which you'd like the data to be stored. - By default, this will be 1e-7, or .1 microvolts. Since data is stored - in microvolts, the data will be multiplied by the inverse of this - factor, and all decimals will be cut off after this. So, this number - controls the amount of round-trip resolution you want. - This can be either a single float for all channels or an array with - nchan elements. + By default, this will be 1e-7, or 0.1 µV. This number controls the + amount of round-trip resolution. This can be either a single float for + all channels or an array with n_channels elements. + unit : str | None + The unit of the exported data. This can be one of 'V', 'mV', 'µV' (or + equivalently 'uV') , 'nV' or None. If None, a suitable unit based on + the selected resolution is chosen automatically. 
scale_data : bool Boolean indicating if the data is in volts and should be scaled to `resolution` (True), or if the data is already in the previously @@ -135,7 +136,7 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, _write_vmrk_file(vmrk_fname, eeg_fname, events, meas_date) _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, orientation='multiplexed', format=fmt, - resolution=resolution) + resolution=resolution, unit=unit) _write_bveeg_file(eeg_fname, data, orientation='multiplexed', format=fmt, resolution=resolution, scale_data=scale_data) @@ -165,11 +166,11 @@ def _write_vmrk_file(vmrk_fname, eeg_fname, events, meas_date): """Write BrainvVision marker file.""" with codecs.open(vmrk_fname, 'w', encoding='utf-8') as fout: print(r'Brain Vision Data Exchange Marker File, Version 1.0', file=fout) # noqa: E501 - print(r';Exported using pybv {}'.format(__version__), file=fout) # noqa: E501 + print(r';Exported using pybv {}'.format(__version__), file=fout) print(r'', file=fout) print(r'[Common Infos]', file=fout) print(r'Codepage=UTF-8', file=fout) - print(r'DataFile={}'.format(eeg_fname.split(os.sep)[-1]), file=fout) # noqa: E501 + print(r'DataFile={}'.format(eeg_fname.split(os.sep)[-1]), file=fout) print(r'', file=fout) print(r'[Marker Infos]', file=fout) print(r'; Each entry: Mk<Marker number>=<Type>,<Description>,<Position in data points>,', file=fout) # noqa: E501 @@ -204,20 +205,29 @@ def _write_vmrk_file(vmrk_fname, eeg_fname, events, meas_date): file=fout) -def _optimize_channel_unit(resolution): +def _optimize_channel_unit(resolution, unit): """Calculate an optimal channel scaling factor and unit.""" exp = np.log10(resolution) - if exp <= -7: - return resolution / 1e-9, 'nV' - elif exp <= -2: - return resolution / 1e-6, 'µV' - else: + if unit is None: + if exp <= -7: + return resolution / 1e-9, 'nV' + elif exp <= -2: + return resolution / 1e-6, 'µV' + else: + return resolution, 'V' + elif unit == 'V': return resolution, 'V' + elif unit == 'mV': + return resolution / 1e-3, 'mV' + elif unit in ('µV', 'uV'): + return resolution / 1e-6, 'µV' + elif unit == 'nV': + return resolution / 1e-9, 'nV' def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, orientation='multiplexed', format='binary_float32', - resolution=1e-7): + resolution=1e-7, unit='µV'): """Write BrainvVision header file.""" fmt = format.lower() bvfmt, _ = _chk_fmt(format) @@ -225,13 +235,13 @@ def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, multiplexed = _chk_multiplexed(orientation) with codecs.open(vhdr_fname, 'w', encoding='utf-8') as fout: - print(r'Brain Vision Data Exchange Header File Version 1.0', file=fout) # noqa: E501 - print(r';Written using pybv {}'.format(__version__), file=fout) # noqa: E501 + print(r'Brain Vision Data Exchange Header File Version 1.0', file=fout) + print(r';Written using pybv {}'.format(__version__), file=fout) print(r'', file=fout) print(r'[Common Infos]', file=fout) print(r'Codepage=UTF-8', file=fout) - print(r'DataFile={}'.format(op.basename(eeg_fname)), file=fout) # noqa: E501 - print(r'MarkerFile={}'.format(op.basename(vmrk_fname)), file=fout) # noqa: E501 + print(r'DataFile={}'.format(op.basename(eeg_fname)), file=fout) + print(r'MarkerFile={}'.format(op.basename(vmrk_fname)), file=fout) if fmt.startswith('binary'): print(r'DataFormat=BINARY', file=fout) @@ -240,14 +250,14 @@ def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, print(r'; DataOrientation: 
MULTIPLEXED=ch1,pt1, ch2,pt1 ...', file=fout) # noqa: E501 print(r'DataOrientation=MULTIPLEXED', file=fout) - print(r'NumberOfChannels={}'.format(len(data)), file=fout) # noqa: E501 + print(r'NumberOfChannels={}'.format(len(data)), file=fout) print(r'; Sampling interval in microseconds', file=fout) - print(r'SamplingInterval={}'.format(int(1e6 / sfreq)), file=fout) # noqa: E501 + print(r'SamplingInterval={}'.format(int(1e6 / sfreq)), file=fout) print(r'', file=fout) if fmt.startswith('binary'): print(r'[Binary Infos]', file=fout) - print(r'BinaryFormat={}'.format(bvfmt), file=fout) # noqa: E501 + print(r'BinaryFormat={}'.format(bvfmt), file=fout) print(r'', file=fout) print(r'[Channel Infos]', file=fout) @@ -259,11 +269,14 @@ def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, nchan = len(ch_names) # broadcast to nchan elements if necessary resolutions = resolution * np.ones((nchan,)) + units = [unit] * nchan for i in range(nchan): - resolution, unit = _optimize_channel_unit(resolutions[i]) - print(r'Ch{}={},,{:0.3f},{}' - .format(i + 1, ch_names[i], resolution, unit), file=fout) + resolution, unit = _optimize_channel_unit(resolutions[i], units[i]) + s = r'Ch{}={},,{:0.{precision}f},{}' + print(s.format(i + 1, ch_names[i], resolution, unit, + precision=max(0, int(np.log10(1 / resolution)))), + file=fout) print(r'', file=fout) print(r'[Comment]', file=fout) print(r'', file=fout)
bids-standard/pybv
dadee0072cffd001be60e5ff9b67358b2811f900
diff --git a/pybv/tests/test_bv_writer.py b/pybv/tests/test_bv_writer.py index 7e2087d..cb5bd1f 100644 --- a/pybv/tests/test_bv_writer.py +++ b/pybv/tests/test_bv_writer.py @@ -21,16 +21,16 @@ from numpy.testing import assert_allclose, assert_array_equal from pybv.io import write_brainvision, _write_bveeg_file, _write_vhdr_file -# Create data we'll use for testing +# create testing data fname = 'pybv' np.random.seed(1337) n_chans = 10 -ch_names = ['ch_{}'.format(ii) for ii in range(n_chans)] -sfreq = 1000. +ch_names = ['ch_{}'.format(i) for i in range(n_chans)] +sfreq = 1000 n_seconds = 5 -n_times = int(n_seconds * sfreq) -event_times = np.array([1., 2., 3., 4.]) -events = np.column_stack([(event_times * sfreq).astype(int), [1, 1, 2, 2]]) +n_times = n_seconds * sfreq +event_times = np.arange(1, 5) +events = np.column_stack([event_times * sfreq, [1, 1, 2, 2]]) data = np.random.randn(n_chans, n_times) @@ -43,24 +43,23 @@ def test_bv_writer_events(): """Test that all event options work without throwing an error.""" tmpdir = _mktmpdir() - # Events should be none or ndarray + # events should be none or ndarray with pytest.raises(ValueError): write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=[]) - # Correct arguments should work + # correct arguments should work write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events) write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=None) rmtree(tmpdir) def test_bv_bad_format(): - """Test that bad formats cause an error.""" + """Test that bad formats throw an error.""" tmpdir = _mktmpdir() - vhdr_fname = os.path.join(tmpdir, fname+".vhdr") - vmrk_fname = os.path.join(tmpdir, fname+".vmrk") - eeg_fname = os.path.join(tmpdir, fname+".eeg") - # events = np.array([[10, 0, 31]]) + vhdr_fname = os.path.join(tmpdir, fname + ".vhdr") + vmrk_fname = os.path.join(tmpdir, fname + ".vmrk") + eeg_fname = os.path.join(tmpdir, fname + ".eeg") with pytest.raises(ValueError): _write_vhdr_file(vhdr_fname, vmrk_fname, @@ -86,44 +85,41 @@ def test_bad_meas_date(meas_date, match): with pytest.raises(ValueError, match=match): write_brainvision(data, sfreq, ch_names, fname, tmpdir, meas_date=meas_date) - rmtree(tmpdir) @pytest.mark.parametrize("meas_date", [('20000101120000000000'), (datetime(2000, 1, 1, 12, 0, 0, 0))]) -def test_bv_writer_oi_cycle(meas_date): - """Test that a write-read cycle produces identical data.""" +def test_write_read_cycle(meas_date): + """Test that a write/read cycle produces identical data.""" tmpdir = _mktmpdir() - # Write, then read the data to BV format + # write and read data to BV format write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, resolution=np.power(10., -np.arange(10)), meas_date=meas_date) vhdr_fname = op.join(tmpdir, fname + '.vhdr') raw_written = mne.io.read_raw_brainvision(vhdr_fname, preload=True) - # Delete the first annotation because it's just marking a new segment + # delete the first annotation because it's just marking a new segment raw_written.annotations.delete(0) - # Convert our annotations to events + # convert our annotations to events events_written, event_id = mne.events_from_annotations(raw_written) # sfreq assert sfreq == raw_written.info['sfreq'] - # Event timing should be exactly the same + # event timing should be exactly the same assert_array_equal(events[:, 0], events_written[:, 0]) assert_array_equal(events[:, 1], events_written[:, 2]) - # Should be 2 unique event types - assert len(event_id) == 2 - # data round-trip. 
- assert_allclose(data, raw_written._data) + assert len(event_id) == 2 # there should be two unique event types + + assert_allclose(data, raw_written._data) # data round-trip - # channels - assert ch_names == raw_written.ch_names + assert ch_names == raw_written.ch_names # channels - # measurement date, we do not test microsecs + # measurement dates must match assert raw_written.info['meas_date'] == datetime(2000, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc) @@ -137,3 +133,16 @@ def test_scale_data(): data_written = np.fromfile(tmpdir + '/' + fname + '.eeg', dtype=np.float32) assert_allclose(data_written, data.T.flatten()) rmtree(tmpdir) + + [email protected]("resolution", np.logspace(-3, -9, 7)) [email protected]("unit", ["V", "mV", "uV", "µV", "nV", None]) +def test_unit_resolution(resolution, unit): + """Test different combinations of units and resolutions.""" + tmpdir = _mktmpdir() + write_brainvision(data, sfreq, ch_names, fname, tmpdir, + resolution=resolution, unit=unit) + vhdr_fname = op.join(tmpdir, fname + '.vhdr') + raw_written = mne.io.read_raw_brainvision(vhdr_fname, preload=True) + assert np.allclose(data, raw_written.get_data()) + rmtree(tmpdir)
Set unit to microvolts It seems that it is currently not possible to set the resolution and unit independently. As soon as `np.log10(resolution) <= -7` (which is `True` for the default resolution of 1e-7), the unit is set to nV. However, I just had a conversation with a Brain Products developer regarding potential issues with the unit. Specifically, he said that BrainVision Analyzer currently expects the unit to be µV. If it's something else, auto-scaling doesn't work (at least for the initial view). Therefore, I would like to suggest adding an option that controls the unit. Possible values could include `'V'`, `'mV'`, `'uV'`, `'µV'`, and `'nV'`. I would set the default to `'uV'` (which is also the expected unit if the unit field in the exported file is empty).
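A usage sketch of the resulting `unit` parameter, following the call shown in the patched README above (the data and channel names are invented):

```python
import numpy as np
from pybv import write_brainvision

data = np.random.randn(2, 1000)  # 2 channels, 1 s at 1000 Hz, in volts
# keep the default 0.1 µV resolution but force the unit to µV instead of nV
write_brainvision(data, 1000, ['ch1', 'ch2'], 'example', './bv_out',
                  resolution=1e-7, unit='µV')
```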
0.0
dadee0072cffd001be60e5ff9b67358b2811f900
[ "pybv/tests/test_bv_writer.py::test_unit_resolution[V-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-09]" ]
[ "pybv/tests/test_bv_writer.py::test_bv_writer_events", "pybv/tests/test_bv_writer.py::test_bv_bad_format", "pybv/tests/test_bv_writer.py::test_bad_meas_date[1-`meas_date`", "pybv/tests/test_bv_writer.py::test_bad_meas_date[-Got", "pybv/tests/test_bv_writer.py::test_bad_meas_date[1973-Got", "pybv/tests/test_bv_writer.py::test_write_read_cycle[20000101120000000000]", "pybv/tests/test_bv_writer.py::test_write_read_cycle[meas_date1]", "pybv/tests/test_bv_writer.py::test_scale_data" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-03-30 13:01:04+00:00
bsd-3-clause
1,375
bids-standard__pybv-47
diff --git a/README.rst b/README.rst index b49be74..3345e67 100644 --- a/README.rst +++ b/README.rst @@ -22,9 +22,10 @@ pybv ==== -``pybv`` is a lightweight exporter to the BrainVision data format. It is meant -for use with electrophysiology datasets stored in the -`Brain Imaging Data Structure <https://bids.neuroimaging.io>`_. +``pybv`` is a lightweight exporter to the BrainVision data format. + +The BrainVision data format is a recommended data format +for use in the `Brain Imaging Data Structure <https://bids.neuroimaging.io>`_. The documentation can be found under the following links: @@ -64,9 +65,11 @@ as hosted by Brain Products. Installation ============ +``pybv`` runs on Python version 3.6 or higher. + ``pybv``'s only dependency is ``numpy``. However, we currently recommend that you install MNE-Python for reading BrainVision data. See their instructions -`here <https://www.martinos.org/mne/stable/install_mne_python.html>`_. +`here <https://mne.tools/stable/install/index.html>`_. After you have a working installation of MNE-Python (or only ``numpy`` if you don't want to read data and only write it), you can install ``pybv`` through diff --git a/docs/changelog.rst b/docs/changelog.rst index 0939b1a..3bd08ba 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -15,14 +15,20 @@ Here we list a changelog of pybv. Current ======= -- no entries yet +Changelog +~~~~~~~~~ +- Passing a "greek small letter mu" instead of a "micro sign" as a ``unit`` is now permitted, because :func:`pybv.write_brainvision` will convert from one to the other, by `Stefan Appelhoff`_ (`#47 <https://github.com/bids-standard/pybv/pull/47>`_) + +Authors +~~~~~~~ +- `Stefan Appelhoff`_ 0.3.0 ===== Changelog ~~~~~~~~~ -- Add ``unit`` parameter for exporting signals in a specific unit (V, mV, µV or uV, nV) by `Clemens Brunner`_ (`#39 <https://github.com/bids-standard/pybv/pull/39>`_) +- Add ``unit`` parameter for exporting signals in a specific unit (V, mV, µV or uV, nV), by `Clemens Brunner`_ (`#39 <https://github.com/bids-standard/pybv/pull/39>`_) API ~~~ @@ -39,8 +45,8 @@ Authors Changelog ~~~~~~~~~ -- Add option to disable writing a meas_date event (which is also the new default) by `Clemens Brunner`_ (`#32 <https://github.com/bids-standard/pybv/pull/32>`_) -- Support event durations by passing an (N, 3) array to the events parameter (the third column contains the event durations) by `Clemens Brunner`_ (`#33 <https://github.com/bids-standard/pybv/pull/33>`_) +- Add option to disable writing a meas_date event (which is also the new default), by `Clemens Brunner`_ (`#32 <https://github.com/bids-standard/pybv/pull/32>`_) +- Support event durations by passing an (N, 3) array to the events parameter (the third column contains the event durations), by `Clemens Brunner`_ (`#33 <https://github.com/bids-standard/pybv/pull/33>`_) Authors ~~~~~~~ @@ -53,7 +59,7 @@ Authors Changelog ~~~~~~~~~ - Add measurement date parameter to public API, by `Stefan Appelhoff`_ (`#29 <https://github.com/bids-standard/pybv/pull/29>`_) -- Add binary format parameter to public API by `Tristan Stenner`_ (`#22 <https://github.com/bids-standard/pybv/pull/22>`_) +- Add binary format parameter to public API, by `Tristan Stenner`_ (`#22 <https://github.com/bids-standard/pybv/pull/22>`_) Bug ~~~ @@ -71,7 +77,7 @@ Authors Changelog ~~~~~~~~~ -- Support channel-specific scaling factors by `Tristan Stenner`_ (`#17 <https://github.com/bids-standard/pybv/pull/17>`_) +- Support channel-specific scaling factors, by `Tristan Stenner`_ (`#17 
<https://github.com/bids-standard/pybv/pull/17>`_) Authors ~~~~~~~ @@ -84,7 +90,7 @@ Authors Changelog ~~~~~~~~~ -- Initial import from philistine package by `Phillip Alday`_ and removing dependency on MNE-Python, by `Chris Holdgraf`_ and `Stefan Appelhoff`_ +- Initial import from philistine package by `Phillip Alday`_ and removing dependency on MNE-Python, by `Chris Holdgraf`_, and `Stefan Appelhoff`_ Authors ~~~~~~~ diff --git a/pybv/io.py b/pybv/io.py index 1e8f37f..f93d53c 100644 --- a/pybv/io.py +++ b/pybv/io.py @@ -13,18 +13,21 @@ import codecs import datetime import os import os.path as op +import warnings import numpy as np from pybv import __version__ # ASCII as future formats -supported_formats = { +SUPPORTED_FORMATS = { 'binary_float32': ('IEEE_FLOAT_32', np.float32), 'binary_int16': ('INT_16', np.int16), } -supported_orients = {'multiplexed'} +SUPPORTED_ORIENTS = {'multiplexed'} + +SUPPORTED_UNITS = ['V', 'mV', 'µV', 'uV', 'nV'] def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, @@ -120,6 +123,15 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, _chk_fmt(fmt) + if unit == 'μV': + # this is greek mu: μ + # https://www.compart.com/de/unicode/U+03BC + warnings.warn( + f"Encountered small greek letter mu: 'μ' in unit: {unit} ... " + f"converting to micro sign: 'µ': {unit.replace('μ', 'µ')}" + ) + unit = 'µV' + # measurement date if not isinstance(meas_date, (str, datetime.datetime, type(None))): raise ValueError('`meas_date` must be of type str, datetime.datetime, ' @@ -145,21 +157,21 @@ def write_brainvision(data, sfreq, ch_names, fname_base, folder_out, def _chk_fmt(fmt): """Check that the format string is valid, return BVEF / numpy datatypes.""" - if fmt not in supported_formats: + if fmt not in SUPPORTED_FORMATS: errmsg = ('Data format {} not supported.'.format(fmt) + 'Currently supported formats are: ' + - ', '.join(supported_formats)) + ', '.join(SUPPORTED_FORMATS)) raise ValueError(errmsg) - return supported_formats[fmt] + return SUPPORTED_FORMATS[fmt] def _chk_multiplexed(orientation): """Validate an orientation, return if it is multiplexed or not.""" orientation = orientation.lower() - if orientation not in supported_orients: + if orientation not in SUPPORTED_ORIENTS: errmsg = ('Orientation {} not supported.'.format(orientation) + 'Currently supported orientations are: ' + - ', '.join(supported_orients)) + ', '.join(SUPPORTED_ORIENTS)) raise ValueError(errmsg) return orientation == 'multiplexed' @@ -225,6 +237,12 @@ def _optimize_channel_unit(resolution, unit): return resolution / 1e-6, 'µV' elif unit == 'nV': return resolution / 1e-9, 'nV' + else: + raise ValueError( + f'Encountered unsupported unit: {unit}' + '\nUse either "None" for `unit`, or one of the following: ' + f'{SUPPORTED_UNITS}' + ) def _write_vhdr_file(vhdr_fname, vmrk_fname, eeg_fname, data, sfreq, ch_names, diff --git a/setup.py b/setup.py index b863d9b..3dc4ae6 100644 --- a/setup.py +++ b/setup.py @@ -33,8 +33,6 @@ VERSION = version if __name__ == "__main__": - if os.path.exists('MANIFEST'): - os.remove('MANIFEST') setup(name=DISTNAME, maintainer=MAINTAINER, @@ -48,16 +46,19 @@ if __name__ == "__main__": long_description=open('README.rst').read(), long_description_content_type='text/x-rst', zip_safe=True, # the package can run out of an .egg file - classifiers=['Intended Audience :: Science/Research', - 'Intended Audience :: Developers', - 'License :: OSI Approved', - 'Programming Language :: Python', - 'Topic :: Software Development', - 'Topic :: 
Scientific/Engineering', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Operating System :: Unix', - 'Operating System :: MacOS'], + python_requires='~=3.6', + classifiers=[ + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'License :: OSI Approved', + 'Programming Language :: Python', + 'Topic :: Software Development', + 'Topic :: Scientific/Engineering', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'Operating System :: Unix', + 'Operating System :: MacOS' + ], platforms='any', keywords='Brain Products BrainVision vhdr vmrk eeg', packages=find_packages(),
bids-standard/pybv
76e8bea84d6594ca8e87f8fe367896167e43c641
diff --git a/.github/workflows/python_tests.yml b/.github/workflows/python_tests.yml index b81e8d7..bb8ef4a 100644 --- a/.github/workflows/python_tests.yml +++ b/.github/workflows/python_tests.yml @@ -5,6 +5,12 @@ on: branches: [ master ] pull_request: branches: [ master ] + create: + branches: [master] + tags: ['**'] + schedule: + - cron: "0 4 1 * *" + jobs: build: @@ -14,6 +20,10 @@ jobs: platform: [ubuntu-18.04, ubuntu-latest, macos-latest, windows-latest] python-version: [3.8] + env: + TZ: Europe/Berlin + FORCE_COLOR: true + runs-on: ${{ matrix.platform }} steps: @@ -36,6 +46,14 @@ jobs: run: | pip install -U https://api.github.com/repos/mne-tools/mne-python/zipball/master + - name: Display versions and environment information + run: | + echo $TZ + date + python --version + which python + mne sys_info + - name: Check formatting if: "matrix.platform == 'ubuntu-18.04'" run: | diff --git a/pybv/tests/test_bv_writer.py b/pybv/tests/test_bv_writer.py index c055396..846029d 100644 --- a/pybv/tests/test_bv_writer.py +++ b/pybv/tests/test_bv_writer.py @@ -123,10 +123,19 @@ def test_write_read_cycle(meas_date): # check that we create a folder that does not yet exist tmpdir = op.join(tmpdir, 'newfolder') + # First fail writing due to wrong unit + unsupported_unit = "rV" + with pytest.raises(ValueError, match='Encountered unsupported unit'): + write_brainvision(data, sfreq, ch_names, fname, tmpdir, + unit=unsupported_unit) + # write and read data to BV format - write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, - resolution=np.power(10., -np.arange(10)), - meas_date=meas_date) + # ensure that greek small letter mu gets converted to micro sign + with pytest.warns(UserWarning, match="Encountered small greek letter mu"): + write_brainvision(data, sfreq, ch_names, fname, tmpdir, events=events, + resolution=np.power(10., -np.arange(10)), + unit='μV', + meas_date=meas_date) vhdr_fname = op.join(tmpdir, fname + '.vhdr') raw_written = mne.io.read_raw_brainvision(vhdr_fname, preload=True) # delete the first annotation because it's just marking a new segment
"Units" parameter is not checked for valid units. Passing "μV" fails, because μ is https://www.compart.com/de/unicode/U+03BC, but only µ (https://www.compart.com/de/unicode/U+00B5) is supported in BV according to the spec. Also passing nonsense like "sV" results in an ungraceful error. ![image](https://user-images.githubusercontent.com/9084751/98238603-e855e280-1f66-11eb-8aa3-9745b5417e20.png)
0.0
76e8bea84d6594ca8e87f8fe367896167e43c641
[ "pybv/tests/test_bv_writer.py::test_write_read_cycle[20000101120000000000]", "pybv/tests/test_bv_writer.py::test_write_read_cycle[meas_date1]" ]
[ "pybv/tests/test_bv_writer.py::test_bv_writer_events", "pybv/tests/test_bv_writer.py::test_bv_writer_inputs", "pybv/tests/test_bv_writer.py::test_bv_bad_format", "pybv/tests/test_bv_writer.py::test_bad_meas_date[1-`meas_date`", "pybv/tests/test_bv_writer.py::test_bad_meas_date[-Got", "pybv/tests/test_bv_writer.py::test_bad_meas_date[1973-Got", "pybv/tests/test_bv_writer.py::test_scale_data", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-0.1]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-0.01]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[V-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-0.1]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-0.01]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[mV-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-0.1]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-0.01]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[uV-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-0.1]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-0.01]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[\\xb5V-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-0.1]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-0.01]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[nV-1e-09]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-0.1]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-0.01]", 
"pybv/tests/test_bv_writer.py::test_unit_resolution[None-0.001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-0.0001]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-9.999999999999999e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-06]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-07]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-08]", "pybv/tests/test_bv_writer.py::test_unit_resolution[None-1e-09]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-11-05 13:56:31+00:00
bsd-3-clause
1,376
bihealth__biomedsheets-23
diff --git a/biomedsheets/io_tsv/germline.py b/biomedsheets/io_tsv/germline.py index 2214d46..9aa02c9 100644 --- a/biomedsheets/io_tsv/germline.py +++ b/biomedsheets/io_tsv/germline.py @@ -104,17 +104,7 @@ class GermlineTSVReader(BaseTSVReader): optional_body_header_columns = ('seqPlatform', 'bioSample', 'testSample') def check_tsv_line(self, mapping, lineno): - """Cancer sample sheet--specific valiation""" - # Check for hyphen in patient or sample name - if '-' in mapping['patientName']: - raise GermlineTSVSheetException( - 'Hyphen not allowed in patientName column') # pragma: no cover - if mapping['fatherName'] and '-' in mapping['fatherName']: - raise GermlineTSVSheetException( - 'Hyphen not allowed in fatherName column') # pragma: no cover - if mapping['motherName'] and '-' in mapping['motherName']: - raise GermlineTSVSheetException( - 'Hyphen not allowed in motherName column') # pragma: no cover + """Germline sample sheet--specific validation""" # Check "libraryType" field if mapping['libraryType'] and ( mapping['libraryType'] not in LIBRARY_TYPES):
bihealth/biomedsheets
cfc01e0fd7ad5f2df454b7715bf8b297dd40c6a2
diff --git a/tests/test_io_tsv_germline.py b/tests/test_io_tsv_germline.py index 40e48c6..ace0084 100644 --- a/tests/test_io_tsv_germline.py +++ b/tests/test_io_tsv_germline.py @@ -45,6 +45,18 @@ def tsv_sheet_germline_no_header(): return f [email protected] +def tsv_sheet_germline_no_header_hyphened_identifier(): + """Germline TSV sheet without header""" + return io.StringIO(textwrap.dedent(""" + patientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms + 12-345\t12-346\t12-347\tM\tY\tWGS\t12-345\tHP:0009946,HP:0009899 + 12-348\t12-346\t12-347\tM\tN\tWGS\t12-348\t. + 12-346\t.\t.\tM\tN\t.\t.\t. + 12-347\t.\t.\tF\tN\tWGS\t12-347\t. + """.lstrip())) + + @pytest.fixture def tsv_sheet_germline_platform_name(): """Germline TSV sheet with seqPlatform name""" @@ -510,6 +522,232 @@ EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER = r""" } }""".lstrip() +# Expected value for the germline sheet JSON without header and hyphen in identifiers +EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER_HYPHEN = r""" +{ + "identifier": "file://<unknown>", + "title": "Germline Sample Sheet", + "description": "Sample Sheet constructed from germline compact TSV file", + "extraInfoDefs": { + "bioEntity": { + "ncbiTaxon": { + "docs": "Reference to NCBI taxonomy", + "key": "taxon", + "type": "string", + "pattern": "^NCBITaxon_[1-9][0-9]*$" + }, + "fatherPk": { + "docs": "Primary key of mother", + "key": "fatherPk", + "type": "string" + }, + "motherPk": { + "docs": "Primary key of mother", + "key": "motherPk", + "type": "string" + }, + "fatherName": { + "docs": "secondary_id of father, used for construction only", + "key": "fatherName", + "type": "string" + }, + "motherName": { + "key": "motherName", + "docs": "secondary_id of mother, used for construction only", + "type": "string" + }, + "sex": { + "docs": "Biological sex of individual", + "key": "sex", + "type": "enum", + "choices": [ + "male", + "female", + "unknown" + ] + }, + "isAffected": { + "docs": "Flag for marking individiual as (un-)affected", + "key": "isAffected", + "type": "enum", + "choices": [ + "affected", + "unaffected", + "unknown" + ] + }, + "hpoTerms": { + "docs": "HPO terms for individual", + "key": "hpoTerms", + "type": "array", + "entry": "string", + "pattern": "^HPO:[0-9]+$" + } + }, + "bioSample": {}, + "testSample": { + "extractionType": { + "docs": "Describes extracted", + "key": "extractionType", + "type": "enum", + "choices": [ + "DNA", + "RNA", + "other" + ] + } + }, + "ngsLibrary": { + "seqPlatform": { + "docs": "Sequencing platform used", + "key": "kitName", + "type": "enum", + "choices": [ + "Illumina", + "PacBio", + "other" + ] + }, + "libraryType": { + "docs": "Rough classificiation of the library type", + "key": "libraryType", + "type": "enum", + "choices": [ + "Panel-seq", + "WES", + "WGS", + "mRNA-seq", + "tRNA-seq", + "other" + ] + }, + "folderName": { + "docs": "Name of folder with FASTQ files", + "key": "folderName", + "type": "string" + } + } + }, + "bioEntities": { + "12-345": { + "pk": 1, + "extraInfo": { + "fatherName": "12-346", + "motherName": "12-347", + "sex": "male", + "isAffected": "affected", + "hpoTerms": [ + "HP:0009946", + "HP:0009899" + ], + "ncbiTaxon": "NCBITaxon_9606", + "fatherPk": 9, + "motherPk": 10 + }, + "bioSamples": { + "N1": { + "pk": 2, + "extraInfo": {}, + "testSamples": { + "DNA1": { + "pk": 3, + "extraInfo": { + "extractionType": "DNA" + }, + "ngsLibraries": { + "WGS1": { + "pk": 4, + "extraInfo": { + "seqPlatform": "Illumina", + "folderName": "12-345", + "libraryType": "WGS" + } + } + } + 
} + } + } + } + }, + "12-348": { + "pk": 5, + "extraInfo": { + "fatherName": "12-346", + "motherName": "12-347", + "sex": "male", + "isAffected": "unaffected", + "ncbiTaxon": "NCBITaxon_9606", + "fatherPk": 9, + "motherPk": 10 + }, + "bioSamples": { + "N1": { + "pk": 6, + "extraInfo": {}, + "testSamples": { + "DNA1": { + "pk": 7, + "extraInfo": { + "extractionType": "DNA" + }, + "ngsLibraries": { + "WGS1": { + "pk": 8, + "extraInfo": { + "seqPlatform": "Illumina", + "folderName": "12-348", + "libraryType": "WGS" + } + } + } + } + } + } + } + }, + "12-346": { + "pk": 9, + "extraInfo": { + "sex": "male", + "isAffected": "unaffected", + "ncbiTaxon": "NCBITaxon_9606" + }, + "bioSamples": {} + }, + "12-347": { + "pk": 10, + "extraInfo": { + "sex": "female", + "isAffected": "unaffected", + "ncbiTaxon": "NCBITaxon_9606" + }, + "bioSamples": { + "N1": { + "pk": 11, + "extraInfo": {}, + "testSamples": { + "DNA1": { + "pk": 12, + "extraInfo": { + "extractionType": "DNA" + }, + "ngsLibraries": { + "WGS1": { + "pk": 13, + "extraInfo": { + "seqPlatform": "Illumina", + "folderName": "12-347", + "libraryType": "WGS" + } + } + } + } + } + } + } + } + } +}""".lstrip() # Expected value when platform name is given EXPECTED_GERMLINE_SHEET_JSON_PLATFORM_NAME = r""" @@ -673,6 +911,13 @@ def test_read_germline_sheet_no_header(tsv_sheet_germline_no_header): assert EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER == json.dumps( sheet.json_data, indent=' ') +def test_read_germline_sheet_no_header_hyphened_identifiers( + tsv_sheet_germline_no_header_hyphened_identifier +): + sheet = io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_no_header_hyphened_identifier) + assert EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER_HYPHEN == json.dumps( + sheet.json_data, indent=' ') + def test_read_germline_sheet_platform_name(tsv_sheet_germline_platform_name): sheet = io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_platform_name)
Hyphen not allowed in patientName column - Germline **Issue** Hyphens are allowed in the SODAR sample sheet (specifically in the patient column), but not by [biomedsheets](https://github.com/bihealth/biomedsheets), which is used while running the genomic pipeline. ``` Tue Feb 15 16:55:58 CET 2022 + snappy-snake --printshellcmds --snappy-pipeline-use-drmaa --snappy-pipeline-drmaa-jobs 500 --restart-times 0 --drmaa-snippet=--partition=medium --rerun-incomplete -- 02-15 16:55 root INFO Creating directory /data/gpfs-1/work/projects/medgen_genomes/2022-02-11_Aortopathy_WGS/GRCh37/ngs_mapping/slurm_log/681930 02-15 16:55 root INFO Executing snakemake '--directory' '/data/...Aortopathy_WGS/GRCh37/ngs_mapping' '--snakefile' '.../snappy-pipeline/snappy_pipeline/workflows/ngs_mapping/Snakefile' '--jobscript' '.../snappy-pipeline/snappy_pipeline/apps/tpls/jobscript.sh' '--rerun-incomplete' '-p' '--use-conda' '--conda-frontend' 'mamba' '--max-jobs-per-second' '10' '--max-status-checks-per-second' '10' '--drmaa' ' --mem={cluster.mem} --time={cluster.time} --cpus-per-task={cluster.ntasks} --output=/data/gpfs-1/work/projects/medgen_genomes/2022-02-11_Aortopathy_WGS/GRCh37/ngs_mapping/slurm_log/681930/slurm-%x-%J.log --partition=medium' '-j' '500' GermlineTSVSheetException in line 23 of .../snappy-pipeline/snappy_pipeline/workflows/ngs_mapping/Snakefile: Hyphen not allowed in patientName column File ".../snappy-pipeline/snappy_pipeline/workflows/ngs_mapping/Snakefile", line 23, in <module> File ".../snappy-pipeline/snappy_pipeline/workflows/ngs_mapping/__init__.py", line 1033, in __init__ File ".../snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 504, in __init__ File ".../snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 745, in _load_data_set_infos File ".../snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 363, in __init__ File ".../snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 386, in _load_sheet File ".../snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 266, in _cached_read_germline_tsv_sheet File "miniconda3/lib/python3.8/site-packages/biomedsheets/io_tsv/germline.py", line 186, in read_germline_tsv_sheet File "miniconda3/lib/python3.8/site-packages/biomedsheets/io_tsv/base.py", line 359, in read_sheet File "miniconda3/lib/python3.8/site-packages/biomedsheets/io_tsv/base.py", line 353, in read_json_data File "miniconda3/lib/python3.8/site-packages/biomedsheets/io_tsv/base.py", line 395, in _create_sheet_json File "miniconda3/lib/python3.8/site-packages/biomedsheets/io_tsv/germline.py", line 110, in check_tsv_line 02-15 16:56 snakemake.logging ERROR GermlineTSVSheetException in line 23 of .../snappy-pipeline/snappy_pipeline/workflows/ngs_mapping/Snakefile: Hyphen not allowed in patientName column ``` biomedsheets 0.11.1 pypi_0 pypi **Additional info** _"[This] restriction is in SNAPPY/biomedsheets only ... Its origin is that I originally wanted to be able to resolve SAMPLE-NORMALTUMORNUMBER-EXTRACTIONNUMBER-LIBRARYNUMBER unambiguously to SAMPLE etc. This functionality is [not] actually used[, so] it can be removed."_
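A minimal sketch mirroring the new test fixture (not repository code), showing that hyphenated identifiers now parse:

```python
import io
import textwrap

from biomedsheets import io_tsv

tsv = io.StringIO(textwrap.dedent("""
patientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms
12-345\t12-346\t12-347\tM\tY\tWGS\t12-345\tHP:0009946,HP:0009899
12-346\t.\t.\tM\tN\t.\t.\t.
12-347\t.\t.\tF\tN\tWGS\t12-347\t.
""".lstrip()))

# previously raised GermlineTSVSheetException for the hyphenated names
sheet = io_tsv.read_germline_tsv_sheet(tsv)
```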
0.0
cfc01e0fd7ad5f2df454b7715bf8b297dd40c6a2
[ "tests/test_io_tsv_germline.py::test_read_germline_sheet_no_header_hyphened_identifiers" ]
[ "tests/test_io_tsv_germline.py::test_read_germline_sheet_header", "tests/test_io_tsv_germline.py::test_read_germline_sheet_no_header", "tests/test_io_tsv_germline.py::test_read_germline_sheet_platform_name", "tests/test_io_tsv_germline.py::test_read_tumor_json_header", "tests/test_io_tsv_germline.py::test_read_tumor_json_no_header", "tests/test_io_tsv_germline.py::test_read_tumor_json_platform_name" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-08-09 13:59:44+00:00
mit
1,377
bihealth__biomedsheets-25
diff --git a/biomedsheets/io_tsv/germline.py b/biomedsheets/io_tsv/germline.py index 2214d46..9aa02c9 100644 --- a/biomedsheets/io_tsv/germline.py +++ b/biomedsheets/io_tsv/germline.py @@ -104,17 +104,7 @@ class GermlineTSVReader(BaseTSVReader): optional_body_header_columns = ('seqPlatform', 'bioSample', 'testSample') def check_tsv_line(self, mapping, lineno): - """Cancer sample sheet--specific valiation""" - # Check for hyphen in patient or sample name - if '-' in mapping['patientName']: - raise GermlineTSVSheetException( - 'Hyphen not allowed in patientName column') # pragma: no cover - if mapping['fatherName'] and '-' in mapping['fatherName']: - raise GermlineTSVSheetException( - 'Hyphen not allowed in fatherName column') # pragma: no cover - if mapping['motherName'] and '-' in mapping['motherName']: - raise GermlineTSVSheetException( - 'Hyphen not allowed in motherName column') # pragma: no cover + """Germline sample sheet--specific validation""" # Check "libraryType" field if mapping['libraryType'] and ( mapping['libraryType'] not in LIBRARY_TYPES): diff --git a/biomedsheets/shortcuts/germline.py b/biomedsheets/shortcuts/germline.py index 5ae32ed..67c5367 100644 --- a/biomedsheets/shortcuts/germline.py +++ b/biomedsheets/shortcuts/germline.py @@ -43,6 +43,18 @@ class UndefinedFieldException(Exception): defined in extra_infos.""" +class InconsistentPedigreeException(Exception): + """Raised if pedigree information from custom field is inconsistent with row definition. + + Example for field 'familyId': + [Data] + familyId | patientName | fatherName | motherName | ... + family1 | index1 | father1 | mother1 | ... + family2 | father1 | 0 | 0 | ... + family3 | mother1 | 0 | 0 | ... + """ + + class Pedigree: """Class for accessing information in a pedigree @@ -325,13 +337,27 @@ class CohortBuilder: """Return :py:class:`Cohort` object with :py:class:`Pedigree` sub structure """ + error_msg = ( + "Found inconsistent in input sample sheet. For index '{id_}' pedigree description from " + "row is not the same as the one found using custom join field '{join_by_field}'." + ) cohort = Cohort(self._yield_pedigrees()) for pedigree in cohort.pedigrees: for donor in pedigree.donors: if donor.father_pk: donor._father = cohort.pk_to_donor[int(donor.father_pk)] + # Consistent check - it shouldn't be 'None' if pedigree correctly joint. + if not pedigree.pk_to_donor.get(donor.father_pk, None): + raise InconsistentPedigreeException(error_msg.format( + id_=donor.bio_entity.secondary_id, join_by_field=self.join_by_field) + ) if donor.mother_pk: donor._mother = cohort.pk_to_donor[int(donor.mother_pk)] + # Consistent check - it shouldn't be 'None' if pedigree correctly joint + if not pedigree.pk_to_donor.get(donor.father_pk, None): + raise InconsistentPedigreeException(error_msg.format( + id_=donor.bio_entity.secondary_id, join_by_field=self.join_by_field) + ) return cohort def _yield_pedigrees(self):
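A hedged reproduction sketch of the new consistency check (mirroring the docstring example above and the accompanying test fixture; not repository code):

```python
import io
import textwrap

from biomedsheets import io_tsv, shortcuts
from biomedsheets.shortcuts.germline import InconsistentPedigreeException

# each trio member carries its own familyId, so joining by 'familyId'
# contradicts the fatherName/motherName columns
tsv = io.StringIO(textwrap.dedent("""
[Metadata]
schema\tgermline_variants
schema_version\tv1
title\tExample germline study
description\tSimple study with one trio plus

[Custom Fields]
key\tannotatedEntity\tdocs\ttype\tminimum\tmaximum\tunit\tchoices\tpattern
familyId\tbioEntity\tFamily\tstring\t.\t.\t.\t.\t.

[Data]
familyId\tpatientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms
family1\tindex1\tfather1\tmother1\tM\tY\tWES\tindex1\t.
family2\tfather1\t0\t0\tM\tN\tWES\tfather1\t.
family3\tmother1\t0\t0\tM\tN\tWES\tmother1\t.
""".lstrip()))

try:
    shortcuts.GermlineCaseSheet(
        sheet=io_tsv.read_germline_tsv_sheet(tsv), join_by_field='familyId')
except InconsistentPedigreeException as err:
    print(err)  # index1's row disagrees with the familyId-based join
```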
bihealth/biomedsheets
cfc01e0fd7ad5f2df454b7715bf8b297dd40c6a2
diff --git a/tests/test_io_tsv_germline.py b/tests/test_io_tsv_germline.py index 40e48c6..ace0084 100644 --- a/tests/test_io_tsv_germline.py +++ b/tests/test_io_tsv_germline.py @@ -45,6 +45,18 @@ def tsv_sheet_germline_no_header(): return f [email protected] +def tsv_sheet_germline_no_header_hyphened_identifier(): + """Germline TSV sheet without header""" + return io.StringIO(textwrap.dedent(""" + patientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms + 12-345\t12-346\t12-347\tM\tY\tWGS\t12-345\tHP:0009946,HP:0009899 + 12-348\t12-346\t12-347\tM\tN\tWGS\t12-348\t. + 12-346\t.\t.\tM\tN\t.\t.\t. + 12-347\t.\t.\tF\tN\tWGS\t12-347\t. + """.lstrip())) + + @pytest.fixture def tsv_sheet_germline_platform_name(): """Germline TSV sheet with seqPlatform name""" @@ -510,6 +522,232 @@ EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER = r""" } }""".lstrip() +# Expected value for the germline sheet JSON without header and hyphen in identifiers +EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER_HYPHEN = r""" +{ + "identifier": "file://<unknown>", + "title": "Germline Sample Sheet", + "description": "Sample Sheet constructed from germline compact TSV file", + "extraInfoDefs": { + "bioEntity": { + "ncbiTaxon": { + "docs": "Reference to NCBI taxonomy", + "key": "taxon", + "type": "string", + "pattern": "^NCBITaxon_[1-9][0-9]*$" + }, + "fatherPk": { + "docs": "Primary key of mother", + "key": "fatherPk", + "type": "string" + }, + "motherPk": { + "docs": "Primary key of mother", + "key": "motherPk", + "type": "string" + }, + "fatherName": { + "docs": "secondary_id of father, used for construction only", + "key": "fatherName", + "type": "string" + }, + "motherName": { + "key": "motherName", + "docs": "secondary_id of mother, used for construction only", + "type": "string" + }, + "sex": { + "docs": "Biological sex of individual", + "key": "sex", + "type": "enum", + "choices": [ + "male", + "female", + "unknown" + ] + }, + "isAffected": { + "docs": "Flag for marking individiual as (un-)affected", + "key": "isAffected", + "type": "enum", + "choices": [ + "affected", + "unaffected", + "unknown" + ] + }, + "hpoTerms": { + "docs": "HPO terms for individual", + "key": "hpoTerms", + "type": "array", + "entry": "string", + "pattern": "^HPO:[0-9]+$" + } + }, + "bioSample": {}, + "testSample": { + "extractionType": { + "docs": "Describes extracted", + "key": "extractionType", + "type": "enum", + "choices": [ + "DNA", + "RNA", + "other" + ] + } + }, + "ngsLibrary": { + "seqPlatform": { + "docs": "Sequencing platform used", + "key": "kitName", + "type": "enum", + "choices": [ + "Illumina", + "PacBio", + "other" + ] + }, + "libraryType": { + "docs": "Rough classificiation of the library type", + "key": "libraryType", + "type": "enum", + "choices": [ + "Panel-seq", + "WES", + "WGS", + "mRNA-seq", + "tRNA-seq", + "other" + ] + }, + "folderName": { + "docs": "Name of folder with FASTQ files", + "key": "folderName", + "type": "string" + } + } + }, + "bioEntities": { + "12-345": { + "pk": 1, + "extraInfo": { + "fatherName": "12-346", + "motherName": "12-347", + "sex": "male", + "isAffected": "affected", + "hpoTerms": [ + "HP:0009946", + "HP:0009899" + ], + "ncbiTaxon": "NCBITaxon_9606", + "fatherPk": 9, + "motherPk": 10 + }, + "bioSamples": { + "N1": { + "pk": 2, + "extraInfo": {}, + "testSamples": { + "DNA1": { + "pk": 3, + "extraInfo": { + "extractionType": "DNA" + }, + "ngsLibraries": { + "WGS1": { + "pk": 4, + "extraInfo": { + "seqPlatform": "Illumina", + "folderName": "12-345", + "libraryType": "WGS" + } + } + } + 
} + } + } + } + }, + "12-348": { + "pk": 5, + "extraInfo": { + "fatherName": "12-346", + "motherName": "12-347", + "sex": "male", + "isAffected": "unaffected", + "ncbiTaxon": "NCBITaxon_9606", + "fatherPk": 9, + "motherPk": 10 + }, + "bioSamples": { + "N1": { + "pk": 6, + "extraInfo": {}, + "testSamples": { + "DNA1": { + "pk": 7, + "extraInfo": { + "extractionType": "DNA" + }, + "ngsLibraries": { + "WGS1": { + "pk": 8, + "extraInfo": { + "seqPlatform": "Illumina", + "folderName": "12-348", + "libraryType": "WGS" + } + } + } + } + } + } + } + }, + "12-346": { + "pk": 9, + "extraInfo": { + "sex": "male", + "isAffected": "unaffected", + "ncbiTaxon": "NCBITaxon_9606" + }, + "bioSamples": {} + }, + "12-347": { + "pk": 10, + "extraInfo": { + "sex": "female", + "isAffected": "unaffected", + "ncbiTaxon": "NCBITaxon_9606" + }, + "bioSamples": { + "N1": { + "pk": 11, + "extraInfo": {}, + "testSamples": { + "DNA1": { + "pk": 12, + "extraInfo": { + "extractionType": "DNA" + }, + "ngsLibraries": { + "WGS1": { + "pk": 13, + "extraInfo": { + "seqPlatform": "Illumina", + "folderName": "12-347", + "libraryType": "WGS" + } + } + } + } + } + } + } + } + } +}""".lstrip() # Expected value when platform name is given EXPECTED_GERMLINE_SHEET_JSON_PLATFORM_NAME = r""" @@ -673,6 +911,13 @@ def test_read_germline_sheet_no_header(tsv_sheet_germline_no_header): assert EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER == json.dumps( sheet.json_data, indent=' ') +def test_read_germline_sheet_no_header_hyphened_identifiers( + tsv_sheet_germline_no_header_hyphened_identifier +): + sheet = io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_no_header_hyphened_identifier) + assert EXPECTED_GERMLINE_SHEET_JSON_NO_HEADER_HYPHEN == json.dumps( + sheet.json_data, indent=' ') + def test_read_germline_sheet_platform_name(tsv_sheet_germline_platform_name): sheet = io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_platform_name) diff --git a/tests/test_shortcuts_germline.py b/tests/test_shortcuts_germline.py index 67510a6..ecc30f9 100644 --- a/tests/test_shortcuts_germline.py +++ b/tests/test_shortcuts_germline.py @@ -7,7 +7,7 @@ import textwrap import pytest from biomedsheets import io_tsv, naming, shortcuts -from biomedsheets.shortcuts.germline import UndefinedFieldException +from biomedsheets.shortcuts.germline import InconsistentPedigreeException, UndefinedFieldException __author__ = 'Manuel Holtgrewe <[email protected]>' @@ -66,6 +66,31 @@ def tsv_sheet_germline_trio_plus(): """.lstrip())) return f [email protected] +def tsv_sheet_germline_inconsistent_pedigree(): + """Example TSV germline sheet with inconsistent pedigree definition. + + :return: Returns StringIO with sample sheet for with inconsistent pedigree definition: + family identifier and row information don't agree. + """ + return io.StringIO(textwrap.dedent(""" + [Metadata] + schema\tgermline_variants + schema_version\tv1 + title\tExample germline study + description\tSimple study with one trio plus + + [Custom Fields] + key\tannotatedEntity\tdocs\ttype\tminimum\tmaximum\tunit\tchoices\tpattern + familyId\tbioEntity\tFamily\tstring\t.\t.\t.\t.\t. + + [Data] + familyId\tpatientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms + family1\tindex1\tfather1\tmother1\tM\tY\tWES\tindex1\t. + family2\tfather1\t0\t0\tM\tN\tWES\tfather1\t. + family3\tmother1\t0\t0\tM\tN\tWES\tmother1\t. 
+ """.lstrip())) + @pytest.fixture def sheet_germline_trio_plus(tsv_sheet_germline_trio_plus): @@ -247,7 +272,7 @@ def test_undefined_field_exception(): def test_sheet_germline_trio_plus_exception(tsv_sheet_germline_trio_plus): """Tests UndefinedFieldException raise while creating GermlineCaseSheet""" - with pytest.raises(Exception): + with pytest.raises(UndefinedFieldException): shortcuts.GermlineCaseSheet( sheet=io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_trio_plus), join_by_field='undefined_field' @@ -388,3 +413,21 @@ def test_cohorts(sheet_germline): assert set(cohort.secondary_id_to_donor) == {'index1', 'father1', 'mother2', 'index2', 'mother1', 'father2'} assert cohort.member_count == 6 assert cohort.pedigree_count == 2 + + +def test_sheet_germline_inconsistent_pedigree( + tsv_sheet_germline_inconsistent_pedigree, + tsv_sheet_germline_trio_plus, +): + """Tests Germline sheet for sheet with conflict information for joint field and row.""" + # Sanity check + shortcuts.GermlineCaseSheet( + sheet=io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_trio_plus), + join_by_field='familyId' + ) + # Expect error as each member of the pedigree has its own `familyId` instead of a common one + with pytest.raises(InconsistentPedigreeException): + shortcuts.GermlineCaseSheet( + sheet=io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_inconsistent_pedigree), + join_by_field='familyId' + )
Cryptic error message if sample sheet inconsistent

**Issue**
An inconsistency in the sample sheet leads to a cryptic error message while running `variant_calling`:
```
Error in rule variant_calling_write_pedigree_run:
    jobid: 0
    output: work/write_pedigree.16_0933_WGS-N1-DNA1-WGS1/out/16_0933_WGS-N1-DNA1-WGS1.ped
RuleException:
KeyError in line 67 of /path/to/snappy-pipeline/snappy_pipeline/workflows/variant_calling/Snakefile:
'9'
  File "/path/to/snappy-pipeline/snappy_pipeline/workflows/variant_calling/Snakefile", line 67, in __rule_variant_calling_write_pedigree_run
  File "/path/to/snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 732, in substep_dispatch
  File "/path/to/snappy-pipeline/snappy_pipeline/workflows/abstract/__init__.py", line 203, in run
  File "/path/to/miniconda/lib/python3.8/site-packages/biomedsheets/shortcuts/germline.py", line 198, in write_pedigree_to_ped
  File "/path/to/miniconda/lib/python3.8/site-packages/biomedsheets/shortcuts/germline.py", line 171, in _append_pedigree_to_ped
  File "/path/to/miniconda/lib/python3.8/concurrent/futures/thread.py", line 57, in run
```

**To Reproduce**
Steps to reproduce the behavior:
- Assign the pedigree based on a custom sample sheet column (_e.g._, 'familyId') in the config:
```
...
data_sets:
  test_cohort:
    sodar_uuid: ...
    file: 2022_test_cohort.tsv
    type: germline_variants
    pedigree_field: familyId
...
```
- Include conflicts in the pedigree:
```
...
[Data]
familyId  patientName  fatherName  motherName  sex  isAffected  libraryType  folderName  batchNo  hpoTerms  projectUuid  seqPlatform  libraryKit
FAM_P001  P001  P003  P002  F  Y  WGS  99999  5  .  ...  ILLUMINA  Illumina TruSeq PCR-free
FAM_P002  P002  0  0  F  N  WGS  99998  5  .  ...  ILLUMINA  Illumina TruSeq PCR-free
FAM_P003  P003  0  0  M  N  WGS  99997  5  .  ...  ILLUMINA  Illumina TruSeq PCR-free
```

**Possible Solution**
Implement a consistency check with an informative error message. The workflow shouldn't start.
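The merged fix introduces `InconsistentPedigreeException` in `biomedsheets.shortcuts.germline`. As a rough illustration of the requested check, here is a minimal sketch assuming a plain list of row dicts keyed like the TSV columns above; `check_pedigree_consistency` and its signature are hypothetical stand-ins, not the biomedsheets API:
```
from typing import Dict, List

def check_pedigree_consistency(records: List[Dict], join_by_field: str = 'familyId'):
    """Fail early, with a readable message, when a parent referenced by a row
    does not share the row's join field value (e.g. familyId)."""
    by_name = {r['patientName']: r for r in records}
    for row in records:
        for parent_key in ('fatherName', 'motherName'):
            parent = row.get(parent_key)
            if parent in (None, '0', '.'):
                continue  # parent not part of the study, nothing to check
            parent_row = by_name.get(parent)
            if parent_row is None or parent_row[join_by_field] != row[join_by_field]:
                raise ValueError(
                    f"Inconsistent pedigree for {row['patientName']!r}: "
                    f"{parent_key}={parent!r} does not share "
                    f"{join_by_field}={row[join_by_field]!r}")
```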
0.0
cfc01e0fd7ad5f2df454b7715bf8b297dd40c6a2
[ "tests/test_io_tsv_germline.py::test_read_germline_sheet_header", "tests/test_io_tsv_germline.py::test_read_germline_sheet_no_header", "tests/test_io_tsv_germline.py::test_read_germline_sheet_no_header_hyphened_identifiers", "tests/test_io_tsv_germline.py::test_read_germline_sheet_platform_name", "tests/test_io_tsv_germline.py::test_read_tumor_json_header", "tests/test_io_tsv_germline.py::test_read_tumor_json_no_header", "tests/test_io_tsv_germline.py::test_read_tumor_json_platform_name", "tests/test_shortcuts_germline.py::test_germline_case_sheet", "tests/test_shortcuts_germline.py::test_undefined_field_exception", "tests/test_shortcuts_germline.py::test_sheet_germline_trio_plus_exception", "tests/test_shortcuts_germline.py::test_germline_case_sheet_trio_plus", "tests/test_shortcuts_germline.py::test_germline_case_sheet_multiple_trio_plus", "tests/test_shortcuts_germline.py::test_germline_case_sheet_two_libs", "tests/test_shortcuts_germline.py::test_germline_case_sheet_two_bio_samples", "tests/test_shortcuts_germline.py::test_germline_case_sheet_two_test_samples", "tests/test_shortcuts_germline.py::test_germline_case_sheet_only_parent_samples", "tests/test_shortcuts_germline.py::test_germline_donor", "tests/test_shortcuts_germline.py::test_pedigree", "tests/test_shortcuts_germline.py::test_cohorts", "tests/test_shortcuts_germline.py::test_sheet_germline_inconsistent_pedigree" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-08-15 16:25:35+00:00
mit
1,378
bihealth__biomedsheets-27
diff --git a/HISTORY.rst b/HISTORY.rst index 3958c1e..31e84e1 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -2,6 +2,13 @@ BioMed Sheets Changelog ======================= + +----------------- +HEAD (unreleased) +----------------- + +- Hotfix: fixed consistency check for Duo Cases (#26) + ------- v0.11.4 ------- diff --git a/biomedsheets/shortcuts/germline.py b/biomedsheets/shortcuts/germline.py index 67c5367..de3d9ac 100644 --- a/biomedsheets/shortcuts/germline.py +++ b/biomedsheets/shortcuts/germline.py @@ -354,7 +354,7 @@ class CohortBuilder: if donor.mother_pk: donor._mother = cohort.pk_to_donor[int(donor.mother_pk)] # Consistent check - it shouldn't be 'None' if pedigree correctly joint - if not pedigree.pk_to_donor.get(donor.father_pk, None): + if not pedigree.pk_to_donor.get(donor.mother_pk, None): raise InconsistentPedigreeException(error_msg.format( id_=donor.bio_entity.secondary_id, join_by_field=self.join_by_field) )
bihealth/biomedsheets
5c04b2d12232e6dd1c8998ef9b88419978524771
diff --git a/tests/test_shortcuts_germline.py b/tests/test_shortcuts_germline.py index ecc30f9..30022c9 100644 --- a/tests/test_shortcuts_germline.py +++ b/tests/test_shortcuts_germline.py @@ -66,6 +66,52 @@ def tsv_sheet_germline_trio_plus(): """.lstrip())) return f [email protected] +def tsv_sheet_germline_duo_w_mother(): + """Example TSV germline sheet with duo - mother present. + + :return: Returns StringIO with sample sheet for duo: index, mother. + """ + return io.StringIO(textwrap.dedent(""" + [Metadata] + schema\tgermline_variants + schema_version\tv1 + title\tExample germline study + description\tSimple study with one trio plus + + [Custom Fields] + key\tannotatedEntity\tdocs\ttype\tminimum\tmaximum\tunit\tchoices\tpattern + familyId\tbioEntity\tFamily\tstring\t.\t.\t.\t.\t. + + [Data] + familyId\tpatientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms + family1\tmother1\t0\t0\tM\tN\tWES\tmother1\t. + family1\tindex1\t0\tmother1\tM\tY\tWES\tindex1\t. + """.lstrip())) + [email protected] +def tsv_sheet_germline_duo_w_father(): + """Example TSV germline sheet with duo - father present. + + :return: Returns StringIO with sample sheet for duo: index, father. + """ + return io.StringIO(textwrap.dedent(""" + [Metadata] + schema\tgermline_variants + schema_version\tv1 + title\tExample germline study + description\tSimple study with one trio plus + + [Custom Fields] + key\tannotatedEntity\tdocs\ttype\tminimum\tmaximum\tunit\tchoices\tpattern + familyId\tbioEntity\tFamily\tstring\t.\t.\t.\t.\t. + + [Data] + familyId\tpatientName\tfatherName\tmotherName\tsex\tisAffected\tlibraryType\tfolderName\thpoTerms + family1\tfather1\t0\t0\tM\tN\tWES\tfather1\t. + family1\tindex1\tfather1\t0\tM\tY\tWES\tindex1\t. + """.lstrip())) + @pytest.fixture def tsv_sheet_germline_inconsistent_pedigree(): """Example TSV germline sheet with inconsistent pedigree definition. @@ -418,13 +464,25 @@ def test_cohorts(sheet_germline): def test_sheet_germline_inconsistent_pedigree( tsv_sheet_germline_inconsistent_pedigree, tsv_sheet_germline_trio_plus, + tsv_sheet_germline_duo_w_mother, + tsv_sheet_germline_duo_w_father, + ): """Tests Germline sheet for sheet with conflict information for joint field and row.""" - # Sanity check + # Sanity checks shortcuts.GermlineCaseSheet( sheet=io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_trio_plus), join_by_field='familyId' ) + shortcuts.GermlineCaseSheet( + sheet=io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_duo_w_mother), + join_by_field='familyId' + ) + shortcuts.GermlineCaseSheet( + sheet=io_tsv.read_germline_tsv_sheet(tsv_sheet_germline_duo_w_father), + join_by_field='familyId' + ) + # Expect error as each member of the pedigree has its own `familyId` instead of a common one with pytest.raises(InconsistentPedigreeException): shortcuts.GermlineCaseSheet(
InconsistentPedigreeException raised if father is missing

**Describe the bug**
In the case of incomplete families, InconsistentPedigreeException may be raised incorrectly.
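The one-line patch above shows the cause: while resolving the mother link, the consistency check looked up `donor.father_pk`, so duos where only the mother is present raised incorrectly. A hedged sketch of the symmetric check after the fix; `assert_parents_in_pedigree` and the `donor`/`pedigree` shapes are illustrative stand-ins for the objects in `biomedsheets.shortcuts.germline.CohortBuilder`:
```
from biomedsheets.shortcuts.germline import InconsistentPedigreeException

def assert_parents_in_pedigree(donor, pedigree):
    """Each recorded parent pk must resolve inside the already-joined pedigree."""
    for parent_attr in ('father_pk', 'mother_pk'):
        parent_pk = getattr(donor, parent_attr, None)
        if parent_pk and not pedigree.pk_to_donor.get(parent_pk):
            raise InconsistentPedigreeException(
                f"{parent_attr}={parent_pk!r} of donor "
                f"{donor.bio_entity.secondary_id!r} is not in the joined pedigree")
```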
0.0
5c04b2d12232e6dd1c8998ef9b88419978524771
[ "tests/test_shortcuts_germline.py::test_sheet_germline_inconsistent_pedigree" ]
[ "tests/test_shortcuts_germline.py::test_germline_case_sheet", "tests/test_shortcuts_germline.py::test_undefined_field_exception", "tests/test_shortcuts_germline.py::test_sheet_germline_trio_plus_exception", "tests/test_shortcuts_germline.py::test_germline_case_sheet_trio_plus", "tests/test_shortcuts_germline.py::test_germline_case_sheet_multiple_trio_plus", "tests/test_shortcuts_germline.py::test_germline_case_sheet_two_libs", "tests/test_shortcuts_germline.py::test_germline_case_sheet_two_bio_samples", "tests/test_shortcuts_germline.py::test_germline_case_sheet_two_test_samples", "tests/test_shortcuts_germline.py::test_germline_case_sheet_only_parent_samples", "tests/test_shortcuts_germline.py::test_germline_donor", "tests/test_shortcuts_germline.py::test_pedigree", "tests/test_shortcuts_germline.py::test_cohorts" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-09-12 14:58:29+00:00
mit
1,379
bimpression__sosw-53
diff --git a/sosw/components/helpers.py b/sosw/components/helpers.py index 8d90320..76aef77 100644 --- a/sosw/components/helpers.py +++ b/sosw/components/helpers.py @@ -25,6 +25,7 @@ __all__ = ['validate_account_to_dashed', 'validate_list_of_words_from_csv_or_list', 'first_or_none', 'recursive_update', + 'trim_arn_to_name', ] import re @@ -668,3 +669,22 @@ def recursive_update(d: Dict, u: Mapping) -> Dict: new[k] = v return new + + +def trim_arn_to_name(arn: str) -> str: + """ + Extract just the name of function from full ARN. Supports versions, aliases or raw name (without ARN). + + More information about ARN Format: + https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns + """ + + # Special handling for super global services (e.g. S3 buckets) + if arn.count(':') < 6 and '/' not in arn: + return arn.split(':')[-1] + + # Seems a little messy, but passes more/less any test of different ARNs we tried. + pattern = "(arn:aws:[0-9a-zA-Z-]{2,20}:[0-9a-zA-Z-]{0,12}:[0-9]{12}:[0-9a-zA-Z-]{2,20}[:/])?" \ + "(?P<name>[0-9a-zA-Z_=,.@-]*)(:)?([0-9a-zA-Z$]*)?" + + return re.search(pattern, arn).group('name') diff --git a/sosw/scheduler.py b/sosw/scheduler.py index 98074a2..2756d89 100644 --- a/sosw/scheduler.py +++ b/sosw/scheduler.py @@ -22,7 +22,7 @@ from copy import deepcopy from typing import List, Set, Tuple, Union, Optional, Dict from sosw.app import Processor -from sosw.components.helpers import get_list_of_multiple_or_one_or_empty_from_dict +from sosw.components.helpers import get_list_of_multiple_or_one_or_empty_from_dict, trim_arn_to_name from sosw.labourer import Labourer from sosw.managers.task import TaskManager @@ -331,20 +331,11 @@ class Scheduler(Processor): job = load(jh['job']) if 'job' in jh else jh assert 'lambda_name' in job, f"Job is missing required parameter 'lambda_name': {job}" - job['lambda_name'] = job['lambda_name'] + job['lambda_name'] = trim_arn_to_name(job['lambda_name']) return job - def get_name_from_arn(self, arn): - """ Extract just the name of function from full ARN. Supports versions, aliases or raw name (without ARN). """ - - pattern = "(arn:aws:lambda:[0-9a-zA-Z-]{6,12}:[0-9]{12}:function:)?" \ - "(?P<name>[0-9a-zA-Z_=,.@-]*)(:)?([0-9a-zA-Z$]*)?" - - return re.search(pattern, arn).group('name') - - def process_file(self): file_name = self.get_and_lock_queue_file()
bimpression/sosw
d9e652977ca6564f366fcc3ab632ede547aeba94
diff --git a/sosw/components/test/unit/test_helpers.py b/sosw/components/test/unit/test_helpers.py index 2949462..4563593 100644 --- a/sosw/components/test/unit/test_helpers.py +++ b/sosw/components/test/unit/test_helpers.py @@ -545,5 +545,26 @@ class helpers_UnitTestCase(unittest.TestCase): self.assertIsNone(recursive_update(a, b)['b']) + def test_trim_arn_to_name(self): + + TESTS = [ + ('bar_with_no_arn', 'bar_with_no_arn'), + ('arn:aws:lambda:us-west-2:000000000000:function:bar', 'bar'), + ('arn:aws:lambda:us-west-2:000000000000:function:bar:', 'bar'), + ('arn:aws:lambda:us-west-2:000000000000:function:bar:$LATEST', 'bar'), + ('arn:aws:lambda:us-west-2:000000000000:function:bar:12', 'bar'), + ('arn:aws:lambda:us-west-2:000000000000:function:bar:12', 'bar'), + ('arn:aws:s3:::autotest-sosw', 'autotest-sosw'), + ('arn:aws:iam::000000000000:role/aws-code-deploy-role', 'aws-code-deploy-role'), + ('arn:aws:rds:us-west-2:000000000000:cluster:aws-cluster-01', 'aws-cluster-01'), + ('arn:aws:rds:us-west-2:000000000000:db:aws-01-00', 'aws-01-00'), + ('arn:aws:events:us-west-2:123456000000:rule/aws-sr-01', 'aws-sr-01'), + ('arn:aws:dynamodb:us-west-2:123456000321:table/sosw_tasks', 'sosw_tasks'), + ] + + for test, expected in TESTS: + self.assertEqual(trim_arn_to_name(test), expected) + + if __name__ == '__main__': unittest.main() diff --git a/sosw/test/unit/test_scheduler.py b/sosw/test/unit/test_scheduler.py index f590877..11c1479 100644 --- a/sosw/test/unit/test_scheduler.py +++ b/sosw/test/unit/test_scheduler.py @@ -211,6 +211,8 @@ class Scheduler_UnitTestCase(unittest.TestCase): TESTS = [ ({'job': {'lambda_name': 'foo', 'payload_attr': 'val'}}, {'lambda_name': 'foo', 'payload_attr': 'val'}), ({'lambda_name': 'foo', 'payload_attr': 'val'}, {'lambda_name': 'foo', 'payload_attr': 'val'}), + ({'lambda_name': 'arn:aws:lambda:us-west-2:000000000000:function:foo', 'payload_attr': 'val'}, + {'lambda_name': 'foo', 'payload_attr': 'val'}), ({'job': {'lambda_name': 'foo', 'payload_attr': 'val'}}, {'lambda_name': 'foo', 'payload_attr': 'val'}), # JSONs @@ -239,20 +241,6 @@ class Scheduler_UnitTestCase(unittest.TestCase): self.assertRaises(Exception, self.scheduler.extract_job_from_payload, test) - def test_get_name_from_arn(self): - - TESTS = [ - ('bar_with_no_arn', 'bar_with_no_arn'), - ('arn:aws:lambda:us-west-2:000000000000:function:bar', 'bar'), - ('arn:aws:lambda:us-west-2:000000000000:function:bar:', 'bar'), - ('arn:aws:lambda:us-west-2:000000000000:function:bar:$LATEST', 'bar'), - ('arn:aws:lambda:us-west-2:000000000000:function:bar:12', 'bar'), - ] - - for test, expected in TESTS: - self.assertEqual(self.scheduler.get_name_from_arn(test), expected) - - def test_needs_chunking__isolate_root(self): pl = deepcopy(self.PAYLOAD)
Scheduler: allow lambda_name in job to be ARN

If the `lambda_name` passed in the job (aka Labourer.id) is a full ARN, trim it with a regex to leave just the name.
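Since the patch for this issue promotes the trimming into `sosw.components.helpers.trim_arn_to_name`, a short usage sketch follows; the expected values are taken from the unit tests above:
```
from sosw.components.helpers import trim_arn_to_name

# Raw names pass through untouched; Lambda ARNs lose region, account,
# and version/alias suffixes.
assert trim_arn_to_name('bar_with_no_arn') == 'bar_with_no_arn'
assert trim_arn_to_name('arn:aws:lambda:us-west-2:000000000000:function:bar') == 'bar'
assert trim_arn_to_name('arn:aws:lambda:us-west-2:000000000000:function:bar:$LATEST') == 'bar'

# The helper also handles other services, e.g. DynamoDB tables and IAM roles.
assert trim_arn_to_name('arn:aws:dynamodb:us-west-2:123456000321:table/sosw_tasks') == 'sosw_tasks'
assert trim_arn_to_name('arn:aws:iam::000000000000:role/aws-code-deploy-role') == 'aws-code-deploy-role'
```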
0.0
d9e652977ca6564f366fcc3ab632ede547aeba94
[ "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_trim_arn_to_name", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_extract_job_from_payload" ]
[ "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_chunks", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__conflict_of_attributes", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__missing_attributes", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__ok", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_convert_string_to_words", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_first_or_none", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_list_of_multiple_or_one_or_empty_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_one_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_one_or_none_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_negative_validate_date_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_match_extract", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_matches_soft", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_matches_strict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update_2", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update__does_overwrite_with_none", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update__inserts_new_keys", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_rstrip_all", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_account_to_dashed_invalid", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_account_to_dashed_valid", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_date_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_date_list_from_event_or_days_back", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_datetime_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_numbers_from_csv", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_words_from_csv_or_list__ok", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_words_from_csv_or_list__raises", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_string_matches_datetime_format", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__local_queue_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__queue_bucket", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__remote_queue_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__remote_queue_locked_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_call__sample", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__not_chunkable_config", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__not_raises__notchunkable__if_no_isolation", 
"sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__raises__unsupported_vals__list_not_as_value", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__raises__unsupported_vals__string", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__raises_unchunkable_subtask", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__unchunckable_preserve_custom_attrs", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_extract_job_from_payload_raises", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_and_lock_queue_file__local_file_exists", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_and_lock_queue_file__s3_calls", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_index_from_list", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_next_chunkable_attr", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_init__chunkable_attrs_not_end_with_s", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_root", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_subdata", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_subdata_deep", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_parse_job_to_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_parse_job_to_file__multiple_rows", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file__missing_or_empty_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file__reads_from_top", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_process_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_validate_list_of_vals" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-04-09 13:09:16+00:00
mit
1,380
bimpression__sosw-88
diff --git a/sosw/managers/ecology.py b/sosw/managers/ecology.py index dbe0fb5..14ff1e8 100644 --- a/sosw/managers/ecology.py +++ b/sosw/managers/ecology.py @@ -31,8 +31,7 @@ ECO_STATUSES = ( class EcologyManager(Processor): - DEFAULT_CONFIG = { - } + DEFAULT_CONFIG = {} running_tasks = defaultdict(int) task_client: TaskManager = None # Will be Circular import! Careful! @@ -124,11 +123,10 @@ class EcologyManager(Processor): def get_max_labourer_duration(self, labourer: Labourer) -> int: """ Maximum duration of `labourer` executions. - Should ask this from aws:lambda API, but at the moment use the hardcoded maximum. - # TODO implement me. """ - return 900 + resp = self.task_client.lambda_client.get_function_configuration(FunctionName=labourer.arn) + return resp['Timeout'] # The task_client of EcologyManager is just a pointer. We skip recursive stats to avoid infinite loop.
bimpression/sosw
ad7dc503c3482b3455e50d647553c2765d35e85f
diff --git a/sosw/managers/test/unit/test_ecology.py b/sosw/managers/test/unit/test_ecology.py index 7637e2d..90f552f 100644 --- a/sosw/managers/test/unit/test_ecology.py +++ b/sosw/managers/test/unit/test_ecology.py @@ -109,3 +109,10 @@ class ecology_manager_UnitTestCase(unittest.TestCase): # But the counter of tasks in cache should have. self.assertEqual(self.manager.running_tasks[self.LABOURER.id], tm.get_count_of_running_tasks_for_labourer.return_value + 1 + 5) + + + def test_get_max_labourer_duration(self): + self.manager.task_client = MagicMock() + self.manager.task_client.lambda_client.get_function_configuration.return_value = {'Timeout': 300} + + self.assertEqual(self.manager.get_max_labourer_duration(self.LABOURER), 300)
Labourer: get_max_labourer_duration

Implement me. Should receive this information from the AWS API, probably with DescribeFunction, and cache this data in the Ecology Manager.
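The merged patch answers this with a call to the Lambda API (`get_function_configuration` is the boto3 counterpart of the DescribeFunction the issue mentions). A minimal standalone sketch, leaving out the caching the issue asks for:
```
import boto3

def get_max_labourer_duration(function_arn: str) -> int:
    """Return the configured Timeout (in seconds) of a Lambda function."""
    lambda_client = boto3.client('lambda')
    response = lambda_client.get_function_configuration(FunctionName=function_arn)
    return response['Timeout']
```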
0.0
ad7dc503c3482b3455e50d647553c2765d35e85f
[ "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_get_max_labourer_duration" ]
[ "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_add_running_tasks_for_labourer", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_count_running_tasks_for_labourer__calls_task_manager", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_count_running_tasks_for_labourer__raises_not_task_client", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_count_running_tasks_for_labourer__use_local_cache", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_eco_statuses", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_register_task_manager__resets_stats" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2019-05-07 19:08:08+00:00
mit
1,381
bimpression__sosw-90
diff --git a/sosw/scheduler.py b/sosw/scheduler.py index 5ba5ae0..33234a3 100644 --- a/sosw/scheduler.py +++ b/sosw/scheduler.py @@ -200,6 +200,23 @@ class Scheduler(Processor): return [str(today - datetime.timedelta(days=x)) for x in range(num, 0, -1)] + def previous_x_days(self, pattern: str) -> List[str]: + """ + Returns a list of string dates from today - x - x + + For example, consider today's date as 2019-04-30. + If I call for previous_x_days(pattern='previous_2_days'), I will receive a list of string dates equal to: + ['2019-04-26', '2019-04-27'] + """ + assert re.match('previous_[0-9]+_days', pattern) is not None, "Invalid pattern {pattern} for `previous_x_days()`" + + num = int(pattern.split('_')[1]) + today = datetime.date.today() + end_date = today - datetime.timedelta(days=num) + + return [str(end_date - datetime.timedelta(days=x)) for x in range(num, 0, -1)] + + def x_days_back(self, pattern: str) -> List[str]: """ Finds the exact date X days back from now. @@ -216,6 +233,38 @@ class Scheduler(Processor): return [str(today - datetime.timedelta(days=num))] + def yesterday(self, pattern: str = 'yesterday') -> List[str]: + """ + Simple wrapper for x_days_back() to return yesterday's date. + """ + assert re.match('yesterday', pattern) is not None, "Invalid pattern {pattern} for `yesterday()`" + return self.x_days_back('1_days_back') + + + def today(self, pattern: str = 'today') -> List[str]: + """ + Returns list with one datetime string (YYYY-MM-DD) equal to today's date. + """ + assert re.match('today', pattern) is not None, "Invalid pattern {pattern} for `today()`" + return [str(datetime.date.today())] + + + def last_week(self, pattern: str = 'last_week') -> List[str]: + """ + Returns list of dates (YYYY-MM-DD) as strings for last week (Sunday - Saturday) + :param pattern: + :return: + """ + assert re.match('last_week', pattern) is not None, "Invalid pattern {pattern} for `last_week()`" + + today = datetime.date.today() + end_date = today - datetime.timedelta(days=today.weekday() + 8) + + return [str(end_date + datetime.timedelta(days=x)) for x in range(7)] + + + + def chunk_dates(self, job: Dict, skeleton: Dict = None) -> List[Dict]: """ There is a support for multiple not nested parameters to chunk. Dates is one very specific of them. @@ -228,7 +277,7 @@ class Scheduler(Processor): period = job.pop('period', None) isolate = job.pop('isolate_days', None) - PERIOD_KEYS = ['last_[0-9]+_days', '[0-9]+_days_back'] # , 'yesterday'] + PERIOD_KEYS = ['last_[0-9]+_days', '[0-9]+_days_back', 'yesterday', 'today', 'previous_[0-9]+_days', 'last_week'] if period: @@ -242,7 +291,7 @@ class Scheduler(Processor): break else: raise ValueError(f"Unsupported period requested: {period}. Valid options are: " - f"'last_X_days', 'X_days_back'") + f"'last_X_days', 'X_days_back', 'yesterday', 'today', 'previous_[0-9]+_days', 'last_week'") if isolate: assert len(date_list) > 0, f"The chunking period: {period} did not generate date_list. Bad."
bimpression/sosw
eb2ce2b4609c8ad1e6d7742d634f80bc7f91f3ac
diff --git a/sosw/test/unit/test_scheduler.py b/sosw/test/unit/test_scheduler.py index baf0701..25fec9e 100644 --- a/sosw/test/unit/test_scheduler.py +++ b/sosw/test/unit/test_scheduler.py @@ -275,6 +275,32 @@ class Scheduler_UnitTestCase(unittest.TestCase): ### Tests of chunk_dates ### + def test_chunk_dates(self): + TESTS = [ + ({'period': 'today'}, 'today'), + ({'period': 'yesterday'}, 'yesterday'), + ({'period': 'last_3_days'}, 'last_x_days'), + ({'period': '10_days_back'}, 'x_days_back'), + ({'period': 'previous_2_days'}, 'previous_x_days'), + ({'period': 'last_week'}, 'last_week') + ] + + for test, func_name in TESTS: + FUNCTIONS = ['today', 'yesterday', 'last_x_days', 'x_days_back', 'previous_x_days', 'last_week'] + for f in FUNCTIONS: + setattr(self.scheduler, f, MagicMock()) + + self.scheduler.chunk_dates(test) + + func = getattr(self.scheduler, func_name) + func.assert_called_once() + + for bad_f_name in [x for x in FUNCTIONS if not x == func_name]: + bad_f = getattr(self.scheduler, bad_f_name) + bad_f.assert_not_called() + + + def test_chunk_dates__preserve_skeleton(self): TESTS = [ {'period': 'last_1_days', 'a': 'foo'}, @@ -384,6 +410,66 @@ class Scheduler_UnitTestCase(unittest.TestCase): self.assertEqual(today.weekday(), datetime.datetime.strptime(last_week, '%Y-%m-%d').weekday()) + def test_yesterday(self): + + TESTS = [ + ('yesterday', ['2019-04-10']), + ] + + today = datetime.date(2019, 4, 11) + + with patch('sosw.scheduler.datetime.date') as mdt: + mdt.today.return_value = today + + for test, expected in TESTS: + self.assertEqual(self.scheduler.yesterday(test), expected) + + def test_today(self): + TESTS = [ + ('today', ['2019-04-10']), + ] + today = datetime.date(2019, 4, 10) + + with patch('sosw.scheduler.datetime.date') as mdt: + mdt.today.return_value = today + + for test, expected in TESTS: + self.assertEqual(self.scheduler.today(test), expected) + + def test_previous_x_days(self): + today = datetime.date(2019, 4, 30) + + TESTS = [ + ('previous_2_days', ['2019-04-26', '2019-04-27']), + ('previous_3_days', ['2019-04-24', '2019-04-25', '2019-04-26']) + ] + + with patch('sosw.scheduler.datetime.date') as mdt: + mdt.today.return_value = today + + for test, expected in TESTS: + self.assertEqual(self.scheduler.previous_x_days(test), expected) + + def test_last_week(self): + today = datetime.date(2019, 4, 30) + + TESTS = [ + ('last_week', ['2019-04-21', + '2019-04-22', + '2019-04-23', + '2019-04-24', + '2019-04-25', + '2019-04-26', + '2019-04-27']) + ] + + with patch('sosw.scheduler.datetime.date') as mdt: + mdt.today.return_value = today + + for test, expected in TESTS: + self.assertEqual(self.scheduler.last_week(test), expected) + + ### Tests of chunk_job ### def test_chunk_job__not_chunkable_config(self): self.scheduler.chunkable_attrs = []
Scheduler: chunk_days() - more patterns

Support more generic patterns for chunking, e.g.:
- previous_X_days
- last_week
- previous_week
- yesterday
- today
- smart_X_of_Y_days_back (recent days more often, and days that were long ago, up to `Y`, less and less often; the total number of days should be `X`)
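A standalone sketch of two of the requested patterns, mirroring the semantics the merged patch implements (`previous_2_days` yields the two-day window that precedes the current two-day window; `smart_X_of_Y_days_back` is not sketched here):
```
import datetime

def previous_x_days(num: int, today: datetime.date = None) -> list:
    """Dates of the num-day window preceding the current num-day window."""
    today = today or datetime.date.today()
    end_date = today - datetime.timedelta(days=num)
    return [str(end_date - datetime.timedelta(days=x)) for x in range(num, 0, -1)]

def last_week(today: datetime.date = None) -> list:
    """Sunday through Saturday of the previous full week."""
    today = today or datetime.date.today()
    start = today - datetime.timedelta(days=today.weekday() + 8)
    return [str(start + datetime.timedelta(days=x)) for x in range(7)]

# Values match the unit tests of the patch above (2019-04-30 was a Tuesday).
assert previous_x_days(2, datetime.date(2019, 4, 30)) == ['2019-04-26', '2019-04-27']
assert last_week(datetime.date(2019, 4, 30))[0] == '2019-04-21'
```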
0.0
eb2ce2b4609c8ad1e6d7742d634f80bc7f91f3ac
[ "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_last_week", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_previous_x_days", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_today", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_yesterday" ]
[ "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__queue_bucket", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__remote_queue_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__remote_queue_locked_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_call__sample", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__last_x_days", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__pops_period", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__preserve_skeleton", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__preserve_skeleton__if_no_chunking", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__raises_invalid_period_pattern", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__not_chunkable_config", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__not_raises__notchunkable__if_no_isolation", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__not_raises_unchunkable_subtask__but_preserves_in_payload", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__raises__unsupported_vals__list_not_as_value", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__raises__unsupported_vals__string", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__unchunckable_preserve_custom_attrs", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__empty_job", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__preserve_skeleton_through_chunkers", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__real_payload__for_debuging_logs", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_extract_job_from_payload", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_extract_job_from_payload_raises", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_and_lock_queue_file__local_file_exists", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_and_lock_queue_file__s3_calls", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_index_from_list", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_next_chunkable_attr", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_init__chunkable_attrs_not_end_with_s", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_last_x_days", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_root", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_subdata", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_subdata_deep", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_parse_job_to_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_parse_job_to_file__multiple_rows", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file__missing_or_empty_file", 
"sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file__reads_from_top", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_process_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_validate_list_of_vals", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_x_days_back" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-05-08 19:55:52+00:00
mit
1,382
bimpression__sosw-93
diff --git a/README.md b/README.md index b3a2627..4145fa5 100644 --- a/README.md +++ b/README.md @@ -34,18 +34,24 @@ $ pytest ./sosw/test/suite_3_6_unit.py ### Contribution Guidelines #### Release cycle -- Master branch commits are automatically packaged and published to PyPI. -- Branches for staging versions follow the pattern: `X_X_X` -- Make your pull requests to the staging branch with highest number -- Latest documentation is compiled from branch `docme`. It should be up to date with latest **staging** branch, not the master. Make PRs with documentation change directly to `docme`. +- We follow both [Semantic Versioning](https://semver.org/) pattern + and [PEP440](https://www.python.org/dev/peps/pep-0440/) recommendations where comply +- Master branch commits (merges) are automatically packaged and published to PyPI. +- Branches for planned staging versions follow the pattern: `X_Y_Z` (Major.Minor.Micro) +- Make your pull requests to the latest staging branch (with highest number) +- Latest documentation is compiled from branch `docme`. + It should be up to date with latest **staging** branch, not the master. + Make PRs with documentation change directly to `docme`. #### Code formatting -Follow [PEP8](https://www.python.org/dev/peps/pep-0008/), but both classes and functions are padded with 2 empty lines. +Follow [PEP8](https://www.python.org/dev/peps/pep-0008/), but: +- both classes and functions are padded with 2 empty lines +- dictionaries are value-alligned #### Initialization 1. Fork the repository: https://github.com/bimpression/sosw 2. Register Account in AWS: [SignUp](https://portal.aws.amazon.com/billing/signup#/start) -3. Run `pipenv sync –dev`to setup your virtual environment and download the required dependencies +3. Run `pipenv sync –dev` to setup your virtual environment and download the required dependencies 4. Create DynamoDB Tables: - You can find the CloudFormation template for the databases [in the example](https://raw.githubusercontent.com/bimpression/sosw/docme/docs/yaml/sosw-shared-dynamodb.yaml). - If you are not familiar with CloudFormation, we highly recommend at least learning the basics from [the tutorial](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/GettingStarted.Walkthrough.html). diff --git a/docs/orchestrator.rst b/docs/orchestrator.rst index 7ca2af4..22f2c83 100644 --- a/docs/orchestrator.rst +++ b/docs/orchestrator.rst @@ -49,6 +49,31 @@ The following diagram represents the basic Task Workflow initiated by the Orches 'some_function': { 'arn': f"arn:aws:lambda:us-west-2:737060422660:function:some_function", 'max_simultaneous_invocations': 10, + 'health_metrics': { + 'SomeDBCPU': { + 'details': { + 'Name': 'CPUUtilization', + 'Namespace': 'AWS/RDS', + 'Period': 60, + 'Statistics': ['Average'], + 'Dimensions': [ + { + 'Name': 'DBInstanceIdentifier', + 'Value': 'YOUR-DB' + }, + ], + }, + + # These is the mapping of how the Labourer should "feel" about this metric. + # See EcologyManager.ECO_STATUSES. + # This is just a mapping ``ECO_STATUS: value`` using ``feeling_comparison_operator``. + 'feelings': { + 3: 50, + 4: 25, + }, + 'feeling_comparison_operator': '<=' + }, + }, }, }, } diff --git a/sosw/components/helpers.py b/sosw/components/helpers.py index 7de6b05..0f26f79 100644 --- a/sosw/components/helpers.py +++ b/sosw/components/helpers.py @@ -719,17 +719,28 @@ def make_hash(o): Makes a hash from a dictionary, list, tuple or set to any level, that contains only other hashable types (including any lists, tuples, sets, and dictionaries). 
+ + Original idea from this user: https://stackoverflow.com/users/660554/jomido + + Plus some upgrades to work with sets and dicts having different types of keys appropriately. + See source unittests of this function for some more details. """ - if isinstance(o, (set, tuple, list)): + if isinstance(o, (tuple, list)): return tuple([make_hash(e) for e in o]) + # Set should be sorted (by hashes of elements) before returns + elif isinstance(o, set): + return tuple(sorted([make_hash(e) for e in o])) + elif not isinstance(o, dict): return hash(o) - new_o = deepcopy(o) - for k, v in new_o.items(): - new_o[k] = make_hash(v) + # We are left with a dictionary + new_o = dict() + for k, v in o.items(): + # hash both keys and values to make sure types and order doesn't affect. + new_o[make_hash(k)] = make_hash(v) return hash(tuple(frozenset(sorted(new_o.items())))) diff --git a/sosw/labourer.py b/sosw/labourer.py index b9b08dc..09eb93f 100644 --- a/sosw/labourer.py +++ b/sosw/labourer.py @@ -18,8 +18,8 @@ logger.setLevel(logging.INFO) class Labourer: ATTRIBUTES = ('id', 'arn') - CUSTOM_ATTRIBUTES = ('start', 'invoked', 'expired', 'health', 'max_attempts', 'average_duration', 'max_duration', - 'max_simultaneous_invocations', 'arn') + CUSTOM_ATTRIBUTES = ('arn', 'start', 'invoked', 'expired', 'health', 'health_metrics', 'average_duration', + 'max_duration', 'max_attempts', 'max_simultaneous_invocations') id = None arn = None diff --git a/sosw/managers/ecology.py b/sosw/managers/ecology.py index 14ff1e8..007297f 100644 --- a/sosw/managers/ecology.py +++ b/sosw/managers/ecology.py @@ -5,16 +5,19 @@ __version__ = "1.0" import boto3 import json import logging +import operator import os import random import time from collections import defaultdict -from typing import Dict, List, Optional +from collections import OrderedDict +from typing import Dict, List, Optional, Union from sosw.app import Processor from sosw.labourer import Labourer from sosw.components.benchmark import benchmark +from sosw.components.helpers import make_hash from sosw.managers.task import TaskManager @@ -34,7 +37,9 @@ class EcologyManager(Processor): DEFAULT_CONFIG = {} running_tasks = defaultdict(int) + health_metrics: Dict = None task_client: TaskManager = None # Will be Circular import! Careful! + cloudwatch_client: boto3.client = None def __init__(self, *args, **kwargs): @@ -42,7 +47,7 @@ class EcologyManager(Processor): def __call__(self, event): - raise NotImplemented + raise NotImplementedError def register_task_manager(self, task_manager: TaskManager): @@ -60,16 +65,82 @@ class EcologyManager(Processor): logger.info("Reset cache of running_tasks counter in EcologyManager") self.running_tasks = defaultdict(int) + logger.info("Reset cache of health_metrics in EcologyManager") + self.health_metrics = dict() + @property def eco_statuses(self): return [x[0] for x in ECO_STATUSES] + def fetch_metric_stats(self, **kwargs): + + result = self.cloudwatch_client.get_metric_statistics(**kwargs) + + return result + + def get_labourer_status(self, labourer: Labourer) -> int: - """ FIXME """ - return 4 - # return random.choice(self.eco_statuses) + """ + Get the worst (lowest) health status according to preconfigured health metrics of the Labourer. + + .. 
_ECO_STATUSES: + + Current ECO_STATUSES: + + - (0, 'Bad') + - (1, 'Poor') + - (2, 'Moderate') + - (3, 'Good') + - (4, 'High') + """ + + health = max(map(lambda x: x[0], ECO_STATUSES)) + + for health_metric in getattr(labourer, 'health_metrics', dict()).values(): + + metric_hash = make_hash(health_metric['details']) + if metric_hash not in self.health_metrics: + self.health_metrics[metric_hash] = self.fetch_metric_stats(**health_metric['details']) + logger.info(f"Updated the cache of Ecology metric {metric_hash} - {health_metric} " + f"with {self.health_metrics[metric_hash]}") + + value = self.health_metrics[metric_hash] + logger.debug(f"Ecology metric {metric_hash} has {value}") + + health = min(health, self.get_health(value, metric=health_metric)) + + logger.info(f"Ecology health of Labourer {labourer} is {health}") + + return health + + + def get_health(self, value: Union[int, float], metric: Dict) -> int: + """ + Checks the value against the health_metric configuration. + """ + + op = getattr(operator, metric.get('feeling_comparison_operator')) + + # Find the first configured feeling from the map that does not comply. + # Order and validate the feelings + feelings = OrderedDict([(key, metric['feelings'][key]) + for key in sorted(metric['feelings'].keys(), reverse=True)]) + + last_target = 0 + for health, target in feelings.items(): + if op(target, last_target): + raise ValueError(f"Order of values if feelings is invalid and doesn't match expected eco statuses: " + f"{feelings.items()}. Failed: {last_target} not " + f"{metric.get('feeling_comparison_operator')} {target}") + + if op(value, target): + return health + + last_target = target + + return 0 def count_running_tasks_for_labourer(self, labourer: Labourer) -> int: @@ -85,7 +156,8 @@ class EcologyManager(Processor): if labourer.id not in self.running_tasks.keys(): self.running_tasks[labourer.id] = self.task_client.get_count_of_running_tasks_for_labourer(labourer) - logger.debug(f"EcologyManager.count_running_tasks_for_labourer() recalculated cache for Labourer {labourer}") + logger.debug(f"EcologyManager.count_running_tasks_for_labourer() recalculated cache for Labourer " + f"{labourer}") logger.debug(f"EcologyManager.count_running_tasks_for_labourer() returns: {self.running_tasks[labourer.id]}") return self.running_tasks[labourer.id] diff --git a/sosw/managers/task.py b/sosw/managers/task.py index cd19e44..303b6c1 100644 --- a/sosw/managers/task.py +++ b/sosw/managers/task.py @@ -17,6 +17,7 @@ from sosw.app import Processor from sosw.components.benchmark import benchmark from sosw.components.dynamo_db import DynamoDbClient from sosw.components.helpers import first_or_none +# from sosw.managers.ecology import EcologyManager from sosw.labourer import Labourer @@ -38,11 +39,11 @@ class TaskManager(Processor): """ DEFAULT_CONFIG = { - 'init_clients': ['DynamoDb', 'lambda', 'Ecology'], - 'dynamo_db_config': { + 'init_clients': ['DynamoDb', 'lambda', 'Ecology'], + 'dynamo_db_config': { 'table_name': 'sosw_tasks', 'index_greenfield': 'sosw_tasks_greenfield', - 'row_mapper': { + 'row_mapper': { 'task_id': 'S', 'labourer_id': 'S', 'created_at': 'N', @@ -70,9 +71,37 @@ class TaskManager(Processor): 'greenfield_task_step': 1000, 'labourers': { # 'some_function': { - # 'arn': 'arn:aws:lambda:us-west-2:0000000000:function:some_function', + # 'arn': 'arn:aws:lambda:us-west-2:0000000000:function:some_function', # 'max_simultaneous_invocations': 10, - # } + # # Health metrics for this Labourer should be stored in a dictionary. 
+ # 'health_metrics': { + # # Name of the metric is just for human readability (probaly some future GUI interfaces), + # 'SomeDBCPU': { + # # The value must have ``'details'`` as a dict with kwargs for CloudWatch client. + # 'details': { + # 'Name': 'CPUUtilization', + # 'Namespace': 'AWS/RDS', + # 'Period': 60, + # 'Statistics': ['Average'], + # 'Dimensions': [ + # { + # 'Name': 'DBInstanceIdentifier', + # 'Value': 'YOUR-DB' + # }, + # ], + # }, + # + # # These is the mapping of how the Labourer should "feel" about this metric. + # # See EcologyManager.ECO_STATUSES. + # # This is just a mapping ``ECO_STATUS: value`` using ``feeling_comparison_operator``. + # 'feelings': { + # 3: 50, + # 4: 25, + # }, + # 'feeling_comparison_operator': '<=' + # }, + # }, + # }, }, 'max_attempts': 3, 'max_closed_to_analyse_for_duration': 10, @@ -82,9 +111,10 @@ class TaskManager(Processor): __labourers = None # these clients will be initialized by Processor constructor + # ecology_client: EcologyManager = None ecology_client = None dynamo_db_client: DynamoDbClient = None - lambda_client = None + lambda_client: boto3.client = None def get_oldest_greenfield_for_labourer(self, labourer: Labourer, reverse: bool = False) -> int: @@ -165,6 +195,7 @@ class TaskManager(Processor): ('invoked', lambda x: x.get_attr('start') + self.config['greenfield_invocation_delta']), ('expired', lambda x: x.get_attr('invoked') - (x.duration + x.cooldown)), ('health', lambda x: self.ecology_client.get_labourer_status(x)), + ('health_metrics', lambda x: _cfg('labourers')[x.id].get('health_metrics')), ('max_attempts', lambda x: self.config.get(f'max_attempts_{x.id}') or self.config['max_attempts']), ('max_duration', lambda x: self.ecology_client.get_max_labourer_duration(x)), ('average_duration', lambda x: self.ecology_client.get_labourer_average_duration(x)), @@ -538,12 +569,12 @@ class TaskManager(Processor): _ = self.get_db_field_name query_args = { - 'keys': { + 'keys': { _('labourer_id'): labourer.id, _('greenfield'): str(time.time()), }, - 'comparisons': {_('greenfield'): '>='}, - 'index_name': self.config['dynamo_db_config']['index_greenfield'], + 'comparisons': {_('greenfield'): '>='}, + 'index_name': self.config['dynamo_db_config']['index_greenfield'], 'filter_expression': f"attribute_exists {_('completed_at')}", } @@ -611,7 +642,7 @@ class TaskManager(Processor): for task in tasks: assert task[_('labourer_id')] == labourer.id, f"Task labourer_id must be {labourer.id}, " \ - f"bad value: {task[_('labourer_id')]}" + f"bad value: {task[_('labourer_id')]}" lowest_greenfield = self.get_oldest_greenfield_for_labourer(labourer) @@ -637,6 +668,7 @@ class TaskManager(Processor): self.stats['due_for_retry_tasks'] += 1 + @benchmark def get_average_labourer_duration(self, labourer: Labourer) -> int: """
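The `make_hash` upgrade in the patch above makes container hashing insensitive to dict key order, key types, and set iteration order. A quick usage sketch, with the expectations taken from the unit tests below:
```
from sosw.components.helpers import make_hash

# Dict key order does not matter, and nested sets hash equal regardless of order.
assert make_hash({1: 'a', 2: 'b'}) == make_hash({2: 'b', 1: 'a'})
assert make_hash({1: 'a', 'bar': {'2a': {'set', 42}}}) == \
       make_hash({'bar': {'2a': {42, 'set'}}, 1: 'a'})

# Different content yields different hashes.
assert make_hash('olleh') != make_hash('hello')
```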
bimpression/sosw
c3bd62526e62090f986b24a0b88decec4644b2b9
diff --git a/sosw/components/test/unit/test_helpers.py b/sosw/components/test/unit/test_helpers.py index cbb18b7..7d0f762 100644 --- a/sosw/components/test/unit/test_helpers.py +++ b/sosw/components/test/unit/test_helpers.py @@ -576,11 +576,11 @@ class helpers_UnitTestCase(unittest.TestCase): (("olleh", "hello"), False), (("hello", "hello"), True), (({1: 'a', 2: 'b'}, {2: 'b', 1: 'a'}), True), # Unordered Dictionary - (({1: 'a', 2: {'2a': {'set', 42}}}, {1: 'a', 2: {'2a': {42, 'set'}}}), True), # Nested Dictionary + (({1: 'a', 'bar': {'2a': {'set', 42}}}, {'bar': {'2a': {42, 'set'}}, 1: 'a'}), True), # Nested Dictionary ] for test, expected in TESTS: - self.assertEqual(make_hash(test[0]) == make_hash(test[1]), expected) + self.assertEqual(make_hash(test[0]) == make_hash(test[1]), expected, f"Failed specific test: {test}") if __name__ == '__main__': diff --git a/sosw/managers/test/unit/test_ecology.py b/sosw/managers/test/unit/test_ecology.py index 90f552f..c4154cb 100644 --- a/sosw/managers/test/unit/test_ecology.py +++ b/sosw/managers/test/unit/test_ecology.py @@ -4,9 +4,10 @@ import time import unittest import os -from collections import defaultdict +from copy import deepcopy from unittest.mock import MagicMock, patch +from sosw.components.helpers import make_hash logging.getLogger('botocore').setLevel(logging.WARNING) @@ -21,6 +22,11 @@ from sosw.test.variables import TEST_ECOLOGY_CLIENT_CONFIG class ecology_manager_UnitTestCase(unittest.TestCase): TEST_CONFIG = TEST_ECOLOGY_CLIENT_CONFIG LABOURER = Labourer(id='some_function', arn='arn:aws:lambda:us-west-2:000000000000:function:some_function') + SAMPLE_HEALTH_METRICS = { + 'test1': {'details': {'Name': 'CPUUtilization', 'Namespace': 'AWS/RDS'}}, + 'test2': {'details': {'Name': 'CPUUtilization2', 'Namespace': 'AWS/RDS'}}, + 'test3': {'details': {'Name': 'CPUUtilization3', 'Namespace': 'AWS/RDS'}}, + } def setUp(self): @@ -116,3 +122,93 @@ class ecology_manager_UnitTestCase(unittest.TestCase): self.manager.task_client.lambda_client.get_function_configuration.return_value = {'Timeout': 300} self.assertEqual(self.manager.get_max_labourer_duration(self.LABOURER), 300) + + + def test_get_health(self): + METRIC = { + 'details': {}, + 'feelings': { + 3: 50, + 4: 25, + }, + 'feeling_comparison_operator': '__le__' + } + + TESTS = [ + (0, 4), + (1.0, 4), + (25, 4), + (25.000001, 3), + (30, 3), + (50, 3), + (51, 0), + ] + + for value, expected in TESTS: + self.assertEqual(self.manager.get_health(value, METRIC), expected, f"Failed: {value} t") + + + def test_get_health__invalid(self): + METRIC = { + 'details': {}, + 'feelings': { + 1: 40, + 3: 50, + 4: 25, + }, + 'feeling_comparison_operator': '__le__' + } + + self.assertRaises(ValueError, self.manager.get_health, 60, METRIC), \ + "Did not raise while the feelings are invalid. Order of values should respect order of health statuses." + + + def test_get_labourer_status(self): + self.manager.get_health = MagicMock(side_effect=[3, 2, 4]) + self.manager.register_task_manager(MagicMock()) + self.manager.fetch_metric_stats = MagicMock() + self.health_metrics = dict() + + labourer = deepcopy(self.LABOURER) + setattr(labourer, 'health_metrics', self.SAMPLE_HEALTH_METRICS) + + # Calling the actual tested method. + result = self.manager.get_labourer_status(labourer) + + # The result should be the lowest of values get_health would have returned out of three calls. + self.assertEqual(result, 2, f"Did not get the lowest health result. 
Received: {result}") + + # Chech the the get_health had been called three times (for each metric). + self.manager.get_health.assert_called() + self.assertEqual(self.manager.get_health.call_count, 3) + + self.manager.fetch_metric_stats.assert_called() + self.assertEqual(self.manager.fetch_metric_stats.call_count, 3) + + + def test_get_labourer_status__uses_cache(self): + + self.manager.get_health = MagicMock(return_value=0) + self.manager.register_task_manager(MagicMock()) + self.manager.fetch_metric_stats = MagicMock() + + labourer = deepcopy(self.LABOURER) + setattr(labourer, 'health_metrics', self.SAMPLE_HEALTH_METRICS) + + self.manager.health_metrics = {make_hash(labourer.health_metrics['test1']['details']): 42} + + # Calling the actual tested method. + result = self.manager.get_labourer_status(labourer) + + # Assert calculator (get_health) was called 3 times. + self.assertEqual(self.manager.get_health.call_count, 3) + self.assertEqual(self.manager.fetch_metric_stats.call_count, 2, + f"Fetcher was supposed to be called only for 2 metrics. One is in cache.") + + + def test_fetch_metric_stats__calls_boto(self): + + self.manager.cloudwatch_client = MagicMock() + self.manager.fetch_metric_stats(a=1, b={3: 42}) + + self.manager.cloudwatch_client.get_metric_statistics.assert_called_once_with(a=1, b={3: 42}) diff --git a/sosw/managers/test/unit/test_task.py b/sosw/managers/test/unit/test_task.py index 7c79ea7..77455fb 100644 --- a/sosw/managers/test/unit/test_task.py +++ b/sosw/managers/test/unit/test_task.py @@ -521,3 +521,36 @@ class task_manager_UnitTestCase(unittest.TestCase): test.pop(field) self.assertFalse(self.manager.is_valid_task(test)) + + + def test_health_metrics_received(self): + TEST_CFG = { + 'some_function': { + 'arn': 'arn:aws:lambda:us-west-2:0000000000:function:some_function', + 'max_simultaneous_invocations': 10, + 'health_metrics': { + 'SomeDBCPU': { + 'Name': 'CPUUtilization', + 'Namespace': 'AWS/RDS', + 'Period': 60, + 'Statistics': ['Average'], + 'Dimensions': [ + { + 'Name': 'DBInstanceIdentifier', + 'Value': 'YOUR-DB' + }, + ], + + # These is the mapping of how the Labourer should "feel" about this metric. + # See EcologyManager.ECO_STATUSES. + # This is just a mapping ``ECO_STATUS: value`` using ``feeling_comparison_operator``. + 'feelings': { + 3: 50, + 4: 25, + }, + 'feeling_comparison_operator': '<=' + }, + }, + } + } +
Ecology: Design the mechanisms. - Get the Labourer's dependencies from its properties. - Get the acceptable values. - Call the CloudWatch manager. - Calculate the colour status.
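A minimal sketch of the behaviour that `test_get_health` in the test patch above pins down: the `feelings` mapping, the dunder-name comparison operator, and the ValueError for mis-ordered thresholds all come from those tests, while the standalone-function form and the lazy validation are illustrative assumptions, not sosw's actual `EcologyManager` code.

```python
# Sketch only: the contract test_get_health exercises, not the real implementation.

def get_health(value, metric):
    """Return the best (highest) ECO_STATUS whose threshold `value` satisfies, else 0."""
    op = metric['feeling_comparison_operator']  # a dunder name, e.g. '__le__'
    previous = None
    # Healthiest statuses first. Thresholds must grow as status drops,
    # otherwise the mapping is ambiguous (see test_get_health__invalid).
    for status in sorted(metric['feelings'], reverse=True):
        threshold = metric['feelings'][status]
        if previous is not None and threshold <= previous:
            raise ValueError("Order of values should respect order of health statuses.")
        previous = threshold
        if getattr(float(value), op)(float(threshold)):
            return status
    return 0

METRIC = {'feelings': {3: 50, 4: 25}, 'feeling_comparison_operator': '__le__'}
assert [get_health(v, METRIC) for v in (0, 25, 30, 51)] == [4, 4, 3, 0]
```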
0.0
c3bd62526e62090f986b24a0b88decec4644b2b9
[ "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_make_hash", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_fetch_metric_stats__calls_boto", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_get_health", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_get_health__invalid", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_get_labourer_status", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_get_labourer_status__uses_cache" ]
[ "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_chunks", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__conflict_of_attributes", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__missing_attributes", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__ok", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_convert_string_to_words", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_first_or_none", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_list_of_multiple_or_one_or_empty_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_one_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_one_or_none_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_negative_validate_date_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_match_extract", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_matches_soft", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_matches_strict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update_2", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update__does_overwrite_with_none", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update__inserts_new_keys", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_rstrip_all", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_trim_arn_to_name", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_account_to_dashed_invalid", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_account_to_dashed_valid", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_date_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_date_list_from_event_or_days_back", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_datetime_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_numbers_from_csv", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_words_from_csv_or_list__ok", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_words_from_csv_or_list__raises", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_string_matches_datetime_format", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_add_running_tasks_for_labourer", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_count_running_tasks_for_labourer__calls_task_manager", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_count_running_tasks_for_labourer__raises_not_task_client", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_count_running_tasks_for_labourer__use_local_cache", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_eco_statuses", 
"sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_get_max_labourer_duration", "sosw/managers/test/unit/test_ecology.py::ecology_manager_UnitTestCase::test_register_task_manager__resets_stats", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_archive_task", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_construct_payload_for_task", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_create_task", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_create_task__combine_complex_payload", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_average_labourer_duration__calculates_average", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_average_labourer_duration__calls_dynamo_twice", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_count_of_running_tasks_for_labourer", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_db_field_name", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_labourers", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_newest_greenfield_for_labourer__no_queued_tasks", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_oldest_greenfield_for_labourer__no_queued_tasks", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_tasks_to_retry_for_labourer", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_tasks_to_retry_for_labourer__limit", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_get_tasks_to_retry_for_labourer__respects_greenfield", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_health_metrics_received", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_invoke_task__calls__get_task_by_id", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_invoke_task__calls__lambda_client", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_invoke_task__calls__mark_task_invoked", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_invoke_task__not_calls__lambda_client_if_raised_conditional_exception", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_invoke_task__validates_task", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_invoke_task__with_explicit_task__not_calls_get_task_by_id", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_mark_task_invoked__calls_dynamo", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_register_labourers", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_register_labourers__calls_register_task_manager", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_validate_task__bad", "sosw/managers/test/unit/test_task.py::task_manager_UnitTestCase::test_validate_task__good" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-05-13 12:29:39+00:00
mit
1,383
bimpression__sosw-96
diff --git a/sosw/app.py b/sosw/app.py index 4aa6713..3858b40 100644 --- a/sosw/app.py +++ b/sosw/app.py @@ -32,9 +32,10 @@ class Processor: """ DEFAULT_CONFIG = {} - # TODO USE context.invoked_function_arn. + aws_account = None - aws_region = None + aws_region = os.getenv('AWS_REGION', None) + lambda_context = None def __init__(self, custom_config=None, **kwargs): @@ -48,6 +49,10 @@ class Processor: if self.test and not custom_config: raise RuntimeError("You must specify a custom config from your testcase to run processor in test mode.") + self.lambda_context = kwargs.pop('context', None) + if self.lambda_context: + self.aws_account = trim_arn_to_account(self.lambda_context.invoked_function_arn) + self.config = self.DEFAULT_CONFIG self.config = recursive_update(self.config, self.get_config(f"{os.environ.get('AWS_LAMBDA_FUNCTION_NAME')}_config")) self.config = recursive_update(self.config, custom_config or {}) @@ -158,38 +163,33 @@ class Processor: @property def _account(self): """ - Get current AWS Account to construct different ARNs. The autodetection process is pretty heavy (~0.3 seconds), - so it is not called by default. This method should be used only if you really need it. + Get current AWS Account to construct different ARNs. + + We don't have this parameter in Environmental variables, only can parse from Context. + Context is not global and is supposed to be passed by your `lambda_handler` during initialization. - It is highly recommended to provide the value of aws_account in your configs. + As a fallback we have an autodetection mechanism, but it is pretty heavy (~0.3 seconds). + So it is not called by default. This method should be used only if you really need it. + + It is highly recommended to pass the `context` during initialization. Some things to note: - We store this value in class variable for fast access - If not yet set on the first call we initialise it. - - We first try from your config and only if not provided - use the autodetection. - - TODO This method is overcomplicated. Change to to parsing the ARN from context object. But config can overwrite. - TODO https://github.com/bimpression/sosw/issues/40 + - We first try from context and only if not provided - use the autodetection. """ + if not self.aws_account: - try: - self.aws_account = self.config['aws_account'] - except KeyError: - self.aws_account = boto3.client('sts').get_caller_identity().get('Account') + self.aws_account = boto3.client('sts').get_caller_identity().get('Account') return self.aws_account @property def _region(self): - # TODO Implement this to get it effectively from context object. - # TODO https://github.com/bimpression/sosw/issues/40 - if not self.aws_region: - try: - self.aws_region = self.config['aws_region'] - except KeyError: - self.aws_region = 'us-west-2' - + """ + Property fetched from AWS Lambda Environmental variables. + """ return self.aws_region diff --git a/sosw/components/helpers.py b/sosw/components/helpers.py index 0f26f79..2bd13cc 100644 --- a/sosw/components/helpers.py +++ b/sosw/components/helpers.py @@ -27,6 +27,7 @@ __all__ = ['validate_account_to_dashed', 'first_or_none', 'recursive_update', 'trim_arn_to_name', + 'trim_arn_to_account', 'make_hash', ] @@ -714,6 +715,21 @@ def trim_arn_to_name(arn: str) -> str: return re.search(pattern, arn).group('name') +def trim_arn_to_account(arn: str) -> str: + """ + Extract just the ACCOUNT_ID from full ARN. Supports versions, aliases or raw name (without ARN). + + More information about ARN Format: + https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns + """ + + # Seems a little messy, but passes more/less any test of different ARNs we tried. + pattern = "(arn:aws:[0-9a-zA-Z-]{2,20}:[0-9a-zA-Z-]{0,12}:)?(?P<acc>[0-9]{12})(:[0-9a-zA-Z-]{2,20}[:/])?" \ "(?P<name>[0-9a-zA-Z_=,.@-]*)(:)?([0-9a-zA-Z$]*)?" + + return re.search(pattern, arn).group('acc') + + def make_hash(o): """ Makes a hash from a dictionary, list, tuple or set to any level, that contains diff --git a/sosw/scheduler.py b/sosw/scheduler.py index 5044d9e..efa0280 100644 --- a/sosw/scheduler.py +++ b/sosw/scheduler.py @@ -25,6 +25,7 @@ from typing import List, Set, Tuple, Union, Optional, Dict from sosw.app import Processor from sosw.components.helpers import get_list_of_multiple_or_one_or_empty_from_dict, trim_arn_to_name +from sosw.components.siblings import SiblingsManager from sosw.labourer import Labourer from sosw.managers.task import TaskManager @@ -32,7 +33,6 @@ from sosw.managers.task import TaskManager logger = logging.getLogger() logger.setLevel(logging.DEBUG) -lambda_context = None def single_or_plural(attr): """ Simple function. Gives versions with 's' at the end and without it. """ @@ -79,6 +79,7 @@ class Scheduler(Processor): # these clients will be initialized by Processor constructor task_client: TaskManager = None + siblings_client: SiblingsManager = None s3_client = None sns_client = None base_query = ... @@ -93,7 +94,7 @@ class Scheduler(Processor): self.chunkable_attrs = list([x[0] for x in self.config['job_schema']['chunkable_attrs']]) assert not any(x.endswith('s') for x in self.chunkable_attrs), \ f"We do not currently support attributes that end with 's'. " \ - f"In the config you should use singular form of attribute. Received from config: {self.chunkable_attrs}" + f"In the config you should use singular form of attribute. Received from config: {self.chunkable_attrs}" def __call__(self, event): @@ -208,7 +209,7 @@ class Scheduler(Processor): If I call for previous_x_days(pattern='previous_2_days'), I will receive a list of string dates equal to: ['2019-04-26', '2019-04-27'] """ - assert re.match('previous_[0-9]+_days', pattern) is not None, "Invalid pattern {pattern} for `previous_x_days()`" + assert re.match('previous_[0-9]+_days', pattern) is not None, "Invalid pattern {pattern} for `previous_x_days`" num = int(pattern.split('_')[1]) today = datetime.date.today() @@ -263,8 +264,6 @@ class Scheduler(Processor): return [str(end_date + datetime.timedelta(days=x)) for x in range(7)] - - def chunk_dates(self, job: Dict, skeleton: Dict = None) -> List[Dict]: """ There is a support for multiple not nested parameters to chunk. Dates is one very specific of them. @@ -277,7 +276,8 @@ class Scheduler(Processor): period = job.pop('period', None) isolate = job.pop('isolate_days', None) - PERIOD_KEYS = ['last_[0-9]+_days', '[0-9]+_days_back', 'yesterday', 'today', 'previous_[0-9]+_days', 'last_week'] + PERIOD_KEYS = ['last_[0-9]+_days', '[0-9]+_days_back', 'yesterday', 'today', 'previous_[0-9]+_days', + 'last_week'] if period: @@ -503,8 +503,8 @@ class Scheduler(Processor): if next_attr: for a in attrs: current_vals = get_list_of_multiple_or_one_or_empty_from_dict(data, a) - logger.debug( - f"needs_chunking(): For {a} got current_vals: {current_vals} from {data}. Analysing {next_attr}") + logger.debug(f"needs_chunking(): For {a} got current_vals: {current_vals} from {data}. " + f"Analysing {next_attr}") for val in current_vals: @@ -566,14 +566,12 @@ class Scheduler(Processor): else: # Spawning another sibling to continue the processing try: - global lambda_context - payload = dict(file_name=file_name) - self.siblings_client.spawn_sibling(lambda_context, payload=payload) + self.siblings_client.spawn_sibling(self.lambda_context, payload=payload) self.stats['siblings_spawned'] += 1 except Exception as err: - logger.exception(f"Could not spawn sibling with context: {lambda_context} and payload: {payload}") + logger.exception(f"Could not spawn sibling with context: {self.lambda_context}, payload: {payload}") self.upload_and_unlock_queue_file() self.clean_tmp() @@ -588,7 +586,7 @@ class Scheduler(Processor): Therefore multiple capacity units are calculated as a fraction of the """ logging.debug(dir(self.task_client.dynamo_db_client)) - return 1/self.task_client.dynamo_db_client.get_capacity()['write'] + return 1 / self.task_client.dynamo_db_client.get_capacity()['write'] @staticmethod @@ -647,9 +645,8 @@ class Scheduler(Processor): Return if there is a sufficient execution time for processing ('shutdown period' is in seconds). """ - global lambda_context + return self.lambda_context.get_remaining_time_in_millis() > self.config['shutdown_period'] * 1000 - return lambda_context.get_remaining_time_in_millis() > self.config['shutdown_period'] * 1000 def get_and_lock_queue_file(self) -> str: """ @@ -713,12 +710,10 @@ class Scheduler(Processor): Initialize a unique file_name to store the queue of tasks to write. """ - global lambda_context - if name is None: filename_parts = self.config['queue_file'].rsplit('.', 1) assert len(filename_parts) == 2, "Got bad file name" - self._queue_file_name = f"{filename_parts[0]}_{lambda_context.aws_request_id}.{filename_parts[1]}" + self._queue_file_name = f"{filename_parts[0]}_{self.lambda_context.aws_request_id}.{filename_parts[1]}" else: self._queue_file_name = name
bimpression/sosw
0b84a348f05bbc2a8d306bbf2b32b990fde02620
diff --git a/sosw/components/test/unit/test_helpers.py b/sosw/components/test/unit/test_helpers.py index 7d0f762..85a794d 100644 --- a/sosw/components/test/unit/test_helpers.py +++ b/sosw/components/test/unit/test_helpers.py @@ -568,6 +568,27 @@ class helpers_UnitTestCase(unittest.TestCase): self.assertEqual(trim_arn_to_name(test), expected) + def test_trim_arn_to_account(self): + + TESTS = [ + ('111000000000', '111000000000'), + ('000000000123', '000000000123'), + ('arn:aws:lambda:us-west-2:123000000000:function:bar', '123000000000'), + ('arn:aws:lambda:us-west-2:000000000123:function:bar:', '000000000123'), + ('arn:aws:lambda:us-west-2:123000000000:function:bar:$LATEST', '123000000000'), + ('arn:aws:lambda:us-west-2:123000000000:function:bar:12', '123000000000'), + ('arn:aws:lambda:us-west-2:123000000000:function:bar:12', '123000000000'), + ('arn:aws:iam::123000000000:role/aws-code-deploy-role', '123000000000'), + ('arn:aws:rds:us-west-2:123000000000:cluster:aws-cluster-01', '123000000000'), + ('arn:aws:rds:us-west-2:000123000000:db:aws-01-00', '000123000000'), + ('arn:aws:events:us-west-2:123456000000:rule/aws-sr-01', '123456000000'), + ('arn:aws:dynamodb:us-west-2:123456000321:table/sosw_tasks', '123456000321'), + ] + + for test, expected in TESTS: + self.assertEqual(trim_arn_to_account(test), expected) + + def test_make_hash(self): TESTS = [ diff --git a/sosw/managers/test/integration/test_task_i.py b/sosw/managers/test/integration/test_task_i.py index 8c02cf7..3c8d9cb 100644 --- a/sosw/managers/test/integration/test_task_i.py +++ b/sosw/managers/test/integration/test_task_i.py @@ -47,6 +47,8 @@ class TaskManager_IntegrationTestCase(unittest.TestCase): self.HASH_KEY = ('task_id', 'S') self.RANGE_KEY = ('labourer_id', 'S') + self.NOW_TIME = 100000 + self.table_name = self.config['dynamo_db_config']['table_name'] self.completed_tasks_table = self.config['sosw_closed_tasks_table'] self.retry_tasks_table = self.config['sosw_retry_tasks_table'] @@ -195,7 +197,7 @@ class TaskManager_IntegrationTestCase(unittest.TestCase): def test_mark_task_invoked(self): - greenfield = round(time.time() - random.randint(100, 1000)) + greenfield = 1000 delta = self.manager.config['greenfield_invocation_delta'] self.register_labourers() @@ -208,13 +210,15 @@ class TaskManager_IntegrationTestCase(unittest.TestCase): # print(f"Saved initial version with greenfield some date not long ago: {row}") # Do the actual tested job - self.manager.mark_task_invoked(self.LABOURER, row) - time.sleep(1) + with patch('time.time') as mock_time: + mock_time.return_value = self.NOW_TIME + self.manager.mark_task_invoked(self.LABOURER, row) + result = self.dynamo_client.get_by_query({self.HASH_KEY[0]: f"task_id_{self.LABOURER.id}_256"}, strict=False) # print(f"The new updated value of task is: {result}") # Rounded -2 we check that the greenfield was updated - self.assertAlmostEqual(round(int(time.time()) + delta, -2), round(result[0]['greenfield'], -2)) + self.assertAlmostEqual(self.NOW_TIME + delta, result[0]['greenfield']) def test_get_invoked_tasks_for_labourer(self): @@ -301,23 +305,26 @@ class TaskManager_IntegrationTestCase(unittest.TestCase): self.dynamo_client.put(task) - self.manager.move_task_to_retry_table(task, delay) + # Call + with patch('time.time') as mock_time: + mock_time.return_value = self.NOW_TIME + self.manager.move_task_to_retry_table(task, delay) - result_tasks = self.dynamo_client.get_by_query({_('task_id'): '123'}) - self.assertEqual(len(result_tasks), 0) + result_tasks = self.dynamo_client.get_by_query({_('task_id'): '123'}) + self.assertEqual(len(result_tasks), 0) - result_retry_tasks = self.dynamo_client.get_by_query({_('labourer_id'): labourer_id}, - table_name=self.retry_tasks_table) - self.assertEqual(len(result_retry_tasks), 1) - result = first_or_none(result_retry_tasks) + result_retry_tasks = self.dynamo_client.get_by_query({_('labourer_id'): labourer_id}, + table_name=self.retry_tasks_table) + self.assertEqual(len(result_retry_tasks), 1) + result = first_or_none(result_retry_tasks) - for k in task: - self.assertEqual(task[k], result[k]) - for k in result: - if k != _('desired_launch_time'): - self.assertEqual(result[k], task[k]) + for k in task: + self.assertEqual(task[k], result[k]) + for k in result: + if k != _('desired_launch_time'): + self.assertEqual(result[k], task[k]) - self.assertTrue(time.time() + delay - 60 < result[_('desired_launch_time')] < time.time() + delay + 60) + self.assertTrue(time.time() + delay - 60 < result[_('desired_launch_time')] < time.time() + delay + 60) def test_get_tasks_to_retry_for_labourer(self): diff --git a/sosw/test/integration/test_scheduler_i.py b/sosw/test/integration/test_scheduler_i.py index e979321..f37e098 100644 --- a/sosw/test/integration/test_scheduler_i.py +++ b/sosw/test/integration/test_scheduler_i.py @@ -1,6 +1,7 @@ import boto3 import os import random +import types import unittest from unittest.mock import MagicMock, patch @@ -67,8 +68,12 @@ class Scheduler_IntegrationTestCase(unittest.TestCase): self.get_config_patch = self.patcher.start() self.custom_config = self.TEST_CONFIG.copy() - with patch('sosw.scheduler.lambda_context') as mock_lambda_context: - self.scheduler = Scheduler(self.custom_config) + self.lambda_context = types.SimpleNamespace() + self.lambda_context.aws_request_id = 'AWS_REQ_ID' + self.lambda_context.invoked_function_arn = 'arn:aws:lambda:us-west-2:000000000000:function:some_function' + self.lambda_context.get_remaining_time_in_millis = MagicMock(side_effect=[100000, 100]) + + self.scheduler = Scheduler(self.custom_config, context=self.lambda_context) self.s3_client = boto3.client('s3') diff --git a/sosw/test/unit/test_scheduler.py b/sosw/test/unit/test_scheduler.py index 25fec9e..a0b99fd 100644 --- a/sosw/test/unit/test_scheduler.py +++ b/sosw/test/unit/test_scheduler.py @@ -78,13 +78,13 @@ class Scheduler_UnitTestCase(unittest.TestCase): 'auto_spawning': True } - module.lambda_context = types.SimpleNamespace() - module.lambda_context.aws_request_id = 'AWS_REQ_ID' - module.lambda_context.invoked_function_arn = 'arn:aws:lambda:us-west-2:000000000000:function:some_function' - module.lambda_context.get_remaining_time_in_millis = MagicMock(side_effect=[100000, 100]) + self.lambda_context = types.SimpleNamespace() + self.lambda_context.aws_request_id = 'AWS_REQ_ID' + self.lambda_context.invoked_function_arn = 'arn:aws:lambda:us-west-2:000000000000:function:some_function' + self.lambda_context.get_remaining_time_in_millis = MagicMock(side_effect=[100000, 100]) with patch('boto3.client'): - self.scheduler = module.Scheduler(self.custom_config) + self.scheduler = module.Scheduler(self.custom_config, context=self.lambda_context) self.scheduler.s3_client = MagicMock() self.scheduler.sns_client = MagicMock() @@ -129,7 +129,7 @@ class Scheduler_UnitTestCase(unittest.TestCase): config['job_schema']['chunkable_attrs'] = [('bad_name_ending_with_s', {})] with patch('boto3.client'): - self.assertRaises(AssertionError, Scheduler, custom_config=config) + self.assertRaises(AssertionError, Scheduler, custom_config=config, context=self.lambda_context) def test_get_next_chunkable_attr(self): @@ -146,13 +146,13 @@ class Scheduler_UnitTestCase(unittest.TestCase): def test__remote_queue_file(self): self.assertIn(f"{self.scheduler.config['s3_prefix'].strip('/')}", self.scheduler.remote_queue_file) - self.assertIn(module.lambda_context.aws_request_id, self.scheduler.remote_queue_file) + self.assertIn(self.lambda_context.aws_request_id, self.scheduler.remote_queue_file) def test__remote_queue_locked_file(self): self.assertIn(f"{self.scheduler.config['s3_prefix'].strip('/')}", self.scheduler.remote_queue_locked_file) self.assertIn('locked_', self.scheduler.remote_queue_locked_file) - self.assertIn(module.lambda_context.aws_request_id, self.scheduler.remote_queue_locked_file) + self.assertIn(self.lambda_context.aws_request_id, self.scheduler.remote_queue_locked_file) ### Tests of file operations ###
Implement global context. Research the best practices for making the Context object from the Lambda handler accessible to the core Processor. The Processor should implement internal interfaces for some of the Context object methods.
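For context, this is roughly how a handler would use the pattern the patch above settles on. The `sosw.app` module path and the `context=` kwarg come from the diff; the global-instance idiom and the callable-processor call are illustrative assumptions.

```python
# Hypothetical lambda_handler wiring for the context-passing pattern above.
# Processor.__init__ pops `context` from kwargs and derives the AWS account
# from context.invoked_function_arn (per the sosw/app.py diff).

from sosw.app import Processor

processor = None  # reused across warm invocations of the same Lambda container

def lambda_handler(event, context):
    global processor
    if processor is None:
        processor = Processor(custom_config={}, context=context)
    return processor(event)  # assumes the processor is callable with the event
```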
0.0
0b84a348f05bbc2a8d306bbf2b32b990fde02620
[ "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_trim_arn_to_account", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__queue_bucket", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__remote_queue_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test__remote_queue_locked_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_call__sample", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__last_x_days", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__pops_period", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__preserve_skeleton", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__preserve_skeleton__if_no_chunking", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_dates__raises_invalid_period_pattern", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__not_chunkable_config", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__not_raises__notchunkable__if_no_isolation", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__not_raises_unchunkable_subtask__but_preserves_in_payload", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__raises__unsupported_vals__list_not_as_value", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__raises__unsupported_vals__string", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_chunk_job__unchunckable_preserve_custom_attrs", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__empty_job", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__preserve_skeleton_through_chunkers", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_construct_job_data__real_payload__for_debuging_logs", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_extract_job_from_payload", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_extract_job_from_payload_raises", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_and_lock_queue_file__local_file_exists", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_and_lock_queue_file__s3_calls", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_index_from_list", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_get_next_chunkable_attr", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_init__chunkable_attrs_not_end_with_s", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_last_week", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_last_x_days", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_root", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_subdata", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_needs_chunking__isolate_subdata_deep", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_parse_job_to_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_parse_job_to_file__multiple_rows", 
"sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file__missing_or_empty_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_pop_rows_from_file__reads_from_top", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_previous_x_days", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_process_file", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_today", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_validate_list_of_vals", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_x_days_back", "sosw/test/unit/test_scheduler.py::Scheduler_UnitTestCase::test_yesterday" ]
[ "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_chunks", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__conflict_of_attributes", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__missing_attributes", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_construct_dates_from_event__ok", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_convert_string_to_words", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_first_or_none", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_list_of_multiple_or_one_or_empty_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_one_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_get_one_or_none_from_dict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_make_hash", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_negative_validate_date_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_match_extract", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_matches_soft", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_matches_strict", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update_2", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update__does_overwrite_with_none", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_recursive_update__inserts_new_keys", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_rstrip_all", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_trim_arn_to_name", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_account_to_dashed_invalid", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_account_to_dashed_valid", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_date_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_date_list_from_event_or_days_back", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_datetime_from_something", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_numbers_from_csv", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_words_from_csv_or_list__ok", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_list_of_words_from_csv_or_list__raises", "sosw/components/test/unit/test_helpers.py::helpers_UnitTestCase::test_validate_string_matches_datetime_format" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-05-23 09:04:05+00:00
mit
1,384
biocore__deblur-156
diff --git a/ChangeLog.md b/ChangeLog.md index ea3e5aa..a8981f9 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -4,6 +4,8 @@ ### Features +* Added `--left-trim-length` to allow for trimming nucleotides on the 5' end of each sequence. Please see [issue #154](https://github.com/biocore/deblur/issues/154) for more information. + ### Backward-incompatible changes [stable] ### Performance enhancements diff --git a/deblur/workflow.py b/deblur/workflow.py index 6eb21f7..0f7d55a 100644 --- a/deblur/workflow.py +++ b/deblur/workflow.py @@ -100,7 +100,7 @@ def sequence_generator(input_fp): yield (record.metadata['id'], str(record)) -def trim_seqs(input_seqs, trim_len): +def trim_seqs(input_seqs, trim_len, left_trim_len): """Trim FASTA sequences to specified length. Parameters ---------- @@ -109,6 +109,9 @@ The list of input sequences in (label, sequence) format trim_len : int Sequence trimming length. Specify a value of -1 to disable trimming. + left_trim_len : int + Sequence trimming from the 5' end. A value of 0 will disable this trim. + Returns ------- @@ -132,7 +135,7 @@ def trim_seqs(input_seqs, trim_len): yield label, seq elif len(seq) >= trim_len: okseqs += 1 - yield label, seq[:trim_len] + yield label, seq[left_trim_len:trim_len] if okseqs < 0.01*totseqs: logger = logging.getLogger(__name__) @@ -771,8 +774,8 @@ def create_otu_table(output_fp, deblurred_list, def launch_workflow(seqs_fp, working_dir, mean_error, error_dist, - indel_prob, indel_max, trim_length, min_size, ref_fp, - ref_db_fp, threads_per_sample=1, + indel_prob, indel_max, trim_length, left_trim_length, + min_size, ref_fp, ref_db_fp, threads_per_sample=1, sim_thresh=None, coverage_thresh=None): """Launch full deblur workflow for a single post split-libraries fasta file @@ -792,6 +795,8 @@ def launch_workflow(seqs_fp, working_dir, mean_error, error_dist, maximal indel number trim_length: integer sequence trim length + left_trim_length: integer + trim the first n reads min_size: integer upper limit on sequence abundance (discard sequences below limit) ref_fp: tuple @@ -823,7 +828,8 @@ def launch_workflow(seqs_fp, working_dir, mean_error, error_dist, with open(output_trim_fp, 'w') as out_f: for label, seq in trim_seqs( input_seqs=sequence_generator(seqs_fp), - trim_len=trim_length): + trim_len=trim_length, + left_trim_len=left_trim_length): out_f.write(">%s\n%s\n" % (label, seq)) # Step 2: Dereplicate sequences output_derep_fp = join(working_dir, diff --git a/scripts/deblur b/scripts/deblur index bd9704d..f5d9dcc 100755 --- a/scripts/deblur +++ b/scripts/deblur @@ -420,6 +420,10 @@ def build_biom_table(seqs_fp, output_biom_fp, min_reads, file_type, log_level, "trim-length will be discarded. A value of -1 can be " "specified to skip trimming; this assumes all sequences " "have an identical length.")) [email protected]('--left-trim-length', required=False, type=int, + show_default=True, default=0, + help=("Trim the first N bases from every sequence. A value of 0 " + "disables this trim.")) @click.option('--pos-ref-fp', required=False, multiple=True, default=[], show_default=False, type=click.Path(resolve_path=True, readable=True, exists=True, @@ -518,9 +522,9 @@ def build_biom_table(seqs_fp, output_biom_fp, min_reads, file_type, log_level, def workflow(seqs_fp, output_dir, pos_ref_fp, pos_ref_db_fp, neg_ref_fp, neg_ref_db_fp, overwrite, mean_error, error_dist, indel_prob, indel_max, - trim_length, min_reads, min_size, threads_per_sample, - keep_tmp_files, log_level, log_file, jobs_to_start, - is_worker_thread): + trim_length, left_trim_length, min_reads, min_size, + threads_per_sample, keep_tmp_files, log_level, log_file, + jobs_to_start, is_worker_thread): """Launch deblur workflow""" start_log(level=log_level * 10, filename=log_file) logger = logging.getLogger(__name__) @@ -623,6 +627,7 @@ def workflow(seqs_fp, output_dir, pos_ref_fp, pos_ref_db_fp, mean_error=mean_error, error_dist=error_dist, indel_prob=indel_prob, indel_max=indel_max, trim_length=trim_length, + left_trim_length=left_trim_length, min_size=min_size, ref_fp=ref_fp, ref_db_fp=ref_db_fp, threads_per_sample=threads_per_sample) if deblurred_file_name is None: @@ -651,7 +656,8 @@ def workflow(seqs_fp, output_dir, pos_ref_fp, pos_ref_db_fp, # also create biom tables with # only sequences matching the pos_ref_fp sequences (reference-hit.biom) - # and only sequences not matching the pos_ref_fp sequences (reference-non-hit.biom) + # and only sequences not matching the pos_ref_fp sequences + # (reference-non-hit.biom) tmp_files = remove_artifacts_from_biom_table(output_fp, outputfasta_fp, pos_ref_fp, output_dir, pos_ref_db_fp,
biocore/deblur
fd259a74f516a22db8db790ab2cd3a1afab96b22
diff --git a/deblur/test/test_workflow.py b/deblur/test/test_workflow.py index 0ea7174..b9bc8c6 100644 --- a/deblur/test/test_workflow.py +++ b/deblur/test/test_workflow.py @@ -128,7 +128,7 @@ class workflowTests(TestCase): ("seq5", "gagtgcgagatgcgtggtgagg"), ("seq6", "ggatgcgagatgcgtggtgatt"), ("seq7", "agggcgagattcctagtgga--")] - obs = trim_seqs(seqs, -1) + obs = trim_seqs(seqs, -1, 0) self.assertEqual(list(obs), seqs) def test_trim_seqs_notrim_outofbounds(self): @@ -140,7 +140,7 @@ class workflowTests(TestCase): ("seq6", "ggatgcgagatgcgtggtgatt"), ("seq7", "agggcgagattcctagtgga--")] with self.assertRaises(ValueError): - list(trim_seqs(seqs, -2)) + list(trim_seqs(seqs, -2, 0)) def test_trim_seqs(self): seqs = [("seq1", "tagggcaagactccatggtatga"), @@ -150,7 +150,7 @@ class workflowTests(TestCase): ("seq5", "gagtgcgagatgcgtggtgagg"), ("seq6", "ggatgcgagatgcgtggtgatt"), ("seq7", "agggcgagattcctagtgga--")] - obs = trim_seqs(seqs, 20) + obs = trim_seqs(seqs, 20, 0) self.assertTrue(isinstance(obs, GeneratorType)) @@ -162,6 +162,26 @@ class workflowTests(TestCase): ("seq7", "agggcgagattcctagtgga")] self.assertEqual(list(obs), exp) + def test_trim_seqs_left(self): + seqs = [("seq1", "tagggcaagactccatggtatga"), + ("seq2", "cggaggcgagatgcgtggta"), + ("seq3", "tactagcaagattcctggtaaagga"), + ("seq4", "aggatgcgagatgcgtg"), + ("seq5", "gagtgcgagatgcgtggtgagg"), + ("seq6", "ggatgcgagatgcgtggtgatt"), + ("seq7", "agggcgagattcctagtgga--")] + obs = trim_seqs(seqs, 20, 5) + + self.assertTrue(isinstance(obs, GeneratorType)) + + exp = [("seq1", "caagactccatggta"), + ("seq2", "gcgagatgcgtggta"), + ("seq3", "gcaagattcctggta"), + ("seq5", "cgagatgcgtggtga"), + ("seq6", "cgagatgcgtggtga"), + ("seq7", "gagattcctagtgga")] + self.assertEqual(list(obs), exp) + def test_dereplicate_seqs_remove_singletons(self): """ Test dereplicate_seqs() method functionality with removing singletons @@ -622,12 +642,14 @@ class workflowTests(TestCase): indel_prob = 0.01 indel_max = 3 min_size = 2 + left_trim_length = 0 nochimera = launch_workflow(seqs_fp=seqs_fp, working_dir=output_fp, mean_error=mean_error, error_dist=error_dist, indel_prob=indel_prob, indel_max=indel_max, trim_length=trim_length, + left_trim_length=left_trim_length, min_size=min_size, ref_fp=(ref_fp,), ref_db_fp=ref_db_fp, @@ -789,6 +811,7 @@ class workflowTests(TestCase): min_size = 2 # trim length longer than sequences trim_length = -1 + left_trim_length = 0 threads = 1 output_fp = launch_workflow(seqs_fp=seqs_fp, @@ -798,6 +821,7 @@ class workflowTests(TestCase): indel_prob=indel_prob, indel_max=indel_max, trim_length=trim_length, + left_trim_length=left_trim_length, min_size=min_size, ref_fp=(ref_fp,), ref_db_fp=ref_db_fp, @@ -826,6 +850,7 @@ class workflowTests(TestCase): min_size = 2 # trim length longer than sequences trim_length = 151 + left_trim_length = 0 threads = 1 with self.assertWarns(UserWarning): launch_workflow(seqs_fp=seqs_fp, working_dir=output_fp, @@ -834,6 +859,7 @@ class workflowTests(TestCase): indel_prob=indel_prob, indel_max=indel_max, trim_length=trim_length, + left_trim_length=left_trim_length, min_size=min_size, ref_fp=(ref_fp,), ref_db_fp=ref_db_fp,
Support "left trimming" on sequences. It would be useful if, in addition to the normal trimming (at the end of the sequence), there were a parameter to trim the initial bases of a sequence.
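The whole feature reduces to one slice per read; a toy restatement of `trim_seqs` from the patch above (the function shape and the None-for-discarded convention are ours; deblur itself streams `(label, sequence)` pairs):

```python
def trim(seq, trim_len, left_trim_len=0):
    if trim_len == -1:        # -1 disables trimming entirely
        return seq
    if len(seq) < trim_len:   # too-short reads are discarded
        return None
    return seq[left_trim_len:trim_len]

# From test_trim_seqs_left above: trim_seqs(seqs, 20, 5) keeps bases 5..19.
assert trim("cggaggcgagatgcgtggta", 20, 5) == "gcgagatgcgtggta"
```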
0.0
fd259a74f516a22db8db790ab2cd3a1afab96b22
[ "deblur/test/test_workflow.py::workflowTests::test_trim_seqs_notrim_outofbounds", "deblur/test/test_workflow.py::workflowTests::test_trim_seqs", "deblur/test/test_workflow.py::workflowTests::test_trim_seqs_notrim", "deblur/test/test_workflow.py::workflowTests::test_trim_seqs_left" ]
[ "deblur/test/test_workflow.py::workflowTests::test_sequence_generator_invalid_format", "deblur/test/test_workflow.py::workflowTests::test_get_files_for_table", "deblur/test/test_workflow.py::workflowTests::test_filter_minreads_samples_from_table", "deblur/test/test_workflow.py::workflowTests::test_fasta_from_biom", "deblur/test/test_workflow.py::workflowTests::test_get_fastq_variant", "deblur/test/test_workflow.py::workflowTests::test_sequence_generator_fastq", "deblur/test/test_workflow.py::workflowTests::test_sample_id_from_read_id", "deblur/test/test_workflow.py::workflowTests::test_sequence_generator_fasta", "deblur/test/test_workflow.py::workflowTests::test_split_sequence_file_on_sample_ids_to_files" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-09-11 17:42:04+00:00
bsd-3-clause
1,385
biolink__biolink-model-toolkit-51
diff --git a/bmt/utils.py b/bmt/utils.py index f2ac488..e9a1d7c 100644 --- a/bmt/utils.py +++ b/bmt/utils.py @@ -4,6 +4,18 @@ import stringcase from linkml_runtime.linkml_model.meta import ClassDefinition, SlotDefinition, Element, ClassDefinitionName, \ SlotDefinitionName, ElementName, TypeDefinition +lowercase_pattern = re.compile(r"[a-zA-Z]*[a-z][a-zA-Z]*") +underscore_pattern = re.compile(r"(?<!^)(?=[A-Z][a-z])") + + +def from_camel(s: str, sep: str = " ") -> str: + underscored = underscore_pattern.sub(sep, s) + lowercased = lowercase_pattern.sub( + lambda match: match.group(0).lower(), + underscored, + ) + return lowercased + def camelcase_to_sentencecase(s: str) -> str: """ @@ -18,7 +30,7 @@ def camelcase_to_sentencecase(s: str) -> str: str string in sentence case form """ - return stringcase.sentencecase(s).lower() + return from_camel(s, sep=" ") def snakecase_to_sentencecase(s: str) -> str:
biolink/biolink-model-toolkit
d4495980870ca6805c40f434fc3325d73da3e9f4
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 3a7203f..a72f7fa 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -18,6 +18,7 @@ from bmt.utils import parse_name ('RNA product', 'RNA product'), ('RNA Product', 'RNA Product'), ('Rna Product', 'Rna Product'), + ('biolink:RNAProduct', 'RNA product'), ]) def test_parse_name(query): n = parse_name(query[0])
Camel-case names with acronyms are not parsed well Classes like "RNA product", etc. behave in strange ways. ```python >>> print(tk.get_element("RNA product").class_uri) biolink:RNAProduct >>> print(tk.get_element("biolink:RNAProduct")) None ``` I suspect that we are failing to convert "RNAProduct" to the expected sentence-case representation.
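The fix in the patch above comes down to two regexes, pulled out here with checks against the cases the test patch adds:

```python
import re

# Split only where an uppercase letter starts a new lowercase word, so runs
# of capitals such as "RNA" stay glued together.
underscore_pattern = re.compile(r"(?<!^)(?=[A-Z][a-z])")
# Then lowercase every token that contains a lowercase letter, leaving pure
# acronyms untouched.
lowercase_pattern = re.compile(r"[a-zA-Z]*[a-z][a-zA-Z]*")

def from_camel(s: str, sep: str = " ") -> str:
    spaced = underscore_pattern.sub(sep, s)
    return lowercase_pattern.sub(lambda m: m.group(0).lower(), spaced)

assert from_camel("RNAProduct") == "RNA product"
assert from_camel("PhenotypicFeature") == "phenotypic feature"
```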
0.0
d4495980870ca6805c40f434fc3325d73da3e9f4
[ "tests/unit/test_utils.py::test_parse_name[query14]" ]
[ "tests/unit/test_utils.py::test_parse_name[query0]", "tests/unit/test_utils.py::test_parse_name[query1]", "tests/unit/test_utils.py::test_parse_name[query2]", "tests/unit/test_utils.py::test_parse_name[query3]", "tests/unit/test_utils.py::test_parse_name[query4]", "tests/unit/test_utils.py::test_parse_name[query5]", "tests/unit/test_utils.py::test_parse_name[query6]", "tests/unit/test_utils.py::test_parse_name[query7]", "tests/unit/test_utils.py::test_parse_name[query8]", "tests/unit/test_utils.py::test_parse_name[query9]", "tests/unit/test_utils.py::test_parse_name[query10]", "tests/unit/test_utils.py::test_parse_name[query11]", "tests/unit/test_utils.py::test_parse_name[query12]", "tests/unit/test_utils.py::test_parse_name[query13]" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2021-07-02 20:48:05+00:00
bsd-3-clause
1,386
biolink__biolink-model-toolkit-52
diff --git a/bmt/utils.py b/bmt/utils.py index f2ac488..f018ac7 100644 --- a/bmt/utils.py +++ b/bmt/utils.py @@ -4,6 +4,18 @@ import stringcase from linkml_runtime.linkml_model.meta import ClassDefinition, SlotDefinition, Element, ClassDefinitionName, \ SlotDefinitionName, ElementName, TypeDefinition +lowercase_pattern = re.compile(r"[a-zA-Z]*[a-z][a-zA-Z]*") +underscore_pattern = re.compile(r"(?<!^)(?=[A-Z][a-z])") + + +def from_camel(s: str, sep: str = " ") -> str: + underscored = underscore_pattern.sub(sep, s) + lowercased = lowercase_pattern.sub( + lambda match: match.group(0).lower(), + underscored, + ) + return lowercased + def camelcase_to_sentencecase(s: str) -> str: """ @@ -18,7 +30,7 @@ def camelcase_to_sentencecase(s: str) -> str: str string in sentence case form """ - return stringcase.sentencecase(s).lower() + return from_camel(s, sep=" ") def snakecase_to_sentencecase(s: str) -> str: @@ -65,7 +77,11 @@ def sentencecase_to_camelcase(s: str) -> str: str string in CamelCase form """ - return stringcase.pascalcase(stringcase.snakecase(s)) + return re.sub( + r"(?:^| )([a-zA-Z])", + lambda match: match.group(1).upper(), + s + ) def format_element(element: Element) -> str:
biolink/biolink-model-toolkit
d4495980870ca6805c40f434fc3325d73da3e9f4
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 3a7203f..5a15f71 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1,6 +1,11 @@ +from bmt.toolkit import Toolkit import pytest -from bmt.utils import parse_name +from bmt.utils import parse_name, format_element, sentencecase_to_camelcase + [email protected](scope="module") +def toolkit(): + return Toolkit() @pytest.mark.parametrize('query', [ @@ -18,8 +23,28 @@ from bmt.utils import parse_name ('RNA product', 'RNA product'), ('RNA Product', 'RNA Product'), ('Rna Product', 'Rna Product'), + ('biolink:RNAProduct', 'RNA product'), ]) def test_parse_name(query): n = parse_name(query[0]) assert n == query[1] + [email protected]('query', [ + ('phenotypic feature', 'PhenotypicFeature'), + ('noncoding RNA product', 'NoncodingRNAProduct'), +]) +def test_sentencecase_to_camelcase(query): + n = sentencecase_to_camelcase(query[0]) + assert n == query[1] + + [email protected]('query', [ + ('related to', 'biolink:related_to'), + ('caused_by', 'biolink:caused_by'), + ('PhenotypicFeature', 'biolink:PhenotypicFeature'), + ('noncoding RNA product', 'biolink:NoncodingRNAProduct'), +]) +def test_format_element(query, toolkit): + n = format_element(toolkit.get_element(query[0])) + assert n == query[1]
Camel-case class names are not converted to CURIEs well ```python >>> tk.get_descendants("transcript", formatted=True) ['biolink:Transcript', 'biolink:RNAProduct', 'biolink:Noncoding_RNAProduct', 'biolink:SiRNA', 'biolink:MicroRNA', 'biolink:RNAProductIsoform'] ``` `biolink:Noncoding_RNAProduct` should not have an underscore. Yet somehow, at the same time, ```python >>> tk.get_element("noncoding RNA product").class_uri 'biolink:NoncodingRNAProduct' ``` These transformations should be unified, too.
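The inverse direction in the patch above is a single substitution; restated with the two cases the test patch checks:

```python
import re

def sentencecase_to_camelcase(s: str) -> str:
    # Uppercase the first letter of each word and swallow the separating
    # space; existing capitals (acronyms) pass through untouched, so no
    # stray underscore appears (the bug shown above).
    return re.sub(r"(?:^| )([a-zA-Z])", lambda m: m.group(1).upper(), s)

assert sentencecase_to_camelcase("noncoding RNA product") == "NoncodingRNAProduct"
assert sentencecase_to_camelcase("phenotypic feature") == "PhenotypicFeature"
```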
0.0
d4495980870ca6805c40f434fc3325d73da3e9f4
[ "tests/unit/test_utils.py::test_parse_name[query14]", "tests/unit/test_utils.py::test_sentencecase_to_camelcase[query1]", "tests/unit/test_utils.py::test_format_element[query3]" ]
[ "tests/unit/test_utils.py::test_parse_name[query0]", "tests/unit/test_utils.py::test_parse_name[query1]", "tests/unit/test_utils.py::test_parse_name[query2]", "tests/unit/test_utils.py::test_parse_name[query3]", "tests/unit/test_utils.py::test_parse_name[query4]", "tests/unit/test_utils.py::test_parse_name[query5]", "tests/unit/test_utils.py::test_parse_name[query6]", "tests/unit/test_utils.py::test_parse_name[query7]", "tests/unit/test_utils.py::test_parse_name[query8]", "tests/unit/test_utils.py::test_parse_name[query9]", "tests/unit/test_utils.py::test_parse_name[query10]", "tests/unit/test_utils.py::test_parse_name[query11]", "tests/unit/test_utils.py::test_parse_name[query12]", "tests/unit/test_utils.py::test_parse_name[query13]", "tests/unit/test_utils.py::test_sentencecase_to_camelcase[query0]", "tests/unit/test_utils.py::test_format_element[query0]", "tests/unit/test_utils.py::test_format_element[query1]", "tests/unit/test_utils.py::test_format_element[query2]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2021-07-02 21:06:02+00:00
bsd-3-clause
1,387
biolink__biolinkml-130
diff --git a/biolinkml/generators/jsonschemagen.py b/biolinkml/generators/jsonschemagen.py index 6af50246..77d77de1 100644 --- a/biolinkml/generators/jsonschemagen.py +++ b/biolinkml/generators/jsonschemagen.py @@ -57,14 +57,25 @@ class JsonSchemaGenerator(Generator): self.schemaobj.definitions[camelcase(cls.name)] = self.clsobj def visit_class_slot(self, cls: ClassDefinition, aliased_slot_name: str, slot: SlotDefinition) -> None: - if slot.range in self.schema.classes or slot.range in self.schema.types: + if slot.range in self.schema.classes and slot.inlined: rng = f"#/definitions/{camelcase(slot.range)}" + elif slot.range in self.schema.types: + rng = self.schema.types[slot.range].base else: + # note we assume string for non-lined complex objects rng = "string" - #slotrange = camelcase( - # slot.range) if slot.range in self.schema.classes or slot.range in self.schema.types else "String" - if self.inline or slot.inlined: + # translate to json-schema builtins + if rng == 'int': + rng = 'integer' + elif rng == 'Bool': + rng = 'boolean' + elif rng == 'str': + rng = 'string' + elif rng == 'float' or rng == 'double': + rng = 'number' + + if slot.inlined: # If inline we have to include redefined slots ref = JsonObj() ref['$ref'] = rng @@ -73,7 +84,10 @@ class JsonSchemaGenerator(Generator): else: prop = ref else: - prop = JsonObj(type="string") #TODO + if slot.multivalued: + prop = JsonObj(type="array", items={'type':rng}) + else: + prop = JsonObj(type=rng) if slot.description: prop.description = slot.description if slot.required: @@ -83,15 +97,6 @@ class JsonSchemaGenerator(Generator): if self.topCls is not None and camelcase(self.topCls) == camelcase(cls.name): self.schemaobj.properties[underscore(aliased_slot_name)] = prop - def xxxvisit_slot(self, slot_name: str, slot: SlotDefinition) -> None: - # Don't emit redefined slots unless we are inlining - if slot_name == slot.name or self.inline: - defn = JsonObj(type="array", items=self.type_or_ref(slot.range)) if slot.multivalued \ - else self.type_or_ref(slot.range) - if slot.description: - defn.description = slot.description - self.schemaobj.definitions[underscore(slot.name)] = defn - @shared_arguments(JsonSchemaGenerator) @click.command()
biolink/biolinkml
0039fa7f15b31f043fd11fe514597ed378804eaa
diff --git a/tests/test_issues/source/issue_129.yaml b/tests/test_issues/source/issue_129.yaml new file mode 100644 index 00000000..7f115188 --- /dev/null +++ b/tests/test_issues/source/issue_129.yaml @@ -0,0 +1,56 @@ +id: http://example.org/sample/types + +prefixes: + biolinkml: https://w3id.org/biolink/biolinkml/ + +imports: + - biolinkml:types + +types: + yearCount: + base: int + uri: xsd:int + +classes: + c: + slots: + - id + - age in years + - scores + - has prop + - has d + - has ds + - children + - parent + d: + slots: + - id + + +slots: + + id: + identifier: true + + scores: + range: float + multivalued: true + has prop: + range: boolean + age in years: + range: yearCount + has d: + range: d + multivalued: false + inlined: true + has ds: + range: d + multivalued: true + inlined: true + children: + range: c + multivalued: true + parent: + range: c + multivalued: false + diff --git a/tests/test_issues/test_issue_129.py b/tests/test_issues/test_issue_129.py new file mode 100644 index 00000000..f6b2560e --- /dev/null +++ b/tests/test_issues/test_issue_129.py @@ -0,0 +1,47 @@ +import os +import unittest +import json + +from biolinkml.generators.jsonschemagen import JsonSchemaGenerator +from biolinkml.generators.owlgen import OwlSchemaGenerator +from tests.test_issues import sourcedir + + +class IssueJSONSchemaTypesTestCase(unittest.TestCase): + + def header(self, txt: str) -> str: + return '\n' + ("=" * 20) + f" {txt} " + ("=" * 20) + + def test_issue_types(self): + """ Make sure that types are generated as part of the output """ + yaml_fname = os.path.join(sourcedir, 'issue_129.yaml') + gen = JsonSchemaGenerator(yaml_fname) + gen.topCls = 'c' + jsonschema = gen.serialize() + print(self.header("JSONSchema")) + print(jsonschema) + sobj = json.loads(jsonschema) + defs = sobj['definitions'] + C = defs['C'] + props = C['properties'] + assert props['age_in_years']['type'] == 'integer' + assert props['has_prop']['type'] == 'boolean' + # multivalued primitive type, inlined + assert props['scores']['type'] == 'array' + assert props['scores']['items']['type'] == 'number' + # single-valued complex type, inlined + assert props['has_d']['$ref'] == "#/definitions/D" + + # multi-valued, inlined + assert props['has_ds']['type'] == 'array' + assert props['has_ds']['items']['$ref'] == "#/definitions/D" + + # single-valued, non-inlined (foreign key) + assert props['parent']['type'] == "string" + + # multi-valued, non-inlined (foreign key) + assert props['children']['type'] == 'array' + assert props['children']['items']['type'] == "string" + +if __name__ == '__main__': + unittest.main()
json-schema ```yaml id: http://example.org/sample/types prefixes: biolinkml: https://w3id.org/biolink/biolinkml/ imports: - biolinkml:types types: yearCount: base: int uri: xsd:int classes: c: slots: - id - age in years - scores - has prop - has d - has ds - children - parent d: slots: - id slots: id: identifier: true scores: range: float multivalued: true has prop: range: boolean age in years: range: yearCount has d: range: d multivalued: false inlined: true has ds: range: d multivalued: true inlined: true children: range: c multivalued: true parent: range: c multivalued: false ``` current json output: ```json "C": { "description": "", "properties": { "age_in_years": { "type": "string" }, "children": { "items": { "type": "string" }, "type": "array" }, "has_d": { "type": "string" }, "has_ds": { "items": { "type": "string" }, "type": "array" }, "has_prop": { "type": "string" }, "id": { "required": true, "type": "string" }, "parent": { "type": "string" }, "scores": { "items": { "type": "string" }, "type": "array" } }, "title": "C", "type": "object" ``` There should be a `$ref` for any inlined reference to D
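For reference, the mapping the accompanying test (`test_issue_129` in the test patch above) asserts for class C, written out as a Python dict:

```python
# Expected JSON Schema fragments for class C, per test_issue_129.
expected_c_properties = {
    "age_in_years": {"type": "integer"},           # yearCount -> base int
    "has_prop":     {"type": "boolean"},
    "scores":       {"type": "array", "items": {"type": "number"}},
    "has_d":        {"$ref": "#/definitions/D"},   # single-valued, inlined
    "has_ds":       {"type": "array", "items": {"$ref": "#/definitions/D"}},
    "parent":       {"type": "string"},            # non-inlined foreign key
    "children":     {"type": "array", "items": {"type": "string"}},
}
```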
0.0
0039fa7f15b31f043fd11fe514597ed378804eaa
[ "tests/test_issues/test_issue_129.py::IssueJSONSchemaTypesTestCase::test_issue_types" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2020-06-07 18:18:44+00:00
cc0-1.0
1,388
biolink__biolinkml-369
diff --git a/biolinkml/generators/pythongen.py b/biolinkml/generators/pythongen.py index 63f4a7ca..48c9d2e1 100644 --- a/biolinkml/generators/pythongen.py +++ b/biolinkml/generators/pythongen.py @@ -197,12 +197,15 @@ dataclasses._init_fn = dataclasses_init_fn_with_kwargs add_type_ref(self.schema.types[typ.typeof]) rval.add_element(typ) + def add_enum_ref(e: EnumDefinition) -> None: + rval.add_element(e) + def add_slot_range(slot: SlotDefinition) -> None: if slot.range: if slot.range in self.schema.types: add_type_ref(self.schema.types[slot.range]) elif slot.range in self.schema.enums: - pass + add_enum_ref(self.schema.enums[slot.range]) else: cls = self.schema.classes[slot.range] if cls.imported_from:
biolink/biolinkml
0b62bffabb4938208703acac990a6b2b6461fa7e
diff --git a/tests/test_issues/input/issue_368.yaml b/tests/test_issues/input/issue_368.yaml new file mode 100644 index 00000000..0bc27c5b --- /dev/null +++ b/tests/test_issues/input/issue_368.yaml @@ -0,0 +1,19 @@ +id: https://microbiomedata/schema + +prefixes: + biolinkml: https://w3id.org/biolink/biolinkml/ + +imports: + - biolinkml:types + - issues_368_imports + +classes: + + c: + is_a: parent_class + slots: + - s + +slots: + s: + range: e diff --git a/tests/test_issues/input/issues_368_imports.yaml b/tests/test_issues/input/issues_368_imports.yaml new file mode 100644 index 00000000..7e576cb0 --- /dev/null +++ b/tests/test_issues/input/issues_368_imports.yaml @@ -0,0 +1,10 @@ +id: https://microbiomedata/schema/mixs + +classes: + parent_class: {} + +enums: + e: + permissible_values: + a: A + b: B diff --git a/tests/test_issues/test_issue_368.py b/tests/test_issues/test_issue_368.py new file mode 100644 index 00000000..fa90edd4 --- /dev/null +++ b/tests/test_issues/test_issue_368.py @@ -0,0 +1,57 @@ +import os +import unittest + +from jsonasobj import as_json + +from biolinkml.generators.pythongen import PythonGenerator +from tests.test_issues.environment import env +from tests.utils.python_comparator import compare_python, compile_python +from tests.utils.test_environment import TestEnvironmentTestCase + + +class Issue368TestCase(TestEnvironmentTestCase): + env = env + + def header(self, txt: str) -> str: + return '\n' + ("=" * 20) + f" {txt} " + ("=" * 20) + + def test_issue_368(self): + """ Make sure that types are generated as part of the output """ + env.generate_single_file('issues_368_imports.py', + lambda: PythonGenerator(env.input_path('issues_368_imports.yaml'), + mergeimports=False).serialize(), + comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('issues_368_imports.py')), + value_is_returned=True) + env.generate_single_file('issue_368.py', + lambda: PythonGenerator(env.input_path('issue_368.yaml'), + mergeimports=False).serialize(), + comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('issue_368.py')), + value_is_returned=True) + with open(env.expected_path('issue_368.py')) as f: + python= f.read() + + has_imports = False + for line in python.split("\n"): + if line.startswith("from . issues_368_imports"): + imps = line.replace("from . issues_368_imports import ","").split(", ") + assert 'E' in imps + assert 'ParentClass' in imps + has_imports = True + assert has_imports + module = compile_python(env.expected_path('issue_368.py')) + + enum_inst = module.E("a") # EnumInstanceImpl + example = module.C(s="a") + assert hasattr(example, "s") + assert example.s.code.text == enum_inst.code.text + assert str(example.s) == "a: A" + def output_generator(dirname) -> None: + with open(os.path.join(dirname, 'issue_368_1.json'), 'w') as f: + f.write(as_json(example)) + + # TODO: fix this + # env.generate_directory('issue_368', lambda dirname: output_generator(dirname)) + + +if __name__ == '__main__': + unittest.main()
Enums are not imported in generated Python code. E.g. ```yaml id: https://microbiomedata/schema prefixes: biolinkml: https://w3id.org/biolink/biolinkml/ imports: - biolinkml:types - issues_368_imports classes: c: is_a: parent_class slots: - s slots: s: range: e ``` where the imported file is: ```yaml id: https://microbiomedata/schema/mixs classes: parent_class: {} enums: e: permissible_values: a: A b: B ``` Running ```bash $ gen-python --no-mergeimports tests/test_issues/input/issue_368.yaml ``` produces: ```python ... from . issues_368_imports import ParentClass ... ``` but the generated Python should also import `E`.
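To make the expected fix concrete, here is a minimal, self-contained Python sketch of the range-dispatch logic changed in the patch above; the sets and the upper-casing scheme are illustrative stand-ins, not the real biolinkml generator API:

```python
# Illustrative stand-in for PythonGenerator's slot-range handling: the bug
# was that enum ranges fell through silently instead of being registered
# for import alongside types and classes.
types = {"string"}
enums = {"e"}
imports = set()

def add_slot_range(slot_range: str) -> None:
    if slot_range in types:
        imports.add(slot_range)
    elif slot_range in enums:
        imports.add(slot_range.upper())  # the fix; previously this branch was `pass`
    else:
        imports.add("ParentClass")       # class ranges were already handled

add_slot_range("e")
assert "E" in imports  # the enum now shows up in the generated imports
```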
0.0
0b62bffabb4938208703acac990a6b2b6461fa7e
[ "tests/test_issues/test_issue_368.py::Issue368TestCase::test_issue_368" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2021-02-16 18:12:42+00:00
cc0-1.0
1,389
biomass-dev__biomass-232
diff --git a/biomass/construction/reaction_rules.py b/biomass/construction/reaction_rules.py index f780835..41921c8 100644 --- a/biomass/construction/reaction_rules.py +++ b/biomass/construction/reaction_rules.py @@ -1709,11 +1709,21 @@ class ReactionRules(ThermodynamicRestrictions): if arrow in description[0]: is_unidirectional = True if arrow in self.fwd_arrows else False two_species = description[0].split(arrow) - reactants = two_species[0].replace(" ", "").split("+") - products = two_species[1].replace(" ", "").split("+") + reactants = ( + None + if len(two_species[0].split(" ")) > 1 and "+" not in two_species[0] + else two_species[0].replace(" ", "").split("+") + ) + products = ( + None + if len(two_species[1].split(" ")) > 1 and "+" not in two_species[1] + else two_species[1].replace(" ", "").split("+") + ) break else: raise ArrowError(self._get_arrow_error_message(line_num) + ".") + if reactants is None or products is None: + raise DetectionError(f"Unregistered words in line{line_num:d}: {line}") if list(set(reactants) & set(products)): modifiers = list(set(reactants) & set(products)) for modifier in modifiers:
biomass-dev/biomass
9ac0db3e444f9eee6ed120ff223cb19fe7bb9772
diff --git a/tests/test_text2model/test_error_message.py b/tests/test_text2model/test_error_message.py index f21705b..5a90608 100644 --- a/tests/test_text2model/test_error_message.py +++ b/tests/test_text2model/test_error_message.py @@ -39,6 +39,19 @@ def test_typo_detection(): assert "Maybe: 'binds'." in str(e.value) +def test_unregistered_rule(): + ubiquitination = Text2Model( + os.path.join( + os.path.dirname(__file__), + "text_files", + "unregistered_rule.txt", + ) + ) + with pytest.raises(DetectionError) as e: + ubiquitination.convert() + assert "Unregistered words in line1: A ubiquitinates B --> uB" in str(e.value) + + def test_cleanup(): for i in ["1", "2"]: assert os.path.isdir( @@ -55,3 +68,17 @@ def test_cleanup(): f"typo_{i}", ) ) + assert os.path.isdir( + os.path.join( + os.path.dirname(__file__), + "text_files", + "unregistered_rule", + ) + ) + shutil.rmtree( + os.path.join( + os.path.dirname(__file__), + "text_files", + "unregistered_rule", + ) + ) diff --git a/tests/test_text2model/test_graph.py b/tests/test_text2model/test_graph.py index 54ba13c..933c5d8 100644 --- a/tests/test_text2model/test_graph.py +++ b/tests/test_text2model/test_graph.py @@ -1,13 +1,13 @@ import os import shutil -import pytest - from biomass import Text2Model file_dir = os.path.join(os.path.dirname(__file__), "text_files") txt_files = [name for name in os.listdir(file_dir) if "__" not in name] +skipped_files = ["duplicate_binding.txt", "typo_1.txt", "typo_2.txt", "unregistered_rule.txt"] + def test_preprocessing(): for model in txt_files: @@ -30,7 +30,7 @@ def test_preprocessing(): def test_graph(): for model_file in txt_files: - if model_file in ["duplicate_binding.txt", "typo_1.txt", "typo_2.txt"]: + if model_file in skipped_files: continue model = Text2Model(os.path.join(os.path.dirname(__file__), "text_files", model_file)) model_path = os.path.join(file_dir, model_file.split(".")[0]) @@ -44,7 +44,7 @@ def test_graph(): def test_cleanup(): for model_file in txt_files: - if model_file in ["duplicate_binding.txt", "typo_1.txt", "typo_2.txt"]: + if model_file in skipped_files: continue model = model_file.split(".")[0] assert os.path.isdir( diff --git a/tests/test_text2model/text_files/unregistered_rule.txt b/tests/test_text2model/text_files/unregistered_rule.txt new file mode 100644 index 0000000..3566be5 --- /dev/null +++ b/tests/test_text2model/text_files/unregistered_rule.txt @@ -0,0 +1,1 @@ +A ubiquitinates B --> uB \ No newline at end of file
Raise `DetectionError` instead of creating a wrong model ### Discussed in https://github.com/biomass-dev/biomass/discussions/223 Prepare `test_ubiquitination.txt` containing: ``` A ubiquitinates B --> uB ``` Since there is no rule like 'ubiquitinate', we expect ``` # This is what I expect. DetectionError: Unregistered words in line1: A ubiquitinates B --> uB ``` However, when you run the following code: ```python testmodel = Text2Model("test_ubiquitination.txt") testmodel.convert(overwrite=True) ``` it does NOT raise an error but creates a model. When you look into the resulting model, you will soon find that it is wrong. For example, in `name2idx/species.py`: ```python from dataclasses import make_dataclass from typing import Dict, List NAMES: List[str] = [ "AubiquitinatesB", # <-- this is not desired :( "uB", ] ... ``` It seems that this unregistered word is recognized as a [`state transition rule`](https://github.com/biomass-dev/biomass/pull/169).
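The core of the patch is a small parsing guard. Below is a minimal, self-contained sketch of that rule; the function name is illustrative — in the real code the check lives inline in `ReactionRules` and the caller raises `DetectionError` when it fires:

```python
# A species expression that contains spaces but no '+' cannot be a valid
# reactant/product list, so it is flagged as containing unregistered words.
def parse_species(expr: str):
    if len(expr.split(" ")) > 1 and "+" not in expr:
        return None  # caller raises DetectionError("Unregistered words ...")
    return expr.replace(" ", "").split("+")

assert parse_species("A + B") == ["A", "B"]
assert parse_species("A ubiquitinates B") is None  # -> DetectionError
```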
0.0
9ac0db3e444f9eee6ed120ff223cb19fe7bb9772
[ "tests/test_text2model/test_error_message.py::test_unregistered_rule" ]
[ "tests/test_text2model/test_error_message.py::test_preprocessing", "tests/test_text2model/test_error_message.py::test_typo_detection", "tests/test_text2model/test_error_message.py::test_cleanup", "tests/test_text2model/test_graph.py::test_preprocessing" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-01-17 01:33:13+00:00
apache-2.0
1,390
biopragmatics__bioregistry-977
diff --git a/exports/contexts/obo.context.jsonld b/exports/contexts/obo.context.jsonld index 87f6613ff..1ec1187ef 100644 --- a/exports/contexts/obo.context.jsonld +++ b/exports/contexts/obo.context.jsonld @@ -254,7 +254,7 @@ "PLANA": "http://purl.obolibrary.org/obo/PLANA_", "PLANP": "http://purl.obolibrary.org/obo/PLANP_", "PLO": "http://purl.obolibrary.org/obo/PLO_", - "PMID": "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "PMID": "https://www.ncbi.nlm.nih.gov/pubmed/", "PO": "http://purl.obolibrary.org/obo/PO_", "PORO": "http://purl.obolibrary.org/obo/PORO_", "PPO": "http://purl.obolibrary.org/obo/PPO_", diff --git a/exports/contexts/obo.context.ttl b/exports/contexts/obo.context.ttl index b2a060a28..5f6b1ca5d 100644 --- a/exports/contexts/obo.context.ttl +++ b/exports/contexts/obo.context.ttl @@ -257,7 +257,7 @@ [ sh:prefix "PLANA" ; sh:namespace "http://purl.obolibrary.org/obo/PLANA_"^^xsd:anyURI ; sh:pattern "^\\d{7}$" ], [ sh:prefix "PLANP" ; sh:namespace "http://purl.obolibrary.org/obo/PLANP_"^^xsd:anyURI ; sh:pattern "^\\d+$" ], [ sh:prefix "PLO" ; sh:namespace "http://purl.obolibrary.org/obo/PLO_"^^xsd:anyURI ], - [ sh:prefix "PMID" ; sh:namespace "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], + [ sh:prefix "PMID" ; sh:namespace "https://www.ncbi.nlm.nih.gov/pubmed/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], [ sh:prefix "PO" ; sh:namespace "http://purl.obolibrary.org/obo/PO_"^^xsd:anyURI ; sh:pattern "^\\d+$" ], [ sh:prefix "PORO" ; sh:namespace "http://purl.obolibrary.org/obo/PORO_"^^xsd:anyURI ; sh:pattern "^\\d{7}$" ], [ sh:prefix "PPO" ; sh:namespace "http://purl.obolibrary.org/obo/PPO_"^^xsd:anyURI ; sh:pattern "^\\d{7}$" ], diff --git a/exports/contexts/obo.epm.json b/exports/contexts/obo.epm.json index 957e24d25..a784428fa 100644 --- a/exports/contexts/obo.epm.json +++ b/exports/contexts/obo.epm.json @@ -4366,7 +4366,7 @@ "pmid", "pubmed" ], - "uri_prefix": "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "uri_prefix": "https://www.ncbi.nlm.nih.gov/pubmed/", "uri_prefix_synonyms": [ "http://bio2rdf.org/pubmed:", "http://bioregistry.io/MEDLINE:", @@ -4382,6 +4382,7 @@ "http://pubmed.ncbi.nlm.nih.gov/", "http://purl.uniprot.org/citations/", "http://purl.uniprot.org/pubmed/", + "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", "http://scholia.toolforge.org/pubmed/", "http://www.hubmed.org/display.cgi?uids=", "http://www.ncbi.nlm.nih.gov/pubmed/", @@ -4401,8 +4402,7 @@ "https://purl.uniprot.org/pubmed/", "https://rdf.ncbi.nlm.nih.gov/pubchem/reference/", "https://scholia.toolforge.org/pubmed/", - "https://www.hubmed.org/display.cgi?uids=", - "https://www.ncbi.nlm.nih.gov/pubmed/" + "https://www.hubmed.org/display.cgi?uids=" ] }, { diff --git a/exports/contexts/obo_synonyms.context.jsonld b/exports/contexts/obo_synonyms.context.jsonld index 5ec744242..a6ccfc1e3 100644 --- a/exports/contexts/obo_synonyms.context.jsonld +++ b/exports/contexts/obo_synonyms.context.jsonld @@ -297,7 +297,7 @@ "MC": "http://purl.obolibrary.org/obo/MC_", "MCO": "http://purl.obolibrary.org/obo/MCO_", "MCRO": "http://purl.obolibrary.org/obo/MCRO_", - "MEDLINE": "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "MEDLINE": "https://www.ncbi.nlm.nih.gov/pubmed/", "MEDRA": "http://purl.bioontology.org/ontology/MDRFRE/", "MESH": "http://id.nlm.nih.gov/mesh/", "MESHA": "http://id.nlm.nih.gov/mesh/", @@ -437,7 +437,7 @@ "PLO": "http://purl.obolibrary.org/obo/PLO_", "PMC": "http://europepmc.org/articles/", "PMCID": "http://europepmc.org/articles/", - "PMID": 
"http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "PMID": "https://www.ncbi.nlm.nih.gov/pubmed/", "PO": "http://purl.obolibrary.org/obo/PO_", "PORO": "http://purl.obolibrary.org/obo/PORO_", "PPO": "http://purl.obolibrary.org/obo/PPO_", @@ -459,7 +459,7 @@ "Progenetix": "https://progenetix.org/services/ids/", "PuRO": "http://purl.org/spar/pro/", "PubChem_Compound_CID": "http://rdf.ncbi.nlm.nih.gov/pubchem/compound/CID", - "PubMed": "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "PubMed": "https://www.ncbi.nlm.nih.gov/pubmed/", "Pubchem": "http://rdf.ncbi.nlm.nih.gov/pubchem/compound/CID", "RBO": "http://purl.obolibrary.org/obo/RBO_", "RCSB_PDB": "https://rdf.wwpdb.org/pdb/", @@ -1657,7 +1657,7 @@ "pmap.substratedb": "https://identifiers.org/pmap.substratedb:", "pmc": "http://europepmc.org/articles/", "pmdb": "https://bioinformatics.cineca.it/PMDB/user//search.php?idsearch=", - "pmid": "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "pmid": "https://www.ncbi.nlm.nih.gov/pubmed/", "pmp": "http://purl.uniprot.org/uniprot/", "pmr": "https://models.physiomeproject.org/exposure/", "pmr.workspace": "https://models.physiomeproject.org/workspace/", @@ -1697,7 +1697,7 @@ "pubchem_id": "http://rdf.ncbi.nlm.nih.gov/pubchem/compound/CID", "publons.publication": "https://publons.com/publon/", "publons.researcher": "https://publons.com/researcher/", - "pubmed": "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/", + "pubmed": "https://www.ncbi.nlm.nih.gov/pubmed/", "px": "http://proteomecentral.proteomexchange.org/cgi/GetDataset?ID=", "pypi": "https://pypi.org/project/", "qb": "http://purl.org/linked-data/cube#", diff --git a/exports/contexts/obo_synonyms.context.ttl b/exports/contexts/obo_synonyms.context.ttl index 7a1617eb6..2dc12eb7c 100644 --- a/exports/contexts/obo_synonyms.context.ttl +++ b/exports/contexts/obo_synonyms.context.ttl @@ -338,11 +338,11 @@ [ sh:prefix "PLANA" ; sh:namespace "http://purl.obolibrary.org/obo/PLANA_"^^xsd:anyURI ; sh:pattern "^\\d{7}$" ], [ sh:prefix "PLANP" ; sh:namespace "http://purl.obolibrary.org/obo/PLANP_"^^xsd:anyURI ; sh:pattern "^\\d+$" ], [ sh:prefix "PLO" ; sh:namespace "http://purl.obolibrary.org/obo/PLO_"^^xsd:anyURI ], - [ sh:prefix "PMID" ; sh:namespace "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], - [ sh:prefix "MEDLINE" ; sh:namespace "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], - [ sh:prefix "PubMed" ; sh:namespace "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], - [ sh:prefix "pmid" ; sh:namespace "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], - [ sh:prefix "pubmed" ; sh:namespace "http://rdf.ncbi.nlm.nih.gov/pubchem/reference/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], + [ sh:prefix "PMID" ; sh:namespace "https://www.ncbi.nlm.nih.gov/pubmed/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], + [ sh:prefix "MEDLINE" ; sh:namespace "https://www.ncbi.nlm.nih.gov/pubmed/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], + [ sh:prefix "PubMed" ; sh:namespace "https://www.ncbi.nlm.nih.gov/pubmed/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], + [ sh:prefix "pmid" ; sh:namespace "https://www.ncbi.nlm.nih.gov/pubmed/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], + [ sh:prefix "pubmed" ; sh:namespace "https://www.ncbi.nlm.nih.gov/pubmed/"^^xsd:anyURI ; sh:pattern "^\\d+$" ], [ sh:prefix "PO" ; sh:namespace "http://purl.obolibrary.org/obo/PO_"^^xsd:anyURI ; sh:pattern "^\\d+$" ], [ sh:prefix "PORO" ; sh:namespace 
"http://purl.obolibrary.org/obo/PORO_"^^xsd:anyURI ; sh:pattern "^\\d{7}$" ], [ sh:prefix "PPO" ; sh:namespace "http://purl.obolibrary.org/obo/PPO_"^^xsd:anyURI ; sh:pattern "^\\d{7}$" ], diff --git a/src/bioregistry/record_accumulator.py b/src/bioregistry/record_accumulator.py index 1dad79705..ef9b06ea5 100644 --- a/src/bioregistry/record_accumulator.py +++ b/src/bioregistry/record_accumulator.py @@ -117,6 +117,7 @@ def get_converter( strict: bool = False, blacklist: Optional[Collection[str]] = None, remapping: Optional[Mapping[str, str]] = None, + rewiring: Optional[Mapping[str, str]] = None, ) -> Converter: """Generate a converter from resources.""" records = _get_records( @@ -130,6 +131,8 @@ def get_converter( converter = curies.Converter(records) if remapping: converter = curies.remap_curie_prefixes(converter, remapping) + if rewiring: + converter = curies.rewire(converter, rewiring) return converter diff --git a/src/bioregistry/resource_manager.py b/src/bioregistry/resource_manager.py index 279fc8408..d1c3c0a39 100644 --- a/src/bioregistry/resource_manager.py +++ b/src/bioregistry/resource_manager.py @@ -640,6 +640,7 @@ class Manager: include_prefixes: bool = False, strict: bool = False, remapping: Optional[Mapping[str, str]] = None, + rewiring: Optional[Mapping[str, str]] = None, blacklist: Optional[typing.Collection[str]] = None, ) -> curies.Converter: """Get a converter from this manager. @@ -657,6 +658,7 @@ class Manager: If true, errors on URI prefix collisions. If false, sends logging and skips them. :param remapping: A mapping from bioregistry prefixes to preferred prefixes. + :param rewiring: A mapping from bioregistry prefixes to new URI prefixes. :param blacklist: A collection of prefixes to skip @@ -677,6 +679,7 @@ class Manager: strict=strict, blacklist=blacklist, remapping=remapping, + rewiring=rewiring, ) return converter @@ -727,6 +730,7 @@ class Manager: prefix_priority: Optional[Sequence[str]] = None, include_synonyms: bool = False, remapping: Optional[Mapping[str, str]] = None, + rewiring: Optional[Mapping[str, str]] = None, blacklist: Optional[typing.Collection[str]] = None, ) -> Mapping[str, str]: """Get a mapping from Bioregistry prefixes to their URI prefixes . @@ -740,6 +744,7 @@ class Manager: :param include_synonyms: Should synonyms of each prefix also be included as additional prefixes, but with the same URI prefix? :param remapping: A mapping from Bioregistry prefixes to preferred prefixes. + :param rewiring: A mapping from Bioregistry prefixes to URI prefixes. :param blacklist: Prefixes to skip :return: A mapping from prefixes to URI prefixes. """ @@ -747,15 +752,10 @@ class Manager: prefix_priority=prefix_priority, uri_prefix_priority=uri_prefix_priority, remapping=remapping, + rewiring=rewiring, blacklist=blacklist, ) - rv = {} - for record in converter.records: - rv[record.prefix] = record.uri_prefix - if include_synonyms: - for prefix in record.prefix_synonyms: - rv[prefix] = record.uri_prefix - return rv + return converter.prefix_map if include_synonyms else converter.bimap def get_curie_pattern(self, prefix: str, *, use_preferred: bool = False) -> Optional[str]: r"""Get the CURIE pattern for this resource. 
@@ -1586,6 +1586,7 @@ class Manager: uri_prefix_priority=context.uri_prefix_priority, strict=strict, remapping=context.prefix_remapping, + rewiring=context.custom_prefix_map, blacklist=context.blacklist, include_prefixes=include_prefixes, ) @@ -1606,6 +1607,7 @@ class Manager: prefix_priority=context.prefix_priority, include_synonyms=include_synonyms, blacklist=context.blacklist, + rewiring=context.custom_prefix_map, ) prescriptive_pattern_map = self.get_pattern_map( remapping=context.prefix_remapping,
biopragmatics/bioregistry
0c4ff7203189c6470196c0d50200e1eaf85a866b
diff --git a/tests/test_contexts.py b/tests/test_contexts.py index 7428da168..dab8798b5 100644 --- a/tests/test_contexts.py +++ b/tests/test_contexts.py @@ -46,6 +46,7 @@ class TestContexts(unittest.TestCase): self.assertEqual(f"{p}/FBcv_", prefix_map["FBcv"]) self.assertIn("GEO", prefix_map) self.assertEqual(f"{p}/GEO_", prefix_map["GEO"]) + self.assertEqual("https://www.ncbi.nlm.nih.gov/pubmed/", prefix_map["PMID"]) self.assertNotIn("biomodels.kisao", prefix_map) @@ -64,6 +65,7 @@ class TestContexts(unittest.TestCase): self.assertEqual("Orphanet", converter.standardize_prefix("ordo")) self.assertEqual("GO", converter.standardize_prefix("GO")) self.assertEqual("GO", converter.standardize_prefix("gomf")) + self.assertEqual("https://www.ncbi.nlm.nih.gov/pubmed/", converter.bimap["PMID"]) # FIXME later, handle adding canonical bioregistry prefix # as synonym when non-default prefix priority ordering is given # self.assertEqual("GO", converter.standardize_prefix("go"))
Rewiring from the prescriptive context is ignored
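A short usage sketch of the fix, based on the `Manager.get_converter` signature added in the patch above. It assumes the module-level `manager` instance that bioregistry exposes; the prefix/URI pair is illustrative:

```python
# Hypothetical call: a rewiring mapping should override the URI prefix that
# the default priority ordering would otherwise pick for a prefix.
from bioregistry import manager

converter = manager.get_converter(
    rewiring={"pubmed": "https://www.ncbi.nlm.nih.gov/pubmed/"},
)
assert converter.bimap["pubmed"] == "https://www.ncbi.nlm.nih.gov/pubmed/"
```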
0.0
0c4ff7203189c6470196c0d50200e1eaf85a866b
[ "tests/test_contexts.py::TestContexts::test_obo_context", "tests/test_contexts.py::TestContexts::test_obo_converter" ]
[ "tests/test_contexts.py::TestContexts::test_data", "tests/test_contexts.py::TestContexts::test_linted" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-11-02 11:50:00+00:00
mit
1,391
biopython__biopython-3776
diff --git a/Bio/Seq.py b/Bio/Seq.py index bd8fd9eee..38f31bc33 100644 --- a/Bio/Seq.py +++ b/Bio/Seq.py @@ -281,6 +281,20 @@ class SequenceDataAbstractBaseClass(ABC): """Return a copy of data with all ASCII characters converted to lowercase.""" return bytes(self).lower() + def isupper(self): + """Return True if all ASCII characters in data are uppercase. + + If there are no cased characters, method returns False. + """ + return bytes(self).isupper() + + def islower(self): + """Return True if all ASCII characters in data are lowercase. + + If there are no cased characters, method returns False. + """ + return bytes(self).islower() + def replace(self, old, new): """Return a copy with all occurrences of substring old replaced by new.""" return bytes(self).replace(old, new) @@ -1305,6 +1319,20 @@ class _SeqAbstractBaseClass(ABC): else: return self.__class__(data) + def isupper(self): + """Return True if all ASCII characters in data are uppercase. + + If there are no cased characters, method returns False. + """ + return self._data.isupper() + + def islower(self): + """Return True if all ASCII characters in data are lowercase. + + If there are no cased characters, method returns False. + """ + return self._data.islower() + def translate( self, table="Standard", stop_symbol="*", to_stop=False, cds=False, gap="-" ): @@ -2891,6 +2919,22 @@ class _UndefinedSequenceData(SequenceDataAbstractBaseClass): # sequence of the same length return _UndefinedSequenceData(self._length) + def isupper(self): + """Return True if all ASCII characters in data are uppercase. + + If there are no cased characters, method returns False. + """ + # Character case is irrelevant for an undefined sequence + raise UndefinedSequenceError("Sequence content is undefined") + + def islower(self): + """Return True if all ASCII characters in data are lowercase. + + If there are no cased characters, method returns False. + """ + # Character case is irrelevant for an undefined sequence + raise UndefinedSequenceError("Sequence content is undefined") + def replace(self, old, new): """Return a copy with all occurrences of substring old replaced by new.""" # Replacing substring old by new in an undefined sequence will result @@ -3084,6 +3128,22 @@ class _PartiallyDefinedSequenceData(SequenceDataAbstractBaseClass): data = {start: seq.lower() for start, seq in self._data.items()} return _PartiallyDefinedSequenceData(self._length, data) + def isupper(self): + """Return True if all ASCII characters in data are uppercase. + + If there are no cased characters, method returns False. + """ + # Character case is irrelevant for an undefined sequence + raise UndefinedSequenceError("Sequence content is only partially defined") + + def islower(self): + """Return True if all ASCII characters in data are lowercase. + + If there are no cased characters, method returns False. + """ + # Character case is irrelevant for an undefined sequence + raise UndefinedSequenceError("Sequence content is only partially defined") + def translate(self, table, delete=b""): """Return a copy with each character mapped by the given translation table. diff --git a/CONTRIB.rst b/CONTRIB.rst index f554eb45b..d64d3f2b4 100644 --- a/CONTRIB.rst +++ b/CONTRIB.rst @@ -105,6 +105,7 @@ please open an issue on GitHub or mention it on the mailing list. 
- Eric Talevich <https://github.com/etal> - Erick Matsen <surname at fhcrc dot org> - Erik Cederstrand <https://github.com/ecederstrand> +- Erik Whiting <https://github.com/erik-whiting> - Fabian Egli <https://github.com/fabianegli> - Fei Qi <https://github.com/qifei9> - Foen Peng <https://github.com/foenpeng> diff --git a/NEWS.rst b/NEWS.rst index 72026a49e..0a685501f 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -45,6 +45,7 @@ possible, especially the following contributors: - Tim Burke - Michiel de Hoon - Peter Cock +- Erik Whiting 3 June 2021: Biopython 1.79 ===========================
biopython/biopython
f1f49deb8057f86a84d80a1dfedbc23fc2d3dfb2
diff --git a/Tests/test_SeqIO_TwoBitIO.py b/Tests/test_SeqIO_TwoBitIO.py index 262afea0c..47e3c992a 100644 --- a/Tests/test_SeqIO_TwoBitIO.py +++ b/Tests/test_SeqIO_TwoBitIO.py @@ -459,6 +459,14 @@ class TestBaseClassMethods(unittest.TestCase): self.assertEqual(seq2_twobit_lower, seq2_fasta_lower) self.assertEqual(seq2_twobit_lower[140:210], seq2_fasta_lower[140:210]) + def test_isupper(self): + self.assertEqual(self.seq1_twobit.isupper(), self.seq1_fasta.isupper()) + self.assertEqual(self.seq2_twobit.isupper(), self.seq2_fasta.isupper()) + + def test_islower(self): + self.assertEqual(self.seq1_twobit.islower(), self.seq1_fasta.islower()) + self.assertEqual(self.seq2_twobit.islower(), self.seq2_fasta.islower()) + def test_replace(self): # seq.transcribe uses seq._data.replace self.assertEqual(self.seq1_twobit.transcribe(), self.seq1_fasta.transcribe()) diff --git a/Tests/test_Seq_objs.py b/Tests/test_Seq_objs.py index 051d394a8..0e725a2e0 100644 --- a/Tests/test_Seq_objs.py +++ b/Tests/test_Seq_objs.py @@ -608,6 +608,28 @@ class StringMethodTests(unittest.TestCase): Seq("ABCD").lower(inplace=True) self.assertEqual(str(cm.exception), "Sequence is immutable") + def test_str_isupper(self): + """Check matches the python string isupper method.""" + for example1 in self._examples: + str1 = str(example1) + if isinstance(example1, _UndefinedSequenceData): + with self.assertRaises(UndefinedSequenceError): + example1.isupper() + else: + example1 = example1.isupper() + self.assertEqual(example1, str1.isupper()) + + def test_str_islower(self): + """Check matches the python string islower method.""" + for example1 in self._examples: + str1 = str(example1) + if isinstance(example1, _UndefinedSequenceData): + with self.assertRaises(UndefinedSequenceError): + example1.islower() + else: + example1 = example1.islower() + self.assertEqual(example1, str1.islower()) + def test_str_replace(self): """Check matches the python string replace method.""" s = Seq("AAGTACGT") diff --git a/Tests/test_seq.py b/Tests/test_seq.py index e861d4494..e3237233f 100644 --- a/Tests/test_seq.py +++ b/Tests/test_seq.py @@ -175,6 +175,8 @@ class TestSeqStringMethods(unittest.TestCase): for a in self.dna + self.rna + self.nuc + self.protein: self.assertEqual(a.lower(), str(a).lower()) self.assertEqual(a.upper(), str(a).upper()) + self.assertEqual(a.islower(), str(a).islower()) + self.assertEqual(a.isupper(), str(a).isupper()) self.assertEqual(a.strip(), str(a).strip()) self.assertEqual(a.lstrip(), str(a).lstrip()) self.assertEqual(a.rstrip(), str(a).rstrip()) @@ -184,22 +186,34 @@ class TestSeqStringMethods(unittest.TestCase): lseq = seq.lower() self.assertEqual(lseq, "acgt") self.assertEqual(seq, "ACgt") + self.assertTrue(lseq.islower()) + self.assertFalse(seq.islower()) lseq = seq.lower(inplace=False) self.assertEqual(lseq, "acgt") self.assertEqual(seq, "ACgt") + self.assertTrue(lseq.islower()) + self.assertFalse(seq.islower()) lseq = seq.lower(inplace=True) self.assertEqual(lseq, "acgt") self.assertIs(lseq, seq) + self.assertTrue(lseq.islower()) + self.assertTrue(lseq.islower()) seq = Seq.MutableSeq("ACgt") useq = seq.upper() self.assertEqual(useq, "ACGT") self.assertEqual(seq, "ACgt") + self.assertTrue(useq.isupper()) + self.assertFalse(seq.isupper()) useq = seq.upper(inplace=False) self.assertEqual(useq, "ACGT") self.assertEqual(seq, "ACgt") + self.assertTrue(useq.isupper()) + self.assertFalse(seq.isupper()) useq = seq.upper(inplace=True) self.assertEqual(useq, "ACGT") self.assertIs(useq, seq) + self.assertTrue(useq.isupper()) 
+ self.assertTrue(seq.isupper()) def test_hash(self): with warnings.catch_warnings(record=True):
Add .islower() and .isupper() methods to Seq? Some time ago there was an effort to make the Seq object more 'string-like'. This could include .islower() and .isupper() methods. Relevant SO question: https://stackoverflow.com/questions/69176192/attributeerror-seq-object-has-no-attribute-islower-in-protparam-module-of-b
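With the patch above applied, the new methods mirror the built-in `str` behavior. A quick sketch of the intended usage:

```python
from Bio.Seq import Seq

seq = Seq("ACgt")
assert not seq.isupper()       # mixed case, like "ACgt".isupper()
assert seq.upper().isupper()   # True after upper-casing
assert seq.lower().islower()   # True after lower-casing
```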
0.0
f1f49deb8057f86a84d80a1dfedbc23fc2d3dfb2
[ "Tests/test_Seq_objs.py::StringMethodTests::test_str_islower", "Tests/test_Seq_objs.py::StringMethodTests::test_str_isupper", "Tests/test_seq.py::TestSeqStringMethods::test_mutableseq_upper_lower", "Tests/test_seq.py::TestSeqStringMethods::test_string_methods" ]
[ "Tests/test_Seq_objs.py::StringMethodTests::test_MutableSeq_extend", "Tests/test_Seq_objs.py::StringMethodTests::test_MutableSeq_init_error", "Tests/test_Seq_objs.py::StringMethodTests::test_MutableSeq_setitem", "Tests/test_Seq_objs.py::StringMethodTests::test_Seq_init_error", "Tests/test_Seq_objs.py::StringMethodTests::test_count_overlap", "Tests/test_Seq_objs.py::StringMethodTests::test_count_overlap_start_end_GG", "Tests/test_Seq_objs.py::StringMethodTests::test_count_overlap_start_end_NN", "Tests/test_Seq_objs.py::StringMethodTests::test_equality", "Tests/test_Seq_objs.py::StringMethodTests::test_join_MutableSeq", "Tests/test_Seq_objs.py::StringMethodTests::test_join_MutableSeq_TypeError_iter", "Tests/test_Seq_objs.py::StringMethodTests::test_join_MutableSeq_mixed", "Tests/test_Seq_objs.py::StringMethodTests::test_join_Seq", "Tests/test_Seq_objs.py::StringMethodTests::test_join_Seq_TypeError", "Tests/test_Seq_objs.py::StringMethodTests::test_join_UnknownSeq", "Tests/test_Seq_objs.py::StringMethodTests::test_join_UnknownSeq_TypeError_iter", "Tests/test_Seq_objs.py::StringMethodTests::test_str_comparison", "Tests/test_Seq_objs.py::StringMethodTests::test_str_count", "Tests/test_Seq_objs.py::StringMethodTests::test_str_count_overlap_GG", "Tests/test_Seq_objs.py::StringMethodTests::test_str_count_overlap_NN", "Tests/test_Seq_objs.py::StringMethodTests::test_str_encode", "Tests/test_Seq_objs.py::StringMethodTests::test_str_endswith", "Tests/test_Seq_objs.py::StringMethodTests::test_str_find", "Tests/test_Seq_objs.py::StringMethodTests::test_str_getitem", "Tests/test_Seq_objs.py::StringMethodTests::test_str_hash", "Tests/test_Seq_objs.py::StringMethodTests::test_str_index", "Tests/test_Seq_objs.py::StringMethodTests::test_str_length", "Tests/test_Seq_objs.py::StringMethodTests::test_str_lower", "Tests/test_Seq_objs.py::StringMethodTests::test_str_lstrip", "Tests/test_Seq_objs.py::StringMethodTests::test_str_rfind", "Tests/test_Seq_objs.py::StringMethodTests::test_str_rindex", "Tests/test_Seq_objs.py::StringMethodTests::test_str_rsplit", "Tests/test_Seq_objs.py::StringMethodTests::test_str_rstrip", "Tests/test_Seq_objs.py::StringMethodTests::test_str_split", "Tests/test_Seq_objs.py::StringMethodTests::test_str_startswith", "Tests/test_Seq_objs.py::StringMethodTests::test_str_strip", "Tests/test_Seq_objs.py::StringMethodTests::test_str_upper", "Tests/test_Seq_objs.py::StringMethodTests::test_the_back_transcription", "Tests/test_Seq_objs.py::StringMethodTests::test_the_complement", "Tests/test_Seq_objs.py::StringMethodTests::test_the_reverse_complement", "Tests/test_Seq_objs.py::StringMethodTests::test_the_transcription", "Tests/test_Seq_objs.py::StringMethodTests::test_the_translate", "Tests/test_Seq_objs.py::StringMethodTests::test_the_translation_of_ambig_codons", "Tests/test_Seq_objs.py::StringMethodTests::test_the_translation_of_invalid_codons", "Tests/test_Seq_objs.py::StringMethodTests::test_the_translation_of_stops", "Tests/test_Seq_objs.py::StringMethodTests::test_tomutable", "Tests/test_Seq_objs.py::StringMethodTests::test_toseq", "Tests/test_Seq_objs.py::ComparisonTests::test_eq", "Tests/test_Seq_objs.py::ComparisonTests::test_ge", "Tests/test_Seq_objs.py::ComparisonTests::test_gt", "Tests/test_Seq_objs.py::ComparisonTests::test_le", "Tests/test_Seq_objs.py::ComparisonTests::test_lt", "Tests/test_Seq_objs.py::ComparisonTests::test_ne", "Tests/test_Seq_objs.py::PartialSequenceTests::test_complement", "Tests/test_Seq_objs.py::PartialSequenceTests::test_getitem", 
"Tests/test_Seq_objs.py::PartialSequenceTests::test_init", "Tests/test_Seq_objs.py::PartialSequenceTests::test_lower_upper", "Tests/test_Seq_objs.py::PartialSequenceTests::test_multiplication", "Tests/test_Seq_objs.py::PartialSequenceTests::test_replace", "Tests/test_Seq_objs.py::PartialSequenceTests::test_repr", "Tests/test_Seq_objs.py::PartialSequenceTests::test_transcribe", "Tests/test_seq.py::TestSeq::test_as_string", "Tests/test_seq.py::TestSeq::test_cast_to_list", "Tests/test_seq.py::TestSeq::test_concatenation_of_seq", "Tests/test_seq.py::TestSeq::test_extract_third_nucleotide", "Tests/test_seq.py::TestSeq::test_first_nucleotide", "Tests/test_seq.py::TestSeq::test_last_nucleotide", "Tests/test_seq.py::TestSeq::test_length", "Tests/test_seq.py::TestSeq::test_replace", "Tests/test_seq.py::TestSeq::test_repr", "Tests/test_seq.py::TestSeq::test_reverse", "Tests/test_seq.py::TestSeq::test_seq_construction", "Tests/test_seq.py::TestSeq::test_slicing", "Tests/test_seq.py::TestSeq::test_truncated_repr", "Tests/test_seq.py::TestSeqStringMethods::test_add_method_using_wrong_object", "Tests/test_seq.py::TestSeqStringMethods::test_append_nucleotides", "Tests/test_seq.py::TestSeqStringMethods::test_append_proteins", "Tests/test_seq.py::TestSeqStringMethods::test_contains_method", "Tests/test_seq.py::TestSeqStringMethods::test_counting_characters", "Tests/test_seq.py::TestSeqStringMethods::test_endswith", "Tests/test_seq.py::TestSeqStringMethods::test_finding_characters", "Tests/test_seq.py::TestSeqStringMethods::test_greater_than_comparison", "Tests/test_seq.py::TestSeqStringMethods::test_greater_than_comparison_of_incompatible_types", "Tests/test_seq.py::TestSeqStringMethods::test_greater_than_or_equal_comparison", "Tests/test_seq.py::TestSeqStringMethods::test_greater_than_or_equal_comparison_of_incompatible_types", "Tests/test_seq.py::TestSeqStringMethods::test_hash", "Tests/test_seq.py::TestSeqStringMethods::test_less_than_comparison", "Tests/test_seq.py::TestSeqStringMethods::test_less_than_comparison_of_incompatible_types", "Tests/test_seq.py::TestSeqStringMethods::test_less_than_or_equal_comparison", "Tests/test_seq.py::TestSeqStringMethods::test_less_than_or_equal_comparison_of_incompatible_types", "Tests/test_seq.py::TestSeqStringMethods::test_not_equal_comparsion", "Tests/test_seq.py::TestSeqStringMethods::test_radd_method_using_wrong_object", "Tests/test_seq.py::TestSeqStringMethods::test_splits", "Tests/test_seq.py::TestSeqStringMethods::test_startswith", "Tests/test_seq.py::TestSeqStringMethods::test_stripping_characters", "Tests/test_seq.py::TestSeqAddition::test_adding_generic_nucleotide_with_other_nucleotides", "Tests/test_seq.py::TestSeqAddition::test_adding_generic_nucleotide_with_other_nucleotides_inplace", "Tests/test_seq.py::TestSeqAddition::test_adding_protein_with_nucleotides", "Tests/test_seq.py::TestSeqAddition::test_addition_dna_rna_with_generic_nucleotides", "Tests/test_seq.py::TestSeqAddition::test_addition_dna_rna_with_generic_nucleotides_inplace", "Tests/test_seq.py::TestSeqAddition::test_addition_dna_with_dna", "Tests/test_seq.py::TestSeqAddition::test_addition_dna_with_dna_inplace", "Tests/test_seq.py::TestSeqAddition::test_addition_dna_with_rna", "Tests/test_seq.py::TestSeqAddition::test_addition_proteins", "Tests/test_seq.py::TestSeqAddition::test_addition_proteins_inplace", "Tests/test_seq.py::TestSeqAddition::test_addition_rna_with_rna", "Tests/test_seq.py::TestSeqAddition::test_addition_rna_with_rna_inplace", 
"Tests/test_seq.py::TestSeqMultiplication::test_imul_method", "Tests/test_seq.py::TestSeqMultiplication::test_imul_method_exceptions", "Tests/test_seq.py::TestSeqMultiplication::test_mul_method", "Tests/test_seq.py::TestSeqMultiplication::test_mul_method_exceptions", "Tests/test_seq.py::TestSeqMultiplication::test_rmul_method", "Tests/test_seq.py::TestSeqMultiplication::test_rmul_method_exceptions", "Tests/test_seq.py::TestMutableSeq::test_add_method", "Tests/test_seq.py::TestMutableSeq::test_appending", "Tests/test_seq.py::TestMutableSeq::test_as_string", "Tests/test_seq.py::TestMutableSeq::test_complement", "Tests/test_seq.py::TestMutableSeq::test_complement_dna_string", "Tests/test_seq.py::TestMutableSeq::test_complement_mixed_aphabets", "Tests/test_seq.py::TestMutableSeq::test_complement_old", "Tests/test_seq.py::TestMutableSeq::test_complement_rna", "Tests/test_seq.py::TestMutableSeq::test_complement_rna_string", "Tests/test_seq.py::TestMutableSeq::test_contains_method", "Tests/test_seq.py::TestMutableSeq::test_converting_to_immutable", "Tests/test_seq.py::TestMutableSeq::test_count", "Tests/test_seq.py::TestMutableSeq::test_delete_stride_slice", "Tests/test_seq.py::TestMutableSeq::test_deleting_item", "Tests/test_seq.py::TestMutableSeq::test_deleting_slice", "Tests/test_seq.py::TestMutableSeq::test_endswith", "Tests/test_seq.py::TestMutableSeq::test_equal_comparison", "Tests/test_seq.py::TestMutableSeq::test_extend_method", "Tests/test_seq.py::TestMutableSeq::test_extend_with_mutable_seq", "Tests/test_seq.py::TestMutableSeq::test_extract_third_nucleotide", "Tests/test_seq.py::TestMutableSeq::test_first_nucleotide", "Tests/test_seq.py::TestMutableSeq::test_greater_than_comparison", "Tests/test_seq.py::TestMutableSeq::test_greater_than_comparison_of_incompatible_types", "Tests/test_seq.py::TestMutableSeq::test_greater_than_comparison_with_str", "Tests/test_seq.py::TestMutableSeq::test_greater_than_or_equal_comparison", "Tests/test_seq.py::TestMutableSeq::test_greater_than_or_equal_comparison_of_incompatible_types", "Tests/test_seq.py::TestMutableSeq::test_greater_than_or_equal_comparison_with_str", "Tests/test_seq.py::TestMutableSeq::test_index", "Tests/test_seq.py::TestMutableSeq::test_inserting", "Tests/test_seq.py::TestMutableSeq::test_length", "Tests/test_seq.py::TestMutableSeq::test_less_than_comparison", "Tests/test_seq.py::TestMutableSeq::test_less_than_comparison_of_incompatible_types", "Tests/test_seq.py::TestMutableSeq::test_less_than_comparison_with_str", "Tests/test_seq.py::TestMutableSeq::test_less_than_or_equal_comparison", "Tests/test_seq.py::TestMutableSeq::test_less_than_or_equal_comparison_of_incompatible_types", "Tests/test_seq.py::TestMutableSeq::test_less_than_or_equal_comparison_with_str", "Tests/test_seq.py::TestMutableSeq::test_mutableseq_construction", "Tests/test_seq.py::TestMutableSeq::test_not_equal_comparison", "Tests/test_seq.py::TestMutableSeq::test_popping_last_item", "Tests/test_seq.py::TestMutableSeq::test_radd_method_wrong_type", "Tests/test_seq.py::TestMutableSeq::test_remove_items", "Tests/test_seq.py::TestMutableSeq::test_repr", "Tests/test_seq.py::TestMutableSeq::test_reverse", "Tests/test_seq.py::TestMutableSeq::test_reverse_complement", "Tests/test_seq.py::TestMutableSeq::test_reverse_complement_old", "Tests/test_seq.py::TestMutableSeq::test_reverse_complement_rna", "Tests/test_seq.py::TestMutableSeq::test_reverse_with_stride", "Tests/test_seq.py::TestMutableSeq::test_set_wobble_codon_to_n", 
"Tests/test_seq.py::TestMutableSeq::test_setting_item", "Tests/test_seq.py::TestMutableSeq::test_setting_slices", "Tests/test_seq.py::TestMutableSeq::test_startswith", "Tests/test_seq.py::TestMutableSeq::test_transcribe", "Tests/test_seq.py::TestMutableSeq::test_truncated_repr", "Tests/test_seq.py::TestUnknownSeq::test_add_method", "Tests/test_seq.py::TestUnknownSeq::test_back_transcribe", "Tests/test_seq.py::TestUnknownSeq::test_complement", "Tests/test_seq.py::TestUnknownSeq::test_count", "Tests/test_seq.py::TestUnknownSeq::test_getitem_method", "Tests/test_seq.py::TestUnknownSeq::test_length", "Tests/test_seq.py::TestUnknownSeq::test_lower", "Tests/test_seq.py::TestUnknownSeq::test_repr", "Tests/test_seq.py::TestUnknownSeq::test_reverse_complement", "Tests/test_seq.py::TestUnknownSeq::test_transcribe", "Tests/test_seq.py::TestUnknownSeq::test_translation", "Tests/test_seq.py::TestUnknownSeq::test_ungap", "Tests/test_seq.py::TestUnknownSeq::test_unknownseq_construction", "Tests/test_seq.py::TestUnknownSeq::test_upper", "Tests/test_seq.py::TestAmbiguousComplements::test_ambiguous_values", "Tests/test_seq.py::TestComplement::test_complement_ambiguous_dna_values", "Tests/test_seq.py::TestComplement::test_complement_ambiguous_rna_values", "Tests/test_seq.py::TestComplement::test_complement_incompatible_letters", "Tests/test_seq.py::TestComplement::test_complement_of_dna", "Tests/test_seq.py::TestComplement::test_complement_of_mixed_dna_rna", "Tests/test_seq.py::TestComplement::test_complement_of_rna", "Tests/test_seq.py::TestComplement::test_immutable", "Tests/test_seq.py::TestReverseComplement::test_immutable", "Tests/test_seq.py::TestReverseComplement::test_reverse_complement", "Tests/test_seq.py::TestReverseComplement::test_reverse_complement_of_dna", "Tests/test_seq.py::TestReverseComplement::test_reverse_complement_of_mixed_dna_rna", "Tests/test_seq.py::TestReverseComplement::test_reverse_complement_of_rna", "Tests/test_seq.py::TestDoubleReverseComplement::test_reverse_complements", "Tests/test_seq.py::TestTranscription::test_back_transcribe_rna_into_dna", "Tests/test_seq.py::TestTranscription::test_back_transcribe_rna_string_into_dna", "Tests/test_seq.py::TestTranscription::test_seq_object_back_transcription_method", "Tests/test_seq.py::TestTranscription::test_seq_object_transcription_method", "Tests/test_seq.py::TestTranscription::test_transcription_dna_into_rna", "Tests/test_seq.py::TestTranscription::test_transcription_dna_string_into_rna", "Tests/test_seq.py::TestTranslating::test_gapped_seq_no_gap_char_given", "Tests/test_seq.py::TestTranslating::test_gapped_seq_with_gap_char_given", "Tests/test_seq.py::TestTranslating::test_translation", "Tests/test_seq.py::TestTranslating::test_translation_extra_stop_codon", "Tests/test_seq.py::TestTranslating::test_translation_incomplete_codon", "Tests/test_seq.py::TestTranslating::test_translation_of_asparagine", "Tests/test_seq.py::TestTranslating::test_translation_of_gapped_string_no_gap_char_given", "Tests/test_seq.py::TestTranslating::test_translation_of_gapped_string_with_gap_char_given", "Tests/test_seq.py::TestTranslating::test_translation_of_glutamine", "Tests/test_seq.py::TestTranslating::test_translation_of_invalid_codon", "Tests/test_seq.py::TestTranslating::test_translation_of_leucine", "Tests/test_seq.py::TestTranslating::test_translation_of_string", "Tests/test_seq.py::TestTranslating::test_translation_on_proteins", "Tests/test_seq.py::TestTranslating::test_translation_to_stop", 
"Tests/test_seq.py::TestTranslating::test_translation_using_cds", "Tests/test_seq.py::TestTranslating::test_translation_using_tables_with_ambiguous_stop_codons", "Tests/test_seq.py::TestTranslating::test_translation_with_bad_table_argument", "Tests/test_seq.py::TestTranslating::test_translation_with_codon_table_as_table_argument", "Tests/test_seq.py::TestTranslating::test_translation_wrong_type", "Tests/test_seq.py::TestStopCodons::test_stops", "Tests/test_seq.py::TestStopCodons::test_translation_of_stops", "Tests/test_seq.py::TestAttributes::test_mutable_seq", "Tests/test_seq.py::TestAttributes::test_seq" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-10-31 16:40:52+00:00
bsd-3-clause
1,392
bird-house__birdy-116
diff --git a/birdy/client/__init__.py b/birdy/client/__init__.py index b48b844..2d570c3 100644 --- a/birdy/client/__init__.py +++ b/birdy/client/__init__.py @@ -54,4 +54,4 @@ If a WPS server with a simple `hello` process is running on the local host on po """ -from .base import WPSClient +from .base import WPSClient, nb_form diff --git a/birdy/client/base.py b/birdy/client/base.py index 507d5fb..1108a79 100644 --- a/birdy/client/base.py +++ b/birdy/client/base.py @@ -292,11 +292,12 @@ class WPSClient(object): else: self.logger.info("{} failed.".format(execution.process.identifier)) - def interact(self, pid): - """Return a Notebook form to enter input values and launch process.""" - if self._notebook: - return notebook.interact( - func=getattr(self, pid), - inputs=self._inputs[pid].items()) - else: - return None + +def nb_form(wps, pid): + """Return a Notebook form to enter input values and launch process.""" + if wps._notebook: + return notebook.interact( + func=getattr(wps, sanitize(pid)), + inputs=wps._inputs[pid].items()) + else: + return None diff --git a/birdy/client/notebook.py b/birdy/client/notebook.py index 1f5b705..403c205 100644 --- a/birdy/client/notebook.py +++ b/birdy/client/notebook.py @@ -4,6 +4,7 @@ from owslib.wps import Input from . import utils from birdy.dependencies import ipywidgets as widgets from birdy.dependencies import IPython +from birdy.utils import sanitize def is_notebook(): @@ -24,10 +25,14 @@ def is_notebook(): def interact(func, inputs): - """Return a Notebook form to enter input values and launch process.""" - ws = {key: input2widget(inpt) for key, inpt in inputs} + """Return a Notebook form to enter input values and launch process. + + The output is stored in the `widget.result` attribute of the response. + """ + ws = {sanitize(key): input2widget(inpt) for key, inpt in inputs} out = widgets.interact_manual(func, **ws) out.widget.children[-2].description = 'Launch process' + # IPython.display.display(out) return out diff --git a/notebooks/Interactive.ipynb b/notebooks/Interactive.ipynb new file mode 100644 index 0000000..84a6a9f --- /dev/null +++ b/notebooks/Interactive.ipynb @@ -0,0 +1,72 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from birdy import WPSClient\n", + "from birdy.client import nb_form\n", + "emu = WPSClient(url='http://localhost:5000/wps')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "resp = nb_form(emu, 'binaryoperatorfornumbers')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "resp.widget.result.get(asobj=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nb_form(emu, 'non.py-id')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nb_form(emu, 'chomsky')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/emu-example.ipynb b/notebooks/emu-example.ipynb index 80d4d55..8663c57 100644 --- 
a/notebooks/emu-example.ipynb +++ b/notebooks/emu-example.ipynb @@ -213,7 +213,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.2" + "version": "3.7.3" } }, "nbformat": 4,
bird-house/birdy
c1b6039c6412b9c5f96284647e4e54c5fb4243d5
diff --git a/tests/test_client.py b/tests/test_client.py index 0658b80..922601d 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -5,7 +5,7 @@ import json # from owslib import crs from pathlib import Path -from birdy.client import converters +from birdy.client import converters, nb_form from birdy.client.utils import is_embedded_in_request from birdy import WPSClient @@ -60,11 +60,11 @@ def test_wps_client_single_output(wps): @pytest.mark.online -def test_wps_interact(wps): +def test_wps_nb_form(wps): for pid in wps._processes.keys(): if pid in ['bbox', ]: # Unsupported continue - wps.interact(pid) + nb_form(wps, pid) @pytest.mark.online diff --git a/tests/test_notebook.py b/tests/test_notebook.py index 5e1a5d6..14eaeda 100644 --- a/tests/test_notebook.py +++ b/tests/test_notebook.py @@ -2,5 +2,5 @@ from birdy.client import notebook def test_is_notebook(): - # we excpect True or False but no exception + # we expect True or False but no exception notebook.is_notebook()
Interact shadows the process names ## Description I think it's a problem that the `interact` method is in the same scope as the actual WPS processes. Ideas? Could we attach the method to the process function instead? `wps.inout.interact()`
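The patch resolves this by moving the form builder out of the client into a free function, `nb_form(wps, pid)`. A usage sketch taken from the notebook added in the patch; it assumes an Emu WPS server running on localhost, as in the tests:

```python
from birdy import WPSClient
from birdy.client import nb_form

emu = WPSClient(url="http://localhost:5000/wps")
# nb_form no longer lives in the client's namespace, so it cannot shadow a
# WPS process that happens to share its name.
resp = nb_form(emu, "binaryoperatorfornumbers")
```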
0.0
c1b6039c6412b9c5f96284647e4e54c5fb4243d5
[ "tests/test_client.py::test_converter", "tests/test_client.py::test_jsonconverter", "tests/test_client.py::TestIsEmbedded::test_string", "tests/test_client.py::TestIsEmbedded::test_file_like", "tests/test_client.py::TestIsEmbedded::test_local_fn", "tests/test_client.py::TestIsEmbedded::test_local_path", "tests/test_client.py::TestIsEmbedded::test_local_uri", "tests/test_client.py::TestIsEmbedded::test_url", "tests/test_notebook.py::test_is_notebook" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-04-01 22:44:09+00:00
apache-2.0
1,394
bird-house__birdy-137
diff --git a/birdy/client/converters.py b/birdy/client/converters.py index 8e6b3fc..f3e9471 100644 --- a/birdy/client/converters.py +++ b/birdy/client/converters.py @@ -17,7 +17,7 @@ else: class BaseConverter(object): mimetype = None extensions = [] - priority = 1 + priority = None nested = False def __init__(self, output=None, path=None, verify=True): @@ -41,11 +41,20 @@ class BaseConverter(object): @property def file(self): + """Return output Path object. Download from server if """ if self._file is None: self.output.writeToDisk(path=self.path, verify=self.verify) self._file = Path(self.output.filePath) return self._file + @property + def data(self): + """Return the data from the remote output in memory.""" + if self._file is not None: + return self.file.read_bytes() + else: + return self.output.retrieveData() + def check_dependencies(self): pass @@ -62,13 +71,26 @@ class BaseConverter(object): raise type(e)(message.format(self.__class__.__name__, name)) def convert(self): - return self.file.read_text(encoding='utf8') + """To be subclassed""" + raise NotImplementedError + + +class GenericConverter(BaseConverter): + priority = 0 + + def convert(self): + """Return raw bytes memory representation.""" + return self.data class TextConverter(BaseConverter): mimetype = "text/plain" - extensions = ['txt', 'csv'] + extensions = ['txt', 'csv', 'md', 'rst'] + priority = 1 + def convert(self): + """Return text content.""" + return self.file.read_text(encoding='utf8') # class HTMLConverter(BaseConverter): # """Create HTML cell in notebook.""" @@ -89,6 +111,7 @@ class TextConverter(BaseConverter): class JSONConverter(BaseConverter): mimetype = "application/json" extensions = ['json', ] + priority = 1 def convert(self): """ @@ -103,6 +126,7 @@ class JSONConverter(BaseConverter): class GeoJSONConverter(BaseConverter): mimetype = "application/vnd.geo+json" extensions = ['geojson', ] + priority = 1 def check_dependencies(self): self._check_import("geojson") @@ -117,6 +141,7 @@ class MetalinkConverter(BaseConverter): mimetype = "application/metalink+xml; version=3.0" extensions = ['metalink', ] nested = True + priority = 1 def check_dependencies(self): self._check_import("metalink.download") @@ -135,6 +160,7 @@ class Meta4Converter(MetalinkConverter): class Netcdf4Converter(BaseConverter): mimetype = "application/x-netcdf" extensions = ['nc', ] + priority = 1 def check_dependencies(self): self._check_import("netCDF4") @@ -180,6 +206,7 @@ class XarrayConverter(BaseConverter): class ShpFionaConverter(BaseConverter): mimetype = "application/x-zipped-shp" + priority = 1 def check_dependencies(self): self._check_import("fiona") @@ -193,6 +220,7 @@ class ShpFionaConverter(BaseConverter): class ShpOgrConverter(BaseConverter): mimetype = "application/x-zipped-shp" + priority = 1 def check_dependencies(self): self._check_import("ogr", package="osgeo") @@ -207,6 +235,7 @@ class ShpOgrConverter(BaseConverter): class ImageConverter(BaseConverter): mimetype = 'image/png' extensions = ['png', ] + priority = 1 def check_dependencies(self): return nb.is_notebook() @@ -220,6 +249,7 @@ class ZipConverter(BaseConverter): mimetype = 'application/zip' extensions = ['zip', ] nested = True + priority = 1 def convert(self): import zipfile @@ -231,7 +261,7 @@ class ZipConverter(BaseConverter): def _find_converter(mimetype=None, extension=None, converters=()): """Return a list of compatible converters ordered by priority. 
""" - select = [] + select = [GenericConverter] for obj in converters: if (mimetype == obj.mimetype) or (extension in obj.extensions): select.append(obj) @@ -269,13 +299,16 @@ def convert(output, path, converters=None, verify=True): Returns ------- objs - Python object or path to file if no converter was found. + Python object or file's content as bytes. """ + # Get all converters if converters is None: converters = all_subclasses(BaseConverter) + # Find converters matching mime type or extension. convs = find_converter(output, converters) + # Try converters in order of priority for cls in convs: try: converter = cls(output, path=path, verify=verify) @@ -287,13 +320,6 @@ def convert(output, path, converters=None, verify=True): except (ImportError, NotImplementedError): pass - if isinstance(output, Output): - warnings.warn(UserWarning("No converter was found for {}".format(output.identifier))) - return output.reference - else: - warnings.warn(UserWarning("No converter was found for {}".format(output))) - return output - def all_subclasses(cls): """Return all subclasses of a class."""
bird-house/birdy
4454a969d01f066a92792e02ce8e1fdbc7d45cf8
diff --git a/tests/test_client.py b/tests/test_client.py index 1f77f00..f9881d0 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -150,11 +150,10 @@ def test_asobj(wps): out = resp.get(asobj=True) assert 'URL' in out.output - # If the converter is missing, we should still get the reference. - with pytest.warns(UserWarning): - resp._converters = [] - out = resp.get(asobj=True) - assert out.output.startswith('http://') + # If the converter is missing, we should still get the data as bytes. + resp._converters = [] + out = resp.get(asobj=True) + assert isinstance(out.output, bytes) @pytest.mark.online @@ -316,13 +315,14 @@ def test_zipconverter(): assert len(ob.splitlines()) == 2 [email protected]("jpeg not supported yet") def test_jpeg_imageconverter(): + "Since the format is not supported, bytes will be returned." fn = tempfile.mktemp(suffix='.jpeg') with open(fn, 'w') as f: f.write('jpeg.jpg JPEG 1x1 1x1+0+0 8-bit Grayscale Gray 256c 107B 0.000u 0:00.000') - converters.convert(fn, path='/tmp') + b = converters.convert(fn, path='/tmp') + assert isinstance(b, bytes) class TestIsEmbedded():
Retrieve the data if no converter is found ## Description At the moment, if no converter is available, `get(asobj=True)` returns the link. I suggest that instead we return the output of `retrieveData()`.
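A sketch of the new fallback behavior, mirroring the updated JPEG test above; the file content is a placeholder — any format without a dedicated converter now falls through to the byte-returning `GenericConverter` instead of warning and returning a reference:

```python
import tempfile
from birdy.client import converters

# Write a dummy "image" in a format birdy has no converter for.
fn = tempfile.mktemp(suffix=".jpeg")
with open(fn, "w") as f:
    f.write("not really a jpeg")

# Instead of a warning plus a reference URL, we now get raw bytes back.
b = converters.convert(fn, path="/tmp")
assert isinstance(b, bytes)
```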
0.0
4454a969d01f066a92792e02ce8e1fdbc7d45cf8
[ "tests/test_client.py::test_jpeg_imageconverter" ]
[ "tests/test_client.py::test_all_subclasses", "tests/test_client.py::test_jsonconverter", "tests/test_client.py::test_zipconverter", "tests/test_client.py::TestIsEmbedded::test_string", "tests/test_client.py::TestIsEmbedded::test_file_like", "tests/test_client.py::TestIsEmbedded::test_local_fn", "tests/test_client.py::TestIsEmbedded::test_local_path", "tests/test_client.py::TestIsEmbedded::test_local_uri", "tests/test_client.py::TestIsEmbedded::test_url" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-07-02 18:02:47+00:00
apache-2.0
1,395
bird-house__birdy-138
diff --git a/birdy/client/base.py b/birdy/client/base.py index 71f8eec..1fd5855 100644 --- a/birdy/client/base.py +++ b/birdy/client/base.py @@ -165,20 +165,6 @@ class WPSClient(object): process = self._processes[pid] - def sort_inputs_key(i): - """Function used as key when sorting process inputs. - - The order is: - - Inputs that have minOccurs >= 1 and no default value - - Inputs that have minOccurs >= 1 and a default value - - Every other input - """ - return list(reversed([ - i.minOccurs >= 1 and i.defaultValue is None, - i.minOccurs >= 1, - i.minOccurs == 0, - ])) - required_inputs_first = sorted(process.dataInputs, key=sort_inputs_key) input_names = [] @@ -325,6 +311,27 @@ class WPSClient(object): self.logger.info("{} failed.".format(execution.process.identifier)) +def sort_inputs_key(i): + """Function used as key when sorting process inputs. + + The order is: + - Inputs that have minOccurs >= 1 and no default value + - Inputs that have minOccurs >= 1 and a default value + - Every other input + + Parameters + ---------- + i: owslib.wps.Input + An owslib Input + """ + conditions = [ + i.minOccurs >= 1 and i.defaultValue is None, + i.minOccurs >= 1, + i.minOccurs == 0, + ] + return [not c for c in conditions] # False values are sorted first + + def nb_form(wps, pid): """Return a Notebook form to enter input values and launch process.""" if wps._notebook: diff --git a/birdy/dependencies.py b/birdy/dependencies.py index f5ccac5..a59d49b 100644 --- a/birdy/dependencies.py +++ b/birdy/dependencies.py @@ -11,7 +11,9 @@ Example usage:: import warnings from .exceptions import IPythonWarning -warnings.filterwarnings('default', category=IPythonWarning) +# TODO: we ignore warnings for now. They are only needed when birdy is used in a notebook, +# but we currently don't know how to handle this (see #89 and #138). +warnings.filterwarnings('ignore', category=IPythonWarning) try: import ipywidgets
bird-house/birdy
edd1e4d9b043fb67fe3f2c1fec3189e5bca28778
diff --git a/tests/test_client.py b/tests/test_client.py index f9881d0..97bb7bc 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,15 +1,17 @@ import datetime import os -import pytest import json +import tempfile +from pathlib import Path +from unittest import mock + +import pytest # from owslib import crs -from pathlib import Path from birdy.client import converters, nb_form +from birdy.client.base import sort_inputs_key from birdy.client.utils import is_embedded_in_request from birdy import WPSClient -from io import StringIO, BytesIO -import tempfile # These tests assume Emu is running on the localhost @@ -253,6 +255,34 @@ def test_xarray_converter(wps): assert isinstance(ncdata, xr.Dataset) +def test_sort_inputs(): + """ + The order should be: + - Inputs that have minOccurs >= 1 and no default value + - Inputs that have minOccurs >= 1 and a default value + - Every other input + """ + + i = mock.Mock() + i.minOccurs = 1 + i.defaultValue = None + assert sort_inputs_key(i) == [False, False, True] + + i = mock.Mock() + i.minOccurs = 1 + i.defaultValue = "default" + assert sort_inputs_key(i) == [True, False, True] + + i = mock.Mock() + i.minOccurs = 0 + assert sort_inputs_key(i) == [True, True, False] + + i = mock.Mock() + i.minOccurs = 0 + i.defaultValue = "default" + assert sort_inputs_key(i) == [True, True, False] + + def test_all_subclasses(): c = converters.all_subclasses(converters.BaseConverter) assert converters.Meta4Converter in c
Don't show ipython warnings when using command-line interface. ## Description When running the birdy command line, ipython warnings are shown when the packages are not installed. The CLI does not need ipython, so these warnings can be deactivated in this case. ## Environment * Birdy version used, if any: 0.5.0 * Python version, if any: * Operating System: ## Steps to Reproduce ``` $ birdy -h /Users/pingu/Documents/GitHub/birdhouse/birdy/birdy/dependencies.py:20: IPythonWarning: Jupyter Notebook is not supported. Please install *ipywidgets*. warnings.warn('Jupyter Notebook is not supported. Please install *ipywidgets*.', IPythonWarning) /Users/pingu/Documents/GitHub/birdhouse/birdy/birdy/dependencies.py:26: IPythonWarning: IPython is not supported. Please install *ipython*. warnings.warn('IPython is not supported. Please install *ipython*.', IPythonWarning) Usage: birdy [OPTIONS] COMMAND [ARGS]... Birdy is a command line client for Web Processing Services. ``` ## Additional Information
0.0
edd1e4d9b043fb67fe3f2c1fec3189e5bca28778
[ "tests/test_client.py::test_sort_inputs", "tests/test_client.py::test_all_subclasses", "tests/test_client.py::test_jsonconverter", "tests/test_client.py::test_zipconverter", "tests/test_client.py::test_jpeg_imageconverter", "tests/test_client.py::TestIsEmbedded::test_string", "tests/test_client.py::TestIsEmbedded::test_file_like", "tests/test_client.py::TestIsEmbedded::test_local_fn", "tests/test_client.py::TestIsEmbedded::test_local_path", "tests/test_client.py::TestIsEmbedded::test_local_uri", "tests/test_client.py::TestIsEmbedded::test_url" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-07-22 19:53:41+00:00
apache-2.0
1,396
bird-house__birdy-139
diff --git a/birdy/client/base.py b/birdy/client/base.py index 71f8eec..1fd5855 100644 --- a/birdy/client/base.py +++ b/birdy/client/base.py @@ -165,20 +165,6 @@ class WPSClient(object): process = self._processes[pid] - def sort_inputs_key(i): - """Function used as key when sorting process inputs. - - The order is: - - Inputs that have minOccurs >= 1 and no default value - - Inputs that have minOccurs >= 1 and a default value - - Every other input - """ - return list(reversed([ - i.minOccurs >= 1 and i.defaultValue is None, - i.minOccurs >= 1, - i.minOccurs == 0, - ])) - required_inputs_first = sorted(process.dataInputs, key=sort_inputs_key) input_names = [] @@ -325,6 +311,27 @@ class WPSClient(object): self.logger.info("{} failed.".format(execution.process.identifier)) +def sort_inputs_key(i): + """Function used as key when sorting process inputs. + + The order is: + - Inputs that have minOccurs >= 1 and no default value + - Inputs that have minOccurs >= 1 and a default value + - Every other input + + Parameters + ---------- + i: owslib.wps.Input + An owslib Input + """ + conditions = [ + i.minOccurs >= 1 and i.defaultValue is None, + i.minOccurs >= 1, + i.minOccurs == 0, + ] + return [not c for c in conditions] # False values are sorted first + + def nb_form(wps, pid): """Return a Notebook form to enter input values and launch process.""" if wps._notebook:
bird-house/birdy
edd1e4d9b043fb67fe3f2c1fec3189e5bca28778
diff --git a/tests/test_client.py b/tests/test_client.py index f9881d0..97bb7bc 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,15 +1,17 @@ import datetime import os -import pytest import json +import tempfile +from pathlib import Path +from unittest import mock + +import pytest # from owslib import crs -from pathlib import Path from birdy.client import converters, nb_form +from birdy.client.base import sort_inputs_key from birdy.client.utils import is_embedded_in_request from birdy import WPSClient -from io import StringIO, BytesIO -import tempfile # These tests assume Emu is running on the localhost @@ -253,6 +255,34 @@ def test_xarray_converter(wps): assert isinstance(ncdata, xr.Dataset) +def test_sort_inputs(): + """ + The order should be: + - Inputs that have minOccurs >= 1 and no default value + - Inputs that have minOccurs >= 1 and a default value + - Every other input + """ + + i = mock.Mock() + i.minOccurs = 1 + i.defaultValue = None + assert sort_inputs_key(i) == [False, False, True] + + i = mock.Mock() + i.minOccurs = 1 + i.defaultValue = "default" + assert sort_inputs_key(i) == [True, False, True] + + i = mock.Mock() + i.minOccurs = 0 + assert sort_inputs_key(i) == [True, True, False] + + i = mock.Mock() + i.minOccurs = 0 + i.defaultValue = "default" + assert sort_inputs_key(i) == [True, True, False] + + def test_all_subclasses(): c = converters.all_subclasses(converters.BaseConverter) assert converters.Meta4Converter in c
function signature is confused by default and min_occurs mismatch ## Description Not sure if this is a bug, but when a process has inputs with min_occurs=1 followed by inputs that also have min_occurs=1 but carry a default value (which is strange but allowed), the python signature assigns the wrong defaults to these arguments. This can be fixed by making sure that inputs with default arguments have min_occurs=0.
0.0
edd1e4d9b043fb67fe3f2c1fec3189e5bca28778
[ "tests/test_client.py::test_sort_inputs", "tests/test_client.py::test_all_subclasses", "tests/test_client.py::test_jsonconverter", "tests/test_client.py::test_zipconverter", "tests/test_client.py::test_jpeg_imageconverter", "tests/test_client.py::TestIsEmbedded::test_string", "tests/test_client.py::TestIsEmbedded::test_file_like", "tests/test_client.py::TestIsEmbedded::test_local_fn", "tests/test_client.py::TestIsEmbedded::test_local_path", "tests/test_client.py::TestIsEmbedded::test_local_uri", "tests/test_client.py::TestIsEmbedded::test_url" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-07-23 14:51:01+00:00
apache-2.0
1,397
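Editor's note: the two birdy records above share the same fix, so a single illustration covers both. The sketch below restates the patched `sort_inputs_key` outside its module context; the `Input` dataclass is a stand-in for `owslib.wps.Input`, reduced to the two attributes the key inspects, and the demo around it is illustrative rather than part of the patch.

```python
# Minimal sketch of the input-sorting rule from the birdy patches above.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Input:
    name: str
    minOccurs: int
    defaultValue: Optional[str] = None


def sort_inputs_key(i):
    # False sorts before True, so negating each condition floats the
    # inputs that satisfy it to the front of the sorted sequence.
    conditions = [
        i.minOccurs >= 1 and i.defaultValue is None,  # required, no default
        i.minOccurs >= 1,                             # required, has default
        i.minOccurs == 0,                             # every other input
    ]
    return [not c for c in conditions]


inputs = [
    Input("optional", 0),
    Input("required_with_default", 1, "x"),
    Input("required", 1),
]
print([i.name for i in sorted(inputs, key=sort_inputs_key)])
# -> ['required', 'required_with_default', 'optional']
```

The resulting ordering matches the key values asserted in the record's test patch (`[False, False, True]` for a required input without a default, and so on).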
bird-house__birdy-172
diff --git a/CHANGES.rst b/CHANGES.rst index bf4ed82..613a7d0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,14 @@ Change History ************** +0.6.7 (2020-03-10) +================== + +Changes: + +* Fixed passing Path objects (#169) +* Trying to guess mime type of inputs rather than taking the first value (#171) + 0.6.6 (2020-03-03) ================== diff --git a/birdy/client/base.py b/birdy/client/base.py index d220a83..9bbf39f 100644 --- a/birdy/client/base.py +++ b/birdy/client/base.py @@ -11,7 +11,7 @@ from owslib.wps import WPS_DEFAULT_VERSION, WebProcessingService, SYNC, ASYNC, C from birdy.exceptions import UnauthorizedException from birdy.client import utils -from birdy.utils import sanitize, fix_url, embed +from birdy.utils import sanitize, fix_url, embed, guess_type from birdy.client import notebook from birdy.client.outputs import WPSResult @@ -243,21 +243,35 @@ class WPSClient(object): continue values = [arg, ] if not isinstance(arg, (list, tuple)) else arg + supported_mimetypes = [v.mimeType for v in input_param.supportedValues] for value in values: + # if input_param.dataType == "ComplexData": seems simpler if isinstance(input_param.defaultValue, ComplexData): - encoding = input_param.defaultValue.encoding - mimetype = input_param.defaultValue.mimeType + + # Guess the mimetype of the input value + mimetype, encoding = guess_type(value) + + # If unrecognized, default to the first supported mimetype + if mimetype is None: + mimetype = supported_mimetypes[0] + else: + if mimetype not in supported_mimetypes: + raise ValueError(f"mimetype {mimetype} not in supported mimetypes {supported_mimetypes}.") + + if encoding is None: + encoding = input_param.defaultValue.encoding if isinstance(value, ComplexData): inp = value + # Either embed the file content or just the reference. else: if utils.is_embedded_in_request(self._wps.url, value): # If encoding is None, this will return the actual encoding used (utf-8 or base64). value, encoding = embed(value, mimetype, encoding=encoding) else: - value = fix_url(value) + value = fix_url(str(value)) inp = utils.to_owslib(value, data_type=input_param.dataType, diff --git a/birdy/utils.py b/birdy/utils.py index fbdba72..b892acf 100644 --- a/birdy/utils.py +++ b/birdy/utils.py @@ -120,3 +120,33 @@ def _encode(content, mimetype, encoding): return content, encoding # Do we need to escape content that is not HTML safe ? # return u'<![CDATA[{}]]>'.format(content) + + +def guess_type(url): + """Guess the mime type of the file link. + + Returns + ------- + mimetype, encoding + """ + import mimetypes + + try: + mime, enc = mimetypes.guess_type(url, strict=False) + except TypeError: + mime, enc = None, None + + # Special cases + # ------------- + + # netCDF + if mime == "application/x-netcdf" and "dodsC" in url: + mime = "application/x-ogc-dods" + + # application/zip vs application/x-zipped-shp + # TODO + + # All the various XML schemes + # TODO + + return mime, enc
bird-house/birdy
d7d2a6f83d786ce3820eee5147d00c85302007a0
diff --git a/tests/test_client.py b/tests/test_client.py index ff2d856..7918613 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -5,7 +5,6 @@ import tempfile from pathlib import Path from unittest import mock import owslib.wps - import pytest # from owslib import crs diff --git a/tests/test_utils.py b/tests/test_utils.py index 98ffa08..5f89691 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,6 +1,7 @@ from birdy import utils from .common import resource_file from pathlib import Path +import pytest def test_is_url(): @@ -63,3 +64,11 @@ class TestEncode: with open(self.nc, 'rb') as fp: nc, enc = utils.embed(fp, 'application/x-netcdf') assert isinstance(nc, bytes) + + [email protected]("value,expected", [("LSJ_LL.zip", "application/zip"), + ("https://remote.org/thredds/dodsC/a.nc", "application/x-ogc-dods"), + ("https://remote.org/thredds/file/a.nc", "application/x-netcdf")]) +def test_guess_type(value, expected): + mime, enc = utils.guess_type(value) + assert mime == expected
Inputs are given default mimetype ## Description If an input has multiple supported formats, birdy will always use the first one in the list when creating a request. If an input has to be embedded in the request, and its mimetype is a binary format while the default mimetype is not, it crashes the `embed` function. ## Environment * Birdy version used, if any: 0.6.6
0.0
d7d2a6f83d786ce3820eee5147d00c85302007a0
[ "tests/test_utils.py::test_guess_type[LSJ_LL.zip-application/zip]", "tests/test_utils.py::test_guess_type[https://remote.org/thredds/dodsC/a.nc-application/x-ogc-dods]", "tests/test_utils.py::test_guess_type[https://remote.org/thredds/file/a.nc-application/x-netcdf]" ]
[ "tests/test_client.py::test_sort_inputs", "tests/test_client.py::test_sort_inputs_conditions", "tests/test_client.py::test_all_subclasses", "tests/test_client.py::test_jsonconverter", "tests/test_client.py::test_zipconverter", "tests/test_client.py::test_jpeg_imageconverter", "tests/test_client.py::TestIsEmbedded::test_string", "tests/test_client.py::TestIsEmbedded::test_file_like", "tests/test_client.py::TestIsEmbedded::test_local_fn", "tests/test_client.py::TestIsEmbedded::test_local_path", "tests/test_client.py::TestIsEmbedded::test_local_uri", "tests/test_client.py::TestIsEmbedded::test_url", "tests/test_utils.py::test_is_url", "tests/test_utils.py::test_is_file", "tests/test_utils.py::test_sanitize", "tests/test_utils.py::test_delist", "tests/test_utils.py::TestEncode::test_str", "tests/test_utils.py::TestEncode::test_local_fn", "tests/test_utils.py::TestEncode::test_local_uri", "tests/test_utils.py::TestEncode::test_path", "tests/test_utils.py::TestEncode::test_file" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-03-10 14:28:16+00:00
apache-2.0
1,398
bird-house__birdy-173
diff --git a/CHANGES.rst b/CHANGES.rst index 613a7d0..82eee94 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,7 @@ Change History ************** -0.6.7 (2020-03-10) +0.6.8 (2020-03-10) ================== Changes: diff --git a/birdy/client/base.py b/birdy/client/base.py index 9bbf39f..d4bdf2e 100644 --- a/birdy/client/base.py +++ b/birdy/client/base.py @@ -250,14 +250,7 @@ class WPSClient(object): if isinstance(input_param.defaultValue, ComplexData): # Guess the mimetype of the input value - mimetype, encoding = guess_type(value) - - # If unrecognized, default to the first supported mimetype - if mimetype is None: - mimetype = supported_mimetypes[0] - else: - if mimetype not in supported_mimetypes: - raise ValueError(f"mimetype {mimetype} not in supported mimetypes {supported_mimetypes}.") + mimetype, encoding = guess_type(value, supported_mimetypes) if encoding is None: encoding = input_param.defaultValue.encoding diff --git a/birdy/utils.py b/birdy/utils.py index b892acf..0a6afae 100644 --- a/birdy/utils.py +++ b/birdy/utils.py @@ -122,8 +122,16 @@ def _encode(content, mimetype, encoding): # return u'<![CDATA[{}]]>'.format(content) -def guess_type(url): - """Guess the mime type of the file link. +def guess_type(url, supported): + """Guess the mime type of the file link. If the mimetype is not recognized, default to the first supported value. + + + Parameters + ---------- + url : str + Path or URL to file. + supported : list, tuple + Supported mimetypes. Returns ------- @@ -143,10 +151,20 @@ def guess_type(url): if mime == "application/x-netcdf" and "dodsC" in url: mime = "application/x-ogc-dods" - # application/zip vs application/x-zipped-shp - # TODO + zips = ["application/zip", "application/x-zipped-shp"] + + if mime not in supported: + if mime in zips and set(zips).intersection(supported): + mime = set(zips).intersection(supported).pop() # All the various XML schemes # TODO + # If unrecognized, default to the first supported mimetype + if mime is None: + mime = supported[0] + else: + if mime not in supported: + raise ValueError(f"mimetype {mime} not in supported mimetypes {supported}.") + return mime, enc
bird-house/birdy
299acb5929877db576a645bc24d209d61b1f4a36
diff --git a/tests/test_utils.py b/tests/test_utils.py index 5f89691..06aad07 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,7 +1,6 @@ from birdy import utils from .common import resource_file from pathlib import Path -import pytest def test_is_url(): @@ -66,9 +65,22 @@ class TestEncode: assert isinstance(nc, bytes) [email protected]("value,expected", [("LSJ_LL.zip", "application/zip"), - ("https://remote.org/thredds/dodsC/a.nc", "application/x-ogc-dods"), - ("https://remote.org/thredds/file/a.nc", "application/x-netcdf")]) -def test_guess_type(value, expected): - mime, enc = utils.guess_type(value) - assert mime == expected +class TestGuessType: + def test_zip(self): + mime, enc = utils.guess_type("LSJ_LL.zip", ["application/gml+xml", + "application/zip", + "application/x-zipped-shp", ]) + assert mime == "application/zip" + + mime, enc = utils.guess_type("LSJ_LL.zip", ["application/gml+xml", + "application/x-zipped-shp", ]) + assert mime == "application/x-zipped-shp" + + def test_nc(self): + mime, enc = utils.guess_type("https://remote.org/thredds/dodsC/a.nc", ["application/x-netcdf", + "application/x-ogc-dods"]) + assert mime == "application/x-ogc-dods" + + mime, enc = utils.guess_type("https://remote.org/thredds/file/a.nc", ["application/x-ogc-dods", + "application/x-netcdf"]) + assert mime == "application/x-netcdf"
Inputs are given default mimetype ## Description If an input has multiple supported formats, birdy will always use the first one in the list when creating a request. If an input has to be embedded in the request, and its mimetype is a binary format while the default mimetype is not, it crashes the `embed` function. ## Environment * Birdy version used, if any: 0.6.6
0.0
299acb5929877db576a645bc24d209d61b1f4a36
[ "tests/test_utils.py::TestGuessType::test_zip", "tests/test_utils.py::TestGuessType::test_nc" ]
[ "tests/test_utils.py::test_is_url", "tests/test_utils.py::test_is_file", "tests/test_utils.py::test_sanitize", "tests/test_utils.py::test_delist", "tests/test_utils.py::TestEncode::test_str", "tests/test_utils.py::TestEncode::test_local_fn", "tests/test_utils.py::TestEncode::test_local_uri", "tests/test_utils.py::TestEncode::test_path", "tests/test_utils.py::TestEncode::test_file" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-03-10 15:32:40+00:00
apache-2.0
1,399
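Editor's note: the birdy-172 and birdy-173 records above arrive, between them, at the `guess_type` helper. The condensed sketch below mirrors the logic of the final (173) patch using only the standard library; the example call at the end is illustrative and matches the record's test expectations.

```python
import mimetypes


def guess_type(url, supported):
    """Guess a mimetype from the file name; default to the first supported
    value when the guess is missing, raise when it is recognized but
    unsupported."""
    try:
        mime, enc = mimetypes.guess_type(url, strict=False)
    except TypeError:
        mime, enc = None, None

    # netCDF served over OPeNDAP looks like netCDF by extension.
    if mime == "application/x-netcdf" and "dodsC" in url:
        mime = "application/x-ogc-dods"

    # Two spellings for zipped content; pick whichever the server supports.
    zips = ["application/zip", "application/x-zipped-shp"]
    if mime not in supported and mime in zips and set(zips).intersection(supported):
        mime = set(zips).intersection(supported).pop()

    if mime is None:
        mime = supported[0]
    elif mime not in supported:
        raise ValueError(f"mimetype {mime} not in supported mimetypes {supported}.")
    return mime, enc


print(guess_type("LSJ_LL.zip", ["application/gml+xml", "application/x-zipped-shp"]))
# -> ('application/x-zipped-shp', None)
```

The birdy-202 record further on wraps the URL in `str()` so `Path` objects work too.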
bird-house__birdy-202
diff --git a/CHANGES.rst b/CHANGES.rst index eee7b6e..74850b8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -8,6 +8,7 @@ Changes: * Added a converter for loading GeoTIFF using xarray/rioxarray (#193). * Update notebook process forms. See `client.gui` function. +* Add support for Path objects in `utils.guess_type` * Support multiple mimetypes in converters. API change: mimetype (str) replaced by mimetypes (tuple) * Removed geojson mimetypes from BINARY_MIMETYPES so it's embedded as a string rather than bytes. diff --git a/birdy/utils.py b/birdy/utils.py index eed9134..39cf0e5 100644 --- a/birdy/utils.py +++ b/birdy/utils.py @@ -136,7 +136,7 @@ def guess_type(url, supported): Parameters ---------- - url : str + url : str, Path Path or URL to file. supported : list, tuple Supported mimetypes. @@ -148,7 +148,7 @@ def guess_type(url, supported): import mimetypes try: - mime, enc = mimetypes.guess_type(url, strict=False) + mime, enc = mimetypes.guess_type(str(url), strict=False) except TypeError: mime, enc = None, None @@ -158,7 +158,7 @@ def guess_type(url, supported): # netCDF if ( mime == "application/x-netcdf" - and "dodsC" in url + and "dodsC" in str(url) and "application/x-ogc-dods" in supported ): mime = "application/x-ogc-dods" @@ -169,14 +169,15 @@ def guess_type(url, supported): if mime in zips and set(zips).intersection(supported): mime = set(zips).intersection(supported).pop() + # GeoJSON + if mime == "application/json" and "application/geo+json" in supported: + mime = "application/geo+json" + # FIXME: Verify whether this code is needed. Remove if not. # # GeoTIFF (workaround since this mimetype isn't correctly understoud) # if mime == "image/tiff" and (".tif" in url or ".tiff" in "url"): # mime = "image/tiff; subtype=geotiff" # - # # GeoJSON (workaround since this mimetype isn't correctly understoud) - # if mime == "application/geo+json": - # mime = "application/vnd.geo+json" # All the various XML schemes # TODO
bird-house/birdy
1fa66cf314d5276c14b9008ac70408ec2278a489
diff --git a/tests/test_utils.py b/tests/test_utils.py index 2121501..1aa827b 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -95,3 +95,22 @@ class TestGuessType: # noqa: D101 ["application/x-ogc-dods", "application/x-netcdf"], ) assert mime == "application/x-netcdf" + + def test_path(self): # noqa: D102 + from pathlib import Path + + mime, enc = utils.guess_type( + Path("shape.json"), ["wrong", "application/geo+json"] + ) + assert mime == "application/geo+json" + + mime, enc = utils.guess_type( + Path("data.nc"), ["application/x-ogc-dods", "application/x-netcdf"] + ) + assert mime == "application/x-netcdf" + + mime, enc = utils.guess_type( + Path("file:///dodsC/data.nc"), + ["application/x-netcdf", "application/x-ogc-dods"], + ) + assert mime == "application/x-ogc-dods"
`guess_type` function does not support Path objects.
0.0
1fa66cf314d5276c14b9008ac70408ec2278a489
[ "tests/test_utils.py::TestGuessType::test_path" ]
[ "tests/test_utils.py::test_is_url", "tests/test_utils.py::test_is_file", "tests/test_utils.py::test_sanitize", "tests/test_utils.py::test_delist", "tests/test_utils.py::TestEncode::test_str", "tests/test_utils.py::TestEncode::test_local_fn", "tests/test_utils.py::TestEncode::test_local_uri", "tests/test_utils.py::TestEncode::test_path", "tests/test_utils.py::TestEncode::test_file", "tests/test_utils.py::TestGuessType::test_zip", "tests/test_utils.py::TestGuessType::test_nc" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-04-26 21:01:37+00:00
apache-2.0
1,400
bird-house__birdy-217
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0c235c8..2b48946 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -14,7 +14,7 @@ jobs: - name: Install packages run: | sudo apt-get -y install pandoc - if: matrix.python-version == 3.8 + if: matrix.python-version == 3.9 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: @@ -28,10 +28,10 @@ jobs: run: make test - name: Lint with flake8 ⚙️ run: make lint - if: matrix.python-version == 3.8 + if: matrix.python-version == 3.9 - name: Check formatting with black ⚙️ - run: black --check --target-version py36 birdy tests + run: black --check --target-version py39 birdy tests - if: matrix.python-version == 3.8 + if: matrix.python-version == 3.9 - name: Build docs 🏗️ run: make docs - if: matrix.python-version == 3.8 + if: matrix.python-version == 3.9 diff --git a/CHANGES.rst b/CHANGES.rst index 487478e..76dd327 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -9,6 +9,7 @@ Changes: * Relax dependency check on GeoTiff rioxarray and rasterio converters due to some mysterious gdal error. * Remove tests with live 52North WPS server since it seems offline. * Remove Python 3.6 from test matrix and add 3.10. +* Handle the removal of the `verbose` argument in `OWSLib.WebProcessingService` 0.29.0. 0.8.1 (2021-12-01) ================== diff --git a/birdy/client/base.py b/birdy/client/base.py index 9a2684e..ccfa9d5 100644 --- a/birdy/client/base.py +++ b/birdy/client/base.py @@ -17,6 +17,8 @@ from owslib.wps import ( ComplexData, WebProcessingService, ) +from warnings import warn +import packaging.version from birdy.client import notebook, utils from birdy.client.outputs import WPSResult @@ -46,13 +48,13 @@ class WPSClient(object): auth=None, verify=True, cert=None, - verbose=False, progress=False, version=WPS_DEFAULT_VERSION, caps_xml=None, desc_xml=None, language=None, lineage=False, + **kwds, ): """Initialize WPSClient. @@ -77,8 +79,8 @@ class WPSClient(object): passed to :class:`owslib.wps.WebProcessingService` cert: str passed to :class:`owslib.wps.WebProcessingService` - verbose: str - passed to :class:`owslib.wps.WebProcessingService` + verbose: bool + Deprecated. passed to :class:`owslib.wps.WebProcessingService` for owslib < 0.29 progress: bool If True, enable interactive user mode. version: str @@ -117,17 +119,28 @@ class WPSClient(object): auth_headers = ["Authorization", "Proxy-Authorization", "Cookie"] headers.update({h: r.headers[h] for h in auth_headers if h in r.headers}) + if "verbose" in kwds: + if packaging.version.parse(owslib.__version__) >= packaging.version.parse( + "0.29.0" + ): + kwds.pop("verbose") + warn( + "The 'verbose' keyword is deprecated and will be removed in a future version.
Starting with owslib " + "0.29.0, debugging information is logged instead of printed.", + DeprecationWarning, + ) + self._wps = WebProcessingService( url, version=version, username=username, password=password, - verbose=verbose, headers=headers, verify=verify, cert=cert, skip_caps=True, language=language, + **kwds, ) try: @@ -309,7 +322,6 @@ class WPSClient(object): for value in values: # if input_param.dataType == "ComplexData": seems simpler if isinstance(input_param.defaultValue, ComplexData): - # Guess the mimetype of the input value mimetype, encoding = guess_type(value, supported_mimetypes) diff --git a/birdy/client/notebook.py b/birdy/client/notebook.py index 9ea1012..2b52719 100644 --- a/birdy/client/notebook.py +++ b/birdy/client/notebook.py @@ -114,7 +114,7 @@ class Form: for (key, o) in outputs ] ): - for (key, output) in outputs: + for key, output in outputs: if hasattr(output, "supportedValues"): of[key] = widgets.RadioButtons( options=[o.mimeType for o in output.supportedValues], diff --git a/birdy/ipyleafletwfs/examples/ipyleafletwfs_guide.ipynb b/birdy/ipyleafletwfs/examples/ipyleafletwfs_guide.ipynb index 4981ea9..046f5c6 100644 --- a/birdy/ipyleafletwfs/examples/ipyleafletwfs_guide.ipynb +++ b/birdy/ipyleafletwfs/examples/ipyleafletwfs_guide.ipynb @@ -19,8 +19,8 @@ "from birdy import IpyleafletWFS\n", "from ipyleaflet import Map\n", "\n", - "url = 'http://boreas.ouranos.ca/geoserver/wfs'\n", - "version = '2.0.0'\n", + "url = \"http://boreas.ouranos.ca/geoserver/wfs\"\n", + "version = \"2.0.0\"\n", "\n", "wfs_connection = IpyleafletWFS(url, version)\n", "\n", @@ -57,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "wfs_connection.build_layer(layer_typename='public:HydroLAKES_poly', source_map=demo_map)" + "wfs_connection.build_layer(layer_typename=\"public:HydroLAKES_poly\", source_map=demo_map)" ] }, { @@ -95,8 +95,10 @@ "metadata": {}, "outputs": [], "source": [ - "wfs_connection.create_feature_property_widget(widget_name='Wshd_area', feature_property='Wshd_area', widget_position='bottomleft')\n", - "demo_map\n" + "wfs_connection.create_feature_property_widget(\n", + " widget_name=\"Wshd_area\", feature_property=\"Wshd_area\", widget_position=\"bottomleft\"\n", + ")\n", + "demo_map" ] }, { @@ -114,7 +116,9 @@ "metadata": {}, "outputs": [], "source": [ - "wfs_connection.create_feature_property_widget(widget_name='main_widget', feature_property='Lake_area')" + "wfs_connection.create_feature_property_widget(\n", + " widget_name=\"main_widget\", feature_property=\"Lake_area\"\n", + ")" ] }, { @@ -131,7 +135,7 @@ "outputs": [], "source": [ "gjson = wfs_connection.geojson\n", - "gjson['features'][0].keys()" + "gjson[\"features\"][0].keys()" ] }, { @@ -140,8 +144,7 @@ "metadata": {}, "outputs": [], "source": [ - "\n", - "gjson['totalFeatures']\n" + "gjson[\"totalFeatures\"]" ] }, { @@ -157,7 +160,7 @@ "metadata": {}, "outputs": [], "source": [ - "wfs_connection.create_feature_property_widget(widget_name='main_widget')\n", + "wfs_connection.create_feature_property_widget(widget_name=\"main_widget\")\n", "demo_map" ] }, diff --git a/birdy/ipyleafletwfs/examples/quickstart-template.ipynb b/birdy/ipyleafletwfs/examples/quickstart-template.ipynb index 365bc2e..09cf30a 100644 --- a/birdy/ipyleafletwfs/examples/quickstart-template.ipynb +++ b/birdy/ipyleafletwfs/examples/quickstart-template.ipynb @@ -18,11 +18,11 @@ "outputs": [], "source": [ "from birdy import IpyleafletWFS\n", "from ipyleaflet import Map\n", "\n",
"# Initialize the connection\n", - "url ='http://boreas.ouranos.ca/geoserver/wfs'\n", - "version ='2.0.0'\n", + "url = \"http://boreas.ouranos.ca/geoserver/wfs\"\n", + "version = \"2.0.0\"\n", "\n", "boreas_wfs = IpyleafletWFS(url, version)\n", "\n", @@ -48,8 +48,7 @@ "outputs": [], "source": [ "# Build the WFS layer\n", - "boreas_wfs.build_layer(layer_typename='public:HydroLAKES_poly', source_map=m)\n", - "\n" + "boreas_wfs.build_layer(layer_typename=\"public:HydroLAKES_poly\", source_map=m)" ] }, { diff --git a/birdy/ipyleafletwfs/examples/wfs_constructor.ipynb b/birdy/ipyleafletwfs/examples/wfs_constructor.ipynb index ab591ae..aa2ff3e 100644 --- a/birdy/ipyleafletwfs/examples/wfs_constructor.ipynb +++ b/birdy/ipyleafletwfs/examples/wfs_constructor.ipynb @@ -19,10 +19,10 @@ "from ipyleaflet import Map\n", "\n", "# Create connection\n", - "url = 'http://boreas.ouranos.ca/geoserver/wfs'\n", - "version = '2.0.0'\n", + "url = \"http://boreas.ouranos.ca/geoserver/wfs\"\n", + "version = \"2.0.0\"\n", "\n", - "wfs = IpyleafletWFS(url, version)\n" + "wfs = IpyleafletWFS(url, version)" ] }, { @@ -33,7 +33,7 @@ "source": [ "# Create the map instance\n", "m = Map(center=(47.90, -69.90), zoom=11)\n", - "m\n" + "m" ] }, { @@ -55,11 +55,19 @@ "# Create wfs layer\n", "# Move and zoom to the desired extent before running this cell\n", "# Do NOT zoom too far out, as large GeoJSON layers can be long to load and even cause crashed\n", "basin_style = { 'color': '#d000ff', 'opacity': 1, 'dashArray': '10', 'fillOpacity': 0.0, 'weight': 3 }\n", - "lake_style = { 'color': '#00aeff', 'dashArray': '0', 'fillOpacity': 0.5, 'weight': 0.5 }\n", + "basin_style = {\n", + " \"color\": \"#d000ff\",\n", + " \"opacity\": 1,\n", + " \"dashArray\": \"10\",\n", + " \"fillOpacity\": 0.0,\n", + " \"weight\": 3,\n", + "}\n", + "lake_style = {\"color\": \"#00aeff\", \"dashArray\": \"0\", \"fillOpacity\": 0.5, \"weight\": 0.5}\n", "\n", - "lakes = wfs.create_wfsgeojson_layer('public:HydroLAKES_poly', m, layer_style=lake_style)\n", - "basins = wfs.create_wfsgeojson_layer('public:wshed_bound_n2', m, layer_style=basin_style)" + "lakes = wfs.create_wfsgeojson_layer(\"public:HydroLAKES_poly\", m, layer_style=lake_style)\n", + "basins = wfs.create_wfsgeojson_layer(\n", + " \"public:wshed_bound_n2\", m, layer_style=basin_style\n", + ")" ] }, {
bird-house/birdy
7398c8adc7264f31b9f7664998b3932cb6d89be8
diff --git a/tests/test_client.py b/tests/test_client.py index 78a8926..621c75e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -409,3 +409,10 @@ class TestIsEmbedded: # noqa: D101 def test_url(self): # noqa: D102 assert not is_embedded_in_request(self.remote, self.url) assert not is_embedded_in_request(self.local, self.url) + + +def test_verbose_deprecation(): # noqa: D103 + with pytest.warns(DeprecationWarning): + WPSClient( + url=URL_EMU, caps_xml=EMU_CAPS_XML, desc_xml=EMU_DESC_XML, verbose=True + )
owslib removed verbose argument ## Description The client has a `verbose` argument that it passes to `owslib.WebProcessingService`. In https://github.com/geopython/OWSLib/pull/864, it was removed without a deprecation warning. I think we'll need to update birdy and pin owslib. ## Additional Information Links to other issues or sources. https://github.com/geopython/OWSLib/pull/864
0.0
7398c8adc7264f31b9f7664998b3932cb6d89be8
[ "tests/test_client.py::test_emu_offline", "tests/test_client.py::test_wps_supported_languages", "tests/test_client.py::test_52north_offline", "tests/test_client.py::test_flyingpigeon_offline", "tests/test_client.py::test_wps_docs", "tests/test_client.py::test_wps_nb_form", "tests/test_client.py::test_verbose_deprecation" ]
[ "tests/test_client.py::test_sort_inputs", "tests/test_client.py::test_sort_inputs_conditions", "tests/test_client.py::TestIsEmbedded::test_string", "tests/test_client.py::TestIsEmbedded::test_file_like", "tests/test_client.py::TestIsEmbedded::test_local_fn", "tests/test_client.py::TestIsEmbedded::test_local_path", "tests/test_client.py::TestIsEmbedded::test_local_uri", "tests/test_client.py::TestIsEmbedded::test_url" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-04-27 12:56:02+00:00
apache-2.0
1,401
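Editor's note: the pattern in the birdy-217 patch — accept the removed keyword via `**kwds`, warn, and drop it when the installed dependency is new enough — generalizes to other libraries. A minimal standalone sketch, with a hypothetical `installed_version` standing in for `owslib.__version__` and `make_client` standing in for the `WPSClient` constructor:

```python
import warnings
from packaging.version import parse  # the patch uses packaging.version.parse


installed_version = "0.29.0"  # stand-in for owslib.__version__


def make_client(url, **kwds):
    # Swallow the deprecated keyword instead of forwarding it to a
    # constructor that no longer accepts it.
    if "verbose" in kwds and parse(installed_version) >= parse("0.29.0"):
        kwds.pop("verbose")
        warnings.warn(
            "The 'verbose' keyword is deprecated and will be removed "
            "in a future version.",
            DeprecationWarning,
        )
    return url, kwds


print(make_client("http://example.com/wps", verbose=True))
# -> ('http://example.com/wps', {})  plus a DeprecationWarning
```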
bjmorgan__py-sc-fermi-25
diff --git a/py_sc_fermi/defect_system.py b/py_sc_fermi/defect_system.py index 8206caf..0f61630 100644 --- a/py_sc_fermi/defect_system.py +++ b/py_sc_fermi/defect_system.py @@ -149,7 +149,6 @@ class DefectSystem(object): direction = +1.0 e_fermi = (emin + emax) / 2.0 step = 1.0 - converged = False reached_e_min = False reached_e_max = False @@ -167,7 +166,6 @@ class DefectSystem(object): reached_e_min = True direction = +1.0 if abs(q_tot) < self.convergence_tolerance: - converged = True break if q_tot > 0.0: if direction == +1.0: @@ -201,7 +199,7 @@ class DefectSystem(object): for ds in self.defect_species: concall = ds.get_concentration(e_fermi, self.temperature) if ds.fixed_concentration == None: - string += f"{ds.name:9} : {concall * 1e24 / self.volume} cm^-3\n" + string += f"{ds.name:9} : {concall * 1e24 / self.volume} cm^-3, (percentage of defective sites: {(concall / ds.nsites) * 100:.3} %)\n" else: string += ( f"{ds.name:9} : {concall * 1e24 / self.volume} cm^-3 [fixed]\n" @@ -336,3 +334,25 @@ class DefectSystem(object): for ds in self.defect_species } return {**run_stats, **decomp_concs} + + def site_percentages( + self, + ) -> Dict[str, float]: + """Returns a dictionary of the DefectSpecies in the DefectSystem which + giving the percentage of the sites in the structure that will host that + defect. + + Returns: + Dict[str, Any]: dictionary specifying the per-DefectSpecies site + concentrations. + """ + + e_fermi = self.get_sc_fermi()[0] + + sum_concs = { + str(ds.name): float( + (ds.get_concentration(e_fermi, self.temperature) / ds.nsites) * 100 + ) + for ds in self.defect_species + } + return sum_concs
bjmorgan/py-sc-fermi
9292dea25a4f93832179cea9ae4a4e46edc8f4ba
diff --git a/tests/test_defect_system.py b/tests/test_defect_system.py index 1903787..d68afb0 100644 --- a/tests/test_defect_system.py +++ b/tests/test_defect_system.py @@ -121,6 +121,19 @@ class TestDefectSystem(unittest.TestCase): {"Fermi Energy": 1, "p0": 1, "n0": 1, "O_i": 1, "v_O": 1}, ) + def test_site_percentages(self): + self.defect_system.get_sc_fermi = Mock(return_value=[1, {}]) + self.defect_system.dos.carrier_concentrations = Mock(return_value=(1, 1)) + self.defect_system.defect_species[0].get_concentration = Mock(return_value=1) + self.defect_system.defect_species[1].get_concentration = Mock(return_value=1) + self.defect_system.defect_species[0].nsites = 1 + self.defect_system.defect_species[1].nsites = 1 + self.defect_system.defect_species[0].name = "v_O" + self.defect_system.defect_species[1].name = "O_i" + self.assertEqual( + self.defect_system.site_percentages(), {"v_O": 100, "O_i": 100} + ) + def test__get_report_string(self): self.defect_system.get_sc_fermi = Mock(return_value=[0.5, {}]) self.defect_system.dos.carrier_concentrations = Mock(return_value=(100, 100))
add a warning when defect concentrations are "beyond dilute limit" It would be nice to warn the user if the code predicts defect concentrations beyond the dilute limit. This would be simple to implement; we would just need a definition of when the dilute-limit approximation becomes questionable. This could easily be a heuristic.
0.0
9292dea25a4f93832179cea9ae4a4e46edc8f4ba
[ "tests/test_defect_system.py::TestDefectSystem::test_site_percentages" ]
[ "tests/test_defect_system.py::TestDefectSystemInit::test_defect_system_is_initialised", "tests/test_defect_system.py::TestDefectSystem::test_get_sc_fermi_tops_out", "tests/test_defect_system.py::TestDefectSystem::test_total_defect_charge_contributions", "tests/test_defect_system.py::TestDefectSystem::test_defect_species_by_name", "tests/test_defect_system.py::TestDefectSystem::test_as_dict", "tests/test_defect_system.py::TestDefectSystem::test_get_sc_fermi", "tests/test_defect_system.py::TestDefectSystem::test_from_yaml", "tests/test_defect_system.py::TestDefectSystem::test_defect_species_names", "tests/test_defect_system.py::TestDefectSystem::test_get_transition_levels", "tests/test_defect_system.py::TestDefectSystem::test__repr__", "tests/test_defect_system.py::TestDefectSystem::test_get_sc_fermi_bottoms_out", "tests/test_defect_system.py::TestDefectSystem::test_q_tot", "tests/test_defect_system.py::TestDefectSystem::test__get_report_string" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-01-23 10:41:58+00:00
mit
1,403
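Editor's note: the issue asks for a heuristic dilute-limit warning, while the merged patch stops at reporting per-site percentages via `site_percentages`. One way the warning could be layered on top — the 1% threshold and the input dictionary are illustrative assumptions, not part of the py-sc-fermi patch:

```python
import warnings


def warn_beyond_dilute_limit(site_percentages, threshold=1.0):
    # site_percentages maps defect names to the percentage of host sites
    # occupied, in the shape returned by DefectSystem.site_percentages().
    for name, percent in site_percentages.items():
        if percent > threshold:
            warnings.warn(
                f"defect {name} occupies {percent:.3g}% of its sites; "
                "the dilute-limit approximation may be questionable"
            )


warn_beyond_dilute_limit({"v_O": 0.01, "O_i": 3.2})  # warns only for O_i
```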
blairconrad__dicognito-127
diff --git a/pyproject.toml b/pyproject.toml index 66a8bf9..2aa5c7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ exclude = ''' ''' [tool.pytest.ini_options] +xfail_strict=true filterwarnings = [ "error", "ignore:SelectableGroups:DeprecationWarning", diff --git a/src/dicognito/__main__.py b/src/dicognito/__main__.py index 5aa176f..2dafeb7 100644 --- a/src/dicognito/__main__.py +++ b/src/dicognito/__main__.py @@ -130,10 +130,12 @@ def main(main_args: Optional[Sequence[str]] = None) -> None: help="Set the log level. May be one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.", ) parser.add_argument( - "--seed", # currently only intended to make testing easier - help="The seed to use when generating random attribute values. Primarily " - "intended to make testing easier. Best anonymization practice is to omit " - "this value and let dicognito generate its own random seed.", + "--seed", + help="The seed to use when generating anonymized attribute values. " + "If the same value is supplied for subsequent dicognito invocations, then " + "the same input objects will result in consistent anonymized results. " + "Omitting this value allows dicognito to generate its own random seed, which " + "may be slightly more secure, but does not support reproducible anonymization.", ) parser.add_argument("--version", action=VersionAction) diff --git a/src/dicognito/anonymizer.py b/src/dicognito/anonymizer.py index 78f2f32..1113636 100644 --- a/src/dicognito/anonymizer.py +++ b/src/dicognito/anonymizer.py @@ -52,9 +52,9 @@ class Anonymizer: id_suffix : str A prefix to add to all unstructured ID fields, such as Patient ID, Accession Number, etc. - seed - Not intended for general use. Seeds the data randomizer in order - to produce consistent results. Used for testing. + seed : Optional[str] + Seeds the data randomizer, which will produce consistent results when + invoked with the same seed. """ minimum_offset_hours = 62 * 24 maximum_offset_hours = 730 * 24 @@ -79,7 +79,7 @@ class Anonymizer: "ReferencedPatientPhotoSequence", "ResponsibleOrganization", ), - UIAnonymizer(), + UIAnonymizer(randomizer), PNAnonymizer(randomizer), IDAnonymizer( randomizer, diff --git a/src/dicognito/randomizer.py b/src/dicognito/randomizer.py index aba519e..c81e676 100644 --- a/src/dicognito/randomizer.py +++ b/src/dicognito/randomizer.py @@ -11,9 +11,9 @@ class Randomizer: Parameters ---------- seed - Not intended for general use. Seeds the data randomizer so it - produces consistent results when anonymizing elements with the - same initial values. + Used to convert input values into large integers. + The results are completely determined by the + given seed and the input value. """ if seed is None: self.seed = str(os.urandom(20)) @@ -39,10 +39,10 @@ class Randomizer: result += c return result - def get_ints_from_ranges(self, original_value: Any, *suprenums: int) -> Sequence[int]: + def get_ints_from_ranges(self, original_value: Any, *suprema: int) -> Sequence[int]: """\ Convert an original data element value into a series of - integers, each between 0 (inclusive) and one of the suprenums + integers, each between 0 (inclusive) and one of the suprema (exclusive) passed in.
Parameters @@ -55,7 +55,7 @@ class Randomizer: """ big_int = self.to_int(original_value) result = [] - for s in suprenums: + for s in suprema: result.append(big_int % s) big_int //= s return result diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 88a7173..40af54d 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -2,6 +2,7 @@ - Now assuming DA fields are 8 characters long ([#123](https://github.com/blairconrad/dicognito/issues/123)) - Summary formatted as GitHub Flavored Markdown ([#125](https://github.com/blairconrad/dicognito/issues/125)) +- Anonymized UI values are now consistent between runs if the same seed is supplied ([#126](https://github.com/blairconrad/dicognito/issues/126)) ## 0.13.0 diff --git a/src/dicognito/uianonymizer.py b/src/dicognito/uianonymizer.py index c882e3f..0a05357 100644 --- a/src/dicognito/uianonymizer.py +++ b/src/dicognito/uianonymizer.py @@ -1,20 +1,15 @@ -from typing import Dict - -import collections -import datetime import pydicom import pydicom.dataelem -import random + +from dicognito.randomizer import Randomizer class UIAnonymizer: - def __init__(self) -> None: + def __init__(self, randomizer: Randomizer) -> None: """\ Create a new UIAnonymizer. """ - self._ui_map: Dict[str, str] = collections.defaultdict(self._new_ui) - self._creation_date: str = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S%f") - self._counter: int = 10000000 + self._randomizer = randomizer def __call__(self, dataset: pydicom.dataset.Dataset, data_element: pydicom.DataElement) -> bool: """\ @@ -44,15 +39,10 @@ class UIAnonymizer: return False if isinstance(data_element.value, pydicom.multival.MultiValue): - data_element.value = list([self._ui_map[v] for v in data_element.value]) + data_element.value = list(self._new_ui(v) for v in data_element.value) else: - data_element.value = self._ui_map[data_element.value] + data_element.value = self._new_ui(data_element.value) return True - def _new_ui(self) -> str: - self._counter += 1 - counter_part = str(self._counter) - prefix = "2." + self._creation_date + "." + counter_part + "." - random_begin = pow(10, 63 - len(prefix)) - random_end = pow(10, 64 - len(prefix)) - 1 - return prefix + str(random.randint(random_begin, random_end)) + def _new_ui(self, ui: str) -> str: + return "2." + str(10**39 + self._randomizer.to_int(ui))
blairconrad/dicognito
c66e1b5772449e017710ee7a71fd9ba219dba681
diff --git a/tests/test_anonymize_through_time.py b/tests/test_anonymize_through_time.py new file mode 100644 index 0000000..1b915a4 --- /dev/null +++ b/tests/test_anonymize_through_time.py @@ -0,0 +1,37 @@ +import pydicom + +from itertools import filterfalse, tee +from typing import Callable, Tuple, Union, ValuesView + +from dicognito.anonymizer import Anonymizer + +from .data_for_tests import load_minimal_instance + + +def test_dataset_anonymizes_same_with_same_seed(): + anonymizer1 = Anonymizer(seed="SOME_FIXED_SEED") + anonymizer2 = Anonymizer(seed="SOME_FIXED_SEED") + + with load_minimal_instance() as dataset1, load_minimal_instance() as dataset2: + anonymizer1.anonymize(dataset1) + anonymizer2.anonymize(dataset2) + + mismatches, matches = partition(lambda value: value == dataset2[value.tag], dataset1.values()) + + assert list(value.name for value in matches) + assert not list(value.name for value in mismatches) + + +_DatasetValue = Union[pydicom.DataElement, pydicom.dataelem.RawDataElement] + + +def partition( + predicate: Callable[[_DatasetValue], bool], + iterable: ValuesView[_DatasetValue], + # pytest can't collect the tests when we subscript filterfalse and filter +) -> Tuple[filterfalse, filter]: # type: ignore[type-arg] + + "Use a predicate to partition entries into false entries and true entries" + # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 + t1, t2 = tee(iterable) + return filterfalse(predicate, t1), filter(predicate, t2)
Support fully-reproducible deidentification When data on a given cohort is accumulated over long periods, users may wish to run dicognito in multiple passes in order to perform preliminary analyses on the partial dataset. It would be convenient to be able to checkpoint the `Anonymizer` state so that patients seen in previous dicognito runs over the same cohort would have matching anonymized IDs. Two options occur to me: 1) Use the anonymization map proposed in #124 as a simple checkpoint. I haven't looked at the code yet, so I'm not sure exactly what drawbacks this would have. I think there are some guarantees about the order of dates that might be broken in this case. 2) Serialize everything in the `Anonymizer` and save it to a pickle file. I think this would make starting from a checkpoint 'equivalent' to running in a single pass. It would have the disadvantage of adding another file with sensitive data to manage.
0.0
c66e1b5772449e017710ee7a71fd9ba219dba681
[ "tests/test_anonymize_through_time.py::test_dataset_anonymizes_same_with_same_seed" ]
[]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-05-12 15:10:37+00:00
mit
1,404
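Editor's note: the core of the dicognito-127 fix is that UI values are now derived deterministically from the seed and the original value (see `Randomizer.to_int` and the new `_new_ui` in the patch). A minimal sketch of the principle — the hashing details here are simplified relative to dicognito's `Randomizer`, and the sample UID is arbitrary:

```python
import hashlib


def to_int(seed: str, original_value: str) -> int:
    # Hash seed + value, so equal seeds map equal inputs to equal outputs.
    digest = hashlib.md5((seed + original_value).encode()).digest()
    return int.from_bytes(digest, "big")


def new_ui(seed: str, ui: str) -> str:
    # Mirrors the shape of the patched _new_ui: a "2." root followed by a
    # large integer derived from the original value.
    return "2." + str(10**39 + to_int(seed, ui))


ui = "1.2.840.10008.1.1"
print(new_ui("SOME_FIXED_SEED", ui) == new_ui("SOME_FIXED_SEED", ui))  # True
print(new_ui("OTHER_SEED", ui) == new_ui("SOME_FIXED_SEED", ui))       # False
```

This is what makes the record's `test_dataset_anonymizes_same_with_same_seed` pass: two anonymizers built from the same seed produce identical output for identical input.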
blairconrad__dicognito-158
diff --git a/src/dicognito/idanonymizer.py b/src/dicognito/idanonymizer.py index f2597f0..cb979a6 100644 --- a/src/dicognito/idanonymizer.py +++ b/src/dicognito/idanonymizer.py @@ -90,13 +90,11 @@ class IDAnonymizer(ElementAnonymizer): mitra_global_patient_id_element = 0x0020 if ( data_element.tag.group == mitra_linked_attributes_group - and data_element.tag.element % mitra_global_patient_id_element == 0 + and data_element.tag.element & 0x00FF == mitra_global_patient_id_element ): private_tag_group = data_element.tag.element >> 8 - if ( - dataset[(mitra_linked_attributes_group << 16) + private_tag_group].value - == "MITRA LINKED ATTRIBUTES 1.0" - ): + element = dataset.get((mitra_linked_attributes_group, private_tag_group), None) + if element and element.value == "MITRA LINKED ATTRIBUTES 1.0": # For pydicom 2.2.0 and above (at least to 2.2.2) the Mitra global patient ID tag # can be misidentified as VR IS, instead of its proper LO. This causes # the anonymize action to fail because most values can't be converted. diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 96a1b65..d10e1ac 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -3,6 +3,7 @@ ### Fixed - Version table format has bad separator ([#147](https://github.com/blairconrad/dicognito/issues/147)) +- Private creator 0031,0020 breaks anonymization ([#157](https://github.com/blairconrad/dicognito/issues/157)) ## 0.16.0a1
blairconrad/dicognito
57abcb68a8ee636f06df67dadcdc76ac300f37b4
diff --git a/tests/test_anonymize_private_tags.py b/tests/test_anonymize_private_tags.py index 042f46f..a2c20d8 100644 --- a/tests/test_anonymize_private_tags.py +++ b/tests/test_anonymize_private_tags.py @@ -1,4 +1,5 @@ from dicognito.anonymizer import Anonymizer +from pydicom import Dataset from .data_for_tests import load_dcm @@ -16,3 +17,29 @@ def test_mitra_global_patient_id_is_updated(): actual = block[0x20].value assert actual != "GPIYMBB54" + + +def test_0031_0040_is_not_updated(): + with Dataset() as dataset: + dataset.ensure_file_meta() + dataset.add_new(0x00310040, "LO", "Some value") + expected = dataset[0x0031, 0x0040] + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + actual = dataset[0x0031, 0x0040] + assert actual == expected + + +def test_private_creator_0031_0020_is_not_updated(): + with Dataset() as dataset: + dataset.ensure_file_meta() + dataset.add_new(0x00310020, "LO", "Another value") + expected = dataset[0x0031, 0x0020] + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + actual = dataset[0x0031, 0x0020] + assert actual == expected
Private creator 0031,0020 breaks anonymization Anonymizing a dataset containing a value for 0031,0020, which would typically be a [Private Creator Data Element](https://dicom.nema.org/dicom/2013/output/chtml/part05/sect_7.8.html#sect_7.8.1), results in dicognito erroring out with ``` Error occurred while converting <_io.BytesIO object at 0x0000022DD4CEA160>. Aborting. Traceback (most recent call last): File "E:\Dev\dicognito\.venv\dicognito\Lib\site-packages\pydicom\tag.py", line 28, in tag_in_exception yield File "E:\Dev\dicognito\.venv\dicognito\Lib\site-packages\pydicom\dataset.py", line 2474, in walk callback(self, data_element) # self = this Dataset ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "E:\Dev\dicognito\src\dicognito\anonymizer.py", line 151, in _anonymize_element if handler(dataset, data_element): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "E:\Dev\dicognito\src\dicognito\idanonymizer.py", line 67, in __call__ if self._anonymize_mitra_global_patient_id(dataset, data_element): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "E:\Dev\dicognito\src\dicognito\idanonymizer.py", line 97, in _anonymize_mitra_global_patient_id dataset[(mitra_linked_attributes_group << 16) + private_tag_group].value ~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "E:\Dev\dicognito\.venv\dicognito\Lib\site-packages\pydicom\dataset.py", line 988, in __getitem__ elem = self._dict[tag] ~~~~~~~~~~^^^^^ KeyError: (0031, 0000) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "E:\Dev\dicognito\src\dicognito\__main__.py", line 76, in main anonymizer.anonymize(dataset) File "E:\Dev\dicognito\src\dicognito\anonymizer.py", line 134, in anonymize dataset.walk(self._anonymize_element) File "E:\Dev\dicognito\.venv\dicognito\Lib\site-packages\pydicom\dataset.py", line 2472, in walk with tag_in_exception(tag): File "D:\Users\amidu\AppData\Local\Programs\Python\Python311\Lib\contextlib.py", line 155, in __exit__ self.gen.throw(typ, value, traceback) File "E:\Dev\dicognito\.venv\dicognito\Lib\site-packages\pydicom\tag.py", line 32, in tag_in_exception raise type(exc)(msg) from exc KeyError: 'With tag (0031, 0020) got exception: (0031, 0000)\nTraceback (most recent call last):\n File "E:\\Dev\\dicognito\\.venv\\dicognito\\Lib\\site-packages\\pydicom\\tag.py", line 28, in tag_in_exception\n yield\n File "E:\\Dev\\dicognito\\.venv\\dicognito\\Lib\\site-packages\\pydicom\\dataset.py", line 2474, in walk\n callback(self, data_element) # self = this Dataset\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "E:\\Dev\\dicognito\\src\\dicognito\\anonymizer.py", line 151, in _anonymize_element\n if handler(dataset, data_element):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "E:\\Dev\\dicognito\\src\\dicognito\\idanonymizer.py", line 67, in __call__\n if self._anonymize_mitra_global_patient_id(dataset, data_element):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "E:\\Dev\\dicognito\\src\\dicognito\\idanonymizer.py", line 97, in _anonymize_mitra_global_patient_id\n dataset[(mitra_linked_attributes_group << 16) + private_tag_group].value\n ~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "E:\\Dev\\dicognito\\.venv\\dicognito\\Lib\\site-packages\\pydicom\\dataset.py", line 988, in __getitem__\n elem = self._dict[tag]\n ~~~~~~~~~~^^^^^\nKeyError: (0031, 0000)\n' ``` It shouldn't error out.
0.0
57abcb68a8ee636f06df67dadcdc76ac300f37b4
[ "tests/test_anonymize_private_tags.py::test_0031_0040_is_not_updated", "tests/test_anonymize_private_tags.py::test_private_creator_0031_0020_is_not_updated" ]
[ "tests/test_anonymize_private_tags.py::test_mitra_global_patient_id_is_updated" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-11-02 10:44:29+00:00
mit
1,405
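Editor's note: the dicognito-158 crash came from indexing the private-creator slot directly; the fix switches to `Dataset.get`, which returns a default when the tag is absent. A small sketch with pydicom — the dataset contents are illustrative, taken from the shape of the record's tests:

```python
from pydicom import Dataset

dataset = Dataset()
# An element in group 0x0031 whose low byte is 0x20, but with no private
# creator registered at (0031,0000).
dataset.add_new(0x00310020, "LO", "Another value")

# Direct indexing, dataset[0x0031, 0x0000], would raise KeyError here.
element = dataset.get((0x0031, 0x0000), None)
if element and element.value == "MITRA LINKED ATTRIBUTES 1.0":
    print("treat as the Mitra global patient ID")
else:
    print("leave the element alone")  # this branch runs
```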
blairconrad__dicognito-51
diff --git a/src/dicognito/__main__.py b/src/dicognito/__main__.py index 985de48..e9d01dc 100644 --- a/src/dicognito/__main__.py +++ b/src/dicognito/__main__.py @@ -61,10 +61,10 @@ def main(args=None): help="Set the log level. May be one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.", ) parser.add_argument( - "--salt", # currently only intended to make testing easier - help="The salt to use when generating random attribute values. Primarily " + "--seed", # currently only intended to make testing easier + help="The seed to use when generating random attribute values. Primarily " "intended to make testing easier. Best anonymization practice is to omit " - "this value and let dicognito generate its own random salt.", + "this value and let dicognito generate its own random seed.", ) parser.add_argument("--version", action="version", version=dicognito.__version__) @@ -75,7 +75,7 @@ def main(args=None): raise ValueError("Invalid log level: %s" % args.log_level) logging.basicConfig(format="", level=numeric_level) - anonymizer = Anonymizer(id_prefix=args.id_prefix, id_suffix=args.id_suffix, salt=args.salt) + anonymizer = Anonymizer(id_prefix=args.id_prefix, id_suffix=args.id_suffix, seed=args.seed) ConvertedStudy = collections.namedtuple("ConvertedStudy", ["AccessionNumber", "PatientID", "PatientName"]) diff --git a/src/dicognito/anonymizer.py b/src/dicognito/anonymizer.py index 5722d98..8fe4912 100644 --- a/src/dicognito/anonymizer.py +++ b/src/dicognito/anonymizer.py @@ -39,7 +39,7 @@ class Anonymizer: >>> dataset.save_as("new-" + filename) """ - def __init__(self, id_prefix="", id_suffix="", salt=None): + def __init__(self, id_prefix="", id_suffix="", seed=None): """\ Create a new Anonymizer. @@ -51,13 +51,13 @@ class Anonymizer: id_suffix : str A prefix to add to all unstructured ID fields, such as Patient ID, Accession Number, etc. - salt + seed Not intended for general use. Seeds the data randomizer in order to produce consistent results. Used for testing. """ minimum_offset_hours = 62 * 24 maximum_offset_hours = 730 * 24 - randomizer = Randomizer(salt) + randomizer = Randomizer(seed) address_anonymizer = AddressAnonymizer(randomizer) self._element_handlers = [ UnwantedElementsStripper( diff --git a/src/dicognito/randomizer.py b/src/dicognito/randomizer.py index 589df27..cbcf4c9 100644 --- a/src/dicognito/randomizer.py +++ b/src/dicognito/randomizer.py @@ -3,20 +3,20 @@ import os class Randomizer: - def __init__(self, salt): + def __init__(self, seed): """\ Create a new Randomizer. Parameters ---------- - salt + seed Not intended for general use. Seeds the data randomizer so it produces consistent results when anonymizing elements with the same initial values. """ - if salt is None: - salt = os.urandom(20) - self.salt = str(salt) + if seed is None: + seed = os.urandom(20) + self.seed = str(seed) def to_int(self, original_value): """\ @@ -28,7 +28,7 @@ class Randomizer: original_value The original value that will ultimately be replaced.
""" - message = self.salt + str(original_value) + message = self.seed + str(original_value) if isinstance(message, bytes): encoded = message hash = [ord(d) for d in hashlib.md5(encoded).digest()] diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 9f3a779..56c75b1 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -1,3 +1,7 @@ +### Changed + +- Renamed "salt" to "seed" in command-line tool and `Anonymizer` class ([#49](https://github.com/blairconrad/dicognito/issues/49)) + ### New - Provide `--version` flag and `__version__` attribute ([#47](https://github.com/blairconrad/dicognito/issues/47))
blairconrad/dicognito
074b20b1c550b48b0295abeb90d99ca5c87bc811
diff --git a/tests/test_commandline.py b/tests/test_commandline.py index 24dc9ba..91933a8 100644 --- a/tests/test_commandline.py +++ b/tests/test_commandline.py @@ -41,9 +41,9 @@ def test_summary_mixed_files_reports_on_each_study(capsys): expected_output = """\ Accession Number Patient ID Patient Name ---------------- ---------- ------------ -DRVN05NEDUYD 2S183ZNON7HU RICHMOND^MARCY^NITA -8NZGNEJWE7QA NPC1XHSJT51Z MORROW^SUSANNA^LUCIEN -SXJXM4HE90EO NPC1XHSJT51Z MORROW^SUSANNA^LUCIEN +HGED6DXQTO1F DQFZ0HDKPYUX JENSEN^KELLIE^PATRICK +XV266HDCGIOH DQFZ0HDKPYUX JENSEN^KELLIE^PATRICK +UUM68P1IJHBE LXO0DMOPN7PV BUCHANAN^ALBA^MADGE """ run_dicognito(path_to("p*")) (actual_output, actual_error) = capsys.readouterr() @@ -125,7 +125,7 @@ def path_to(end_of_path): def run_dicognito(*extra_args): - dicognito.__main__.main(("--salt", "salt for test") + extra_args) + dicognito.__main__.main(("--seed", "") + extra_args) def read_file(*directory_parts):
Rename salt to seed From the outside, it should be billed as a seed for randomness, even though inside `Randomizer`, it's used as a salt when hashing values.
0.0
074b20b1c550b48b0295abeb90d99ca5c87bc811
[ "tests/test_commandline.py::test_ignores_file_that_do_not_match_glob" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-04-07 02:46:29+00:00
mit
1,406
blairconrad__dicognito-64
diff --git a/src/dicognito/datetimeanonymizer.py b/src/dicognito/datetimeanonymizer.py index 59639a0..ba7ad8e 100644 --- a/src/dicognito/datetimeanonymizer.py +++ b/src/dicognito/datetimeanonymizer.py @@ -1,4 +1,5 @@ import datetime +import pydicom class DateTimeAnonymizer: @@ -46,32 +47,65 @@ class DateTimeAnonymizer: def _anonymize_date_and_time(self, dataset, data_element): date_value = data_element.value - date_format = "%Y%m%d"[: len(date_value) - 2] - - old_date = datetime.datetime.strptime(date_value, date_format).date() + if isinstance(data_element.value, pydicom.multival.MultiValue): + dates = list([v for v in data_element.value]) + else: + dates = [data_element.value] - old_hours = datetime.time() - time_value = "" + times = [] time_name = data_element.keyword[:-4] + "Time" if time_name in dataset: time_value = dataset.data_element(time_name).value if time_value: - old_hours = datetime.datetime.strptime(time_value[:2], "%H").time() + if isinstance(time_value, pydicom.multival.MultiValue): + times = list([v for v in time_value]) + else: + times = [time_value] + + new_dates = [] + new_times = [] + for i in range(len(dates)): + date_value = dates[i] + date_format = "%Y%m%d"[: len(date_value) - 2] + old_date = datetime.datetime.strptime(date_value, date_format).date() + + time_value = "" + old_hours = datetime.time() + if i < len(times): + time_value = times[i] + if time_value: + old_hours = datetime.datetime.strptime(time_value[:2], "%H").time() + else: + old_hours = datetime.time() + + old_datetime = datetime.datetime.combine(old_date, old_hours) + new_datetime = old_datetime + self.offset + + new_dates.append(new_datetime.strftime(date_format)) + new_times.append(new_datetime.strftime("%H") + time_value[2:]) + + new_dates = "\\".join(new_dates) + new_times = "\\".join(new_times) + + data_element.value = new_dates + if times: + dataset.data_element(time_name).value = new_times - old_datetime = datetime.datetime.combine(old_date, old_hours) - new_datetime = old_datetime + self.offset + def _anonymize_datetime(self, dataset, data_element): + if isinstance(data_element.value, pydicom.multival.MultiValue): + datetimes = list([v for v in data_element.value]) + else: + datetimes = [data_element.value] - data_element.value = new_datetime.strftime(date_format) - if time_value: - dataset.data_element(time_name).value = new_datetime.strftime("%H") + time_value[2:] + new_datetimes = [] + for datetime_value in datetimes: + datetime_format = "%Y%m%d%H"[: len(datetime_value) - 2] - def _anonymize_datetime(self, dataset, data_element): - datetime_value = data_element.value - datetime_format = "%Y%m%d%H"[: len(datetime_value) - 2] + old_datetime = datetime.datetime.strptime(datetime_value[:10], datetime_format) + new_datetime = old_datetime + self.offset - old_datetime = datetime.datetime.strptime(datetime_value[:10], datetime_format) - new_datetime = old_datetime + self.offset + new_datetime_value = new_datetime.strftime(datetime_format) + new_datetime_value += datetime_value[len(new_datetime_value) :] + new_datetimes.append(new_datetime_value) - new_datetime_value = new_datetime.strftime(datetime_format) - new_datetime_value += datetime_value[len(new_datetime_value) :] - data_element.value = new_datetime_value + data_element.value = "\\".join(new_datetimes) diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 0d2cec9..1ee3fec 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -7,6 +7,10 @@ - Anonymize placer- and filler-order numbers
([#58](https://github.com/blairconrad/dicognito/issues/58)) +### Fixed + +- Fails on multi-valued dates and times ([#61](https://github.com/blairconrad/dicognito/issues/61)) + ## 0.7.1 ### Fixed
blairconrad/dicognito
053aadf2b03ccfdcbf2734e24a1258d62da6c0ee
diff --git a/tests/test_anonymize_single_instance.py b/tests/test_anonymize_single_instance.py index c7bb461..74b7aef 100644 --- a/tests/test_anonymize_single_instance.py +++ b/tests/test_anonymize_single_instance.py @@ -422,6 +422,36 @@ def test_date_gets_anonymized_when_time_has_various_lengths(birth_time): assert len(new_time_string) == len(birth_time) +def test_multivalued_date_with_no_time_pair_gets_anonymized(): + with load_test_instance() as dataset: + dataset.DateOfLastCalibration = original_date = ["20010401", "20010402"] + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + new_date_string = dataset.DateOfLastCalibration + + assert new_date_string != original_date + assert len(new_date_string) == len(original_date) + + +def test_multivalued_date_and_time_pair_gets_anonymized(): + with load_test_instance() as dataset: + dataset.DateOfLastCalibration = original_date = ["20010401", "20010402"] + dataset.TimeOfLastCalibration = original_time = ["120000", "135959"] + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + new_date_string = dataset.DateOfLastCalibration + new_time_string = dataset.TimeOfLastCalibration + + assert new_date_string != original_date + assert len(new_date_string) == len(original_date) + assert new_time_string[2:] == original_time[2:] + assert len(new_time_string) == len(original_time) + + @pytest.mark.parametrize( "datetime_name", [ @@ -479,6 +509,19 @@ def test_datetime_of_various_lengths_gets_anonymized(acquisition_datetime): assert len(new_datetime_string) == len(acquisition_datetime) +def test_multivalued_datetime_gets_anonymized(): + with load_test_instance() as dataset: + dataset.AcquisitionDateTime = original_datetime = ["19741103121558", "19721004161558"] + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + new_datetime = dataset.AcquisitionDateTime + + assert new_datetime != original_datetime + assert len(new_datetime) == len(original_datetime) + + def test_no_sex_still_changes_patient_name(): with load_test_instance() as dataset: del dataset.PatientSex
Fails on multi-valued dates and times

Example:

```
(0018, 1200) Date of Last Calibration DA: ['19900101', '19900101']
(0018, 1201) Time of Last Calibration TM: ['010000.000000', '010000.000000']
```

gives

```
Traceback (most recent call last):
  File "c:\program files\python37\lib\site-packages\pydicom\tag.py", line 30, in tag_in_exception
    yield
  File "c:\program files\python37\lib\site-packages\pydicom\dataset.py", line 1354, in walk
    callback(self, data_element)  # self = this Dataset
  File "c:\program files\python37\lib\site-packages\dicognito\anonymizer.py", line 114, in _anonymize_element
    if handler(dataset, data_element):
  File "c:\program files\python37\lib\site-packages\dicognito\datetimeanonymizer.py", line 42, in __call__
    self._anonymize_date_and_time(dataset, data_element)
  File "c:\program files\python37\lib\site-packages\dicognito\datetimeanonymizer.py", line 51, in _anonymize_date_and_time
    old_date = datetime.datetime.strptime(date_value, date_format).date()
TypeError: strptime() argument 1 must be str, not MultiValue
```
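The patch above normalizes scalar and multi-valued elements to a common list form before parsing, then rejoins the shifted components with the DICOM multi-value separator. A minimal standalone sketch of that idea (`ensure_list` and `shift_dates` are illustrative names, not dicognito functions):

```
import datetime

import pydicom


def ensure_list(value):
    # Multi-valued DA elements arrive as pydicom.multival.MultiValue, which
    # strptime cannot parse directly, so each component is handled on its own.
    if isinstance(value, (list, pydicom.multival.MultiValue)):
        return list(value)
    return [value]


def shift_dates(date_value, offset):
    # Shift one or more DA values by a datetime.timedelta, preserving each
    # value's original precision (YYYY, YYYYMM, or YYYYMMDD).
    new_dates = []
    for date_string in ensure_list(date_value):
        date_format = "%Y%m%d"[: len(date_string) - 2]
        old_date = datetime.datetime.strptime(date_string, date_format)
        new_dates.append((old_date + offset).strftime(date_format))
    return "\\".join(new_dates)  # "\" separates DICOM multi-value components


# shift_dates(["20010401", "20010402"], datetime.timedelta(days=-62))
# -> "20010129\20010130"
```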
0.0
053aadf2b03ccfdcbf2734e24a1258d62da6c0ee
[ "tests/test_anonymize_single_instance.py::test_multivalued_date_with_no_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_multivalued_datetime_gets_anonymized" ]
[ "tests/test_anonymize_single_instance.py::test_minimal_instance_anonymizes_safely", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.MediaStorageSOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.TransferSyntaxUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.ImplementationClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SourceImageSequence[0].ReferencedSOPClassUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[file_meta.MediaStorageSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SourceImageSequence[0].ReferencedSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[StudyInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SeriesInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[FrameOfReferenceUID]", "tests/test_anonymize_single_instance.py::test_repeated_identifying_uis_get_same_values[file_meta.MediaStorageSOPInstanceUID-SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[AccessionNumber]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDs]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PerformedProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].RequestedProcedureID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[StudyID]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[2]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[3]", 
"tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_changed_if_not_empty", "tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_not_added_if_empty", "tests/test_anonymize_single_instance.py::test_female_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_male_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_sex_other_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[2]", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[3]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[NameOfPhysiciansReadingStudy]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[OperatorsName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientMotherBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PerformingPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ReferringPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[RequestingPhysician]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ResponsiblePerson]", "tests/test_anonymize_single_instance.py::test_patient_address_gets_anonymized", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[Occupation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientInsurancePlanCodeSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MilitaryRank]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[BranchOfService]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelephoneNumbers]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelecomInformation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientReligiousPreference]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MedicalRecordLocator]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ReferencedPatientPhotoSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ResponsibleOrganization]", "tests/test_anonymize_single_instance.py::test_equipment_gets_anonymized", "tests/test_anonymize_single_instance.py::test_requesting_service_gets_anonymized", "tests/test_anonymize_single_instance.py::test_current_patient_location_gets_anonymized", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[AcquisitionDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[ContentDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[InstanceCreationDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PatientBirthDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PerformedProcedureStepStartDate]", 
"tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[SeriesDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[StudyDate]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_there_is_no_time", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[20180202]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[199901]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[1983]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[07]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[0911]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[131517]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1234]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12345]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123456]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[AcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameReferenceDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[StartAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[EndAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepStartDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepEndDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947110307]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711030911]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103131517]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1234]", 
"tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12345]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123456]", "tests/test_anonymize_single_instance.py::test_no_sex_still_changes_patient_name", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[None-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHINGELSE-expected2]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHING\\\\SOMETHINGELSE-expected3]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO\\\\SOMETHINGELSE-expected4]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-None-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-YES-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-None-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-YES-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-None-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-YES-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-NO-YES]" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-09-18 02:27:39+00:00
mit
1,407
blairconrad__dicognito-65
diff --git a/src/dicognito/idanonymizer.py b/src/dicognito/idanonymizer.py index d7aa019..24d37e5 100644 --- a/src/dicognito/idanonymizer.py +++ b/src/dicognito/idanonymizer.py @@ -54,16 +54,32 @@ class IDAnonymizer: True if the element was anonymized, or False if not. """ if data_element.tag in self.id_tags: - if isinstance(data_element.value, pydicom.multival.MultiValue): - data_element.value = [self._new_id(id) for id in data_element.value] - else: - data_element.value = self._new_id(data_element.value) + self._replace_id(data_element) return True + + if self._anonymize_mitra_global_patient_id(dataset, data_element): + return True + if data_element.tag == self.issuer_tag and data_element.value: data_element.value = "DICOGNITO" return True return False + def _anonymize_mitra_global_patient_id(self, dataset, data_element): + if data_element.tag.group == 0x0031 and data_element.tag.element % 0x0020 == 0: + private_tag_group = data_element.tag.element >> 8 + if dataset[(0x0031 << 16) + private_tag_group].value == "MITRA LINKED ATTRIBUTES 1.0": + self._replace_id(data_element) + data_element.value = data_element.value.encode() + return True + return False + + def _replace_id(self, data_element): + if isinstance(data_element.value, pydicom.multival.MultiValue): + data_element.value = [self._new_id(id) for id in data_element.value] + else: + data_element.value = self._new_id(data_element.value) + def _new_id(self, original_value): indexes = self.randomizer.get_ints_from_ranges(original_value, *self._indices_for_randomizer) id_root = "".join([self._alphabet[i] for i in indexes]) diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 1ee3fec..5e319e3 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -6,6 +6,7 @@ ### New - Anonymize placer- and filler-order numbers ([#58](https://github.com/blairconrad/dicognito/issues/58)) +- Anonymize Mitra Global Patient IDs ([#60](https://github.com/blairconrad/dicognito/issues/60)) ### Fixed
blairconrad/dicognito
a5c3fc767564aa643c7a6b5ca1098168b0d9e977
diff --git a/tests/test_anonymize_private_tags.py b/tests/test_anonymize_private_tags.py new file mode 100644 index 0000000..af8ec28 --- /dev/null +++ b/tests/test_anonymize_private_tags.py @@ -0,0 +1,18 @@ +from dicognito.anonymizer import Anonymizer + +from .data_for_tests import load_test_instance + + +def test_mitra_global_patient_id_is_updated(): + with load_test_instance() as dataset: + + block = dataset.private_block(0x0031, "MITRA LINKED ATTRIBUTES 1.0", create=True) + block.add_new(0x20, "LO", "GPIYMBB54") + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + block = dataset.private_block(0x0031, "MITRA LINKED ATTRIBUTES 1.0") + actual = block[0x20].value + + assert actual != "GPIYMBB54"
Anonymize Mitra Global Patient ID

e.g.

```
0031 0011 28 | private_creator | LO | 1 | "MITRA LINKED ATTRIBUTES 1.0"
0031 1120 10 | Unknown element | Unkn | ? | "GPIAPCB136"
```
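For reference, a sketch of locating that element with pydicom's private-block API, which is also what the test above uses; `find_mitra_global_patient_id` is a hypothetical helper name:

```
def find_mitra_global_patient_id(dataset):
    # dataset is a pydicom.dataset.Dataset. The ID lives at offset 0x20
    # inside the private group 0x0031 block reserved by the
    # "MITRA LINKED ATTRIBUTES 1.0" private creator.
    try:
        block = dataset.private_block(0x0031, "MITRA LINKED ATTRIBUTES 1.0")
    except KeyError:
        return None  # no such private creator in this dataset
    # e.g. (0031, 1120) when the creator element is at (0031, 0011)
    tag = block.get_tag(0x20)
    return dataset[tag] if tag in dataset else None
```

The patch itself takes the inverse route: given an element in group 0x0031, it shifts the element number right by 8 bits to find its block, then checks whether the corresponding private-creator element holds "MITRA LINKED ATTRIBUTES 1.0".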
0.0
a5c3fc767564aa643c7a6b5ca1098168b0d9e977
[ "tests/test_anonymize_private_tags.py::test_mitra_global_patient_id_is_updated" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-09-18 11:22:06+00:00
mit
1,408
blairconrad__dicognito-67
diff --git a/src/dicognito/datetimeanonymizer.py b/src/dicognito/datetimeanonymizer.py index ba7ad8e..14ccade 100644 --- a/src/dicognito/datetimeanonymizer.py +++ b/src/dicognito/datetimeanonymizer.py @@ -53,7 +53,11 @@ class DateTimeAnonymizer: dates = [data_element.value] times = [] - time_name = data_element.keyword[:-4] + "Time" + if data_element.keyword.endswith("Date"): + time_name = data_element.keyword[:-4] + "Time" + elif data_element.keyword.startswith("Date"): + time_name = "Time" + data_element.keyword[4:] + if time_name in dataset: time_value = dataset.data_element(time_name).value if time_value: diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 2e4d91d..b894047 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -1,5 +1,9 @@ -## 0.8.0 - +### Fixed + +- Fails to anonymize TimeOfLastCalibration ([#66](https://github.com/blairconrad/dicognito/issues/66)) + +## 0.8.0 + ### Changed - Drop support for Python 2.7 ([#63](https://github.com/blairconrad/dicognito/issues/63))
blairconrad/dicognito
4bf5726fde832ae041c4b232693faaca85d0be6c
diff --git a/tests/test_anonymize_single_instance.py b/tests/test_anonymize_single_instance.py index 74b7aef..567e996 100644 --- a/tests/test_anonymize_single_instance.py +++ b/tests/test_anonymize_single_instance.py @@ -440,16 +440,16 @@ def test_multivalued_date_and_time_pair_gets_anonymized(): dataset.DateOfLastCalibration = original_date = ["20010401", "20010402"] dataset.TimeOfLastCalibration = original_time = ["120000", "135959"] - anonymizer = Anonymizer() + anonymizer = Anonymizer(seed="") anonymizer.anonymize(dataset) - new_date_string = dataset.DateOfLastCalibration - new_time_string = dataset.TimeOfLastCalibration + new_date = dataset.DateOfLastCalibration + new_time = dataset.TimeOfLastCalibration - assert new_date_string != original_date - assert len(new_date_string) == len(original_date) - assert new_time_string[2:] == original_time[2:] - assert len(new_time_string) == len(original_time) + assert new_date != original_date + assert len(new_date) == len(original_date) + assert new_time != original_time + assert len(new_time) == len(original_time) @pytest.mark.parametrize(
Fails to anonymize TimeOfLastCalibration because we expect to find "Date" and "Time" at the _end_ of the element name.
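The fix derives the paired Time keyword from either end of the Date keyword. A standalone sketch of that mapping (the function name is illustrative):

```
def paired_time_keyword(date_keyword):
    # DICOM keywords pair dates and times in two patterns:
    #   "StudyDate"             -> "StudyTime"
    #   "DateOfLastCalibration" -> "TimeOfLastCalibration"
    if date_keyword.endswith("Date"):
        return date_keyword[:-4] + "Time"
    if date_keyword.startswith("Date"):
        return "Time" + date_keyword[4:]
    return None  # no recognizable Date component
```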
0.0
4bf5726fde832ae041c4b232693faaca85d0be6c
[ "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized" ]
[ "tests/test_anonymize_single_instance.py::test_minimal_instance_anonymizes_safely", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.MediaStorageSOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.TransferSyntaxUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.ImplementationClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SourceImageSequence[0].ReferencedSOPClassUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[file_meta.MediaStorageSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SourceImageSequence[0].ReferencedSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[StudyInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SeriesInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[FrameOfReferenceUID]", "tests/test_anonymize_single_instance.py::test_repeated_identifying_uis_get_same_values[file_meta.MediaStorageSOPInstanceUID-SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[AccessionNumber]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDs]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PerformedProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].RequestedProcedureID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[StudyID]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[2]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[3]", 
"tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_changed_if_not_empty", "tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_not_added_if_empty", "tests/test_anonymize_single_instance.py::test_female_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_male_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_sex_other_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[2]", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[3]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[NameOfPhysiciansReadingStudy]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[OperatorsName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientMotherBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PerformingPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ReferringPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[RequestingPhysician]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ResponsiblePerson]", "tests/test_anonymize_single_instance.py::test_patient_address_gets_anonymized", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[Occupation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientInsurancePlanCodeSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MilitaryRank]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[BranchOfService]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelephoneNumbers]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelecomInformation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientReligiousPreference]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MedicalRecordLocator]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ReferencedPatientPhotoSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ResponsibleOrganization]", "tests/test_anonymize_single_instance.py::test_equipment_gets_anonymized", "tests/test_anonymize_single_instance.py::test_requesting_service_gets_anonymized", "tests/test_anonymize_single_instance.py::test_current_patient_location_gets_anonymized", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[AcquisitionDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[ContentDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[InstanceCreationDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PatientBirthDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PerformedProcedureStepStartDate]", 
"tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[SeriesDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[StudyDate]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_there_is_no_time", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[20180202]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[199901]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[1983]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[07]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[0911]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[131517]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1234]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12345]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_date_with_no_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[AcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameReferenceDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[StartAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[EndAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepStartDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepEndDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947110307]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711030911]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103131517]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123]", 
"tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1234]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12345]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_datetime_gets_anonymized", "tests/test_anonymize_single_instance.py::test_no_sex_still_changes_patient_name", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[None-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHINGELSE-expected2]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHING\\\\SOMETHINGELSE-expected3]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO\\\\SOMETHINGELSE-expected4]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-None-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-YES-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-None-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-YES-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-None-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-YES-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-NO-YES]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-09-18 12:35:54+00:00
mit
1,409
blairconrad__dicognito-73
diff --git a/src/dicognito/datetimeanonymizer.py b/src/dicognito/datetimeanonymizer.py index 14ccade..90c6818 100644 --- a/src/dicognito/datetimeanonymizer.py +++ b/src/dicognito/datetimeanonymizer.py @@ -53,10 +53,7 @@ class DateTimeAnonymizer: dates = [data_element.value] times = [] - if data_element.keyword.endswith("Date"): - time_name = data_element.keyword[:-4] + "Time" - elif data_element.keyword.startswith("Date"): - time_name = "Time" + data_element.keyword[4:] + time_name = data_element.keyword.replace("Date", "Time") if time_name in dataset: time_value = dataset.data_element(time_name).value diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 61c64a5..d3285cd 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -1,8 +1,12 @@ -## 0.9.0 - +### Fixed + +- Fails to anonymize object with Issue Date of Imaging Service Request ([#72](https://github.com/blairconrad/dicognito/issues/72)) + +## 0.9.0 + ### New -- Add option to write anonymized files to another directory([#69](https://github.com/blairconrad/dicognito/issues/69)) +- Add option to write anonymized files to another directory ([#69](https://github.com/blairconrad/dicognito/issues/69)) ## 0.8.1
blairconrad/dicognito
bc8a16fbc2ac369a09c867532abfa9553979cf13
diff --git a/tests/test_anonymize_single_instance.py b/tests/test_anonymize_single_instance.py index 567e996..23a05bc 100644 --- a/tests/test_anonymize_single_instance.py +++ b/tests/test_anonymize_single_instance.py @@ -452,6 +452,23 @@ def test_multivalued_date_and_time_pair_gets_anonymized(): assert len(new_time) == len(original_time) +def test_issue_date_of_imaging_service_request_gets_anonymized(): + original_datetime = datetime.datetime(1974, 11, 3, 12, 15, 58) + original_date_string = original_datetime.strftime("%Y%m%d") + original_time_string = original_datetime.strftime("%H%M%S") + + with load_test_instance() as dataset: + dataset.IssueDateOfImagingServiceRequest = original_date_string + dataset.IssueTimeOfImagingServiceRequest = original_time_string + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + new_date_string = dataset.IssueDateOfImagingServiceRequest + new_time_string = dataset.IssueTimeOfImagingServiceRequest + assert (new_date_string, new_time_string) != (original_date_string, original_time_string) + + @pytest.mark.parametrize( "datetime_name", [
Fails to anonymize object with Issue Date of Imaging Service Request

… because we look for "Date" at the beginning or end of the element name:

```
UnboundLocalError: With tag (200b, 102b) got exception: local variable 'time_name' referenced before assignment
Traceback (most recent call last):
  File "c:\program files\python37\lib\site-packages\pydicom\tag.py", line 30, in tag_in_exception
    yield
  File "c:\program files\python37\lib\site-packages\pydicom\dataset.py", line 1773, in walk
    callback(self, data_element)  # self = this Dataset
  File "c:\program files\python37\lib\site-packages\dicognito\anonymizer.py", line 120, in _anonymize_element
    if handler(dataset, data_element):
  File "c:\program files\python37\lib\site-packages\dicognito\datetimeanonymizer.py", line 43, in __call__
    self._anonymize_date_and_time(dataset, data_element)
  File "c:\program files\python37\lib\site-packages\dicognito\datetimeanonymizer.py", line 61, in _anonymize_date_and_time
    if time_name in dataset:
UnboundLocalError: local variable 'time_name' referenced before assignment
```
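Keywords like IssueDateOfImagingServiceRequest bury "Date" in the middle of the name, so neither the prefix nor the suffix branch matches and `time_name` is never assigned. The patch collapses both branches into a single `str.replace`, which covers all three positions:

```
def paired_time_keyword(date_keyword):
    # "Date" may appear at the start, middle, or end of the keyword;
    # replacing it with "Time" handles every case uniformly.
    return date_keyword.replace("Date", "Time")


assert paired_time_keyword("StudyDate") == "StudyTime"
assert paired_time_keyword("DateOfLastCalibration") == "TimeOfLastCalibration"
assert paired_time_keyword("IssueDateOfImagingServiceRequest") == "IssueTimeOfImagingServiceRequest"
```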
0.0
bc8a16fbc2ac369a09c867532abfa9553979cf13
[ "tests/test_anonymize_single_instance.py::test_issue_date_of_imaging_service_request_gets_anonymized" ]
[ "tests/test_anonymize_single_instance.py::test_minimal_instance_anonymizes_safely", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.MediaStorageSOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.TransferSyntaxUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.ImplementationClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SourceImageSequence[0].ReferencedSOPClassUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[file_meta.MediaStorageSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SourceImageSequence[0].ReferencedSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[StudyInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SeriesInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[FrameOfReferenceUID]", "tests/test_anonymize_single_instance.py::test_repeated_identifying_uis_get_same_values[file_meta.MediaStorageSOPInstanceUID-SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[AccessionNumber]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDs]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PerformedProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].RequestedProcedureID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[StudyID]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[2]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[3]", 
"tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_changed_if_not_empty", "tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_not_added_if_empty", "tests/test_anonymize_single_instance.py::test_female_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_male_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_sex_other_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[2]", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[3]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[NameOfPhysiciansReadingStudy]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[OperatorsName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientMotherBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PerformingPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ReferringPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[RequestingPhysician]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ResponsiblePerson]", "tests/test_anonymize_single_instance.py::test_patient_address_gets_anonymized", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[Occupation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientInsurancePlanCodeSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MilitaryRank]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[BranchOfService]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelephoneNumbers]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelecomInformation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientReligiousPreference]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MedicalRecordLocator]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ReferencedPatientPhotoSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ResponsibleOrganization]", "tests/test_anonymize_single_instance.py::test_equipment_gets_anonymized", "tests/test_anonymize_single_instance.py::test_requesting_service_gets_anonymized", "tests/test_anonymize_single_instance.py::test_current_patient_location_gets_anonymized", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[AcquisitionDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[ContentDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[InstanceCreationDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PatientBirthDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PerformedProcedureStepStartDate]", 
"tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[SeriesDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[StudyDate]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_there_is_no_time", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[20180202]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[199901]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[1983]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[07]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[0911]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[131517]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1234]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12345]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_date_with_no_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[AcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameReferenceDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[StartAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[EndAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepStartDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepEndDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947110307]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711030911]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103131517]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12]", 
"tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1234]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12345]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_datetime_gets_anonymized", "tests/test_anonymize_single_instance.py::test_no_sex_still_changes_patient_name", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[None-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHINGELSE-expected2]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHING\\\\SOMETHINGELSE-expected3]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO\\\\SOMETHINGELSE-expected4]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-None-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-YES-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-None-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-YES-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-None-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-YES-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-NO-YES]" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-09-20 19:53:32+00:00
mit
1,410
blairconrad__dicognito-87
diff --git a/src/dicognito/anonymizer.py b/src/dicognito/anonymizer.py index 80f8b54..b0f6a8b 100644 --- a/src/dicognito/anonymizer.py +++ b/src/dicognito/anonymizer.py @@ -12,7 +12,6 @@ from dicognito.unwantedelements import UnwantedElementsStripper from dicognito.randomizer import Randomizer import pydicom -import random class Anonymizer: @@ -58,8 +57,14 @@ class Anonymizer: """ minimum_offset_hours = 62 * 24 maximum_offset_hours = 730 * 24 + randomizer = Randomizer(seed) + + date_offset_hours = -( + randomizer.to_int("date_offset") % (maximum_offset_hours - minimum_offset_hours) + minimum_offset_hours + ) address_anonymizer = AddressAnonymizer(randomizer) + self._element_handlers = [ UnwantedElementsStripper( "BranchOfService", @@ -97,7 +102,7 @@ class Anonymizer: EquipmentAnonymizer(address_anonymizer), FixedValueAnonymizer("RequestingService", ""), FixedValueAnonymizer("CurrentPatientLocation", ""), - DateTimeAnonymizer(-random.randint(minimum_offset_hours, maximum_offset_hours)), + DateTimeAnonymizer(date_offset_hours), ] def anonymize(self, dataset): diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index e9933be..4a72ac6 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -8,6 +8,9 @@ - Deflated files are corrupt when anonymized from the command line ([#80](https://github.com/blairconrad/dicognito/issues/80)) +- Date/time offset is not always the same for a given seed + ([#86](https://github.com/blairconrad/dicognito/issues/86)) + ## 0.10.0
blairconrad/dicognito
2a4dd89edfd89504aaff1aeef9faf4b135d33e7c
diff --git a/tests/test_anonymize_single_instance.py b/tests/test_anonymize_single_instance.py index 23a05bc..5cffe58 100644 --- a/tests/test_anonymize_single_instance.py +++ b/tests/test_anonymize_single_instance.py @@ -452,6 +452,25 @@ def test_multivalued_date_and_time_pair_gets_anonymized(): assert len(new_time) == len(original_time) +def test_multivalued_date_and_time_pair_gets_anonymized_same_with_same_seed(): + with load_test_instance() as dataset1, load_test_instance() as dataset2: + dataset1.DateOfLastCalibration = original_date = ["20010401", "20010402"] + dataset1.TimeOfLastCalibration = original_time = ["120000", "135959"] + dataset2.DateOfLastCalibration = original_date + dataset2.TimeOfLastCalibration = original_time + + Anonymizer(seed="").anonymize(dataset1) + Anonymizer(seed="").anonymize(dataset2) + + new_date1 = dataset1.DateOfLastCalibration + new_time1 = dataset1.TimeOfLastCalibration + new_date2 = dataset2.DateOfLastCalibration + new_time2 = dataset2.TimeOfLastCalibration + + assert new_date1 == new_date2 + assert new_time1 == new_time2 + + def test_issue_date_of_imaging_service_request_gets_anonymized(): original_datetime = datetime.datetime(1974, 11, 3, 12, 15, 58) original_date_string = original_datetime.strftime("%Y%m%d")
Date/time offset is not always the same for a given seed

When anonymizing using a set seed, the date (time) offsets will sometimes vary. To see this, anonymize an object a few times with the same seed.

This is the reason that `test_multivalued_date_and_time_pair_gets_anonymized` fails from time to time.
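The root cause is the `random.randint` call in `Anonymizer.__init__`, which draws from global random state instead of the seeded `Randomizer`. The patch derives the offset from the randomizer itself; a rough stand-in below uses a plain hash in place of `Randomizer.to_int` (an assumption about its behavior, not dicognito's actual implementation):

```
import hashlib


def date_offset_hours(seed, minimum_hours=62 * 24, maximum_hours=730 * 24):
    # Hash the seed into an integer so the same seed always yields the same
    # negative offset, then map it into [minimum_hours, maximum_hours).
    digest = hashlib.sha256(("date_offset" + str(seed)).encode()).digest()
    value = int.from_bytes(digest, "big")
    return -(value % (maximum_hours - minimum_hours) + minimum_hours)


assert date_offset_hours("seed") == date_offset_hours("seed")  # repeatable
```

Note that the modulo maps into the half-open range [minimum, maximum), whereas the old `random.randint` included both endpoints; the patch accepts that slight narrowing in exchange for determinism.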
0.0
2a4dd89edfd89504aaff1aeef9faf4b135d33e7c
[ "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized_same_with_same_seed" ]
[ "tests/test_anonymize_single_instance.py::test_minimal_instance_anonymizes_safely", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.MediaStorageSOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.TransferSyntaxUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.ImplementationClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SourceImageSequence[0].ReferencedSOPClassUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[file_meta.MediaStorageSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SourceImageSequence[0].ReferencedSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[StudyInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SeriesInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[FrameOfReferenceUID]", "tests/test_anonymize_single_instance.py::test_repeated_identifying_uis_get_same_values[file_meta.MediaStorageSOPInstanceUID-SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[AccessionNumber]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDs]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PerformedProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].RequestedProcedureID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[StudyID]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[2]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[3]", 
"tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_changed_if_not_empty", "tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_not_added_if_empty", "tests/test_anonymize_single_instance.py::test_female_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_male_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_sex_other_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[2]", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[3]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[NameOfPhysiciansReadingStudy]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[OperatorsName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientMotherBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PerformingPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ReferringPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[RequestingPhysician]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ResponsiblePerson]", "tests/test_anonymize_single_instance.py::test_patient_address_gets_anonymized", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[Occupation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientInsurancePlanCodeSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MilitaryRank]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[BranchOfService]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelephoneNumbers]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelecomInformation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientReligiousPreference]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MedicalRecordLocator]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ReferencedPatientPhotoSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ResponsibleOrganization]", "tests/test_anonymize_single_instance.py::test_equipment_gets_anonymized", "tests/test_anonymize_single_instance.py::test_requesting_service_gets_anonymized", "tests/test_anonymize_single_instance.py::test_current_patient_location_gets_anonymized", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[AcquisitionDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[ContentDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[InstanceCreationDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PatientBirthDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PerformedProcedureStepStartDate]", 
"tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[SeriesDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[StudyDate]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_there_is_no_time", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[20180202]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[199901]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[1983]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[07]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[0911]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[131517]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1234]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12345]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_date_with_no_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_issue_date_of_imaging_service_request_gets_anonymized", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[AcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameReferenceDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[StartAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[EndAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepStartDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepEndDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947110307]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711030911]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103131517]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1]", 
"tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1234]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12345]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_datetime_gets_anonymized", "tests/test_anonymize_single_instance.py::test_no_sex_still_changes_patient_name", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[None-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHINGELSE-expected2]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHING\\\\SOMETHINGELSE-expected3]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO\\\\SOMETHINGELSE-expected4]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-None-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-YES-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-None-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-YES-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-None-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-YES-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-NO-YES]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-05-29 18:42:26+00:00
mit
1,411
blairconrad__dicognito-98
diff --git a/src/dicognito/anonymizer.py b/src/dicognito/anonymizer.py index b0f6a8b..80e991f 100644 --- a/src/dicognito/anonymizer.py +++ b/src/dicognito/anonymizer.py @@ -96,6 +96,7 @@ class Anonymizer: "PlacerOrderNumberProcedure", "RequestedProcedureID", "ScheduledProcedureStepID", + "StationName", "StudyID", ), address_anonymizer, diff --git a/src/dicognito/equipmentanonymizer.py b/src/dicognito/equipmentanonymizer.py index 35e41de..cb4bd46 100644 --- a/src/dicognito/equipmentanonymizer.py +++ b/src/dicognito/equipmentanonymizer.py @@ -17,7 +17,6 @@ class EquipmentAnonymizer: pydicom.datadict.tag_for_keyword("InstitutionName"): self.anonymize_institution_name, pydicom.datadict.tag_for_keyword("InstitutionAddress"): self.anonymize_institution_address, pydicom.datadict.tag_for_keyword("InstitutionalDepartmentName"): self.anonymize_department_name, - pydicom.datadict.tag_for_keyword("StationName"): self.anonymize_station_name, } def __call__(self, dataset, data_element): @@ -33,8 +32,8 @@ class EquipmentAnonymizer: data_element : pydicom.dataset.DataElement The current element. Will be anonymized if it has a value and if its keyword is one of InstitutionName, - InstitutionAddress, InstitutionalDepartmentName, or - StationName. Additionally, if its keyword is InstitutionName, + InstitutionAddress, or InstitutionalDepartmentName. + Additionally, if its keyword is InstitutionName, then InstitutionAddress will also be anonymized. Returns @@ -62,6 +61,3 @@ class EquipmentAnonymizer: def anonymize_department_name(self, dataset, data_element): data_element.value = "RADIOLOGY" - - def anonymize_station_name(self, dataset, data_element): - data_element.value = dataset.Modality + "01" diff --git a/src/dicognito/release_notes.md b/src/dicognito/release_notes.md index 6a4a228..1a07c5f 100644 --- a/src/dicognito/release_notes.md +++ b/src/dicognito/release_notes.md @@ -9,8 +9,8 @@ ### Fixed - Deflated files are corrupt when anonymized from the command line ([#80](https://github.com/blairconrad/dicognito/issues/80)) - - Date/time offset is not always the same for a given seed ([#86](https://github.com/blairconrad/dicognito/issues/86)) +- Anonymizing dataset with StationName but no Modality fails ([#97](https://github.com/blairconrad/dicognito/issues/97)) ## 0.10.0
blairconrad/dicognito
7e9b0682a40fa746980b99fb9baf399e17eb0b15
diff --git a/tests/test_anonymize_single_instance.py b/tests/test_anonymize_single_instance.py index 5cffe58..912800c 100644 --- a/tests/test_anonymize_single_instance.py +++ b/tests/test_anonymize_single_instance.py @@ -313,6 +313,19 @@ def test_equipment_gets_anonymized(): assert new_station_name != original_station_name +def test_station_gets_anonymized_when_no_modality(): + with load_test_instance() as dataset: + original_station_name = dataset.StationName + del dataset.Modality + + anonymizer = Anonymizer() + anonymizer.anonymize(dataset) + + new_station_name = dataset.StationName + + assert new_station_name != original_station_name + + def test_requesting_service_gets_anonymized(): with load_test_instance() as dataset: original = dataset.RequestingService
Anonymizing dataset with StationName but no Modality fails Anonymizing a dataset with a supplied `StationName` attribute but no sibling `Modality` fails with this output: ``` Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2040, in walk callback(self, data_element) # self = this Dataset File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\anonymizer.py", line 120, in _anonymize_element if handler(dataset, data_element): File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 48, in __call__ element_anonymizer(dataset, data_element) File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 67, in anonymize_station_name data_element.value = dataset.Modality + "01" File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 778, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'Dataset' object has no attribute 'Modality' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2046, in walk dataset.walk(callback) File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2046, in walk dataset.walk(callback) File "c:\program files (x86)\python38-32\lib\contextlib.py", line 131, in __exit__ self.gen.throw(type, value, traceback) File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 34, in tag_in_exception raise type(ex)(msg) AttributeError: With tag (0008, 1010) got exception: 'Dataset' object has no attribute 'Modality' Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2040, in walk callback(self, data_element) # self = this Dataset File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\anonymizer.py", line 120, in _anonymize_element if handler(dataset, data_element): File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 48, in __call__ element_anonymizer(dataset, data_element) File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 67, in anonymize_station_name data_element.value = dataset.Modality + "01" File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 778, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'Dataset' object has no attribute 'Modality' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\runpy.py", line 194, in _run_module_as_main return _run_code(code, main_globals, None, File "c:\program files (x86)\python38-32\lib\runpy.py", line 87, in _run_code exec(code, run_globals) File "C:\Program Files (x86)\Python38-32\Scripts\dicognito.exe\__main__.py", line 7, in <module> File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\__main__.py", line 119, in main anonymizer.anonymize(dataset) File "c:\program files 
(x86)\python38-32\lib\site-packages\dicognito\anonymizer.py", line 114, in anonymize dataset.walk(self._anonymize_element) File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2046, in walk dataset.walk(callback) File "c:\program files (x86)\python38-32\lib\contextlib.py", line 131, in __exit__ self.gen.throw(type, value, traceback) File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 34, in tag_in_exception raise type(ex)(msg) AttributeError: With tag (0018, 9506) got exception: With tag (0008, 1010) got exception: 'Dataset' object has no attribute 'Modality' Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2040, in walk callback(self, data_element) # self = this Dataset File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\anonymizer.py", line 120, in _anonymize_element if handler(dataset, data_element): File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 48, in __call__ element_anonymizer(dataset, data_element) File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 67, in anonymize_station_name data_element.value = dataset.Modality + "01" File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 778, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'Dataset' object has no attribute 'Modality' Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2040, in walk callback(self, data_element) # self = this Dataset File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\anonymizer.py", line 120, in _anonymize_element if handler(dataset, data_element): File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 48, in __call__ element_anonymizer(dataset, data_element) File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 67, in anonymize_station_name data_element.value = dataset.Modality + "01" File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 778, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'Dataset' object has no attribute 'Modality' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2046, in walk dataset.walk(callback) File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2046, in walk dataset.walk(callback) File "c:\program files (x86)\python38-32\lib\contextlib.py", line 131, in __exit__ self.gen.throw(type, value, traceback) File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 34, in tag_in_exception raise type(ex)(msg) AttributeError: With tag (0008, 1010) got exception: 'Dataset' object has no attribute 'Modality' Traceback (most recent call last): File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\tag.py", line 27, in tag_in_exception yield 
File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 2040, in walk callback(self, data_element) # self = this Dataset File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\anonymizer.py", line 120, in _anonymize_element if handler(dataset, data_element): File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 48, in __call__ element_anonymizer(dataset, data_element) File "c:\program files (x86)\python38-32\lib\site-packages\dicognito\equipmentanonymizer.py", line 67, in anonymize_station_name data_element.value = dataset.Modality + "01" File "c:\program files (x86)\python38-32\lib\site-packages\pydicom\dataset.py", line 778, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'Dataset' object has no attribute 'Modality' ```
0.0
7e9b0682a40fa746980b99fb9baf399e17eb0b15
[ "tests/test_anonymize_single_instance.py::test_station_gets_anonymized_when_no_modality" ]
[ "tests/test_anonymize_single_instance.py::test_minimal_instance_anonymizes_safely", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.MediaStorageSOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.TransferSyntaxUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[file_meta.ImplementationClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SOPClassUID]", "tests/test_anonymize_single_instance.py::test_nonidentifying_uis_are_left_alone[SourceImageSequence[0].ReferencedSOPClassUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[file_meta.MediaStorageSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SourceImageSequence[0].ReferencedSOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[StudyInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[SeriesInstanceUID]", "tests/test_anonymize_single_instance.py::test_identifying_uis_are_updated[FrameOfReferenceUID]", "tests/test_anonymize_single_instance.py::test_repeated_identifying_uis_get_same_values[file_meta.MediaStorageSOPInstanceUID-SOPInstanceUID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[AccessionNumber]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[FillerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDs]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[0].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[OtherPatientIDsSequence[1].IssuerOfPatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PatientID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PerformedProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequest]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberImagingServiceRequestRetired]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[PlacerOrderNumberProcedure]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].RequestedProcedureID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[RequestAttributesSequence[0].ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[ScheduledProcedureStepID]", "tests/test_anonymize_single_instance.py::test_ids_are_anonymized[StudyID]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[2]", "tests/test_anonymize_single_instance.py::test_other_patient_ids_anonymized_to_same_number_of_ids[3]", 
"tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_changed_if_not_empty", "tests/test_anonymize_single_instance.py::test_issuer_of_patient_id_not_added_if_empty", "tests/test_anonymize_single_instance.py::test_female_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_male_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_sex_other_patient_name_gets_anonymized", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[2]", "tests/test_anonymize_single_instance.py::test_other_patient_names_anonymized_to_same_number_of_names[3]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[NameOfPhysiciansReadingStudy]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[OperatorsName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PatientMotherBirthName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[PerformingPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ReferringPhysicianName]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[RequestingPhysician]", "tests/test_anonymize_single_instance.py::test_non_patient_names_get_anonymized[ResponsiblePerson]", "tests/test_anonymize_single_instance.py::test_patient_address_gets_anonymized", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[Occupation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientInsurancePlanCodeSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MilitaryRank]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[BranchOfService]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelephoneNumbers]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientTelecomInformation]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[PatientReligiousPreference]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[MedicalRecordLocator]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ReferencedPatientPhotoSequence]", "tests/test_anonymize_single_instance.py::test_extra_patient_attributes_are_removed[ResponsibleOrganization]", "tests/test_anonymize_single_instance.py::test_equipment_gets_anonymized", "tests/test_anonymize_single_instance.py::test_requesting_service_gets_anonymized", "tests/test_anonymize_single_instance.py::test_current_patient_location_gets_anonymized", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[AcquisitionDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[ContentDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[InstanceCreationDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PatientBirthDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[PerformedProcedureStepStartDate]", 
"tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[SeriesDate]", "tests/test_anonymize_single_instance.py::test_dates_and_times_get_anonymized_when_both_are_present[StudyDate]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_there_is_no_time", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[20180202]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[199901]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_date_has_various_lengths[1983]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[07]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[0911]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[131517]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.1234]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.12345]", "tests/test_anonymize_single_instance.py::test_date_gets_anonymized_when_time_has_various_lengths[192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_date_with_no_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized", "tests/test_anonymize_single_instance.py::test_multivalued_date_and_time_pair_gets_anonymized_same_with_same_seed", "tests/test_anonymize_single_instance.py::test_issue_date_of_imaging_service_request_gets_anonymized", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[AcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameReferenceDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[FrameAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[StartAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[EndAcquisitionDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepStartDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_gets_anonymized[PerformedProcedureStepEndDateTime]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[1947110307]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[194711030911]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103131517]", 
"tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.1234]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.12345]", "tests/test_anonymize_single_instance.py::test_datetime_of_various_lengths_gets_anonymized[19471103192123.123456]", "tests/test_anonymize_single_instance.py::test_multivalued_datetime_gets_anonymized", "tests/test_anonymize_single_instance.py::test_no_sex_still_changes_patient_name", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[None-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO-DICOGNITO]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHINGELSE-expected2]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[SOMETHING\\\\SOMETHINGELSE-expected3]", "tests/test_anonymize_single_instance.py::test_deidentification_method_set_properly[DICOGNITO\\\\SOMETHINGELSE-expected4]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-None-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-YES-None]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[None-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-None-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-YES-NO]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[NO-NO-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-None-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-YES-YES]", "tests/test_anonymize_single_instance.py::test_patient_identity_removed[YES-NO-YES]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-07-07 18:28:36+00:00
mit
1,412
bloomberg__attrs-strict-38
diff --git a/attrs_strict/_type_validation.py b/attrs_strict/_type_validation.py index fce8bc5..2dc75d2 100644 --- a/attrs_strict/_type_validation.py +++ b/attrs_strict/_type_validation.py @@ -105,6 +105,9 @@ def _handle_dict(attribute, container, expected_type): def _handle_tuple(attribute, container, expected_type): tuple_types = expected_type.__args__ + if len(tuple_types) == 2 and tuple_types[1] == Ellipsis: + element_type = tuple_types[0] + tuple_types = (element_type, ) * len(container) if len(container) != len(tuple_types): raise TupleError(container, attribute.type, tuple_types)
bloomberg/attrs-strict
21abe902debcc2eb1a7f373dac37ea05edf21b86
diff --git a/tests/test_tuple.py b/tests/test_tuple.py index a3428ab..765ac07 100644 --- a/tests/test_tuple.py +++ b/tests/test_tuple.py @@ -45,3 +45,45 @@ def test_tuple_of_tuple_raises(): "in typing.Tuple[typing.Tuple[int, int], typing.Tuple[int, int]]. " "Expected 2 received 3 in ((1, 2), (3, 4, 5))>" ) == repr(error.value) + + +def test_variable_length_tuple(): + element = (1, 2, 3, 4) + + attr = MagicMock() + attr.name = "foo" + attr.type = Tuple[int, ...] + + validator = type_validator() + + validator(None, attr, element) + + +def test_variable_length_tuple_empty(): + element = () + + attr = MagicMock() + attr.name = "foo" + attr.type = Tuple[int, ...] + + validator = type_validator() + + validator(None, attr, element) + + +def test_variable_length_tuple_raises(): + element = (1, 2, 3, "4") + + attr = MagicMock() + attr.name = "foo" + attr.type = Tuple[int, ...] + + validator = type_validator() + + with pytest.raises(ValueError) as error: + validator(None, attr, element) + + assert ( + "<foo must be typing.Tuple[int, ...] (got 4 that is a {}) " + "in (1, 2, 3, '4')>" + ).format(str) == repr(error.value)
Support for Tuple is incorrect/incomplete When declaring an attribute as a Tuple, there are two possible forms. The simple form, like `Tuple[str]` or `Tuple[int, int]`, should be used for fixed-size tuple declarations, which seems to be what attrs-strict currently supports. But there is also the form with an ellipsis to declare a variable-length tuple of homogeneous type (see https://docs.python.org/3.7/library/typing.html#typing.Tuple). It seems to me that this should at the very least be mentioned in the documentation (in the "What is currently supported ?" section), or be fixed to have proper support for this pattern. E.g. ``` @attr.s() class Model: item = attr.ib(type=Tuple[int, ...], validator=type_validator()) obj = Model(item=(1, 2, 3)) ``` will raise: ``` TupleError: Element (1, 2, 3) has more elements than types specified in typing.Tuple[int, ...]. Expected 2 received 3 ```
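A minimal sketch of the normalization the fix applies, assuming `typing.Tuple` annotations: when the second type argument is `Ellipsis`, the expected element type is simply repeated once per element of the actual value, so the existing fixed-length comparison can be reused unchanged:
```
from typing import Tuple

def _expected_element_types(expected_type, container):
    """Expand Tuple[X, ...] to one X per element; pass fixed forms through."""
    tuple_types = expected_type.__args__
    if len(tuple_types) == 2 and tuple_types[1] is Ellipsis:
        tuple_types = (tuple_types[0],) * len(container)
    return tuple_types

print(_expected_element_types(Tuple[int, ...], (1, 2, 3)))  # -> int, three times
print(_expected_element_types(Tuple[int, str], (1, "a")))   # -> (int, str), unchanged
```
An empty container expands to an empty type tuple, which is why the variable-length form also accepts `()` — exactly what `test_variable_length_tuple_empty` in the test patch checks.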
0.0
21abe902debcc2eb1a7f373dac37ea05edf21b86
[ "tests/test_tuple.py::test_variable_length_tuple", "tests/test_tuple.py::test_variable_length_tuple_empty", "tests/test_tuple.py::test_variable_length_tuple_raises" ]
[ "tests/test_tuple.py::test_tuple_with_incorrect_number_of_arguments_raises", "tests/test_tuple.py::test_tuple_of_tuple_raises" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2020-05-25 11:11:35+00:00
apache-2.0
1,413
blue-yonder__tsfresh-666
diff --git a/CHANGES.rst b/CHANGES.rst index e114132..e091995 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,6 +16,7 @@ Unreleased - Added variation coefficient (#654) - Added the datetimeindex explanation from the notebook to the docs (#661) - Optimize RelevantFeatureAugmenter to avoid re-extraction (#669) + - Added a function `add_sub_time_series_index` (#666) - Bugfixes - Increase the extracted `ar` coefficients to the full parameter range. (#662) - Documentation fixes (#663, #664, #665) diff --git a/tsfresh/utilities/dataframe_functions.py b/tsfresh/utilities/dataframe_functions.py index 015c7dc..c039f10 100644 --- a/tsfresh/utilities/dataframe_functions.py +++ b/tsfresh/utilities/dataframe_functions.py @@ -553,3 +553,85 @@ def make_forecasting_frame(x, kind, max_timeshift, rolling_direction): df_shift = df_shift[mask] return df_shift, df["value"][1:] + + +def add_sub_time_series_index(df_or_dict, sub_length, column_id=None, column_sort=None, column_kind=None): + """ + Add a column "id" which contains: + 1. if column_id is None: for each kind (or if column_kind is None for the full dataframe) a new index built by + "sub-packaging" the data in packages of length "sub_length". For example if you have data with the + length of 11 and sub_length is 2, you will get 6 new packages: 0, 0; 1, 1; 2, 2; 3, 3; 4, 4; 5. + 2. if column_id is not None: the same as before, just for each id seperately. The old column_id values are added + to the new "id" column after a comma + + You can use this functions to turn a long measurement into sub-packages, where you want to extract features on. + + :param df_or_dict: a pandas DataFrame or a dictionary. The required shape/form of the object depends on the rest of + the passed arguments. + :type df_or_dict: pandas.DataFrame or dict + :param column_id: it must be present in the pandas DataFrame or in all DataFrames in the dictionary. + It is not allowed to have NaN values in this column. + :type column_id: basestring or None + :param column_sort: if not None, sort the rows by this column. It is not allowed to + have NaN values in this column. + :type column_sort: basestring or None + :param column_kind: It can only be used when passing a pandas DataFrame (the dictionary is already assumed to be + grouped by the kind). Is must be present in the DataFrame and no NaN values are allowed. + If the kind column is not passed, it is assumed that each column in the pandas DataFrame (except the id or + sort column) is a possible kind. + :type column_kind: basestring or None + + :return: The data frame or dictionary of data frames with a column "id" added + :rtype: the one from df_or_dict + """ + + if isinstance(df_or_dict, dict): + if column_kind is not None: + raise ValueError("You passed in a dictionary and gave a column name for the kind. 
Both are not possible.") + + return {key: add_sub_time_series_index(df_or_dict=df_or_dict[key], + sub_length=sub_length, + column_id=column_id, + column_sort=column_sort, + column_kind=column_kind) + for key in df_or_dict} + + df = df_or_dict + + grouper = [] + + if column_id is not None: + grouper.append(column_id) + if column_kind is not None: + grouper.append(column_kind) + + def _add_id_column(df_chunk): + chunk_length = len(df_chunk) + last_chunk_number = chunk_length // sub_length + reminder = chunk_length % sub_length + + indices = np.concatenate([np.repeat(np.arange(last_chunk_number), sub_length), + np.repeat(last_chunk_number, reminder)]) + assert(len(indices) == chunk_length) + + if column_id: + indices = [str(id) + "," + str(old_id) for id, old_id in zip(indices, df_chunk[column_id])] + + if column_sort: + df_chunk = df_chunk.sort_values(column_sort) + + df_chunk["id"] = indices + + return df_chunk + + if grouper: + df = df.groupby(grouper).apply(_add_id_column) + else: + df = _add_id_column(df) + + if column_sort: + df = df.sort_values(column_sort) + + df = df.set_index(df.index.get_level_values(-1)) + + return df
blue-yonder/tsfresh
c819de8e56680dfb1b66db6bcdd18fa4607b22b9
diff --git a/tests/units/utilities/test_dataframe_functions.py b/tests/units/utilities/test_dataframe_functions.py index 72646d6..a7a5705 100644 --- a/tests/units/utilities/test_dataframe_functions.py +++ b/tests/units/utilities/test_dataframe_functions.py @@ -9,8 +9,6 @@ from tsfresh.utilities import dataframe_functions import numpy as np from pandas.testing import assert_frame_equal, assert_series_equal -from tsfresh.utilities.dataframe_functions import get_ids - class NormalizeTestCase(TestCase): def test_with_dictionaries_one_row(self): @@ -775,13 +773,81 @@ class GetIDsTestCase(TestCase): def test_get_id__correct_DataFrame(self): df = pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}) - self.assertEqual(get_ids(df, "id"), {1, 2}) + self.assertEqual(dataframe_functions.get_ids(df, "id"), {1, 2}) def test_get_id__correct_dict(self): df_dict = {"a": pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}), "b": pd.DataFrame({"_value": [5, 6, 7, 8, 12, 13], "id": [4, 4, 3, 3, 2, 2]})} - self.assertEqual(get_ids(df_dict, "id"), {1, 2, 3, 4}) + self.assertEqual(dataframe_functions.get_ids(df_dict, "id"), {1, 2, 3, 4}) def test_get_id_wrong(self): other_type = np.array([1, 2, 3]) - self.assertRaises(TypeError, get_ids, other_type, "id") + self.assertRaises(TypeError, dataframe_functions.get_ids, other_type, "id") + + +class AddSubIdTestCase(TestCase): + def test_no_parameters(self): + dataframe = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8, 9]}) + extended_dataframe = dataframe_functions.add_sub_time_series_index(dataframe, 2) + + self.assertEqual(list(extended_dataframe["id"]), [0, 0, 1, 1, 2, 2, 3, 3, 4]) + assert_series_equal(dataframe["value"], extended_dataframe["value"]) + + def test_id_parameters(self): + dataframe = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8, 9], + "id": [1, 1, 1, 1, 2, 2, 2, 2, 2]}) + + extended_dataframe = dataframe_functions.add_sub_time_series_index(dataframe, 2, column_id="id") + + self.assertEqual(list(extended_dataframe["id"]), + ["0,1", "0,1", "1,1", "1,1", "0,2", "0,2", "1,2", "1,2", "2,2"]) + assert_series_equal(dataframe["value"], extended_dataframe["value"]) + + def test_kind_parameters(self): + dataframe = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8, 9], + "id": [1, 1, 1, 1, 2, 2, 2, 2, 2], + "kind": [0, 1, 0, 1, 0, 1, 0, 1, 0]}) + + extended_dataframe = dataframe_functions.add_sub_time_series_index(dataframe, 2, + column_id="id", + column_kind="kind") + + self.assertEqual(list(extended_dataframe["id"]), + ["0,1", "0,1", "0,1", "0,1", "0,2", "0,2", "0,2", "0,2", "1,2"]) + assert_series_equal(dataframe["value"], extended_dataframe["value"]) + assert_series_equal(dataframe["kind"], extended_dataframe["kind"]) + + def test_sort_parameters(self): + dataframe = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8, 9], + "id": [1, 1, 1, 1, 2, 2, 2, 2, 2], + "kind": [0, 1, 0, 1, 0, 1, 0, 1, 0], + "sort": [9, 8, 7, 6, 5, 4, 3, 2, 1]}) + + extended_dataframe = dataframe_functions.add_sub_time_series_index(dataframe, 2, + column_id="id", + column_kind="kind", + column_sort="sort") + + self.assertEqual(list(extended_dataframe["id"]), + ["0,2", "0,2", "0,2", "0,2", "1,2", "0,1", "0,1", "0,1", "0,1"]) + self.assertEqual(list(extended_dataframe["value"]), + [9, 8, 7, 6, 5, 4, 3, 2, 1]) + self.assertEqual(list(extended_dataframe["kind"]), + [0, 1, 0, 1, 0, 1, 0, 1, 0]) + self.assertEqual(list(extended_dataframe["sort"]), + [1, 2, 3, 4, 5, 6, 7, 8, 9]) + + def test_dict_input(self): + dataframe = pd.DataFrame({"value": [1, 2, 3, 
4, 5, 6, 7, 8, 9], + "id": [1, 1, 1, 1, 2, 2, 2, 2, 2]}) + + extended_dataframe = dataframe_functions.add_sub_time_series_index({"1": dataframe}, 2, + column_id="id") + + self.assertIn("1", extended_dataframe) + + extended_dataframe = extended_dataframe["1"] + + self.assertEqual(list(extended_dataframe["id"]), + ["0,1", "0,1", "1,1", "1,1", "0,2", "0,2", "1,2", "1,2", "2,2"]) + assert_series_equal(dataframe["value"], extended_dataframe["value"])
Generate sub-series from a time series, to extract all possible features Using tsfresh (for example the roll function): is it possible to generate features as a matrix X, containing in each row all the possible features of every sub-series of the time-series data? ![image](https://user-images.githubusercontent.com/31764213/74353604-74612200-4dba-11ea-9885-05360aa9ee5e.png)
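A usage sketch of the `add_sub_time_series_index` helper introduced by the patch above, mirroring the expectations in the accompanying tests:
```
import pandas as pd
from tsfresh.utilities.dataframe_functions import add_sub_time_series_index

# A length-9 series cut into sub-packages of length 2 yields five packages:
# ids 0,0 / 1,1 / 2,2 / 3,3 / 4 (the remainder forms the last, shorter one).
df = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
extended = add_sub_time_series_index(df, sub_length=2)
print(list(extended["id"]))  # [0, 0, 1, 1, 2, 2, 3, 3, 4]
```
The new `id` column can then be fed to tsfresh's usual feature extraction (e.g. `extract_features(extended, column_id="id")`) to get one feature row per sub-series, which is the matrix X the issue asks for.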
0.0
c819de8e56680dfb1b66db6bcdd18fa4607b22b9
[ "tests/units/utilities/test_dataframe_functions.py::AddSubIdTestCase::test_id_parameters", "tests/units/utilities/test_dataframe_functions.py::AddSubIdTestCase::test_dict_input", "tests/units/utilities/test_dataframe_functions.py::AddSubIdTestCase::test_no_parameters", "tests/units/utilities/test_dataframe_functions.py::AddSubIdTestCase::test_sort_parameters" ]
[ "tests/units/utilities/test_dataframe_functions.py::GetRangeValuesPerColumnTestCase::test_range_values_correct_with_uneven_length", "tests/units/utilities/test_dataframe_functions.py::GetRangeValuesPerColumnTestCase::test_range_values_correct_with_even_length", "tests/units/utilities/test_dataframe_functions.py::GetIDsTestCase::test_get_id_wrong", "tests/units/utilities/test_dataframe_functions.py::RestrictTestCase::test_restrict_wrong", "tests/units/utilities/test_dataframe_functions.py::GetIDsTestCase::test_get_id__correct_dict", "tests/units/utilities/test_dataframe_functions.py::RestrictTestCase::test_restrict_dict", "tests/units/utilities/test_dataframe_functions.py::GetIDsTestCase::test_get_id__correct_DataFrame", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_with_df_1", "tests/units/utilities/test_dataframe_functions.py::RestrictTestCase::test_restrict_dataframe", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_with_df_2", "tests/units/utilities/test_dataframe_functions.py::RollingTestCase::test_dict_rolling_maxshift_1", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_with_df_3", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_wide_dataframe_order_preserved_with_sort_column", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_with_dictionaries_two_rows_sorted", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_wide_dataframe_order_preserved", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_with_dictionaries_one_row", "tests/units/utilities/test_dataframe_functions.py::NormalizeTestCase::test_with_dictionaries_two_rows" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-04-13 07:08:23+00:00
mit
1,414
botblox__botblox-manager-software-36
diff --git a/manager/cli.py b/manager/cli.py index 75a07e1..446b919 100644 --- a/manager/cli.py +++ b/manager/cli.py @@ -87,7 +87,7 @@ def create_parser() -> argparse.ArgumentParser: 'vlan', help='Configure the ports to be in VLAN groups', ) - vlan_parser_group = vlan_parser.add_mutually_exclusive_group() + vlan_parser_group = vlan_parser.add_mutually_exclusive_group(required=True) vlan_parser_group.add_argument( '-g', '--group', @@ -97,8 +97,11 @@ def create_parser() -> argparse.ArgumentParser: choices=[1, 2, 3, 4, 5], required=False, help='''Define the VLAN member groups using port number, - i.e. --group 1 2 --group 3 4 puts makes Group A have - ports 1 and 2, and Group B have ports 3 and 4''' + i.e. --group 1 2 --group 3 4 makes Group A have + ports 1 and 2 and Group B have ports 3 and 4. All unmentioned + ports are assigned to default group. If a group has only 1 port, + the port gets isolated. In this example, port 5 would + not be allowed to communicate with any other port.''' ) vlan_parser_group.add_argument( '-r', diff --git a/manager/data_manager/vlan.py b/manager/data_manager/vlan.py index fcd52bc..3a5a353 100644 --- a/manager/data_manager/vlan.py +++ b/manager/data_manager/vlan.py @@ -19,7 +19,7 @@ class VlanConfig: 'sys_default': 0xFF, 'data': 0, 'choice_mapping': { - 1: 0b00000010, + 1: 0b00000100, } }, 2: { @@ -31,7 +31,7 @@ class VlanConfig: 'sys_default': 0xFF, 'data': 0, 'choice_mapping': { - 2: 0b00000100, + 2: 0b00001000, } }, 3: { @@ -43,7 +43,7 @@ class VlanConfig: 'sys_default': 0xFF, 'data': 0, 'choice_mapping': { - 3: 0b00001000, + 3: 0b00010000, } }, 4: {
botblox/botblox-manager-software
4592b63a22f90c1daec2b950f80b996ebb3cfc38
diff --git a/tests/vlan/test_set_groups.py b/tests/vlan/test_set_groups.py index 6a6a89c..ac1bb76 100644 --- a/tests/vlan/test_set_groups.py +++ b/tests/vlan/test_set_groups.py @@ -8,7 +8,7 @@ from typing import ( ) -class TestSetRxMode: +class TestSetGroups: package: List[str] = ['botblox'] base_args: List[str] = [ '--device', @@ -45,7 +45,7 @@ class TestSetRxMode: cli_status_code: int = subprocess.call(command) assert cli_status_code > 0, 'The command did not exit with an error code' - def test_single_rx_port( + def test_2x2_groups_1_isolated( self, parser: ArgumentParser, ) -> None: @@ -61,5 +61,5 @@ class TestSetRxMode: data = self._get_data_from_cli_args(parser=parser, args=args) self._assert_data_is_correct_type(data=data) - expected_result = [[23, 16, 6, 6], [23, 17, 72, 0], [23, 18, 72, 255]] + expected_result = [[23, 16, 12, 12], [23, 17, 80, 0], [23, 18, 80, 255]] assert data == expected_result
Switch not switching after setting up port-based VLAN I've set up port-based VLAN using the following command: python3.8 -m manager -D /dev/ttyACM0 vlan --group 1 2 --group 3 4 5 After this command, devices connected to ports 1 and 2 are not able to communicate. When I reset the VLAN config, communication works without problems. I also tried other ports, but it seems that after setting any VLAN config, all ports that are part of a VLAN group are unable to communicate. Their LEDs blink, but no data passes through the switch. I'll try to investigate this in more depth.
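The fix shifts each per-port bitmask up by one bit (port 1: `0b00000010` → `0b00000100`, and so on), which suggests the configuration bytes addressed the wrong bit for every port. A small sketch of the mapping implied by the diff — the exact offset is inferred from the patch, not from the switch IC documentation:
```
def port_mask(port, fixed=True):
    """Bitmask for a front-panel port; the patched mapping is 1 << (port + 1)."""
    return 1 << (port + 1) if fixed else 1 << port

for port in (1, 2, 3):
    print(port, bin(port_mask(port, fixed=False)), "->", bin(port_mask(port)))
# 1 0b10   -> 0b100
# 2 0b100  -> 0b1000
# 3 0b1000 -> 0b10000
```
With the old masks, the VLAN membership registers enabled the wrong ports, which matches the observed symptom: LEDs blink (link is up) but no frames are forwarded between grouped ports.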
0.0
4592b63a22f90c1daec2b950f80b996ebb3cfc38
[ "tests/vlan/test_set_groups.py::TestSetGroups::test_2x2_groups_1_isolated" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-04-08 20:33:26+00:00
mit
1,415
botify-labs__simpleflow-275
diff --git a/simpleflow/utils/json_tools.py b/simpleflow/utils/json_tools.py index affa6117..14dd8c61 100644 --- a/simpleflow/utils/json_tools.py +++ b/simpleflow/utils/json_tools.py @@ -34,6 +34,8 @@ def _serialize_complex_object(obj): return str(obj) elif isinstance(obj, lazy_object_proxy.Proxy): return str(obj) + elif isinstance(obj, (set, frozenset)): + return list(obj) raise TypeError( "Type %s couldn't be serialized. This is a bug in simpleflow," " please file a new issue on GitHub!" % type(obj))
botify-labs/simpleflow
9462e9e4eca3ad4f668401e79ad1d16683e8b297
diff --git a/tests/test_simpleflow/utils/test_json_dumps.py b/tests/test_simpleflow/utils/test_json_dumps.py index 49c9d09e..31e8b7df 100644 --- a/tests/test_simpleflow/utils/test_json_dumps.py +++ b/tests/test_simpleflow/utils/test_json_dumps.py @@ -1,4 +1,5 @@ import datetime +import json import unittest import pytz @@ -78,6 +79,20 @@ class TestJsonDumps(unittest.TestCase): actual = json_dumps(data) self.assertEqual(expected, actual) + def test_set(self): + data = [ + {1, 2, 3}, + frozenset([-1, -2, -3]), + ] + expected = [ + [1, 2, 3], + [-1, -2, -3], + ] + actual = json_dumps(data) + actual = json.loads(actual) + self.assertEqual(sorted(expected[0]), sorted(actual[0])) + self.assertEqual(sorted(expected[1]), sorted(actual[1])) + if __name__ == '__main__': unittest.main()
json_dumps: handle sets Like `ujson` does, we should dump sets as lists.
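A standalone sketch of the technique the patch applies: extending the serializer's fallback hook so `set` and `frozenset` become lists, matching `ujson`'s behavior. Set iteration order is not guaranteed, which is why the accompanying test sorts both sides before comparing:
```
import json

def _serialize_complex_object(obj):
    # Fallback for types the stock encoder rejects; trimmed to the new branch.
    if isinstance(obj, (set, frozenset)):
        return list(obj)
    raise TypeError("Type %s couldn't be serialized" % type(obj))

print(json.dumps([{1, 2, 3}, frozenset([-1, -2])],
                 default=_serialize_complex_object))
# e.g. [[1, 2, 3], [-1, -2]] (element order within each list may vary)
```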
0.0
9462e9e4eca3ad4f668401e79ad1d16683e8b297
[ "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_set" ]
[ "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_bugfix_154_default", "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_default", "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_json_dumps_basics", "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_json_dumps_futures", "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_json_dumps_pretty", "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_json_non_compact", "tests/test_simpleflow/utils/test_json_dumps.py::TestJsonDumps::test_proxy" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2017-07-05 10:10:48+00:00
mit
1,416
bottlepy__bottle-1232
diff --git a/bottle.py b/bottle.py index df867c1..45e85d1 100755 --- a/bottle.py +++ b/bottle.py @@ -474,10 +474,7 @@ class Router(object): verb = environ['REQUEST_METHOD'].upper() path = environ['PATH_INFO'] or '/' - if verb == 'HEAD': - methods = ['PROXY', verb, 'GET', 'ANY'] - else: - methods = ['PROXY', verb, 'ANY'] + methods = ('PROXY', 'HEAD', 'GET', 'ANY') if verb == 'HEAD' else ('PROXY', verb, 'ANY') for method in methods: if method in self.static and path in self.static[method]: @@ -575,7 +572,7 @@ class Route(object): callback = plugin(callback) except RouteReset: # Try again with changed configuration. return self._make_callback() - if not callback is self.callback: + if callback is not self.callback: update_wrapper(callback, self.callback) return callback @@ -2425,7 +2422,7 @@ class ConfigDict(dict): for section in conf.sections(): for key in conf.options(section): value = conf.get(section, key) - if section not in ['bottle', 'ROOT']: + if section not in ('bottle', 'ROOT'): key = section + '.' + key self[key.lower()] = value return self @@ -2626,7 +2623,7 @@ class AppStack(list): class WSGIFileWrapper(object): def __init__(self, fp, buffer_size=1024 * 64): self.fp, self.buffer_size = fp, buffer_size - for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): + for attr in 'fileno', 'close', 'read', 'readlines', 'tell', 'seek': if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) def __iter__(self): @@ -2719,7 +2716,7 @@ class ResourceManager(object): """ Search for a resource and return an absolute file path, or `None`. The :attr:`path` list is searched in order. The first match is - returend. Symlinks are followed. The result is cached to speed up + returned. Symlinks are followed. The result is cached to speed up future lookups. """ if name not in self.cache or DEBUG: for path in self.path: @@ -2881,7 +2878,7 @@ def static_file(filename, root, root = os.path.join(os.path.abspath(root), '') filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) - headers = headers or {} + headers = headers.copy() if headers else {} if not filename.startswith(root): return HTTPError(403, "Access denied.") @@ -3054,7 +3051,7 @@ def _parse_http_header(h): def _parse_qsl(qs): r = [] - for pair in qs.replace(';', '&').split('&'): + for pair in qs.split('&'): if not pair: continue nv = pair.split('=', 1) if len(nv) != 2: nv.append('') @@ -3231,8 +3228,8 @@ class ServerAdapter(object): pass def __repr__(self): - args = ', '.join(['%s=%s' % (k, repr(v)) - for k, v in self.options.items()]) + args = ', '.join('%s=%s' % (k, repr(v)) + for k, v in self.options.items()) return "%s(%s)" % (self.__class__.__name__, args) @@ -3517,7 +3514,7 @@ class BjoernServer(ServerAdapter): def run(self, handler): from bjoern import run - run(handler, self.host, self.port) + run(handler, self.host, self.port, reuse_port=True) class AsyncioServerAdapter(ServerAdapter): """ Extend ServerAdapter for adding custom event loop """ @@ -4034,7 +4031,7 @@ class StplParser(object): # This huge pile of voodoo magic splits python code into 8 different tokens. # We use the verbose (?x) regex mode to make this more manageable - _re_tok = _re_inl = r'''( + _re_tok = r'''( [urbURB]* (?: ''(?!') |""(?!") diff --git a/docs/changelog.rst b/docs/changelog.rst index a8f000c..34887e7 100755 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -12,7 +12,7 @@ Release 0.13 .. 
rubric:: Dropped support for Python 2.5, 2.6, 3.1, 3.2 and 3.3 -These three Python versions are no longer maintained by the Python Software Foundation and reached their end of life a long time ago. Keeping up support for ancient Python versions hinders adaptation of new features and serves no real purpose. Even Debian 7 (wheezy) and Ubuntu 12.4 (precise), both outdated, ship with Python 2.7.3 and 3.2.3 already. For this reason, we decided to drop support for Python 2.5, 2.6, 3.1, 3.2 and 3.1. The updated list of tested and supported python releases is as follows: +These five Python versions are no longer maintained by the Python Software Foundation and reached their end of life a long time ago. Keeping up support for ancient Python versions hinders adaptation of new features and serves no real purpose. Even Debian 7 (wheezy) and Ubuntu 12.04 (precise), both outdated, ship with Python 2.7.3 and 3.2.3 already. For this reason, we decided to drop support for Python 2.5, 2.6, 3.1, 3.2 and 3.3. The updated list of tested and supported python releases is as follows: * Python 2.7 (>= 2.7.3) * Python 3.4 diff --git a/docs/plugins/index.rst b/docs/plugins/index.rst index 1db65da..ada5833 100644 --- a/docs/plugins/index.rst +++ b/docs/plugins/index.rst @@ -14,6 +14,9 @@ Have a look at :ref:`plugins` for general questions about plugins (installation, `Bottle-Cork <http://cork.firelet.net/>`_ Cork provides a simple set of methods to implement Authentication and Authorization in web applications based on Bottle. +`Bottle-Cors-plugin <http://pypi.org/project/bottle-cors-plugin/>`_ + Cors-plugin is the easiest way to implement cors on your bottle web application + `Bottle-Extras <http://pypi.python.org/pypi/bottle-extras/>`_ Meta package to install the bottle plugin collection. @@ -61,7 +64,7 @@ Have a look at :ref:`plugins` for general questions about plugins (installation, `bottle-jwt <https://github.com/agile4you/bottle-jwt/>`_ JSON Web Token authentication plugin for bottle.py - + `Bottle-jwt <https://github.com/agalera/bottlejwt>`_ JWT integration for bottle @@ -76,4 +79,3 @@ Have a look at :ref:`plugins` for general questions about plugins (installation, Plugins listed here are not part of Bottle or the Bottle project, but developed and maintained by third parties. - diff --git a/docs/recipes.rst b/docs/recipes.rst index cfda5ba..c1c3b0d 100755 --- a/docs/recipes.rst +++ b/docs/recipes.rst @@ -1,6 +1,6 @@ .. module:: bottle -.. _beaker: http://beaker.groovie.org/ +.. _beaker: https://beaker.readthedocs.io/en/latest/ .. _mod_python: http://www.modpython.org/ .. _mod_wsgi: http://code.google.com/p/modwsgi/ .. 
_werkzeug: http://werkzeug.pocoo.org/documentation/dev/debug.html diff --git a/setup.py b/setup.py index b0f5685..11b531c 100755 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ import sys from setuptools import setup if sys.version_info < (2, 7): - raise NotImplementedError("Sorry, you need at least Python 2.7 or Python 3.4+ to use bottle.") + raise NotImplementedError("Sorry, you need at least Python 2.7 or Python 3.6+ to use bottle.") import bottle @@ -33,9 +33,9 @@ setup(name='bottle', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], ) diff --git a/tox.ini b/tox.ini index 2d19901..a1b674b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,py34,py35,py36,py37,py27-most +envlist = py27,py36,py37,py38,py39,py27-most [testenv] deps=Mako
bottlepy/bottle
8294b30341f08da30b10d3d35aadb2fb13beda00
diff --git a/test/test_mount.py b/test/test_mount.py index 582c087..1fbef40 100644 --- a/test/test_mount.py +++ b/test/test_mount.py @@ -6,12 +6,17 @@ class TestAppMounting(ServerTestBase): def setUp(self): ServerTestBase.setUp(self) self.subapp = bottle.Bottle() - @self.subapp.route('') + @self.subapp.route('/') - @self.subapp.route('/test/:test') + @self.subapp.route('/test/<test>') def test(test='foo'): return test + def test_mount_unicode_path_bug602(self): + self.app.mount('/mount/', self.subapp) + self.assertBody('äöü', '/mount/test/äöü') + self.app.route('/route/<param>', callback=lambda param: param) + self.assertBody('äöü', '/route/äöü') def test_mount_order_bug581(self): self.app.mount('/test/', self.subapp) diff --git a/test/test_sendfile.py b/test/test_sendfile.py index 622d992..d6693c2 100755 --- a/test/test_sendfile.py +++ b/test/test_sendfile.py @@ -142,3 +142,13 @@ class TestSendFile(unittest.TestCase): self.assertEqual([(10, 100)], r('bytes=10-')) self.assertEqual([(5, 11)], r('bytes=5-10')) self.assertEqual([(10, 100), (90, 100), (5, 11)], r('bytes=10-,-10,5-10')) + + def test_custom_headers(self): + """ SendFile: Custom headers """ + headers = {'X-Custom-Header': 'test-value'} + headers_orig = headers.copy() + res = static_file(basename, root=root, headers=headers) + self.assertTrue('X-Custom-Header' in res.headers) + self.assertEqual('test-value', res.headers['X-Custom-Header']) + # Check the passed in headers dict isn't modified. + self.assertEqual(headers_orig, headers) diff --git a/test/test_server.py b/test/test_server.py index 0a48e41..b7732e0 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -53,13 +53,13 @@ class TestServer(unittest.TestCase): rv = self.p.poll() if rv is None: raise AssertionError("Server took too long to start up.") - if rv is 128: # Import error + if rv == 128: # Import error if os.environ.get('CI') != 'true' or \ os.environ.get('TRAVIS_PYTHON_VERSION') not in ('2.7', '3.6'): tools.warn("Skipping %r test (ImportError)." % self.server) self.skip = True return - if rv is 3: # Port in use + if rv == 3: # Port in use continue raise AssertionError("Server exited with error code %d" % rv) raise AssertionError("Could not find a free port to test server.")
SyntaxWarning in Python 3.8 tests due to using `is` to compare literals

Comparison of literals using "is" raises a SyntaxWarning from Python 3.8 onwards. Use `==` to fix the warnings. This is a good beginner issue.

```
test/test_server.py:56
  /home/runner/work/bottle/bottle/test/test_server.py:56: SyntaxWarning: "is" with a literal. Did you mean "=="?
    if rv is 128: # Import error

test/test_server.py:62
  /home/runner/work/bottle/bottle/test/test_server.py:62: SyntaxWarning: "is" with a literal. Did you mean "=="?
    if rv is 3: # Port in use
```
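To make the warning concrete, here is a minimal runnable sketch of the pattern being flagged (the variable name echoes the test code quoted above; nothing else is taken from bottle):

```python
rv = 128

# Compiling this module on Python 3.8+ emits:
#   SyntaxWarning: "is" with a literal. Did you mean "=="?
# "is" tests object identity, which only appears to work for small ints
# because CPython caches them -- an implementation detail, not a contract.
if rv is 128:
    print("matched by identity (fragile)")

# The fix applied in the patch: compare by value instead.
if rv == 128:
    print("matched by value")
```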
0.0
8294b30341f08da30b10d3d35aadb2fb13beda00
[ "test/test_sendfile.py::TestSendFile::test_custom_headers" ]
[ "test/test_mount.py::TestAppMounting::test_mount", "test/test_mount.py::TestAppMounting::test_mount_cookie", "test/test_mount.py::TestAppMounting::test_mount_get_url", "test/test_mount.py::TestAppMounting::test_mount_json_bug", "test/test_mount.py::TestAppMounting::test_mount_meta", "test/test_mount.py::TestAppMounting::test_mount_no_plugins", "test/test_mount.py::TestAppMounting::test_mount_order_bug581", "test/test_mount.py::TestAppMounting::test_mount_unicode_path_bug602", "test/test_mount.py::TestAppMounting::test_mount_wsgi", "test/test_mount.py::TestAppMounting::test_mount_wsgi_ctype_bug", "test/test_mount.py::TestAppMounting::test_no_slash_prefix", "test/test_mount.py::TestAppMerging::test_merge", "test/test_sendfile.py::TestDateParser::test_asctime", "test/test_sendfile.py::TestDateParser::test_bad", "test/test_sendfile.py::TestDateParser::test_rfc1123", "test/test_sendfile.py::TestDateParser::test_rfc850", "test/test_sendfile.py::TestSendFile::test_download", "test/test_sendfile.py::TestSendFile::test_etag", "test/test_sendfile.py::TestSendFile::test_file_not_readable", "test/test_sendfile.py::TestSendFile::test_ims", "test/test_sendfile.py::TestSendFile::test_invalid", "test/test_sendfile.py::TestSendFile::test_mime", "test/test_sendfile.py::TestSendFile::test_range", "test/test_sendfile.py::TestSendFile::test_range_parser", "test/test_sendfile.py::TestSendFile::test_valid", "test/test_server.py::TestServer::test_simple", "test/test_server.py::TestServerAdapter_cherrypy::test_simple", "test/test_server.py::TestServerAdapter_waitress::test_simple", "test/test_server.py::TestServerAdapter_cheroot::test_simple", "test/test_server.py::TestServerAdapter_meinheld::test_simple", "test/test_server.py::TestServerAdapter_eventlet::test_simple", "test/test_server.py::TestServerAdapter_twisted::test_simple", "test/test_server.py::TestServerAdapter_aiohttp::test_simple", "test/test_server.py::TestServerAdapter_paste::test_simple", "test/test_server.py::TestServerAdapter_tornado::test_simple", "test/test_server.py::TestServerAdapter_auto::test_simple", "test/test_server.py::TestServerAdapter_uvloop::test_simple", "test/test_server.py::TestServerAdapter_gunicorn::test_simple" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-05-10 04:15:22+00:00
mit
1,417
boutproject__xBOUT-190
diff --git a/xbout/load.py b/xbout/load.py index d28f1ed..1e43587 100644 --- a/xbout/load.py +++ b/xbout/load.py @@ -175,7 +175,26 @@ def open_boutdataset( ds = _add_options(ds, inputfilepath) # If geometry was set, apply geometry again - if "geometry" in ds.attrs: + if geometry is not None: + if "geometry" != ds.attrs.get("geometry", None): + warn( + f'open_boutdataset() called with geometry="{geometry}", but we are ' + f"reloading a Dataset that was saved after being loaded with " + f'geometry="{ds.attrs.get("geometry", None)}". Applying ' + f'geometry="{geometry}" from the argument.' + ) + if gridfilepath is not None: + grid = _open_grid( + gridfilepath, + chunks=chunks, + keep_xboundaries=keep_xboundaries, + keep_yboundaries=keep_yboundaries, + mxg=ds.metadata["MXG"], + ) + else: + grid = None + ds = geometries.apply_geometry(ds, geometry, grid=grid) + elif "geometry" in ds.attrs: ds = geometries.apply_geometry(ds, ds.attrs["geometry"]) else: ds = geometries.apply_geometry(ds, None)
boutproject/xBOUT
fa3ab8b1d737e6a8ee7a071f73748000cbaa92ff
diff --git a/xbout/tests/test_boutdataset.py b/xbout/tests/test_boutdataset.py index 2587e17..15b0526 100644 --- a/xbout/tests/test_boutdataset.py +++ b/xbout/tests/test_boutdataset.py @@ -1378,20 +1378,12 @@ class TestSave: @pytest.mark.parametrize("geometry", [None, "toroidal"]) def test_reload_all(self, tmpdir_factory, bout_xyt_example_files, geometry): - if geometry is not None: - grid = "grid" - else: - grid = None - # Create data path = bout_xyt_example_files( - tmpdir_factory, nxpe=4, nype=5, nt=1, grid=grid, write_to_disk=True + tmpdir_factory, nxpe=4, nype=5, nt=1, grid="grid", write_to_disk=True ) - if grid is not None: - gridpath = str(Path(path).parent) + "/grid.nc" - else: - gridpath = None + gridpath = str(Path(path).parent) + "/grid.nc" # Load it as a boutdataset if geometry is None: @@ -1400,14 +1392,14 @@ class TestSave: datapath=path, inputfilepath=None, geometry=geometry, - gridfilepath=gridpath, + gridfilepath=None if geometry is None else gridpath, ) else: original = open_boutdataset( datapath=path, inputfilepath=None, geometry=geometry, - gridfilepath=gridpath, + gridfilepath=None if geometry is None else gridpath, ) # Save it to a netCDF file @@ -1419,6 +1411,25 @@ class TestSave: xrt.assert_identical(original.load(), recovered.load()) + # Check if we can load with a different geometry argument + for reload_geometry in [None, "toroidal"]: + if reload_geometry is None or geometry == reload_geometry: + recovered = open_boutdataset( + savepath, + geometry=reload_geometry, + gridfilepath=None if reload_geometry is None else gridpath, + ) + xrt.assert_identical(original.load(), recovered.load()) + else: + # Expect a warning because we change the geometry + print("here", gridpath) + with pytest.warns(UserWarning): + recovered = open_boutdataset( + savepath, geometry=reload_geometry, gridfilepath=gridpath + ) + # Datasets won't be exactly the same because different geometry was + # applied + @pytest.mark.parametrize("save_dtype", [np.float64, np.float32]) @pytest.mark.parametrize( "separate_vars", [False, pytest.param(True, marks=pytest.mark.long)]
`geometry` ignored when reloading an xBOUT-saved Dataset

open_boutdataset() requires the option geometry="toroidal", etc. to put the zShift array in the coordinates. Otherwise, further manipulations of the data, such as bout.to_field_aligned(), are prevented.
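A minimal sketch of the affected workflow, with hypothetical file and variable names (`geometry` and `gridfilepath` are real `open_boutdataset()` parameters, as the patch above shows):

```python
from xbout import open_boutdataset

# Reload a Dataset that was previously saved by xBOUT. Before the fix,
# the geometry argument was ignored on reload, so zShift never made it
# into the coordinates.
ds = open_boutdataset(
    "boutdata.nc",           # hypothetical path to the saved Dataset
    geometry="toroidal",
    gridfilepath="grid.nc",  # hypothetical grid file
)

# As reported in the issue, this kind of operation needs zShift in the
# coordinates and fails without it ("n" is a stand-in variable name).
aligned = ds["n"].bout.to_field_aligned()
```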
0.0
fa3ab8b1d737e6a8ee7a071f73748000cbaa92ff
[ "xbout/tests/test_boutdataset.py::TestSave::test_reload_all[None]" ]
[ "xbout/tests/test_boutdataset.py::TestBoutDatasetIsXarrayDataset::test_concat", "xbout/tests/test_boutdataset.py::TestBoutDatasetIsXarrayDataset::test_isel", "xbout/tests/test_boutdataset.py::TestBoutDatasetMethods::test_remove_yboundaries[2-0]", "xbout/tests/test_boutdataset.py::TestBoutDatasetMethods::test_set_parallel_interpolation_factor", "xbout/tests/test_boutdataset.py::TestBoutDatasetMethods::test_interpolate_from_unstructured", "xbout/tests/test_boutdataset.py::TestBoutDatasetMethods::test_interpolate_from_unstructured_unstructured_output", "xbout/tests/test_boutdataset.py::TestSave::test_save_all", "xbout/tests/test_boutdataset.py::TestSave::test_reload_all[toroidal]", "xbout/tests/test_boutdataset.py::TestSave::test_save_dtype[False-float64]", "xbout/tests/test_boutdataset.py::TestSave::test_save_dtype[False-float32]", "xbout/tests/test_boutdataset.py::TestSave::test_save_separate_variables", "xbout/tests/test_boutdataset.py::TestSave::test_reload_separate_variables[None]", "xbout/tests/test_boutdataset.py::TestSave::test_reload_separate_variables[toroidal]", "xbout/tests/test_boutdataset.py::TestSave::test_reload_separate_variables_time_split[None]", "xbout/tests/test_boutdataset.py::TestSave::test_reload_separate_variables_time_split[toroidal]", "xbout/tests/test_boutdataset.py::TestSaveRestart::test_to_restart[None]", "xbout/tests/test_boutdataset.py::TestSaveRestart::test_to_restart_change_npe" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2021-04-08 13:07:58+00:00
apache-2.0
1,418
boutproject__xBOUT-85
diff --git a/README.md b/README.md index 4b159c3..e96ce8c 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,38 @@ for common BOUT++ analysis and plotting tasks. Currently only in alpha (until 1.0 released) so please report any bugs, and feel free to raise issues asking questions or making suggestions. + +### Installation + +xBOUT is not currently on pip or conda. Therefore to install xBOUT on +your system you must first clone the repository using: + +```bash +git clone [email protected]:boutproject/xBOUT.git +``` +or +```bash +git clone https://github.com/boutproject/xBOUT.git +``` + +Once cloned navigate to the xBOUT directory and run the following command: + +```bash +pip3 install --user ./ +``` +or +```bash +python3 setup.py install +``` + +You can run the tests by navigating to the `/xBOUT/` directory and +entering `pytest`. + +xBOUT requires other python packages, which will be installed when you +run one of the above install commands if they are not already installed on +your system. + + ### Loading your data The function `open_boutdataset()` uses xarray & dask to collect BOUT++ @@ -93,24 +125,6 @@ There is included an example of a extra calculated quantities which are specific to the STORM module. - -### Installation - -Currently not on pip or conda, so you will need to clone this repo and -install using `python setup.py` -You can run the tests by navigating to the `/xBOUT/` directory and -entering `pytest`. - -Requires xarray v0.12.2 or later. - -You will also need to install [dask](https://dask.org/), -as described in the xarray documentation -[here](http://xarray.pydata.org/en/stable/installing.html#for-parallel-computing), -as well as [natsort](https://github.com/SethMMorton/natsort) -and [animatplot](https://github.com/t-makaro/animatplot). - - - ### Contributing Feel free to raise issues about anything, or submit pull requests, diff --git a/xbout/boutdataset.py b/xbout/boutdataset.py index ce85d23..bf267d0 100644 --- a/xbout/boutdataset.py +++ b/xbout/boutdataset.py @@ -169,7 +169,7 @@ class BoutDatasetAccessor: def animate_list(self, variables, animate_over='t', save_as=None, show=False, fps=10, nrows=None, ncols=None, poloidal_plot=False, subplots_adjust=None, - vmin=None, vmax=None, logscale=None, **kwargs): + vmin=None, vmax=None, logscale=None, titles=None, **kwargs): """ Parameters ---------- @@ -207,6 +207,9 @@ class BoutDatasetAccessor: linthresh=min(abs(vmin),abs(vmax))*logscale, defaults to 1e-5 if True is passed. Per variable if sequence is given. + titles : sequence of str or None, optional + Custom titles for each plot. Pass None in the sequence to use the default for + a certain variable **kwargs : dict, optional Additional keyword arguments are passed on to each animation function """ @@ -248,12 +251,14 @@ class BoutDatasetAccessor: vmin = _expand_list_arg(vmin, 'vmin') vmax = _expand_list_arg(vmax, 'vmax') logscale = _expand_list_arg(logscale, 'logscale') + titles = _expand_list_arg(titles, 'titles') blocks = [] for subplot_args in zip(variables, axes, poloidal_plot, vmin, vmax, - logscale): + logscale, titles): - v, ax, this_poloidal_plot, this_vmin, this_vmax, this_logscale = subplot_args + (v, ax, this_poloidal_plot, this_vmin, this_vmax, this_logscale, + this_title) = subplot_args divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.1) @@ -296,6 +301,10 @@ class BoutDatasetAccessor: raise ValueError("Unsupported number of dimensions " + str(ndims) + ". 
Dims are " + str(v.dims)) + if this_title is not None: + # Replace default title with user-specified one + ax.set_title(this_title) + timeline = amp.Timeline(np.arange(v.sizes[animate_over]), fps=fps) anim = amp.Animation(blocks, timeline) anim.controls(timeline_slider_args={'text': animate_over}) diff --git a/xbout/load.py b/xbout/load.py index 4d8a2e3..52ce633 100644 --- a/xbout/load.py +++ b/xbout/load.py @@ -40,7 +40,7 @@ except ValueError: def open_boutdataset(datapath='./BOUT.dmp.*.nc', inputfilepath=None, geometry=None, gridfilepath=None, chunks={}, keep_xboundaries=True, keep_yboundaries=False, - run_name=None, info=True): + run_name=None, info=True, drop_variables=None): """ Load a dataset from a set of BOUT output files, including the input options file. Can also load from a grid file. @@ -85,6 +85,9 @@ def open_boutdataset(datapath='./BOUT.dmp.*.nc', inputfilepath=None, Useful if you are going to open multiple simulations and compare the results. info : bool, optional + drop_variables : sequence, optional + Drop variables in this list before merging. Allows user to ignore variables from + a particular physics model that are not consistent between processors. Returns ------- @@ -98,7 +101,8 @@ def open_boutdataset(datapath='./BOUT.dmp.*.nc', inputfilepath=None, # Gather pointers to all numerical data from BOUT++ output files ds = _auto_open_mfboutdataset(datapath=datapath, chunks=chunks, keep_xboundaries=keep_xboundaries, - keep_yboundaries=keep_yboundaries) + keep_yboundaries=keep_yboundaries, + drop_variables=drop_variables) else: # Its a grid file ds = _open_grid(datapath, chunks=chunks, @@ -245,7 +249,12 @@ def collect(varname, xind=None, yind=None, zind=None, tind=None, if selection: ds = ds.isel(selection) - return ds[varname].values + result = ds[varname].values + + # Close netCDF files to ensure they are not locked if collect is called again + ds.close() + + return result def _is_dump_files(datapath): @@ -266,7 +275,8 @@ def _is_dump_files(datapath): def _auto_open_mfboutdataset(datapath, chunks={}, info=True, - keep_xboundaries=False, keep_yboundaries=False): + keep_xboundaries=False, keep_yboundaries=False, + drop_variables=None): filepaths, filetype = _expand_filepaths(datapath) # Open just one file to read processor splitting @@ -277,7 +287,7 @@ def _auto_open_mfboutdataset(datapath, chunks={}, info=True, _preprocess = partial(_trim, guards={'x': mxg, 'y': myg}, keep_boundaries={'x': keep_xboundaries, 'y': keep_yboundaries}, - nxpe=nxpe, nype=nype) + nxpe=nxpe, nype=nype, drop_variables=drop_variables) ds = xr.open_mfdataset(paths_grid, concat_dim=concat_dims, combine='nested', data_vars='minimal', preprocess=_preprocess, engine=filetype, @@ -313,7 +323,7 @@ def _expand_wildcards(path): """Return list of filepaths matching wildcard""" # Find first parent directory which does not contain a wildcard - base_dir = next(parent for parent in path.parents if '*' not in str(parent)) + base_dir = Path(path.root) # Find path relative to parent search_pattern = str(path.relative_to(base_dir)) @@ -424,7 +434,7 @@ def _arrange_for_concatenation(filepaths, nxpe=1, nype=1): return paths_grid, concat_dims -def _trim(ds, *, guards, keep_boundaries, nxpe, nype): +def _trim(ds, *, guards, keep_boundaries, nxpe, nype, drop_variables): """ Trims all guard (and optionally boundary) cells off a single dataset read from a single BOUT dump file, to prepare for concatenation. 
@@ -462,6 +472,9 @@ def _trim(ds, *, guards, keep_boundaries, nxpe, nype): selection[dim] = slice(lower, upper) trimmed_ds = ds.isel(**selection) + if drop_variables is not None: + trimmed_ds = trimmed_ds.drop(drop_variables, errors='ignore') + # Ignore FieldPerps for now for name in trimmed_ds: if (trimmed_ds[name].dims == ('x', 'z')
boutproject/xBOUT
36fb6e0bb8cc7f63d2e4c700905a30e0364d27f9
diff --git a/xbout/tests/test_animate.py b/xbout/tests/test_animate.py index 689e7b2..d2a935f 100644 --- a/xbout/tests/test_animate.py +++ b/xbout/tests/test_animate.py @@ -286,3 +286,18 @@ class TestAnimate: assert isinstance(animation.blocks[0], Pcolormesh) assert isinstance(animation.blocks[1], Pcolormesh) assert isinstance(animation.blocks[2], Line) + + def test_animate_list_titles_list(self, create_test_file): + + save_dir, ds = create_test_file + + animation = ds.isel(z=3).bout.animate_list(['n', ds['T'].isel(x=2), + ds['n'].isel(y=1, z=2)], + titles=['a', None, 'b']) + + assert isinstance(animation.blocks[0], Pcolormesh) + assert animation.blocks[0].ax.title.get_text() == 'a' + assert isinstance(animation.blocks[1], Pcolormesh) + assert animation.blocks[1].ax.title.get_text() == 'T' + assert isinstance(animation.blocks[2], Line) + assert animation.blocks[2].ax.title.get_text() == 'b' diff --git a/xbout/tests/test_load.py b/xbout/tests/test_load.py index c9bd19e..99a62ab 100644 --- a/xbout/tests/test_load.py +++ b/xbout/tests/test_load.py @@ -70,6 +70,25 @@ class TestPathHandling: assert actual_filepaths == expected_filepaths + @pytest.mark.parametrize("ii, jj", [(1, 1), (1, 4), (3, 1), (5, 3), (1, 12), + (3, 111)]) + def test_glob_expansion_brackets(self, tmpdir, ii, jj): + files_dir = tmpdir.mkdir("data") + filepaths = [] + for i in range(ii): + example_run_dir = files_dir.mkdir('run' + str(i)) + for j in range(jj): + example_file = example_run_dir.join('example.' + str(j) + '.nc') + example_file.write("content") + filepaths.append(Path(str(example_file))) + expected_filepaths = natsorted(filepaths, + key=lambda filepath: str(filepath)) + + path = Path(str(files_dir.join('run[1-9]/example.*.nc'))) + actual_filepaths = _expand_wildcards(path) + + assert actual_filepaths == expected_filepaths[jj:] + def test_no_files(self, tmpdir): files_dir = tmpdir.mkdir("data") @@ -482,6 +501,15 @@ class TestOpen: save_dir = tmpdir_factory.mktemp('data') actual.bout.save(str(save_dir.join('boutdata.nc'))) + def test_drop_vars(self, tmpdir_factory, bout_xyt_example_files): + path = bout_xyt_example_files(tmpdir_factory, nxpe=4, nype=1, nt=1, + syn_data_type='stepped') + ds = open_boutdataset(datapath=path, keep_xboundaries=False, + drop_variables=['T']) + + assert 'T' not in ds.keys() + assert 'n' in ds.keys() + @pytest.mark.skip def test_combine_along_tx(self): ... 
@@ -596,7 +624,7 @@ class TestTrim: # Manually add filename - encoding normally added by xr.open_dataset ds.encoding['source'] = 'folder0/BOUT.dmp.0.nc' actual = _trim(ds, guards={}, keep_boundaries={}, nxpe=1, - nype=1) + nype=1, drop_variables=None) xrt.assert_equal(actual, ds) def test_trim_guards(self): @@ -604,7 +632,7 @@ class TestTrim: # Manually add filename - encoding normally added by xr.open_dataset ds.encoding['source'] = 'folder0/BOUT.dmp.0.nc' actual = _trim(ds, guards={'time': 2}, keep_boundaries={}, - nxpe=1, nype=1) + nxpe=1, nype=1, drop_variables=None) selection = {'time': slice(2, -2)} expected = ds.isel(**selection) xrt.assert_equal(expected, actual) @@ -727,7 +755,8 @@ class TestTrim: ds['jyseps2_1'] = 8 ds['jyseps1_2'] = 8 - actual = _trim(ds, guards={'x': 2}, keep_boundaries={'x': True}, nxpe=1, nype=1) + actual = _trim(ds, guards={'x': 2}, keep_boundaries={'x': True}, nxpe=1, nype=1, + drop_variables=None) expected = ds # Should be unchanged xrt.assert_equal(expected, actual) @@ -741,7 +770,8 @@ class TestTrim: ds['jyseps2_1'] = 8 ds['jyseps1_2'] = 8 - actual = _trim(ds, guards={'y': 2}, keep_boundaries={'y': True}, nxpe=1, nype=1) + actual = _trim(ds, guards={'y': 2}, keep_boundaries={'y': True}, nxpe=1, nype=1, + drop_variables=None) expected = ds # Should be unchanged xrt.assert_equal(expected, actual) @@ -762,7 +792,8 @@ class TestTrim: ds['ny_inner'] = 8 ds['MYSUB'] = 4 - actual = _trim(ds, guards={'y': 2}, keep_boundaries={'y': True}, nxpe=1, nype=4) + actual = _trim(ds, guards={'y': 2}, keep_boundaries={'y': True}, nxpe=1, nype=4, + drop_variables=None) expected = ds # Should be unchanged if not lower: expected = expected.isel(y=slice(2, None, None)) @@ -780,7 +811,8 @@ class TestTrim: for v in _BOUT_PER_PROC_VARIABLES: ds[v] = 42. - ds = _trim(ds, guards={}, keep_boundaries={}, nxpe=1, nype=1) + ds = _trim(ds, guards={}, keep_boundaries={}, nxpe=1, nype=1, + drop_variables=None) expected = create_test_data(0) xrt.assert_equal(ds, expected)
Add installation instructions

Installation instructions are missing from the README -- add them to the top.
0.0
36fb6e0bb8cc7f63d2e4c700905a30e0364d27f9
[ "xbout/tests/test_animate.py::TestAnimate::test_animate1D", "xbout/tests/test_animate.py::TestAnimate::test_animate_list_not_enough_nrowsncols", "xbout/tests/test_load.py::test_check_extensions", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_single", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[1-1]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[1-4]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[3-1]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[5-3]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[12-1]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[1-12]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[121-2]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_both[3-111]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_brackets[1-1]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_brackets[1-4]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_brackets[3-1]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_brackets[5-3]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_brackets[1-12]", "xbout/tests/test_load.py::TestPathHandling::test_glob_expansion_brackets[3-111]", "xbout/tests/test_load.py::TestPathHandling::test_no_files", "xbout/tests/test_load.py::TestArrange::test_arrange_single", "xbout/tests/test_load.py::TestArrange::test_arrange_along_x", "xbout/tests/test_load.py::TestArrange::test_arrange_along_y", "xbout/tests/test_load.py::TestArrange::test_arrange_along_t", "xbout/tests/test_load.py::TestArrange::test_arrange_along_xy", "xbout/tests/test_load.py::TestArrange::test_arrange_along_xt", "xbout/tests/test_load.py::TestArrange::test_arrange_along_xyt", "xbout/tests/test_load.py::TestStripMetadata::test_strip_metadata", "xbout/tests/test_load.py::TestOpen::test_single_file", "xbout/tests/test_load.py::TestOpen::test_squashed_file", "xbout/tests/test_load.py::TestOpen::test_combine_along_x", "xbout/tests/test_load.py::TestOpen::test_combine_along_y", "xbout/tests/test_load.py::TestOpen::test_combine_along_xy", "xbout/tests/test_load.py::TestOpen::test_toroidal", "xbout/tests/test_load.py::TestOpen::test_salpha", "xbout/tests/test_load.py::TestOpen::test_drop_vars", "xbout/tests/test_load.py::TestTrim::test_no_trim", "xbout/tests/test_load.py::TestTrim::test_trim_guards", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-0-1-1-lower_boundaries0-upper_boundaries0]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-0-3-1-lower_boundaries1-upper_boundaries1]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[1-0-3-1-lower_boundaries2-upper_boundaries2]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[2-0-3-1-lower_boundaries3-upper_boundaries3]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-0-1-3-lower_boundaries4-upper_boundaries4]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-1-1-3-lower_boundaries5-upper_boundaries5]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-2-1-3-lower_boundaries6-upper_boundaries6]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-0-3-4-lower_boundaries7-upper_boundaries7]", 
"xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[2-0-3-4-lower_boundaries8-upper_boundaries8]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-3-3-4-lower_boundaries9-upper_boundaries9]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[2-3-3-4-lower_boundaries10-upper_boundaries10]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[1-2-3-4-lower_boundaries11-upper_boundaries11]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[0-2-3-4-lower_boundaries12-upper_boundaries12]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[2-2-3-4-lower_boundaries13-upper_boundaries13]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[1-0-3-4-lower_boundaries14-upper_boundaries14]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization[1-3-3-4-lower_boundaries15-upper_boundaries15]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-0-1-4-lower_boundaries0-upper_boundaries0]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-1-1-4-lower_boundaries1-upper_boundaries1]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-2-1-4-lower_boundaries2-upper_boundaries2]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-3-1-4-lower_boundaries3-upper_boundaries3]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-0-3-4-lower_boundaries4-upper_boundaries4]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[1-0-3-4-lower_boundaries5-upper_boundaries5]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[2-0-3-4-lower_boundaries6-upper_boundaries6]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-1-3-4-lower_boundaries7-upper_boundaries7]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[1-1-3-4-lower_boundaries8-upper_boundaries8]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[2-1-3-4-lower_boundaries9-upper_boundaries9]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-2-3-4-lower_boundaries10-upper_boundaries10]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[1-2-3-4-lower_boundaries11-upper_boundaries11]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[2-2-3-4-lower_boundaries12-upper_boundaries12]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[0-3-3-4-lower_boundaries13-upper_boundaries13]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[1-3-3-4-lower_boundaries14-upper_boundaries14]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull[2-3-3-4-lower_boundaries15-upper_boundaries15]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-0-1-1-lower_boundaries0-upper_boundaries0]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-0-3-1-lower_boundaries1-upper_boundaries1]", 
"xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[1-0-3-1-lower_boundaries2-upper_boundaries2]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[2-0-3-1-lower_boundaries3-upper_boundaries3]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-0-1-3-lower_boundaries4-upper_boundaries4]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-1-1-3-lower_boundaries5-upper_boundaries5]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-2-1-3-lower_boundaries6-upper_boundaries6]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-0-3-4-lower_boundaries7-upper_boundaries7]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[2-0-3-4-lower_boundaries8-upper_boundaries8]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-3-3-4-lower_boundaries9-upper_boundaries9]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[2-3-3-4-lower_boundaries10-upper_boundaries10]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[1-2-3-4-lower_boundaries11-upper_boundaries11]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[0-2-3-4-lower_boundaries12-upper_boundaries12]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[2-2-3-4-lower_boundaries13-upper_boundaries13]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[1-0-3-4-lower_boundaries14-upper_boundaries14]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_by_filenum[1-3-3-4-lower_boundaries15-upper_boundaries15]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-0-1-4-lower_boundaries0-upper_boundaries0]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-1-1-4-lower_boundaries1-upper_boundaries1]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-2-1-4-lower_boundaries2-upper_boundaries2]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-3-1-4-lower_boundaries3-upper_boundaries3]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-0-3-4-lower_boundaries4-upper_boundaries4]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[1-0-3-4-lower_boundaries5-upper_boundaries5]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[2-0-3-4-lower_boundaries6-upper_boundaries6]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-1-3-4-lower_boundaries7-upper_boundaries7]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[1-1-3-4-lower_boundaries8-upper_boundaries8]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[2-1-3-4-lower_boundaries9-upper_boundaries9]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-2-3-4-lower_boundaries10-upper_boundaries10]", 
"xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[1-2-3-4-lower_boundaries11-upper_boundaries11]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[2-2-3-4-lower_boundaries12-upper_boundaries12]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[0-3-3-4-lower_boundaries13-upper_boundaries13]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[1-3-3-4-lower_boundaries14-upper_boundaries14]", "xbout/tests/test_load.py::TestTrim::test_infer_boundaries_2d_parallelization_doublenull_by_filenum[2-3-3-4-lower_boundaries15-upper_boundaries15]", "xbout/tests/test_load.py::TestTrim::test_keep_xboundaries", "xbout/tests/test_load.py::TestTrim::test_keep_yboundaries", "xbout/tests/test_load.py::TestTrim::test_keep_yboundaries_doublenull_by_filenum[0-True-False]", "xbout/tests/test_load.py::TestTrim::test_keep_yboundaries_doublenull_by_filenum[1-False-True]", "xbout/tests/test_load.py::TestTrim::test_keep_yboundaries_doublenull_by_filenum[2-True-False]", "xbout/tests/test_load.py::TestTrim::test_keep_yboundaries_doublenull_by_filenum[3-False-True]", "xbout/tests/test_load.py::TestTrim::test_trim_timing_info" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-12-12 15:25:24+00:00
apache-2.0
1,419
box__box-python-sdk-343
diff --git a/boxsdk/config.py b/boxsdk/config.py index 203cfc2..371117d 100644 --- a/boxsdk/config.py +++ b/boxsdk/config.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals, absolute_import +from sys import version_info as py_version + from . import version @@ -17,3 +19,9 @@ class Client(object): """Configuration object containing the user agent string.""" VERSION = version.__version__ USER_AGENT_STRING = 'box-python-sdk-{0}'.format(VERSION) + BOX_UA_STRING = 'agent=box-python-sdk/{0}; env=python/{1}.{2}.{3}'.format( + VERSION, + py_version.major, + py_version.minor, + py_version.micro, + ) diff --git a/boxsdk/session/session.py b/boxsdk/session/session.py index e9c0b14..d8a41ba 100644 --- a/boxsdk/session/session.py +++ b/boxsdk/session/session.py @@ -64,7 +64,10 @@ def __init__( self._client_config = client_config or Client() super(Session, self).__init__() self._network_layer = network_layer or DefaultNetwork() - self._default_headers = {'User-Agent': self._client_config.USER_AGENT_STRING} + self._default_headers = { + 'User-Agent': self._client_config.USER_AGENT_STRING, + 'X-Box-UA': self._client_config.BOX_UA_STRING, + } self._translator = translator self._default_network_request_kwargs = {} if default_headers:
box/box-python-sdk
567ea528c8e757fe77399b21a68ccfc94341b68b
diff --git a/test/integration/test_as_user.py b/test/integration/test_as_user.py index 45cff68..372de7d 100644 --- a/test/integration/test_as_user.py +++ b/test/integration/test_as_user.py @@ -14,6 +14,7 @@ def as_user_headers(mock_user_id, access_token): 'Authorization': 'Bearer {0}'.format(access_token), 'As-User': mock_user_id, 'User-Agent': Client.USER_AGENT_STRING, + 'X-Box-UA': Client.BOX_UA_STRING, } diff --git a/test/integration/test_retry_and_refresh.py b/test/integration/test_retry_and_refresh.py index 057ed43..f27858c 100644 --- a/test/integration/test_retry_and_refresh.py +++ b/test/integration/test_retry_and_refresh.py @@ -29,7 +29,7 @@ def test_automatic_refresh( 'POST', '{0}/token'.format(API.OAUTH2_API_URL), data=ANY, - headers={'content-type': 'application/x-www-form-urlencoded', 'User-Agent': ANY}, + headers={'content-type': 'application/x-www-form-urlencoded', 'User-Agent': ANY, 'X-Box-UA': ANY}, ), call( 'GET', diff --git a/test/integration/test_with_shared_link.py b/test/integration/test_with_shared_link.py index c4a74d9..6d2af45 100644 --- a/test/integration/test_with_shared_link.py +++ b/test/integration/test_with_shared_link.py @@ -26,6 +26,7 @@ def box_api_headers(shared_link, shared_link_password, access_token): 'Authorization': 'Bearer {0}'.format(access_token), 'BoxApi': box_api_header, 'User-Agent': Client.USER_AGENT_STRING, + 'X-Box-UA': Client.BOX_UA_STRING, }
Add Box SDK headers to API calls

We should definitely send headers for the SDK name and SDK version. Other nice-to-haves to consider:
- Python version
- Requests version
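A sketch of the header the accepted patch ends up constructing (the SDK version below is a stand-in; the real code reads it from `boxsdk.version`):

```python
import sys

SDK_VERSION = "2.0.0"  # stand-in; boxsdk uses version.__version__

# Analytics string combining SDK name/version with the Python runtime,
# mirroring BOX_UA_STRING in boxsdk.config.Client.
box_ua = "agent=box-python-sdk/{0}; env=python/{1}.{2}.{3}".format(
    SDK_VERSION,
    sys.version_info.major,
    sys.version_info.minor,
    sys.version_info.micro,
)

# The session then sends this on every request next to User-Agent.
headers = {
    "User-Agent": "box-python-sdk-{0}".format(SDK_VERSION),
    "X-Box-UA": box_ua,
}
print(headers)
```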
0.0
567ea528c8e757fe77399b21a68ccfc94341b68b
[ "test/integration/test_as_user.py::test_client_as_user_causes_as_user_header_to_be_added", "test/integration/test_as_user.py::test_folder_object_as_user_causes_as_user_header_to_be_added", "test/integration/test_as_user.py::test_group_membership_object_as_user_causes_as_user_header_to_be_added", "test/integration/test_as_user.py::test_events_endpoint_as_user_causes_as_user_header_to_be_added", "test/integration/test_as_user.py::test_metadata_endpoint_as_user_causes_as_user_header_to_be_added", "test/integration/test_retry_and_refresh.py::test_automatic_refresh", "test/integration/test_with_shared_link.py::test_client_with_shared_link_causes_box_api_header_to_be_added[None]", "test/integration/test_with_shared_link.py::test_client_with_shared_link_causes_box_api_header_to_be_added[shared_link_password]", "test/integration/test_with_shared_link.py::test_folder_object_with_shared_link_causes_box_api_header_to_be_added[None]", "test/integration/test_with_shared_link.py::test_folder_object_with_shared_link_causes_box_api_header_to_be_added[shared_link_password]", "test/integration/test_with_shared_link.py::test_group_membership_object_with_shared_link_causes_box_api_header_to_be_added[None]", "test/integration/test_with_shared_link.py::test_group_membership_object_with_shared_link_causes_box_api_header_to_be_added[shared_link_password]", "test/integration/test_with_shared_link.py::test_events_endpoint_with_shared_link_causes_box_api_header_to_be_added[None]", "test/integration/test_with_shared_link.py::test_events_endpoint_with_shared_link_causes_box_api_header_to_be_added[shared_link_password]", "test/integration/test_with_shared_link.py::test_metadata_endpoint_with_shared_link_causes_box_api_header_to_be_added[None]", "test/integration/test_with_shared_link.py::test_metadata_endpoint_with_shared_link_causes_box_api_header_to_be_added[shared_link_password]" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-09-28 05:06:45+00:00
apache-2.0
1,420
boxed__mutmut-170
diff --git a/mutmut/__init__.py b/mutmut/__init__.py index 4ee49c5..638b2b1 100644 --- a/mutmut/__init__.py +++ b/mutmut/__init__.py @@ -456,7 +456,7 @@ def should_exclude(context, config): covered_lines = config.covered_lines_by_filename[context.filename] except KeyError: if config.coverage_data is not None: - covered_lines = config.coverage_data.lines(os.path.abspath(context.filename)) + covered_lines = config.coverage_data.get(os.path.abspath(context.filename)) config.covered_lines_by_filename[context.filename] = covered_lines else: covered_lines = None @@ -688,7 +688,6 @@ def mutate_file(backup, context): def queue_mutants(*, progress, config, mutants_queue, mutations_by_file): - from mutmut.cache import update_line_numbers from mutmut.cache import get_cached_mutation_statuses try: @@ -938,7 +937,7 @@ class Progress(object): def check_coverage_data_filepaths(coverage_data): - for filepath in coverage_data._lines: + for filepath in coverage_data: if not os.path.exists(filepath): raise ValueError('Filepaths in .coverage not recognized, try recreating the .coverage file manually.') @@ -1160,7 +1159,8 @@ def read_coverage_data(): raise ImportError('The --use-coverage feature requires the coverage library. Run "pip install --force-reinstall mutmut[coverage]"') from e cov = Coverage('.coverage') cov.load() - return cov.get_data() + data = cov.get_data() + return {filepath: data.lines(filepath) for filepath in data.measured_files()} def read_patch_data(patch_file_path): diff --git a/setup.cfg b/setup.cfg index 91dd2f3..558f8e7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,6 +10,15 @@ addopts = --junitxml=testreport.xml --strict -r fEsxXw [flake8] ignore = E501,E721 +[coverage:run] +source = . +omit = + .tox/* + venv/* + /private/* + /tmp/* + setup.py + [coverage:report] exclude_lines = # Have to re-enable the standard pragma diff --git a/tox.ini b/tox.ini index 88f3095..ed1d86d 100644 --- a/tox.ini +++ b/tox.ini @@ -13,9 +13,7 @@ deps = basepython = python3.7 usedevelop = True commands = - {envpython} -m pytest --cov {posargs} - {envpython} -m coverage report -m - {envpython} -m coverage html + {envpython} -m pytest --cov --cov-config setup.cfg --cov-report term-missing --cov-report html {posargs} deps = -rrequirements.txt -rtest_requirements.txt
boxed/mutmut
cf9c02f618c16815fad433b05d0b45afd9f9967a
diff --git a/tests/test_main.py b/tests/test_main.py index 80fdd9b..9b46ae7 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import os -import re import subprocess import sys import xml.etree.ElementTree as ET @@ -155,7 +154,7 @@ def test_compute_return_code(): def test_read_coverage_data(filesystem): - assert isinstance(read_coverage_data(), CoverageData) + assert read_coverage_data() == {} @pytest.mark.parametrize( @@ -366,8 +365,7 @@ def test_full_run_all_suspicious_mutant_junit(filesystem): assert int(root.attrib['disabled']) == 0 [email protected]("TODO: fix support for coverage 5") -def test_use_coverage(capsys, filesystem): +def test_use_coverage(filesystem): with open(os.path.join(str(filesystem), "tests", "test_foo.py"), 'w') as f: f.write(test_file_contents.replace('assert foo(2, 2) is False\n', '')) @@ -394,16 +392,8 @@ def test_use_coverage(capsys, filesystem): assert result.exit_code == 0 assert '13/13 🎉 13 ⏰ 0 🤔 0 🙁 0' in repr(result.output) - # replace the .coverage file content with a non existent path to check if an exception is thrown - with open('.coverage', 'r') as f: - content = f.read() - - # the new path is linux-based, but it just needs to be wrong - new_content = re.sub(r'\"[\w\W][^{]*foo.py\"', '"/test_path/foo.py"', content) - - with open('.coverage', 'w') as f: - f.write(new_content) - + # remove existent path to check if an exception is thrown + os.unlink(os.path.join(str(filesystem), 'foo.py')) with pytest.raises(ValueError, match=r'^Filepaths in .coverage not recognized, try recreating the .coverage file manually.$'): CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0", "--use-coverage"],
Mutmut doesn't work with Coverage 5.0

I really should have tested this with the beta. I just forgot about this feature of mutmut. Sloppy!
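For context, a sketch of the API shift the patch adapts to: Coverage 5.0 moved to an SQLite-backed data store, dropping the private `_lines` mapping the old code iterated over, so mutmut now copies the covered lines into a plain dict up front. This mirrors `read_coverage_data()` in the patch above; the `.coverage` file is whatever a prior test run produced.

```python
from coverage import Coverage

# Load the coverage database written by a prior test run.
cov = Coverage(".coverage")
cov.load()
data = cov.get_data()

# measured_files() and lines() are stable across Coverage 4 and 5, so
# materialise everything into a backend-independent dict.
covered_lines_by_file = {
    filepath: data.lines(filepath) for filepath in data.measured_files()
}

for filepath, lines in covered_lines_by_file.items():
    print(filepath, lines)
```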
0.0
cf9c02f618c16815fad433b05d0b45afd9f9967a
[ "tests/test_main.py::test_read_coverage_data" ]
[ "tests/test_main.py::test_compute_return_code", "tests/test_main.py::test_python_source_files[expected0-foo.py-tests_dirs0]", "tests/test_main.py::test_python_source_files[expected1-.-tests_dirs1]", "tests/test_main.py::test_python_source_files[expected2-.-tests_dirs2]", "tests/test_main.py::test_python_source_files__with_paths_to_exclude", "tests/test_main.py::test_popen_streaming_output_timeout", "tests/test_main.py::test_popen_streaming_output_stream" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-05-17 13:33:00+00:00
bsd-3-clause
1,421
boxed__mutmut-192
diff --git a/mutmut/__init__.py b/mutmut/__init__.py index 78ad464..b77329a 100644 --- a/mutmut/__init__.py +++ b/mutmut/__init__.py @@ -910,8 +910,9 @@ def guess_paths_to_mutate(): class Progress(object): - def __init__(self, total): + def __init__(self, total, output_legend): self.total = total + self.output_legend = output_legend self.progress = 0 self.skipped = 0 self.killed_mutants = 0 @@ -920,7 +921,20 @@ class Progress(object): self.suspicious_mutants = 0 def print(self): - print_status('{}/{} 🎉 {} ⏰ {} 🤔 {} 🙁 {} 🔇 {}'.format(self.progress, self.total, self.killed_mutants, self.surviving_mutants_timeout, self.suspicious_mutants, self.surviving_mutants, self.skipped)) + print_status('{}/{} {} {} {} {} {} {} {} {} {} {}'.format( + self.progress, + self.total, + self.output_legend["killed"], + self.killed_mutants, + self.output_legend["timeout"], + self.surviving_mutants_timeout, + self.output_legend["suspicious"], + self.suspicious_mutants, + self.output_legend["survived"], + self.surviving_mutants, + self.output_legend["skipped"], + self.skipped) + ) def register(self, status): if status == BAD_SURVIVED: diff --git a/mutmut/__main__.py b/mutmut/__main__.py index 2136aee..43e6efc 100644 --- a/mutmut/__main__.py +++ b/mutmut/__main__.py @@ -100,6 +100,7 @@ DEFAULT_RUNNER = 'python -m pytest -x --assert=plain' @click.option('--untested-policy', type=click.Choice(['ignore', 'skipped', 'error', 'failure']), default='ignore') @click.option('--pre-mutation') @click.option('--post-mutation') [email protected]('--simple-output', is_flag=True, default=False, help="Swap emojis in mutmut output to plain text alternatives.") @config_from_setup_cfg( dict_synonyms='', paths_to_exclude='', @@ -113,7 +114,7 @@ def climain(command, argument, argument2, paths_to_mutate, backup, runner, tests test_time_multiplier, test_time_base, swallow_output, use_coverage, dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation, post_mutation, - use_patch_file, paths_to_exclude): + use_patch_file, paths_to_exclude, simple_output): """ commands:\n run [mutation id]\n @@ -137,14 +138,14 @@ commands:\n tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage, dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation, - post_mutation, use_patch_file, paths_to_exclude)) + post_mutation, use_patch_file, paths_to_exclude, simple_output)) def main(command, argument, argument2, paths_to_mutate, backup, runner, tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage, dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation, post_mutation, - use_patch_file, paths_to_exclude): + use_patch_file, paths_to_exclude, simple_output): """return exit code, after performing an mutation test run. :return: the exit code from executing the mutation tests @@ -223,6 +224,15 @@ def main(command, argument, argument2, paths_to_mutate, backup, runner, tests_di os.environ['PYTHONDONTWRITEBYTECODE'] = '1' # stop python from creating .pyc files using_testmon = '--testmon' in runner + output_legend = { + "killed": "🎉", + "timeout": "⏰", + "suspicious": "🤔", + "survived": "🙁", + "skipped": "🔇", + } + if simple_output: + output_legend = {key: key.upper() for (key, value) in output_legend.items()} print(""" - Mutation testing starting - @@ -237,12 +247,12 @@ Results are stored in .mutmut-cache. Print found mutants with `mutmut results`. Legend for output: -🎉 Killed mutants. The goal is for everything to end up in this bucket. 
-⏰ Timeout. Test suite took 10 times as long as the baseline so were killed. -🤔 Suspicious. Tests took a long time, but not long enough to be fatal. -🙁 Survived. This means your tests need to be expanded. -🔇 Skipped. Skipped. -""") +{killed} Killed mutants. The goal is for everything to end up in this bucket. +{timeout} Timeout. Test suite took 10 times as long as the baseline so were killed. +{suspicious} Suspicious. Tests took a long time, but not long enough to be fatal. +{survived} Survived. This means your tests need to be expanded. +{skipped} Skipped. Skipped. +""".format(**output_legend)) if runner is DEFAULT_RUNNER: try: import pytest @@ -309,7 +319,7 @@ Legend for output: print() print('2. Checking mutants') - progress = Progress(total=config.total) + progress = Progress(total=config.total, output_legend=output_legend) try: run_mutation_tests(config=config, progress=progress, mutations_by_file=mutations_by_file)
boxed/mutmut
795b39baba4f95c109e6a8be33c7a4d4ef87df49
diff --git a/tests/test_main.py b/tests/test_main.py index 459279d..219f2b9 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -109,7 +109,7 @@ def test_compute_return_code(): class MockProgress(Progress): def __init__(self, killed_mutants, surviving_mutants, surviving_mutants_timeout, suspicious_mutants): - super(MockProgress, self).__init__(total=0) + super(MockProgress, self).__init__(total=0, output_legend={}) self.killed_mutants = killed_mutants self.surviving_mutants = surviving_mutants self.surviving_mutants_timeout = surviving_mutants_timeout @@ -441,3 +441,9 @@ def test_pre_and_post_mutation_hook(single_mutant_filesystem, tmpdir): assert "pre mutation stub" in result.output assert "post mutation stub" in result.output assert result.output.index("pre mutation stub") < result.output.index("post mutation stub") + + +def test_simple_output(filesystem): + result = CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--simple-output"], catch_exceptions=False) + print(repr(result.output)) + assert '14/14 KILLED 14 TIMEOUT 0 SUSPICIOUS 0 SURVIVED 0 SKIPPED 0' in repr(result.output)
UTF-8 emoji icons fail to display in a PyCharm 2019.2.1 terminal on Windows

This was observed on a Windows 10 environment with PyCharm 2019.2.1. Running mutmut in the PyCharm native terminal fails to render the emojis indicating killed 🎉, suspicious 🤔, and surviving 🙁 mutants. For example:

```console
(venv) >mutmut run --paths-to-mutate my_project

- Mutation testing starting -

These are the steps:
1. A full test suite run will be made to make sure we can run the tests successfully and we know how long it takes (to detect infinite loops for example)
2. Mutants will be generated and checked

Results are stored in .mutmut-cache.
Print found mutants with `mutmut results`.

Legend for output:
� Killed mutants. The goal is for everything to end up in this bucket.
⏰ Timeout. Test suite took 10 times as long as the baseline so were killed.
� Suspicious. Tests took a long time, but not long enough to be fatal.
� Survived. This means your tests needs to be expanded.

mutmut cache is out of date, clearing it...
1. Running tests without mutations
...
```

Maybe we should simply adopt an ASCII text-based notation?
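A sketch of the plain-text fallback that the `--simple-output` flag (added by the patch in this record) introduces; the flag value is hard-coded here purely for illustration:

```python
# Default emoji legend used in mutmut's progress line.
output_legend = {
    "killed": "🎉",
    "timeout": "⏰",
    "suspicious": "🤔",
    "survived": "🙁",
    "skipped": "🔇",
}

simple_output = True  # normally set by the --simple-output CLI flag

# Swap each emoji for an upper-cased plain-text label that renders in
# any terminal encoding, e.g. 'KILLED 14 TIMEOUT 0 ...'.
if simple_output:
    output_legend = {key: key.upper() for key in output_legend}

print(output_legend)
# {'killed': 'KILLED', 'timeout': 'TIMEOUT', 'suspicious': 'SUSPICIOUS',
#  'survived': 'SURVIVED', 'skipped': 'SKIPPED'}
```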
0.0
795b39baba4f95c109e6a8be33c7a4d4ef87df49
[ "tests/test_main.py::test_compute_return_code" ]
[ "tests/test_main.py::test_read_coverage_data", "tests/test_main.py::test_python_source_files[expected0-foo.py-tests_dirs0]", "tests/test_main.py::test_python_source_files[expected1-.-tests_dirs1]", "tests/test_main.py::test_python_source_files[expected2-.-tests_dirs2]", "tests/test_main.py::test_python_source_files__with_paths_to_exclude", "tests/test_main.py::test_popen_streaming_output_timeout", "tests/test_main.py::test_popen_streaming_output_stream" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-12-11 01:52:25+00:00
bsd-3-clause
1,422
boxed__mutmut-226
diff --git a/HISTORY.rst b/HISTORY.rst index 1bd1287..03768a0 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -6,6 +6,8 @@ Changelog * Add `--disable-mutation-types` and `--enable-mutation-types` to control what types of mutations are performed +* Fixed error where ``mutmut_config.init()`` was not called when running without explicitly having set ``PYTHONPATH`` + 2.2.0 ~~~~~ diff --git a/mutmut/__init__.py b/mutmut/__init__.py index 71088c4..0d98509 100644 --- a/mutmut/__init__.py +++ b/mutmut/__init__.py @@ -35,6 +35,8 @@ from parso.python.tree import Name, Number, Keyword __version__ = '2.3.0' +if os.getcwd() not in sys.path: + sys.path.insert(0, os.getcwd()) try: import mutmut_config except ImportError: diff --git a/mutmut/__main__.py b/mutmut/__main__.py index 566521a..4b6fb74 100644 --- a/mutmut/__main__.py +++ b/mutmut/__main__.py @@ -48,9 +48,6 @@ from mutmut.cache import print_result_cache, print_result_ids_cache, \ from collections import namedtuple import re -if os.getcwd() not in sys.path: - sys.path.insert(0, os.getcwd()) - def do_apply(mutation_pk, dict_synonyms, backup): """Apply a specified mutant to the source code @@ -181,7 +178,7 @@ def main(command, argument, argument2, paths_to_mutate, disable_mutation_types, mutation_types_to_apply = set(mutations_by_type.keys()) invalid_types = None if invalid_types: - raise click.BadArgumentUsage(f"The following are not valid mutation types: {', '.join(invalid_types)}. Valid mutation types are: {', '.join(mutations_by_type.keys())}") + raise click.BadArgumentUsage(f"The following are not valid mutation types: {', '.join(sorted(invalid_types))}. Valid mutation types are: {', '.join(mutations_by_type.keys())}") valid_commands = ['run', 'results', 'result-ids', 'apply', 'show', 'junitxml', 'html'] if command not in valid_commands:
boxed/mutmut
f84912b045a57cad31585e4dc3f75d552a51db6f
diff --git a/tests/test_main.py b/tests/test_main.py index 04ec25a..09fda7f 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -506,7 +506,7 @@ def test_select_unknown_mutation_type(option): ] ) assert result.exception.code == 2 - assert f"The following are not valid mutation types: foo, bar. Valid mutation types are: {', '.join(mutations_by_type.keys())}" in result.output + assert f"The following are not valid mutation types: bar, foo. Valid mutation types are: {', '.join(mutations_by_type.keys())}" in result.output, result.output def test_enable_and_disable_mutation_type_are_exclusive(): diff --git a/tests/test_mutmut_config_hooks.py b/tests/test_mutmut_config_hooks.py new file mode 100644 index 0000000..629f323 --- /dev/null +++ b/tests/test_mutmut_config_hooks.py @@ -0,0 +1,57 @@ +import os +import sys +import subprocess + +import pytest + + [email protected] +def basic_filesystem(tmpdir): + source_file = tmpdir / "foo.py" + source_file.write("def add(a, b): return a + b") + tests_dir = tmpdir / "tests" + tests_dir.mkdir() + test_file = tests_dir / "test_foo.py" + test_file.write(""" +from foo import add + +def test_add(): + assert add(1, 1) == 2 +""") + mutmut_config_py = tmpdir / "mutmut_config.py" + mutmut_config_py.write(f""" +from pathlib import Path + +def init(): + Path("init_hook").touch() + +def pre_mutation(context): + Path("pre_mutation_hook").touch() + +def pre_mutation_ast(context): + Path("pre_mutation_ast_hook").touch() +""") + yield tmpdir + + [email protected] +def set_working_dir_and_path(basic_filesystem): + original_dir = os.path.abspath(os.getcwd()) + original_path = sys.path[:] + + os.chdir(basic_filesystem) + if str(basic_filesystem) in sys.path: + sys.path.remove(str(basic_filesystem)) + + yield basic_filesystem + + sys.path = original_path + os.chdir(original_dir) + + [email protected]("set_working_dir_and_path") +def test_hooks(basic_filesystem): + subprocess.check_output(["python", "-m", "mutmut", "run", "--paths-to-mutate=foo.py"]) + assert (basic_filesystem / "init_hook").exists(), "init was not called." + assert (basic_filesystem / "pre_mutation_hook").exists(), "pre_mutation was not called." + assert (basic_filesystem / "pre_mutation_ast_hook").exists(), "pre_mutation_ast was not called."
``mutmut_config.init()`` not called

I am having trouble getting my ``mutmut_config.py`` to do what I want. The content is:

```python
def init():
    from mutmut import mutations_by_type
    del mutations_by_type["string"]
    print(mutations_by_type)
```

When I run ``mutmut run``, ``init()`` is not called. What I could analyse so far:

* all the imports (from ``mutmut``'s ``__init__.py`` and ``__main__.py``) are done twice when running ``mutmut``: once when starting the program and once after the "2. Checking mutants" message
* at the first import, ``mutmut_config`` cannot be imported and is None. That is why ``init()`` is not called. Only after the second import can it be loaded, and things like ``pre_mutation(context)`` work as expected

My directory layout is:

```
(.venv) tracematrix [try-mutmut●●] % tree
.
├── CHANGELOG.md
├── LICENSE
├── README.md
├── mutmut_config.py
├── poetry.lock
├── pyproject.toml
├── src
│   └── tracematrix
│       ├── item.py
│       ├── matrix.py
│       └── reporters
│           ├── __init__.py
│           ├── base_reporter.py
│           ├── csv_reporter.py
│           ├── html_reporter.py
│           └── template.html
└── tests
```
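The fix in the patch above is to make the current working directory importable before the first ``import mutmut_config`` attempt, so the hook fires on the initial import rather than only after the second one. A condensed sketch of that import logic (the guarded ``init()`` call at the end is illustrative, not copied from mutmut):

```python
import os
import sys

# Ensure the project root (the CWD) is importable before the first
# attempt to import mutmut_config. Previously this happened only in
# __main__.py, after __init__.py had already tried and failed.
if os.getcwd() not in sys.path:
    sys.path.insert(0, os.getcwd())

try:
    import mutmut_config
except ImportError:
    mutmut_config = None

# With the path fixed, the user-defined init() hook can run up front.
if mutmut_config is not None and hasattr(mutmut_config, "init"):
    mutmut_config.init()
```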
0.0
f84912b045a57cad31585e4dc3f75d552a51db6f
[ "tests/test_main.py::test_select_unknown_mutation_type[--disable-mutation-types]" ]
[ "tests/test_main.py::test_compute_return_code", "tests/test_main.py::test_read_coverage_data", "tests/test_main.py::test_python_source_files[expected0-foo.py-tests_dirs0]", "tests/test_main.py::test_python_source_files[expected1-.-tests_dirs1]", "tests/test_main.py::test_python_source_files[expected2-.-tests_dirs2]", "tests/test_main.py::test_python_source_files__with_paths_to_exclude", "tests/test_main.py::test_popen_streaming_output_timeout", "tests/test_main.py::test_popen_streaming_output_stream", "tests/test_main.py::test_select_unknown_mutation_type[--enable-mutation-types]", "tests/test_main.py::test_enable_and_disable_mutation_type_are_exclusive", "tests/test_mutmut_config_hooks.py::test_hooks" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-09-15 17:36:52+00:00
bsd-3-clause
1,423