Dataset columns and value statistics:
- instance_id: string, length 10-57
- patch: string, length 261-37.7k
- repo: string, length 7-53
- base_commit: string, length 40 (git SHA)
- hints_text: string, 301 distinct values
- test_patch: string, length 212-2.22M
- problem_statement: string, length 23-37.7k
- version: string, 1 distinct value
- environment_setup_commit: string, length 40 (git SHA)
- FAIL_TO_PASS: sequence, length 1-4.94k
- PASS_TO_PASS: sequence, length 0-7.82k
- meta: dict
- created_at: string, length 25 (timestamp)
- license: string, 8 distinct values
- __index_level_0__: int64, range 0-6.41k
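Each row below pairs a repository snapshot (`repo`, `base_commit`, `environment_setup_commit`) with a code `patch`, a `test_patch`, and the test lists `FAIL_TO_PASS` / `PASS_TO_PASS`. The following is only a rough sketch of how those columns relate to one another; the record layout, the pre-existing local clone, and the use of `git apply` plus `pytest` are assumptions rather than anything this page specifies.

```python
import subprocess

def evaluate_record(record: dict, checkout_dir: str) -> bool:
    """Hypothetical sketch: apply one record's patches to an existing local
    clone of record['repo'] and run the tests listed in FAIL_TO_PASS."""
    # Reset the working tree to the record's base commit.
    subprocess.run(["git", "checkout", record["base_commit"]],
                   cwd=checkout_dir, check=True)
    # Apply the fix and the accompanying test changes from the record.
    for diff_text in (record["patch"], record["test_patch"]):
        subprocess.run(["git", "apply", "-"], input=diff_text,
                       text=True, cwd=checkout_dir, check=True)
    # Run only the tests the record expects to flip from failing to passing;
    # a zero exit status means they now pass.
    result = subprocess.run(
        ["python", "-m", "pytest", *record["FAIL_TO_PASS"]], cwd=checkout_dir)
    return result.returncode == 0
```

The `PASS_TO_PASS` tests could be run the same way to check that a patch does not break behaviour that already worked.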
weaveworks__grafanalib-584
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7b18f65..2da345a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,7 +2,6 @@ Changelog ========= - 0.x.x (?) ================== @@ -11,6 +10,7 @@ Changelog * Added Maximum option for Timeseries * Added Number of decimals displays option for Timeseries* Added Bar_Chart_ panel support * Extended SqlTarget to support parsing queries from files +* Fix AlertCondition backwards compatibility (``useNewAlerts`` default to ``False``) .. _Bar_Chart: basehttps://grafana.com/docs/grafana/latest/panels-visualizations/visualizations/bar-chart/ diff --git a/grafanalib/core.py b/grafanalib/core.py index e10552a..aeb1cb3 100644 --- a/grafanalib/core.py +++ b/grafanalib/core.py @@ -1197,6 +1197,9 @@ class AlertCondition(object): RTYPE_DIFF = 'diff' RTYPE_PERCENT_DIFF = 'percent_diff' RTYPE_COUNT_NON_NULL = 'count_non_null' + :param useNewAlerts: Whether or not the alert condition is used as part of the Grafana 8.x alerts. + Defaults to False for compatibility with old Grafana alerts, but automatically overridden to true + when used inside ``AlertExpression`` or ``AlertRulev8`` :param type: CTYPE_* """ @@ -1205,6 +1208,7 @@ class AlertCondition(object): timeRange = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(TimeRange))) operator = attr.ib(default=OP_AND) reducerType = attr.ib(default=RTYPE_LAST) + useNewAlerts = attr.ib(default=False) type = attr.ib(default=CTYPE_QUERY, kw_only=True) diff --git a/grafanalib/elasticsearch.py b/grafanalib/elasticsearch.py index a01c531..9726983 100644 --- a/grafanalib/elasticsearch.py +++ b/grafanalib/elasticsearch.py @@ -2,7 +2,7 @@ import attr import itertools -from attr.validators import instance_of +from attr.validators import in_, instance_of from grafanalib.core import AlertCondition DATE_HISTOGRAM_DEFAULT_FIELD = 'time_iso8601' @@ -498,3 +498,49 @@ class PercentilesMetricAgg(object): 'inlineScript': self.inline, 'settings': self.settings, } + + [email protected] +class RateMetricAgg(object): + """An aggregator that provides the rate of the values. + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-rate-aggregation.html + :param field: name of elasticsearch field to provide the sum over + :param hide: show/hide the metric in the final panel display + :param id: id of the metric + :param unit: calendar interval to group by + supported calendar intervals + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html#calendar_intervals + "minute" + "hour" + "day" + "week" + "month" + "quarter" + "year" + :param mode: sum or count the values + :param script: script to apply to the data, using '_value' + """ + + field = attr.ib(default="", validator=instance_of(str)) + id = attr.ib(default=0, validator=instance_of(int)) + hide = attr.ib(default=False, validator=instance_of(bool)) + unit = attr.ib(default="", validator=instance_of(str)) + mode = attr.ib(default="", validator=in_(["", "value_count", "sum"])) + script = attr.ib(default="", validator=instance_of(str)) + + def to_json_data(self): + self.settings = {} + + if self.mode: + self.settings["mode"] = self.mode + + if self.script: + self.settings["script"] = self.script + + return { + "id": str(self.id), + "hide": self.hide, + "field": self.field, + "settings": self.settings, + "type": "rate", + }
weaveworks/grafanalib
bfdae85a19048d2ea1f87c91b4b1207059807bbc
diff --git a/grafanalib/tests/test_core.py b/grafanalib/tests/test_core.py index f8178e8..2b03610 100644 --- a/grafanalib/tests/test_core.py +++ b/grafanalib/tests/test_core.py @@ -954,6 +954,33 @@ def test_alertfilefasedfrovisioning(): assert data['groups'] == groups +def test_alertCondition_useNewAlerts_default(): + alert_condition = G.AlertCondition( + G.Target(refId="A"), + G.Evaluator('a', 'b'), + G.TimeRange('5', '6'), + 'd', + 'e' + ) + data = alert_condition.to_json_data() + assert data['query']['model'] is not None + assert len(data['query']['params']) == 3 + + +def test_alertCondition_useNewAlerts_true(): + alert_condition = G.AlertCondition( + G.Target(refId="A"), + G.Evaluator('a', 'b'), + G.TimeRange('5', '6'), + 'd', + 'e', + useNewAlerts=True + ) + data = alert_condition.to_json_data() + assert 'model' not in data['query'] + assert len(data['query']['params']) == 1 + + def test_worldmap(): data_source = 'dummy data source' targets = ['dummy_prom_query']
AlertCondition object missing useNewAlerts attribute. Looking at https://github.com/weaveworks/grafanalib/blob/c9a77d481da8d76a91ad92efb1a155d6dae8a34d/grafanalib/core.py#L1192 and https://github.com/weaveworks/grafanalib/blob/c9a77d481da8d76a91ad92efb1a155d6dae8a34d/grafanalib/core.py#L1215, it's possible to see that self.useNewAlerts is used, but AlertExpression doesn't have this attribute.
0.0
bfdae85a19048d2ea1f87c91b4b1207059807bbc
[ "grafanalib/tests/test_core.py::test_alertCondition_useNewAlerts_default", "grafanalib/tests/test_core.py::test_alertCondition_useNewAlerts_true" ]
[ "grafanalib/tests/test_core.py::test_template_defaults", "grafanalib/tests/test_core.py::test_custom_template_ok", "grafanalib/tests/test_core.py::test_custom_template_dont_override_options", "grafanalib/tests/test_core.py::test_table", "grafanalib/tests/test_core.py::test_stat_no_repeat", "grafanalib/tests/test_core.py::test_Text_exception_checks", "grafanalib/tests/test_core.py::test_ePictBox", "grafanalib/tests/test_core.py::test_ePictBox_custom_symbole_logic", "grafanalib/tests/test_core.py::test_ePict", "grafanalib/tests/test_core.py::test_Text", "grafanalib/tests/test_core.py::test_DiscreteColorMappingItem_exception_checks", "grafanalib/tests/test_core.py::test_DiscreteColorMappingItem", "grafanalib/tests/test_core.py::test_Discrete_exceptions", "grafanalib/tests/test_core.py::test_Discrete", "grafanalib/tests/test_core.py::test_StatValueMappings_exception_checks", "grafanalib/tests/test_core.py::test_StatValueMappings", "grafanalib/tests/test_core.py::test_StatRangeMappings", "grafanalib/tests/test_core.py::test_StatMapping", "grafanalib/tests/test_core.py::test_stat_with_repeat", "grafanalib/tests/test_core.py::test_single_stat", "grafanalib/tests/test_core.py::test_dashboard_list", "grafanalib/tests/test_core.py::test_logs_panel", "grafanalib/tests/test_core.py::test_notification", "grafanalib/tests/test_core.py::test_graph_panel", "grafanalib/tests/test_core.py::test_panel_extra_json", "grafanalib/tests/test_core.py::test_graph_panel_threshold", "grafanalib/tests/test_core.py::test_graph_panel_alert", "grafanalib/tests/test_core.py::test_graph_threshold", "grafanalib/tests/test_core.py::test_graph_threshold_custom", "grafanalib/tests/test_core.py::test_alert_list", "grafanalib/tests/test_core.py::test_SeriesOverride_exception_checks", "grafanalib/tests/test_core.py::test_SeriesOverride", "grafanalib/tests/test_core.py::test_alert", "grafanalib/tests/test_core.py::test_alertgroup", "grafanalib/tests/test_core.py::test_alertrulev8", "grafanalib/tests/test_core.py::test_alertrule_invalid_triggers", "grafanalib/tests/test_core.py::test_alertrulev9", "grafanalib/tests/test_core.py::test_alertexpression", "grafanalib/tests/test_core.py::test_alertfilefasedfrovisioning", "grafanalib/tests/test_core.py::test_worldmap", "grafanalib/tests/test_core.py::test_stateTimeline", "grafanalib/tests/test_core.py::test_timeseries", "grafanalib/tests/test_core.py::test_timeseries_with_overrides", "grafanalib/tests/test_core.py::test_news", "grafanalib/tests/test_core.py::test_pieChartv2", "grafanalib/tests/test_core.py::test_histogram", "grafanalib/tests/test_core.py::test_ae3e_plotly", "grafanalib/tests/test_core.py::test_barchart", "grafanalib/tests/test_core.py::test_target_invalid", "grafanalib/tests/test_core.py::test_sql_target", "grafanalib/tests/test_core.py::test_sql_target_with_source_files" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-05-10 12:02:39+00:00
apache-2.0
6,235
weaveworks__kubediff-82
diff --git a/kubedifflib/_diff.py b/kubedifflib/_diff.py index 03a6337..e51b8f6 100644 --- a/kubedifflib/_diff.py +++ b/kubedifflib/_diff.py @@ -21,6 +21,12 @@ from ._kube import ( iter_files, ) +def mask(x): + """Turn a string into an equal-length string of asterisks""" + try: + return len(x) * '*' + except TypeError: # not a string - perhaps None - just return it as-is + return x class Difference(object): """An observed difference.""" @@ -32,7 +38,7 @@ class Difference(object): def to_text(self, kind=''): if 'secret' in kind.lower() and len(self.args) == 2: - message = self.message % ((len(self.args[0]) * '*'), (len(self.args[1]) * '*')) + message = self.message % (mask(self.args[0]), mask(self.args[1])) else: message = self.message % self.args
weaveworks/kubediff
42a6c302e06db06b5887e866e29d7d4a452a1d1e
diff --git a/kubedifflib/tests/test_diff.py b/kubedifflib/tests/test_diff.py index fa5a683..575fffb 100644 --- a/kubedifflib/tests/test_diff.py +++ b/kubedifflib/tests/test_diff.py @@ -2,10 +2,10 @@ import copy import random -from hypothesis import given +from hypothesis import given, example from hypothesis.strategies import integers, lists, text, fixed_dictionaries, sampled_from, none, one_of -from kubedifflib._diff import diff_lists, list_subtract +from kubedifflib._diff import diff_lists, list_subtract, Difference from kubedifflib._kube import KubeObject @@ -137,3 +137,16 @@ def test_from_dict_kubernetes_list_type(data): def test_from_dict_kubernetes_obj_type(data): """KubeObject.from_dict parses regular kubernetes objects.""" assert [kube_obj.data for kube_obj in KubeObject.from_dict(data)] == [data] + +@given(path=text(), kind=text()) +def test_difference_no_args(path, kind): + """Difference.to_text works as expected when no args passed.""" + d = Difference("Message", path) + assert d.to_text(kind) == path + ": Message" + +@given(path=text(), kind=text(), arg1=text(), arg2=one_of(text(), none())) +@example("somepath","Secret", "foo", None) +def test_difference_two_args(path, kind, arg1, arg2): + """Difference.to_text works when two args passed, that may be 'none'.""" + d = Difference("Message %s %s", path, arg1, arg2) + assert d.to_text(kind) != ""
TypeError on SealedSecret. Reported in PR #78; the error below occurs for e.g. `SealedSecret.v1alpha1.bitnami.com`: ``` Traceback (most recent call last): File "/Users/po/src/kubediff/kubediff", line 48, in <module> main() File "/Users/po/src/kubediff/kubediff", line 42, in main failed = check_files(args, printer, config) File "/Users/po/src/kubediff/kubedifflib/_diff.py", line 246, in check_files differences += check_file(printer, path, config) File "/Users/po/src/kubediff/kubedifflib/_diff.py", line 174, in check_file printer.diff(path, difference) File "/Users/po/src/kubediff/kubedifflib/_diff.py", line 214, in diff self._write('%s', difference.to_text(self._current.kind)) File "/Users/po/src/kubediff/kubedifflib/_diff.py", line 35, in to_text message = self.message % ((len(self.args[0]) * '*'), (len(self.args[1]) * '*')) TypeError: object of type 'NoneType' has no len() ```
0.0
42a6c302e06db06b5887e866e29d7d4a452a1d1e
[ "kubedifflib/tests/test_diff.py::test_difference_two_args" ]
[ "kubedifflib/tests/test_diff.py::test_two_lists_of_same_size_generator", "kubedifflib/tests/test_diff.py::test_diff_lists_doesnt_mutate_inputs", "kubedifflib/tests/test_diff.py::test_from_dict_kubernetes_obj_type", "kubedifflib/tests/test_diff.py::test_same_list_shuffled_is_not_different_nested", "kubedifflib/tests/test_diff.py::test_from_dict_kubernetes_list_type", "kubedifflib/tests/test_diff.py::test_list_subtract_recover", "kubedifflib/tests/test_diff.py::test_difference_no_args", "kubedifflib/tests/test_diff.py::test_same_list_shuffled_is_not_different", "kubedifflib/tests/test_diff.py::test_list_subtract_same_list", "kubedifflib/tests/test_diff.py::test_diff_lists_doesnt_mutate_inputs_nested_lists", "kubedifflib/tests/test_diff.py::test_diff_lists_equal", "kubedifflib/tests/test_diff.py::test_added_items_appear_in_diff" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-02-13 10:37:05+00:00
apache-2.0
6,236
web2py__pydal-349
diff --git a/pydal/dialects/base.py b/pydal/dialects/base.py index 89e261f9..fb058fe2 100644 --- a/pydal/dialects/base.py +++ b/pydal/dialects/base.py @@ -399,7 +399,8 @@ class SQLDialect(CommonDialect): return '' def coalesce(self, first, second): - expressions = [self.expand(first)]+[self.expand(e) for e in second] + expressions = [self.expand(first)] + \ + [self.expand(val, first.type) for val in second] return 'COALESCE(%s)' % ','.join(expressions) def raw(self, val): diff --git a/pydal/dialects/sqlite.py b/pydal/dialects/sqlite.py index 0af56176..078d5c1a 100644 --- a/pydal/dialects/sqlite.py +++ b/pydal/dialects/sqlite.py @@ -28,6 +28,15 @@ class SQLiteDialect(SQLDialect): return '(%s REGEXP %s)' % ( self.expand(first), self.expand(second, 'string')) + def select(self, fields, tables, where=None, groupby=None, having=None, + orderby=None, limitby=None, distinct=False, for_update=False): + if distinct and distinct is not True: + raise SyntaxError( + 'DISTINCT ON is not supported by SQLite') + return super(SQLiteDialect, self).select( + fields, tables, where, groupby, having, orderby, limitby, distinct, + for_update) + def truncate(self, table, mode=''): tablename = table._tablename return [ diff --git a/pydal/objects.py b/pydal/objects.py index c159c181..59d6f471 100644 --- a/pydal/objects.py +++ b/pydal/objects.py @@ -1462,6 +1462,8 @@ class Field(Expression, Serializable): return field def store(self, file, filename=None, path=None): + # make sure filename is a str sequence + filename = "{}".format(filename) if self.custom_store: return self.custom_store(file, filename, path) if isinstance(file, cgi.FieldStorage): @@ -1474,7 +1476,8 @@ class Field(Expression, Serializable): m = REGEX_STORE_PATTERN.search(filename) extension = m and m.group('e') or 'txt' uuid_key = self._db.uuid().replace('-', '')[-16:] - encoded_filename = base64.b16encode(filename).lower() + encoded_filename = base64.b16encode( + filename.encode('utf-8')).lower().decode('utf-8') newfilename = '%s.%s.%s.%s' % ( self._tablename, self.name, uuid_key, encoded_filename) newfilename = newfilename[:(self.length - 1 - len(extension))] + \ @@ -1486,27 +1489,27 @@ class Field(Expression, Serializable): blob_uploadfield_name: file.read()} self_uploadfield.table.insert(**keys) elif self_uploadfield is True: - if path: - pass - elif self.uploadfolder: - path = self.uploadfolder - elif self.db._adapter.folder: - path = pjoin(self.db._adapter.folder, '..', 'uploads') - else: - raise RuntimeError( - "you must specify a Field(..., uploadfolder=...)") - if self.uploadseparate: - if self.uploadfs: - raise RuntimeError("not supported") - path = pjoin(path, "%s.%s" % ( - self._tablename, self.name), uuid_key[:2] - ) - if not exists(path): - os.makedirs(path) - pathfilename = pjoin(path, newfilename) if self.uploadfs: dest_file = self.uploadfs.open(newfilename, 'wb') else: + if path: + pass + elif self.uploadfolder: + path = self.uploadfolder + elif self.db._adapter.folder: + path = pjoin(self.db._adapter.folder, '..', 'uploads') + else: + raise RuntimeError( + "you must specify a Field(..., uploadfolder=...)") + if self.uploadseparate: + if self.uploadfs: + raise RuntimeError("not supported") + path = pjoin(path, "%s.%s" % ( + self._tablename, self.name), uuid_key[:2] + ) + if not exists(path): + os.makedirs(path) + pathfilename = pjoin(path, newfilename) dest_file = open(pathfilename, 'wb') try: shutil.copyfileobj(file, dest_file) @@ -1563,7 +1566,7 @@ class Field(Expression, Serializable): return 
self.custom_retrieve_file_properties(name, path) if m.group('name'): try: - filename = base64.b16decode(m.group('name'), True) + filename = base64.b16decode(m.group('name'), True).decode('utf-8') filename = REGEX_CLEANUP_FN.sub('_', filename) except (TypeError, AttributeError): filename = name diff --git a/setup.py b/setup.py index f99ee9af..d4d69a06 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,9 @@ setup( maintainer_email='[email protected]', description='a pure Python Database Abstraction Layer', long_description=__doc__, - packages=['pydal', 'pydal.adapters', 'pydal.helpers', 'pydal.contrib'], + packages=[ + 'pydal', 'pydal.adapters', 'pydal.dialects', 'pydal.helpers', + 'pydal.parsers', 'pydal.representers', 'pydal.contrib'], include_package_data=True, zip_safe=False, platforms='any',
web2py/pydal
d59b588900f26e6e204fb119115efa91fe7db692
diff --git a/tests/sql.py b/tests/sql.py index 2573f3a3..c118e372 100644 --- a/tests/sql.py +++ b/tests/sql.py @@ -149,6 +149,74 @@ class TestFields(unittest.TestCase): else: isinstance(f.formatter(datetime.datetime.now()), str) + def testUploadField(self): + import tempfile + + stream = tempfile.NamedTemporaryFile() + content = b"this is the stream content" + stream.write(content) + # rewind before inserting + stream.seek(0) + + + db = DAL(DEFAULT_URI, check_reserved=['all']) + db.define_table('tt', Field('fileobj', 'upload', + uploadfolder=tempfile.gettempdir(), + autodelete=True)) + f_id = db.tt.insert(fileobj=stream) + + row = db.tt[f_id] + (retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj) + + # name should be the same + self.assertEqual(retr_name, os.path.basename(stream.name)) + # content should be the same + retr_content = retr_stream.read() + self.assertEqual(retr_content, content) + + # close streams! + retr_stream.close() + + # delete + row.delete_record() + + # drop + db.tt.drop() + + # this part is triggered only if fs (AKA pyfilesystem) module is installed + try: + from fs.memoryfs import MemoryFS + + # rewind before inserting + stream.seek(0) + db.define_table('tt', Field('fileobj', 'upload', + uploadfs=MemoryFS(), + autodelete=True)) + + f_id = db.tt.insert(fileobj=stream) + + row = db.tt[f_id] + (retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj) + + # name should be the same + self.assertEqual(retr_name, os.path.basename(stream.name)) + # content should be the same + retr_content = retr_stream.read() + self.assertEqual(retr_content, content) + + # close streams + retr_stream.close() + stream.close() + + # delete + row.delete_record() + + # drop + db.tt.drop() + + except ImportError: + pass + def testRun(self): """Test all field types and their return values""" db = DAL(DEFAULT_URI, check_reserved=['all'])
coalesce() incorrectly expands constant values. When you pass a string constant into coalesce, it is expanded as an identifier instead of a string constant: `db().select(db.table.str_field.coalesce('foo'))` expands into `SELECT COALESCE(table.str_field,foo) FROM table`, but the expected behavior is `SELECT COALESCE(table.str_field,'foo') FROM table`.
0.0
d59b588900f26e6e204fb119115efa91fe7db692
[ "tests/sql.py::TestFields::testUploadField" ]
[ "tests/sql.py::TestFields::testFieldFormatters", "tests/sql.py::TestFields::testFieldLabels", "tests/sql.py::TestFields::testFieldName", "tests/sql.py::TestFields::testFieldTypes", "tests/sql.py::TestFields::testRun", "tests/sql.py::TestTables::testTableNames", "tests/sql.py::TestAll::testSQLALL", "tests/sql.py::TestTable::testTableAlias", "tests/sql.py::TestTable::testTableCreation", "tests/sql.py::TestTable::testTableInheritance", "tests/sql.py::TestInsert::testRun", "tests/sql.py::TestSelect::testCoalesce", "tests/sql.py::TestSelect::testGroupByAndDistinct", "tests/sql.py::TestSelect::testListInteger", "tests/sql.py::TestSelect::testListReference", "tests/sql.py::TestSelect::testListString", "tests/sql.py::TestSelect::testRun", "tests/sql.py::TestSelect::testTestQuery", "tests/sql.py::TestAddMethod::testRun", "tests/sql.py::TestBelongs::testRun", "tests/sql.py::TestContains::testRun", "tests/sql.py::TestLike::testEscaping", "tests/sql.py::TestLike::testLikeInteger", "tests/sql.py::TestLike::testRegexp", "tests/sql.py::TestLike::testRun", "tests/sql.py::TestLike::testStartsEndsWith", "tests/sql.py::TestLike::testUpperLower", "tests/sql.py::TestDatetime::testRun", "tests/sql.py::TestExpressions::testOps", "tests/sql.py::TestExpressions::testRun", "tests/sql.py::TestExpressions::testSubstring", "tests/sql.py::TestExpressions::testUpdate", "tests/sql.py::TestJoin::testRun", "tests/sql.py::TestMinMaxSumAvg::testRun", "tests/sql.py::TestMigrations::testRun", "tests/sql.py::TestReference::testRun", "tests/sql.py::TestClientLevelOps::testRun", "tests/sql.py::TestVirtualFields::testRun", "tests/sql.py::TestComputedFields::testRun", "tests/sql.py::TestCommonFilters::testRun", "tests/sql.py::TestImportExportFields::testRun", "tests/sql.py::TestImportExportUuidFields::testRun", "tests/sql.py::TestDALDictImportExport::testRun", "tests/sql.py::TestSelectAsDict::testSelect", "tests/sql.py::TestRNameTable::testJoin", "tests/sql.py::TestRNameTable::testSelect", "tests/sql.py::TestRNameFields::testInsert", "tests/sql.py::TestRNameFields::testJoin", "tests/sql.py::TestRNameFields::testRun", "tests/sql.py::TestRNameFields::testSelect", "tests/sql.py::TestQuoting::testCase", "tests/sql.py::TestQuoting::testPKFK", "tests/sql.py::TestTableAndFieldCase::testme", "tests/sql.py::TestQuotesByDefault::testme", "tests/sql.py::TestGis::testGeometry", "tests/sql.py::TestGis::testGeometryCase", "tests/sql.py::TestGis::testGisMigration", "tests/sql.py::TestSQLCustomType::testRun", "tests/sql.py::TestLazy::testLazyGetter", "tests/sql.py::TestLazy::testRowExtra", "tests/sql.py::TestLazy::testRowNone", "tests/sql.py::TestLazy::testRun", "tests/sql.py::TestRedefine::testRun", "tests/sql.py::TestUpdateInsert::testRun", "tests/sql.py::TestBulkInsert::testRun", "tests/sql.py::TestRecordVersioning::testRun", "tests/sql.py::TestSerializers::testAsJson" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-04-21 22:23:30+00:00
bsd-3-clause
6,237
web2py__pydal-350
diff --git a/pydal/dialects/sqlite.py b/pydal/dialects/sqlite.py index 0af56176..078d5c1a 100644 --- a/pydal/dialects/sqlite.py +++ b/pydal/dialects/sqlite.py @@ -28,6 +28,15 @@ class SQLiteDialect(SQLDialect): return '(%s REGEXP %s)' % ( self.expand(first), self.expand(second, 'string')) + def select(self, fields, tables, where=None, groupby=None, having=None, + orderby=None, limitby=None, distinct=False, for_update=False): + if distinct and distinct is not True: + raise SyntaxError( + 'DISTINCT ON is not supported by SQLite') + return super(SQLiteDialect, self).select( + fields, tables, where, groupby, having, orderby, limitby, distinct, + for_update) + def truncate(self, table, mode=''): tablename = table._tablename return [ diff --git a/pydal/objects.py b/pydal/objects.py index c159c181..59d6f471 100644 --- a/pydal/objects.py +++ b/pydal/objects.py @@ -1462,6 +1462,8 @@ class Field(Expression, Serializable): return field def store(self, file, filename=None, path=None): + # make sure filename is a str sequence + filename = "{}".format(filename) if self.custom_store: return self.custom_store(file, filename, path) if isinstance(file, cgi.FieldStorage): @@ -1474,7 +1476,8 @@ class Field(Expression, Serializable): m = REGEX_STORE_PATTERN.search(filename) extension = m and m.group('e') or 'txt' uuid_key = self._db.uuid().replace('-', '')[-16:] - encoded_filename = base64.b16encode(filename).lower() + encoded_filename = base64.b16encode( + filename.encode('utf-8')).lower().decode('utf-8') newfilename = '%s.%s.%s.%s' % ( self._tablename, self.name, uuid_key, encoded_filename) newfilename = newfilename[:(self.length - 1 - len(extension))] + \ @@ -1486,27 +1489,27 @@ class Field(Expression, Serializable): blob_uploadfield_name: file.read()} self_uploadfield.table.insert(**keys) elif self_uploadfield is True: - if path: - pass - elif self.uploadfolder: - path = self.uploadfolder - elif self.db._adapter.folder: - path = pjoin(self.db._adapter.folder, '..', 'uploads') - else: - raise RuntimeError( - "you must specify a Field(..., uploadfolder=...)") - if self.uploadseparate: - if self.uploadfs: - raise RuntimeError("not supported") - path = pjoin(path, "%s.%s" % ( - self._tablename, self.name), uuid_key[:2] - ) - if not exists(path): - os.makedirs(path) - pathfilename = pjoin(path, newfilename) if self.uploadfs: dest_file = self.uploadfs.open(newfilename, 'wb') else: + if path: + pass + elif self.uploadfolder: + path = self.uploadfolder + elif self.db._adapter.folder: + path = pjoin(self.db._adapter.folder, '..', 'uploads') + else: + raise RuntimeError( + "you must specify a Field(..., uploadfolder=...)") + if self.uploadseparate: + if self.uploadfs: + raise RuntimeError("not supported") + path = pjoin(path, "%s.%s" % ( + self._tablename, self.name), uuid_key[:2] + ) + if not exists(path): + os.makedirs(path) + pathfilename = pjoin(path, newfilename) dest_file = open(pathfilename, 'wb') try: shutil.copyfileobj(file, dest_file) @@ -1563,7 +1566,7 @@ class Field(Expression, Serializable): return self.custom_retrieve_file_properties(name, path) if m.group('name'): try: - filename = base64.b16decode(m.group('name'), True) + filename = base64.b16decode(m.group('name'), True).decode('utf-8') filename = REGEX_CLEANUP_FN.sub('_', filename) except (TypeError, AttributeError): filename = name diff --git a/setup.py b/setup.py index f99ee9af..d4d69a06 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,9 @@ setup( maintainer_email='[email protected]', description='a pure Python Database Abstraction Layer', 
long_description=__doc__, - packages=['pydal', 'pydal.adapters', 'pydal.helpers', 'pydal.contrib'], + packages=[ + 'pydal', 'pydal.adapters', 'pydal.dialects', 'pydal.helpers', + 'pydal.parsers', 'pydal.representers', 'pydal.contrib'], include_package_data=True, zip_safe=False, platforms='any',
web2py/pydal
d59b588900f26e6e204fb119115efa91fe7db692
diff --git a/tests/sql.py b/tests/sql.py index 2573f3a3..c118e372 100644 --- a/tests/sql.py +++ b/tests/sql.py @@ -149,6 +149,74 @@ class TestFields(unittest.TestCase): else: isinstance(f.formatter(datetime.datetime.now()), str) + def testUploadField(self): + import tempfile + + stream = tempfile.NamedTemporaryFile() + content = b"this is the stream content" + stream.write(content) + # rewind before inserting + stream.seek(0) + + + db = DAL(DEFAULT_URI, check_reserved=['all']) + db.define_table('tt', Field('fileobj', 'upload', + uploadfolder=tempfile.gettempdir(), + autodelete=True)) + f_id = db.tt.insert(fileobj=stream) + + row = db.tt[f_id] + (retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj) + + # name should be the same + self.assertEqual(retr_name, os.path.basename(stream.name)) + # content should be the same + retr_content = retr_stream.read() + self.assertEqual(retr_content, content) + + # close streams! + retr_stream.close() + + # delete + row.delete_record() + + # drop + db.tt.drop() + + # this part is triggered only if fs (AKA pyfilesystem) module is installed + try: + from fs.memoryfs import MemoryFS + + # rewind before inserting + stream.seek(0) + db.define_table('tt', Field('fileobj', 'upload', + uploadfs=MemoryFS(), + autodelete=True)) + + f_id = db.tt.insert(fileobj=stream) + + row = db.tt[f_id] + (retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj) + + # name should be the same + self.assertEqual(retr_name, os.path.basename(stream.name)) + # content should be the same + retr_content = retr_stream.read() + self.assertEqual(retr_content, content) + + # close streams + retr_stream.close() + stream.close() + + # delete + row.delete_record() + + # drop + db.tt.drop() + + except ImportError: + pass + def testRun(self): """Test all field types and their return values""" db = DAL(DEFAULT_URI, check_reserved=['all'])
select(distinct=...) may produce faulty sql for sqlite This one works: ```python In [45]: db().select(db.player.country, distinct = True) Out[45]: <Rows (40)> In [46]: db._lastsql Out[46]: 'SELECT DISTINCT player.country FROM player;' ``` But the other option given in the book does not: ```python In [47]: db().select(db.player.country, distinct = db.player.country) OperationalError: near "ON": syntax error In [48]: db._lastsql Out[48]: 'SELECT DISTINCT ON (player.country) player.country FROM player;' ``` I didn't test it with other DB engines since I don't currently have one installed. Related to https://github.com/web2py/web2py/issues/1129
0.0
d59b588900f26e6e204fb119115efa91fe7db692
[ "tests/sql.py::TestFields::testUploadField" ]
[ "tests/sql.py::TestFields::testFieldFormatters", "tests/sql.py::TestFields::testFieldLabels", "tests/sql.py::TestFields::testFieldName", "tests/sql.py::TestFields::testFieldTypes", "tests/sql.py::TestFields::testRun", "tests/sql.py::TestTables::testTableNames", "tests/sql.py::TestAll::testSQLALL", "tests/sql.py::TestTable::testTableAlias", "tests/sql.py::TestTable::testTableCreation", "tests/sql.py::TestTable::testTableInheritance", "tests/sql.py::TestInsert::testRun", "tests/sql.py::TestSelect::testCoalesce", "tests/sql.py::TestSelect::testGroupByAndDistinct", "tests/sql.py::TestSelect::testListInteger", "tests/sql.py::TestSelect::testListReference", "tests/sql.py::TestSelect::testListString", "tests/sql.py::TestSelect::testRun", "tests/sql.py::TestSelect::testTestQuery", "tests/sql.py::TestAddMethod::testRun", "tests/sql.py::TestBelongs::testRun", "tests/sql.py::TestContains::testRun", "tests/sql.py::TestLike::testEscaping", "tests/sql.py::TestLike::testLikeInteger", "tests/sql.py::TestLike::testRegexp", "tests/sql.py::TestLike::testRun", "tests/sql.py::TestLike::testStartsEndsWith", "tests/sql.py::TestLike::testUpperLower", "tests/sql.py::TestDatetime::testRun", "tests/sql.py::TestExpressions::testOps", "tests/sql.py::TestExpressions::testRun", "tests/sql.py::TestExpressions::testSubstring", "tests/sql.py::TestExpressions::testUpdate", "tests/sql.py::TestJoin::testRun", "tests/sql.py::TestMinMaxSumAvg::testRun", "tests/sql.py::TestMigrations::testRun", "tests/sql.py::TestReference::testRun", "tests/sql.py::TestClientLevelOps::testRun", "tests/sql.py::TestVirtualFields::testRun", "tests/sql.py::TestComputedFields::testRun", "tests/sql.py::TestCommonFilters::testRun", "tests/sql.py::TestImportExportFields::testRun", "tests/sql.py::TestImportExportUuidFields::testRun", "tests/sql.py::TestDALDictImportExport::testRun", "tests/sql.py::TestSelectAsDict::testSelect", "tests/sql.py::TestRNameTable::testJoin", "tests/sql.py::TestRNameTable::testSelect", "tests/sql.py::TestRNameFields::testInsert", "tests/sql.py::TestRNameFields::testJoin", "tests/sql.py::TestRNameFields::testRun", "tests/sql.py::TestRNameFields::testSelect", "tests/sql.py::TestQuoting::testCase", "tests/sql.py::TestQuoting::testPKFK", "tests/sql.py::TestTableAndFieldCase::testme", "tests/sql.py::TestQuotesByDefault::testme", "tests/sql.py::TestGis::testGeometry", "tests/sql.py::TestGis::testGeometryCase", "tests/sql.py::TestGis::testGisMigration", "tests/sql.py::TestSQLCustomType::testRun", "tests/sql.py::TestLazy::testLazyGetter", "tests/sql.py::TestLazy::testRowExtra", "tests/sql.py::TestLazy::testRowNone", "tests/sql.py::TestLazy::testRun", "tests/sql.py::TestRedefine::testRun", "tests/sql.py::TestUpdateInsert::testRun", "tests/sql.py::TestBulkInsert::testRun", "tests/sql.py::TestRecordVersioning::testRun", "tests/sql.py::TestSerializers::testAsJson" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-04-21 22:31:32+00:00
bsd-3-clause
6,238
websocket-client__websocket-client-929
diff --git a/websocket/_url.py b/websocket/_url.py index 259ce64..2141b02 100644 --- a/websocket/_url.py +++ b/websocket/_url.py @@ -137,7 +137,7 @@ def get_proxy_info( Websocket server name. is_secure: bool Is the connection secure? (wss) looks for "https_proxy" in env - before falling back to "http_proxy" + instead of "http_proxy" proxy_host: str http proxy host name. proxy_port: str or int @@ -158,15 +158,11 @@ def get_proxy_info( auth = proxy_auth return proxy_host, port, auth - env_keys = ["http_proxy"] - if is_secure: - env_keys.insert(0, "https_proxy") - - for key in env_keys: - value = os.environ.get(key, os.environ.get(key.upper(), "")).replace(" ", "") - if value: - proxy = urlparse(value) - auth = (unquote(proxy.username), unquote(proxy.password)) if proxy.username else None - return proxy.hostname, proxy.port, auth + env_key = "https_proxy" if is_secure else "http_proxy" + value = os.environ.get(env_key, os.environ.get(env_key.upper(), "")).replace(" ", "") + if value: + proxy = urlparse(value) + auth = (unquote(proxy.username), unquote(proxy.password)) if proxy.username else None + return proxy.hostname, proxy.port, auth return None, 0, None
websocket-client/websocket-client
bd506ad2e14749e1d31c07a1a6fca5644adb0ec4
diff --git a/websocket/tests/test_url.py b/websocket/tests/test_url.py index 6a210d5..a74dd76 100644 --- a/websocket/tests/test_url.py +++ b/websocket/tests/test_url.py @@ -254,6 +254,24 @@ def testProxyFromEnv(self): os.environ["https_proxy"] = "http://localhost2:3128/" self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, None)) + os.environ["http_proxy"] = "" + os.environ["https_proxy"] = "http://localhost2/" + self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", None, None)) + self.assertEqual(get_proxy_info("echo.websocket.events", False), (None, 0, None)) + os.environ["http_proxy"] = "" + os.environ["https_proxy"] = "http://localhost2:3128/" + self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, None)) + self.assertEqual(get_proxy_info("echo.websocket.events", False), (None, 0, None)) + + os.environ["http_proxy"] = "http://localhost/" + os.environ["https_proxy"] = "" + self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None)) + self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, None)) + os.environ["http_proxy"] = "http://localhost:3128/" + os.environ["https_proxy"] = "" + self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None)) + self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None)) + os.environ["http_proxy"] = "http://a:b@localhost/" self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, ("a", "b"))) os.environ["http_proxy"] = "http://a:b@localhost:3128/"
Environment variable HTTP_PROXY is used for HTTPS connections The problem occurred in an environment where a proxy server is to be used for HTTP connections but not for HTTPS connections. In this case `http_proxy` is set in the environment but `https_proxy` is not. The problematic code is here: https://github.com/websocket-client/websocket-client/blob/bd506ad2e14749e1d31c07a1a6fca5644adb0ec4/websocket/_url.py#L161-L163 In my opinion, only `https_proxy` should be used for secure connections.
0.0
bd506ad2e14749e1d31c07a1a6fca5644adb0ec4
[ "websocket/tests/test_url.py::ProxyInfoTest::testProxyFromEnv" ]
[ "websocket/tests/test_url.py::UrlTest::testParseUrl", "websocket/tests/test_url.py::UrlTest::test_address_in_network", "websocket/tests/test_url.py::IsNoProxyHostTest::testHostnameMatch", "websocket/tests/test_url.py::IsNoProxyHostTest::testHostnameMatchDomain", "websocket/tests/test_url.py::IsNoProxyHostTest::testIpAddress", "websocket/tests/test_url.py::IsNoProxyHostTest::testIpAddressInRange", "websocket/tests/test_url.py::IsNoProxyHostTest::testMatchAll", "websocket/tests/test_url.py::ProxyInfoTest::testProxyFromArgs" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-07-10 09:14:17+00:00
apache-2.0
6,239
wesleybowman__UTide-79
diff --git a/utide/_reconstruct.py b/utide/_reconstruct.py index ebb7723..e72c862 100644 --- a/utide/_reconstruct.py +++ b/utide/_reconstruct.py @@ -92,7 +92,7 @@ def _reconstruct(t, goodmask, coef, verbose, constit, min_SNR, min_PE): # Determine constituents to include. if constit is not None: ind = [i for i, c in enumerate(coef['name']) if c in constit] - elif min_SNR == 0 and min_PE == 0: + elif (min_SNR == 0 and min_PE == 0) or coef['aux']['opt']['nodiagn']: ind = slice(None) else: if twodim: diff --git a/utide/_solve.py b/utide/_solve.py index 9ce79e5..0445c16 100644 --- a/utide/_solve.py +++ b/utide/_solve.py @@ -57,6 +57,7 @@ def _translate_opts(opts): oldopts.linci = False elif opts.conf_int == 'none': oldopts.conf_int = False + oldopts.nodiagn = 1 else: raise ValueError("'conf_int' must be 'linear', 'MC', or 'none'")
wesleybowman/UTide
c5ac303aef3365c1a93cae7fddfc6a3672a50788
diff --git a/tests/test_solve.py b/tests/test_solve.py index 5295515..eaa507f 100644 --- a/tests/test_solve.py +++ b/tests/test_solve.py @@ -9,15 +9,18 @@ Smoke testing--just see if the system runs. from __future__ import (absolute_import, division, print_function) +import pytest + import numpy as np from utide import reconstruct from utide import solve from utide._ut_constants import ut_constants from utide.utilities import Bunch - - -def test_roundtrip(): +# We omit the 'MC' case for now because with this test data, it +# fails with 'numpy.linalg.LinAlgError: SVD did not converge'. [email protected]('conf_int', ['linear', 'none']) +def test_roundtrip(conf_int): """Minimal conversion from simple_utide_test.""" ts = 735604 duration = 35 @@ -44,7 +47,7 @@ def test_roundtrip(): 'nodal': False, 'trend': False, 'method': 'ols', - 'conf_int': 'linear', + 'conf_int': conf_int, 'Rayleigh_min': 0.95, }
solve failing when conf_int='none' solve is failing when `conf_int='none'`. For example, if I try changing `conf_int='linear'` to `conf_int='none'` in cell [5] of the [utide_uv_example.ipynb notebook](https://github.com/wesleybowman/UTide/blob/master/notebooks/utide_uv_example.ipynb), I get: ```python-traceback solve: matrix prep ... solution ... diagnostics ... --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-10-359a0567fa71> in <module> 7 method='ols', 8 conf_int='none', ----> 9 Rayleigh_min=0.95,) ~/miniconda3/envs/pangeo/lib/python3.7/site-packages/utide/_solve.py in solve(t, u, v, lat, **opts) 198 compat_opts = _process_opts(opts, v is not None) 199 --> 200 coef = _solv1(t, u, v, lat, **compat_opts) 201 202 return coef ~/miniconda3/envs/pangeo/lib/python3.7/site-packages/utide/_solve.py in _solv1(tin, uin, vin, lat, **opts) 377 # Diagnostics. 378 if not opt['nodiagn']: --> 379 coef, indPE = ut_diagn(coef, opt) 380 381 # Re-order constituents. ~/miniconda3/envs/pangeo/lib/python3.7/site-packages/utide/diagnostics.py in ut_diagn(coef, opt) 15 16 SNR = (coef['Lsmaj']**2 + coef['Lsmin']**2) / ( ---> 17 (coef['Lsmaj_ci']/1.96)**2 + 18 (coef['Lsmin_ci']/1.96)**2) 19 KeyError: 'Lsmaj_ci' ```
0.0
c5ac303aef3365c1a93cae7fddfc6a3672a50788
[ "tests/test_solve.py::test_roundtrip[none]" ]
[ "tests/test_solve.py::test_roundtrip[linear]", "tests/test_solve.py::test_masked_input", "tests/test_solve.py::test_robust", "tests/test_solve.py::test_MC" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-01-10 20:15:32+00:00
mit
6,240
wesleybowman__UTide-96
diff --git a/utide/_solve.py b/utide/_solve.py index 675f3d5..20ed0e8 100644 --- a/utide/_solve.py +++ b/utide/_solve.py @@ -5,10 +5,9 @@ Central module for calculating the tidal amplitudes, phases, etc. import numpy as np from ._time_conversion import _normalize_time -from ._ut_constants import constit_index_dict from .confidence import _confidence from .constituent_selection import ut_cnstitsel -from .diagnostics import ut_diagn +from .diagnostics import _PE, _SNR, ut_diagn from .ellipse_params import ut_cs2cep from .harmonics import ut_E from .robustfit import robustfit @@ -17,6 +16,7 @@ from .utilities import Bunch default_opts = { "constit": "auto", + "order_constit": None, "conf_int": "linear", "method": "ols", "trend": True, @@ -37,6 +37,8 @@ def _process_opts(opts, is_2D): newopts.update_values(strict=True, **opts) # TODO: add more validations. newopts.infer = validate_infer(newopts.infer, is_2D) + snr = newopts.conf_int != "none" + newopts.order_constit = validate_order_constit(newopts.order_constit, snr) compat_opts = _translate_opts(newopts) @@ -48,6 +50,7 @@ def _translate_opts(opts): # Here or elsewhere, proper validation remains to be added. oldopts = Bunch() oldopts.cnstit = opts.constit + oldopts.ordercnstit = opts.order_constit oldopts.infer = opts.infer # we will not use the matlab names, though oldopts.conf_int = True @@ -101,6 +104,22 @@ def validate_infer(infer, is_2D): return infer +def validate_order_constit(arg, have_snr): + available = ["PE", "frequency"] + if have_snr: + available.append("SNR") + if arg is None: + return "PE" + if isinstance(arg, str) and arg in available: + return arg + if not isinstance(arg, str) and np.iterable(arg): + return arg # TODO: add checking of its elements + raise ValueError( + f"order_constit must be one of {available} or" + f" a sequence of constituents, not '{arg}'", + ) + + def solve(t, u, v=None, lat=None, **opts): """ Calculate amplitude, phase, confidence intervals of tidal constituents. @@ -122,7 +141,7 @@ def solve(t, u, v=None, lat=None, **opts): standard library `datetime` proleptic Gregorian calendar, starting with 1 at 00:00 on January 1 of year 1; this is the 'datenum' used by Matplotlib. - constit : {'auto', array_like}, optional + constit : {'auto', sequence}, optional List of strings with standard letter abbreviations of tidal constituents; or 'auto' to let the list be determined based on the time span. @@ -165,6 +184,14 @@ def solve(t, u, v=None, lat=None, **opts): amp_ratios and phase_offsets have length N for a scalar time series, or 2N for a vector series. + order_constit : {'PE', 'SNR', 'frequency', sequence}, optional + The default is 'PE' (percent energy) order, returning results ordered from + high energy to low. + The 'SNR' order is from high signal-to-noise ratio to low, and is + available only if `conf_int` is not 'none'. The + 'frequency' order is from low to high frequency. Alternatively, a + sequence of constituent names may be supplied, typically the same list as + given in the *constit* option. MC_n : integer, optional Not yet implemented. robust_kw : dict, optional @@ -370,7 +397,7 @@ def _solv1(tin, uin, vin, lat, **opts): coef.theta = np.hstack((coef.theta, theta)) coef.g = np.hstack((coef.g, g)) - if opt["conf_int"] is True: + if opt["conf_int"]: coef = _confidence( coef, cnstit, @@ -392,63 +419,50 @@ def _solv1(tin, uin, vin, lat, **opts): # Diagnostics. if not opt["nodiagn"]: - coef, indPE = ut_diagn(coef, opt) + coef = ut_diagn(coef) + # Adds a diagn dictionary, always sorted by energy. 
+ # This doesn't seem very useful. Let's directly add the variables + # to the base coef structure. Then they can be sorted with everything + # else. + coef["PE"] = _PE(coef) + coef["SNR"] = _SNR(coef) # Re-order constituents. - if opt["ordercnstit"] is not None: + coef = _reorder(coef, opt) + # This might have added PE if it was not already present. - if opt["ordercnstit"] == "frq": - ind = coef["aux"]["frq"].argsort() + if opt["RunTimeDisp"]: + print("done.") - elif opt["ordercnstit"] == "snr": - if not opt["nodiagn"]: - ind = coef["diagn"]["SNR"].argsort()[::-1] - else: - if opt["twodim"]: - SNR = (coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) / ( - (coef["Lsmaj_ci"] / 1.96) ** 2 + (coef["Lsmin_ci"] / 1.96) ** 2 - ) + return coef - else: - SNR = (coef["A"] ** 2) / (coef["A_ci"] / 1.96) ** 2 - ind = SNR.argsort()[::-1] +def _reorder(coef, opt): + if opt["ordercnstit"] == "PE": + # Default: order by decreasing energy. + if "PE" not in coef: + coef["PE"] = _PE(coef) + ind = coef["PE"].argsort()[::-1] - else: - ilist = [constit_index_dict[name] for name in opt["ordercnstit"]] - ind = np.array(ilist, dtype=int) + elif opt["ordercnstit"] == "frequency": + ind = coef["aux"]["frq"].argsort() - else: # Default: order by decreasing energy. - if not opt["nodiagn"]: - ind = indPE - else: - if opt["twodim"]: - PE = np.sum(coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) - PE = 100 * (coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) / PE - else: - PE = 100 * coef["A"] ** 2 / np.sum(coef["A"] ** 2) - - ind = PE.argsort()[::-1] - - reorderlist = ["g", "name"] - if opt.twodim: - reorderlist.extend(["Lsmaj", "Lsmin", "theta"]) - if opt.conf_int: - reorderlist.extend(["Lsmaj_ci", "Lsmin_ci", "theta_ci", "g_ci"]) + elif opt["ordercnstit"] == "SNR": + # If we are here, we should be guaranteed to have SNR already. + ind = coef["SNR"].argsort()[::-1] else: - reorderlist.append("A") - if opt.conf_int: - reorderlist.extend(["A_ci", "g_ci"]) + namelist = list(coef["name"]) + ilist = [namelist.index(name) for name in opt["ordercnstit"]] + ind = np.array(ilist, dtype=int) + + arrays = "name PE SNR A A_ci g g_ci Lsmaj Lsmaj_ci Lsmin Lsmin_ci theta theta_ci" + reorderlist = [a for a in arrays.split() if a in coef] for key in reorderlist: coef[key] = coef[key][ind] coef["aux"]["frq"] = coef["aux"]["frq"][ind] coef["aux"]["lind"] = coef["aux"]["lind"][ind] - - if opt["RunTimeDisp"]: - print("done.") - return coef @@ -532,7 +546,7 @@ def _slvinit(tin, uin, vin, lat, **opts): opt["rmin"] = 1 opt["method"] = "ols" opt["tunrdn"] = 1 - opt["linci"] = 0 + opt["linci"] = False opt["white"] = 0 opt["nrlzn"] = 200 opt["lsfrqosmp"] = 1 diff --git a/utide/diagnostics.py b/utide/diagnostics.py index bae848b..b6a250a 100644 --- a/utide/diagnostics.py +++ b/utide/diagnostics.py @@ -1,58 +1,44 @@ import numpy as np -def ut_diagn(coef, opt): - - if opt["RunTimeDisp"]: - print("diagnostics ... ", end="") - coef["diagn"] = {} +def _PE(coef): + """ + Return the energy percentage for each constituent. + """ + if "Lsmaj" in coef: + E = coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2 + PE = (100 / np.sum(E)) * E + else: + PE = 100 * coef["A"] ** 2 / np.sum(coef["A"] ** 2) + return PE - if opt["twodim"]: - PE = np.sum(coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) - PE = 100 * (coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) / PE +def _SNR(coef): + """ + Return the signal-to-noise ratio for each constituent. 
+ """ + if "Lsmaj" in coef: SNR = (coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) / ( (coef["Lsmaj_ci"] / 1.96) ** 2 + (coef["Lsmin_ci"] / 1.96) ** 2 ) - else: - PE = 100 * coef["A"] ** 2 / np.sum(coef["A"] ** 2) SNR = (coef["A"] ** 2) / (coef["A_ci"] / 1.96) ** 2 + return SNR + +def ut_diagn(coef): + """ + Add to coef the names, PE, and SNR, *always* sorted by energy. + + To be eliminated... + """ + coef["diagn"] = {} + PE = _PE(coef) + SNR = _SNR(coef) indPE = PE.argsort()[::-1] coef["diagn"]["name"] = coef["name"][indPE] coef["diagn"]["PE"] = PE[indPE] coef["diagn"]["SNR"] = SNR[indPE] - return coef, indPE - - -# [~,indPE] = sort(PE,'descend'); -# coef.diagn.name = coef.name(indPE); -# coef.diagn.PE = PE(indPE); -# coef.diagn.SNR = SNR; % used in ut_diagntable; ordered by PE there -# if opt.twodim -# [coef.diagn,usnrc,vsnrc] = ut_diagntable(coef,cnstit,... -# t,u,v,xmod,m,B,W,varMSM,Gall,Hall,elor,varcov_mCw,indPE); -# else -# [coef.diagn,usnrc,~] = ut_diagntable(coef,cnstit,... -# t,u,[],xmod,m,B,W,varMSM,Gall,Hall,elor,varcov_mCw,indPE); -# end -# if opt.diagnplots -# tmp = nan*ones(size(uin)); -# tmp(uvgd) = usnrc; -# usnrc = tmp; -# tmp = nan*ones(size(uin)); -# tmp(uvgd) = e; -# e = tmp; -# if opt.twodim -# tmp = nan*ones(size(uin)); -# tmp(uvgd) = vsnrc; -# vsnrc = tmp; -# ut_diagnfigs(coef,indPE,tin,uin,vin,usnrc,vsnrc,e); -# else -# ut_diagnfigs(coef,indPE,tin,uin,[],usnrc,[],e); -# end -# end -# end + return coef
wesleybowman/UTide
5f15bcb7ba4c724a7e680866272a676d0785f50f
diff --git a/tests/test_order_constit.py b/tests/test_order_constit.py new file mode 100644 index 0000000..8c87e45 --- /dev/null +++ b/tests/test_order_constit.py @@ -0,0 +1,77 @@ +import numpy as np +import pytest + +from utide import reconstruct, solve +from utide._ut_constants import constit_index_dict, ut_constants + + +ts = 735604 +duration = 35 + +time = np.linspace(ts, ts + duration, 842) +tref = (time[-1] + time[0]) / 2 + +const = ut_constants.const + +amps = [1.0, 0.5, 0.6, 0.1] +names = ["M2", "S2", "K1", "O1"] +cpds = [24 * const.freq[constit_index_dict[name]] for name in names] +sinusoids = [] +for amp, cpd in zip(amps, cpds): + arg = 2 * np.pi * (time - tref) * cpd + sinusoids.append(amp * np.cos(arg)) +tide = np.hstack(tuple(sinusoids)).sum(axis=0) + +np.random.seed(1234) +noise = 1e-3 * np.random.randn(len(time)) + +time_series = tide + noise + +opts0 = { + "constit": ["K1", "M2", "O1", "S2"], + "order_constit": "frequency", + "phase": "raw", + "nodal": False, + "trend": False, + "method": "ols", + "conf_int": "MC", + "Rayleigh_min": 0.95, +} + + [email protected]("conf_int", ["none", "linear", "MC"]) +def test_order(conf_int): + + orders = [None, "PE", "frequency", opts0["constit"]] + if conf_int != "none": + orders.append("SNR") + elevs = [] + ts_elevs = [] + vels = [] + ts_vels = [] + for order in orders: + opts = opts0.copy() + opts["order_constit"] = order + opts["conf_int"] = conf_int + elevs.append(solve(time, time_series, lat=45, **opts)) + vels.append(solve(time, time_series, time_series, lat=45, **opts)) + ts_elevs.append(reconstruct(time, elevs[-1], min_SNR=0)) + ts_vels.append(reconstruct(time, vels[-1], min_SNR=0)) + + # Are the reconstructions all the same? + for i in range(1, len(elevs)): + assert (ts_elevs[i].h == ts_elevs[0].h).all() + assert (ts_vels[i].u == ts_vels[0].u).all() + assert (ts_vels[i].v == ts_vels[0].v).all() + + # Is None equivalent to "PE"? (Just a spot check.) + assert (elevs[0].name == elevs[1].name).all() + assert (elevs[0].A == elevs[1].A).all() + + +def test_invalid_snr(): + opts = opts0.copy() + opts["conf_int"] = "none" + opts["order_constit"] = "SNR" + with pytest.raises(ValueError): + solve(time, time_series, lat=45, **opts)
Trying to reorder constituents as in ‘OrderCnstit’. As @rsignell-usgs mentioned in a previous issue, we are trying to analyze the tides at each grid cell in a numerical model solution. We run utide for each grid point, but the order of the constituents can be different for each grid point. In the Matlab code, there is the option ‘OrderCnstit’ that allows passing a specified order of constituents when constit is not set to 'auto'. Is there a similar reordering for the Python version?
0.0
5f15bcb7ba4c724a7e680866272a676d0785f50f
[ "tests/test_order_constit.py::test_order[none]", "tests/test_order_constit.py::test_order[linear]", "tests/test_order_constit.py::test_order[MC]", "tests/test_order_constit.py::test_invalid_snr" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-03-10 20:59:56+00:00
mit
6,241
wfondrie__mokapot-106
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a81e5f..27b7fe2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,11 @@ # Changelog for mokapot ## [Unreleased] + +## [v0.10.1] - 2023-09-11 ### Breaking changes - Mokapot now uses `numpy.random.Generator` instead of the deprecated `numpy.random.RandomState` API. - New `rng` arguments have been added to functions and classes that rely on randomness in lieu of setting a global random seed with `np.random.seed()`. Thanks @sjust-seerbio! + New `rng` arguments have been added to functions and classes that rely on randomness in lieu of setting a global random seed with `np.random.seed()`. Thanks @sjust-seerbio! (#55) ### Changed - Added linting with Ruff to tests and pre-commit hooks (along with others)! @@ -11,15 +13,16 @@ ### Fixed - The PepXML reader, which broke due to a Pandas update. - Potential bug if lowercase peptide sequences were used and protein-level confidence estimates were enabled +- Multiprocessing led to the same training set being used for all splits (#104). -## [0.9.1] - 2022-12-14 +## [v0.9.1] - 2022-12-14 ### Changed - Cross-validation classes are now detected by looking for inheritance from the `sklearn.model_selection._search.BaseSearchCV` class. ### Fixed - Fixed backward compatibility issue for Python <3.10. -## [0.9.0] - 2022-12-02 +## [v0.9.0] - 2022-12-02 ### Added - Support for plugins, allowing mokapot to use new models. - Added a custom Docker image with optional dependencies. @@ -31,11 +34,11 @@ - Updated GitHub Actions. - Migrated to a full pyproject.toml setuptools build. Thanks @jspaezp! -## [0.8.3] - 2022-07-20 +## [v0.8.3] - 2022-07-20 ### Fixed - Fixed the reported mokapot score when group FDR is used. -## [0.8.2] - 2022-07-18 +## [v0.8.2] - 2022-07-18 ### Added - `mokapot.Model()` objects now recorded the CV fold that they were fit on. This means that they can be provided to `mokapot.brew()` in any order @@ -45,7 +48,7 @@ - Resolved issue where models were required to have an intercept term. - The PepXML parser would sometimes try and log transform features with `0`'s, resulting in missing values. -## [0.8.1] - 2022-06-24 +## [v0.8.1] - 2022-06-24 ### Added - Support for previously trained models in the `brew()` function and the CLI @@ -56,7 +59,7 @@ `min_length-1`. - Links to example datasets in the documentation. -## [0.8.0] - 2022-03-11 +## [v0.8.0] - 2022-03-11 Thanks to @sambenfredj, @gessulat, @tkschmidt, and @MatthewThe for PR #44, which made these things happen! @@ -72,17 +75,17 @@ PR #44, which made these things happen! - Parallelization within `mokapot.brew()` now uses `joblib` instead of `concurrent.futures`. -## [0.7.4] - 2021-09-03 +## [v0.7.4] - 2021-09-03 ### Changed - Improved documentation and added warnings for `--subset_max_train`. Thanks @jspaezp! -## [0.7.3] - 2021-07-20 +## [v0.7.3] - 2021-07-20 ### Fixed - Fixed bug where the `--keep_decoys` did not work with `--aggregate`. Also, added tests to cover this. Thanks @jspaezp! -## [0.7.2] - 2021-07-16 +## [v0.7.2] - 2021-07-16 ### Added - `--keep_decoys` option to the command line interface. Thanks @jspaezp! - Notes about setting a random seed to the Python API documentation. (Issue #30) @@ -96,12 +99,12 @@ PR #44, which made these things happen! ### Changed - Updates to unit tests. Warnings are now treated as errors for system tests. 
-## [0.7.1] - 2021-03-22 +## [v0.7.1] - 2021-03-22 ### Changed - Updated the build to align with [PEP517](https://www.python.org/dev/peps/pep-0517/) -## [0.7.0] - 2021-03-19 +## [v0.7.0] - 2021-03-19 ### Added - Support for downstream peptide and protein quantitation with [FlashLFQ](https://github.com/smith-chem-wisc/FlashLFQ). This is accomplished @@ -127,7 +130,7 @@ PR #44, which made these things happen! `importlib.metadata` to the standard library, saving a few hundred milliseconds. -## [0.6.2] - 2021-03-12 +## [v0.6.2] - 2021-03-12 ### Added - Now checks to verify there are no debugging print statements in the code base when linting. @@ -135,7 +138,7 @@ PR #44, which made these things happen! ### Fixed - Removed debugging print statements. -## [0.6.1] - 2021-03-11 +## [v0.6.1] - 2021-03-11 ### Fixed - Parsing Percolator tab-delimited files with a "DefaultDirection" line. - `Label` column is now converted to boolean during PIN file parsing. @@ -143,7 +146,7 @@ PR #44, which made these things happen! - Parsing modifications from pepXML files were indexed incorrectly on the peptide string. -## [0.6.0] - 2021-03-03 +## [v0.6.0] - 2021-03-03 ### Added - Support for parsing PSMs from PepXML input files. - This changelog. diff --git a/mokapot/brew.py b/mokapot/brew.py index 86e06b3..c2c6ea9 100644 --- a/mokapot/brew.py +++ b/mokapot/brew.py @@ -106,9 +106,10 @@ def brew(psms, model=None, test_fdr=0.01, folds=3, max_workers=1, rng=None): LOGGER.info("Splitting PSMs into %i folds...", folds) test_idx = [p._split(folds) for p in psms] train_sets = _make_train_sets(psms, test_idx) + if max_workers != 1: # train_sets can't be a generator for joblib :( - train_sets = list(train_sets) + train_sets = [copy.copy(d) for d in train_sets] # If trained models are provided, use the them as-is. try:
wfondrie/mokapot
2bc16136b94cddddec3decb222f89b796c18bdbb
diff --git a/tests/conftest.py b/tests/conftest.py index c490a61..481d94e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,12 +1,19 @@ """ This file contains fixtures that are used at multiple points in the tests. """ +import logging import pytest import numpy as np import pandas as pd from mokapot import LinearPsmDataset [email protected](autouse=True) +def set_logging(caplog): + """Add logging to everything.""" + caplog.set_level(level=logging.INFO, logger="mokapot") + + @pytest.fixture(scope="session") def psm_df_6(): """A DataFrame containing 6 PSMs""" @@ -34,6 +41,9 @@ def psm_df_1000(tmp_path): "score": np.concatenate( [rng.normal(3, size=200), rng.normal(size=300)] ), + "score2": np.concatenate( + [rng.normal(3, size=200), rng.normal(size=300)] + ), "filename": "test.mzML", "calcmass": rng.uniform(500, 2000, size=500), "expmass": rng.uniform(500, 2000, size=500), @@ -47,6 +57,7 @@ def psm_df_1000(tmp_path): "group": rng.choice(2, size=500), "peptide": [_random_peptide(5, rng) for _ in range(500)], "score": rng.normal(size=500), + "score2": rng.normal(size=500), "filename": "test.mzML", "calcmass": rng.uniform(500, 2000, size=500), "expmass": rng.uniform(500, 2000, size=500), @@ -75,7 +86,7 @@ def psms(psm_df_1000): target_column="target", spectrum_columns="spectrum", peptide_column="peptide", - feature_columns="score", + feature_columns=["score", "score2"], filename_column="filename", scan_column="spectrum", calcmass_column="calcmass", diff --git a/tests/unit_tests/test_brew.py b/tests/unit_tests/test_brew.py index 319626b..27d0495 100644 --- a/tests/unit_tests/test_brew.py +++ b/tests/unit_tests/test_brew.py @@ -44,7 +44,7 @@ def test_brew_joint(psms, svm): def test_brew_folds(psms, svm): """Test that changing the number of folds works""" - results, models = mokapot.brew(psms, svm, test_fdr=0.05, folds=4) + results, models = mokapot.brew(psms, svm, test_fdr=0.1, folds=4) assert isinstance(results, mokapot.confidence.LinearConfidence) assert len(models) == 4 @@ -92,7 +92,12 @@ def test_brew_test_fdr_error(psms, svm): # @pytest.mark.skip(reason="Not currently working, at least on MacOS.") def test_brew_multiprocess(psms, svm): """Test that multiprocessing doesn't yield an error""" - mokapot.brew(psms, svm, test_fdr=0.05, max_workers=2) + _, models = mokapot.brew(psms, svm, test_fdr=0.05, max_workers=2) + + # The models should not be the same: + assert_not_close(models[0].estimator.coef_, models[1].estimator.coef_) + assert_not_close(models[1].estimator.coef_, models[2].estimator.coef_) + assert_not_close(models[2].estimator.coef_, models[0].estimator.coef_) def test_brew_trained_models(psms, svm): @@ -131,3 +136,8 @@ def test_brew_using_non_trained_models_error(psms, svm): "One or more of the provided models was not previously trained" in str(err) ) + + +def assert_not_close(x, y): + """Assert that two arrays are not equal""" + np.testing.assert_raises(AssertionError, np.testing.assert_allclose, x, y) diff --git a/tests/unit_tests/test_confidence.py b/tests/unit_tests/test_confidence.py index 34994f0..06f2d01 100644 --- a/tests/unit_tests/test_confidence.py +++ b/tests/unit_tests/test_confidence.py @@ -28,12 +28,12 @@ def test_one_group(psm_df_1000): ) np.random.seed(42) - grouped = psms.assign_confidence() + grouped = psms.assign_confidence(eval_fdr=0.05) scores1 = grouped.group_confidence_estimates[0].psms["mokapot score"] np.random.seed(42) psms._group_column = None - ungrouped = psms.assign_confidence() + ungrouped = psms.assign_confidence(eval_fdr=0.05) scores2 = 
ungrouped.psms["mokapot score"] pd.testing.assert_series_equal(scores1, scores2) @@ -59,7 +59,7 @@ def test_pickle(psm_df_1000, tmp_path): copy_data=True, ) - results = psms.assign_confidence() + results = psms.assign_confidence(eval_fdr=0.05) pkl_file = tmp_path / "results.pkl" with pkl_file.open("wb+") as pkl_dat: pickle.dump(results, pkl_dat) diff --git a/tests/unit_tests/test_writer_flashlfq.py b/tests/unit_tests/test_writer_flashlfq.py index 9aba9d7..8b468a1 100644 --- a/tests/unit_tests/test_writer_flashlfq.py +++ b/tests/unit_tests/test_writer_flashlfq.py @@ -8,7 +8,7 @@ import pandas as pd def test_sanity(psms, tmp_path): """Run simple sanity checks""" - conf = psms.assign_confidence() + conf = psms.assign_confidence(eval_fdr=0.05) test1 = conf.to_flashlfq(tmp_path / "test1.txt") mokapot.to_flashlfq(conf, tmp_path / "test2.txt") test3 = mokapot.to_flashlfq([conf, conf], tmp_path / "test3.txt") diff --git a/tests/unit_tests/test_writer_txt.py b/tests/unit_tests/test_writer_txt.py index fea7f19..326cbae 100644 --- a/tests/unit_tests/test_writer_txt.py +++ b/tests/unit_tests/test_writer_txt.py @@ -8,7 +8,7 @@ import pandas as pd def test_sanity(psms, tmp_path): """Run simple sanity checks""" - conf = psms.assign_confidence() + conf = psms.assign_confidence(eval_fdr=0.05) test1 = conf.to_txt(dest_dir=tmp_path, file_root="test1") mokapot.to_txt(conf, dest_dir=tmp_path, file_root="test2") test3 = mokapot.to_txt([conf, conf], dest_dir=tmp_path, file_root="test3")
max_workers issue. The cross validation might not work as expected when using multiple threads. ``` (mokapot_test) mokapot -w 3 feature.pin [INFO] [INFO] === Analyzing Fold 1 === [INFO] === Analyzing Fold 2 === [INFO] === Analyzing Fold 3 === [INFO] Finding initial direction... [INFO] Finding initial direction... [INFO] Finding initial direction... [INFO] - Selected feature score with 21657 PSMs at q<=0.01. [INFO] - Selected feature score with 21657 PSMs at q<=0.01. [INFO] - Selected feature score with 21657 PSMs at q<=0.01. ``` If I set **-w** to 3, every fold passes the same number of PSMs at the q-value 0.01 cutoff, as shown above. When I set -w to 1, the counts differ between folds. I also tried printing out the training data for each iteration; judging by the first several rows, the folds appear to receive identical data.
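The report above is consistent with a common pitfall in fork-based parallelism: every worker inherits the parent's global random state (or is handed the same pre-built training split), so each "random" fold ends up identical. The sketch below only illustrates that generic pitfall; it is not mokapot's actual code path, and the worker function and sizes are made up.

```python
import multiprocessing as mp

import numpy as np


def report(fold):
    # A forked child inherits the parent's global NumPy RNG state, so without
    # per-fold reseeding every child draws the same "random" row indices.
    print(fold, np.random.choice(1_000_000, size=3, replace=False))


if __name__ == "__main__":
    np.random.seed(42)
    ctx = mp.get_context("fork")  # the fork start method is POSIX-only
    workers = [ctx.Process(target=report, args=(fold,)) for fold in range(3)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # All three folds print the same three indices.
```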
0.0
2bc16136b94cddddec3decb222f89b796c18bdbb
[ "tests/unit_tests/test_brew.py::test_brew_multiprocess" ]
[ "tests/unit_tests/test_brew.py::test_brew_simple", "tests/unit_tests/test_brew.py::test_brew_random_forest", "tests/unit_tests/test_brew.py::test_brew_joint", "tests/unit_tests/test_brew.py::test_brew_folds", "tests/unit_tests/test_brew.py::test_brew_seed", "tests/unit_tests/test_brew.py::test_brew_test_fdr_error", "tests/unit_tests/test_brew.py::test_brew_trained_models", "tests/unit_tests/test_brew.py::test_brew_using_few_models_error", "tests/unit_tests/test_brew.py::test_brew_using_non_trained_models_error", "tests/unit_tests/test_confidence.py::test_one_group", "tests/unit_tests/test_confidence.py::test_pickle", "tests/unit_tests/test_writer_flashlfq.py::test_sanity", "tests/unit_tests/test_writer_flashlfq.py::test_basic", "tests/unit_tests/test_writer_flashlfq.py::test_with_missing", "tests/unit_tests/test_writer_flashlfq.py::test_no_proteins", "tests/unit_tests/test_writer_flashlfq.py::test_fasta_proteins", "tests/unit_tests/test_writer_txt.py::test_sanity", "tests/unit_tests/test_writer_txt.py::test_columns" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-09-11 16:56:06+00:00
apache-2.0
6,242
wfondrie__mokapot-19
diff --git a/CHANGELOG.md b/CHANGELOG.md index 363da49..4bea464 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog for mokapot +## [Unreleased] +### Fixed +- Parsing Percolator tab-delimited files with a "DefaultDirection" line. +- `Label` column is now converted to boolean during PIN file parsing. + Previously, problems occurred if the `Label` column was of dtype `object`. + ## [0.6.0] - 2021-03-03 ### Added - Support for parsing PSMs from PepXML input files. diff --git a/mokapot/parsers/pin.py b/mokapot/parsers/pin.py index 65e8e30..69567cd 100644 --- a/mokapot/parsers/pin.py +++ b/mokapot/parsers/pin.py @@ -87,14 +87,17 @@ def read_pin(pin_files, group_column=None, to_df=False, copy_data=False): raise ValueError(f"More than one '{name}' column found.") if not all([specid, peptides, proteins, labels, spectra]): + print([specid, peptides, proteins, labels, spectra]) raise ValueError( "This PIN format is incompatible with mokapot. Please" " verify that the required columns are present." ) # Convert labels to the correct format. + print(pin_df[labels[0]]) + pin_df[labels[0]] = pin_df[labels[0]].astype(int) if any(pin_df[labels[0]] == -1): - pin_df[labels[0]] = (pin_df[labels[0]] + 1) / 2 + pin_df[labels[0]] = ((pin_df[labels[0]] + 1) / 2).astype(bool) if to_df: return pin_df @@ -138,8 +141,14 @@ def read_percolator(perc_file): with fopen(perc_file) as perc: cols = perc.readline().rstrip().split("\t") + dir_line = perc.readline().rstrip().split("\t")[0] + if dir_line.lower() != "defaultdirection": + perc.seek(0) + _ = perc.readline() + psms = pd.concat((c for c in _parse_in_chunks(perc, cols)), copy=False) + print(psms.head()) return psms
wfondrie/mokapot
94d6b9eae7583f467349ff1bfa421a5ebe24fbd3
diff --git a/tests/unit_tests/test_parser_pin.py b/tests/unit_tests/test_parser_pin.py index e69de29..bae0fd3 100644 --- a/tests/unit_tests/test_parser_pin.py +++ b/tests/unit_tests/test_parser_pin.py @@ -0,0 +1,37 @@ +"""Test that parsing Percolator input files works correctly""" +import pytest +import mokapot +import pandas as pd + + [email protected] +def std_pin(tmp_path): + """Create a standard pin file""" + out_file = tmp_path / "std_pin" + with open(str(out_file), "w+") as pin: + dat = ( + "sPeCid\tLaBel\tpepTide\tsCore\tscanNR\tpRoteins\n" + "DefaultDirection\t-\t-\t-\t1\t-\t-\n" + "a\t1\tABC\t5\t2\tprotein1\tprotein2\n" + "b\t-1\tCBA\t10\t3\tdecoy_protein1\tdecoy_protein2" + ) + pin.write(dat) + + return out_file + + +def test_pin_parsing(std_pin): + """Test pin parsing""" + df = mokapot.read_pin(std_pin, to_df=True) + assert df["LaBel"].dtype == "bool" + assert len(df) == 2 + assert len(df[df["LaBel"]]) == 1 + assert len(df[df["LaBel"]]) == 1 + + dat = mokapot.read_pin(std_pin) + pd.testing.assert_frame_equal(df.loc[:, ("sCore",)], dat.features) + + +def test_pin_wo_dir(): + """Test a PIN file without a DefaultDirection line""" + dat = mokapot.read_pin("data/scope2_FP97AA.pin")
ValueError: No decoy PSMs were detected. I have loaded a file in the required format (these are the first few lines and the names of the columns). ``` SpecId | Label | ScanNr | Peptide | Proteins 0 | 1 | 38422 | R.AEGSDVANAVLDGADC[Common Fixed:Carbamidomethy... | P14618 1 | 1 | 41542 | R.SNYLLNTTIAGVEEADVVLLVGTNPR.F | P28331 ``` I am getting an error saying that there are no decoy PSMs, however, I know there are decoys. My label column has -1's in it. It loads the file in, but is not getting any further than that. I have looked through the documentation and can't seem to see what I could be missing. Any suggestions would be greatly appreciated. Thank you! Here is the error that I am getting. ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-4-bc18f4fefa97> in <module> ----> 1 psms = mokapot.read_pin(file) 2 results, models = mokapot.brew(psms) 3 results.to_txt() ~/anaconda3/lib/python3.8/site-packages/mokapot/parsers.py in read_pin(pin_files, group_column, to_df, copy_data) 103 return pin_df 104 --> 105 return LinearPsmDataset( 106 psms=pin_df, 107 target_column=labels[0], ~/anaconda3/lib/python3.8/site-packages/mokapot/dataset.py in __init__(self, psms, target_column, spectrum_columns, peptide_column, protein_column, group_column, feature_columns, copy_data) 427 raise ValueError("No target PSMs were detected.") 428 if not num_decoys: --> 429 raise ValueError("No decoy PSMs were detected.") 430 if not self.data.shape[0]: 431 raise ValueError("No PSMs were detected.") ValueError: No decoy PSMs were detected. ```
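The fix above casts the `Label` column to `int` before testing for `-1`. Whether this particular file was parsed with an object (string) dtype is an assumption, but the sketch below shows how that failure mode produces exactly this symptom: comparing string labels against the integer `-1` matches nothing, so no decoys are found.

```python
import pandas as pd

labels = pd.Series(["1", "-1", "1", "-1"])       # object dtype, as a quirky PIN file might parse
print((labels == -1).any())                      # False -> "No decoy PSMs were detected."

labels = labels.astype(int)                      # the cast added in the fix above
print((labels == -1).any())                      # True
print(((labels + 1) / 2).astype(bool).tolist())  # [True, False, True, False]
```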
0.0
94d6b9eae7583f467349ff1bfa421a5ebe24fbd3
[ "tests/unit_tests/test_parser_pin.py::test_pin_parsing" ]
[ "tests/unit_tests/test_parser_pin.py::test_pin_wo_dir" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-03-11 21:23:37+00:00
apache-2.0
6,243
wfondrie__mokapot-65
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2030c9a..588e408 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog for mokapot +## [0.8.3] - 2022-07-20 +### Fixed +- Fixed the reported mokapot score when group FDR is used. + ## [0.8.2] - 2022-07-18 ### Added - `mokapot.Model()` objects now recored the CV fold that they were fit on. diff --git a/mokapot/confidence.py b/mokapot/confidence.py index c8935cb..e72cecd 100644 --- a/mokapot/confidence.py +++ b/mokapot/confidence.py @@ -63,9 +63,10 @@ class GroupedConfidence: group_psms = copy.copy(psms) self.group_column = group_psms._group_column group_psms._group_column = None - scores = scores * (desc * 2 - 1) - # Do TDC + # Do TDC to eliminate multiples PSMs for a spectrum that may occur + # in different groups. + keep = "last" if desc else "first" scores = ( pd.Series(scores, index=psms._data.index) .sample(frac=1) @@ -74,7 +75,7 @@ class GroupedConfidence: idx = ( psms.data.loc[scores.index, :] - .drop_duplicates(psms._spectrum_columns, keep="last") + .drop_duplicates(psms._spectrum_columns, keep=keep) .index ) @@ -84,9 +85,9 @@ class GroupedConfidence: group_psms._data = None tdc_winners = group_df.index.intersection(idx) group_psms._data = group_df.loc[tdc_winners, :] - group_scores = scores.loc[group_psms._data.index].values + 1 + group_scores = scores.loc[group_psms._data.index].values res = group_psms.assign_confidence( - group_scores * (2 * desc - 1), desc=desc, eval_fdr=eval_fdr + group_scores, desc=desc, eval_fdr=eval_fdr ) self._group_confidence_estimates[group] = res
wfondrie/mokapot
21680cc5b7136359c033bb0c7fc5d0f7b002c931
diff --git a/tests/unit_tests/test_confidence.py b/tests/unit_tests/test_confidence.py index e69de29..0be3fcd 100644 --- a/tests/unit_tests/test_confidence.py +++ b/tests/unit_tests/test_confidence.py @@ -0,0 +1,38 @@ +"""Test that Confidence classes are working correctly""" +import pytest +import numpy as np +import pandas as pd +from mokapot import LinearPsmDataset + + +def test_one_group(psm_df_1000): + """Test that one group is equivalent to no group.""" + psm_data, _ = psm_df_1000 + psm_data["group"] = 0 + + psms = LinearPsmDataset( + psms=psm_data, + target_column="target", + spectrum_columns="spectrum", + peptide_column="peptide", + feature_columns="score", + filename_column="filename", + scan_column="spectrum", + calcmass_column="calcmass", + expmass_column="expmass", + rt_column="ret_time", + charge_column="charge", + group_column="group", + copy_data=True, + ) + + np.random.seed(42) + grouped = psms.assign_confidence() + scores1 = grouped.group_confidence_estimates[0].psms["mokapot score"] + + np.random.seed(42) + psms._group_column = None + ungrouped = psms.assign_confidence() + scores2 = ungrouped.psms["mokapot score"] + + pd.testing.assert_series_equal(scores1, scores2)
[BUG] Different scores for LinearConfidence and GroupConfidence `LinearConfidence` and `GroupedConfidence` give different scores, even when for `GroupedConfidence` all PSMs are part of a single group. I'm using a random forest, which should give scores between 0 and 1, but for `GroupedConfidence` the scores seem to range between 1 and 2 instead. When using `LinearConfidence`: ``` import mokapot from sklearn.ensemble import RandomForestClassifier psms = mokapot.read_pin("phospho_rep1.pin") moka_conf, _ = mokapot.brew(psms, mokapot.Model(RandomForestClassifier(random_state=1))) moka_conf.psms["mokapot score"].describe() ``` Output: ``` count 42330.000000 mean 0.686944 std 0.435288 min 0.000000 25% 0.060000 50% 1.000000 75% 1.000000 max 1.000000 ``` For simplicity, I use the constant `Charge1` column in the example PIN file as group for `GroupedConfidence`, i.e. all PSMs are part of the same group (`Charge1 == 0`): ``` import mokapot from sklearn.ensemble import RandomForestClassifier psms_grouped = mokapot.read_pin("phospho_rep1.pin", group_column="Charge1") moka_conf_grouped, _ = mokapot.brew(psms_grouped, mokapot.Model(RandomForestClassifier(random_state=1))) moka_conf_grouped.group_confidence_estimates[0].psms["mokapot score"].describe() ``` Output: ``` count 42330.000000 mean 1.687226 std 0.435448 min 1.000000 25% 1.060000 50% 2.000000 75% 2.000000 max 2.000000 ``` Note how the scores seem to be almost identical except for being 1 more. I tried to figure out in the code what was happening, but didn't immediately find the problem. This is with mokapot version 0.8.2.
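Tracing the pre-fix arithmetic from the diff above makes the off-by-one visible: with `desc=True` both sign flips are no-ops, so the stray `+ 1` survives into the reported scores. This is only a NumPy sketch of that arithmetic, not a call into mokapot.

```python
import numpy as np

desc = True
scores = np.array([0.06, 0.9, 1.0])        # e.g. random-forest probabilities

flipped = scores * (desc * 2 - 1)          # no-op when desc is True
group_scores = flipped + 1                 # the stray "+ 1"
reported = group_scores * (2 * desc - 1)   # no-op again

print(reported)                            # [1.06 1.9  2.  ] -- exactly one higher than the input
```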
0.0
21680cc5b7136359c033bb0c7fc5d0f7b002c931
[ "tests/unit_tests/test_confidence.py::test_one_group" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-20 16:47:04+00:00
apache-2.0
6,244
whot__uji-17
diff --git a/examples/example.yaml b/examples/example.yaml index 666bcf1..9b144d6 100644 --- a/examples/example.yaml +++ b/examples/example.yaml @@ -36,7 +36,7 @@ # description: a longer description for human consumption # tags: a dictionary of key: value that can be used to filter on # value must not be a list/dict -# +# # The test type is a description of a test that may produce log files. # Allowed subkeys: # extends: inherit all from the referenced section @@ -47,6 +47,8 @@ # The value must be a list, even if it's just one entry # Where the filter tag is missing, this test is assumed # to be run only once. +# If a value starts with ! it's a negative match, i.e. +# the tag must NOT be present in the actor. # tests: a list of instruction strings describing the tests to be # performed. # files: a list of files to collect @@ -80,7 +82,7 @@ file: # # Since we support 'include:' statements, you could have files with all the # hw you have defined in mice.yaml, keyboards.yaml, etc. - + t450_keyboard: type: actor name: Lenovo T450s - AT Translated Keyboard @@ -178,6 +180,15 @@ test_usb_hid: tests: - verify hid report descriptor parses with `hid-parse` +# A test to run only on non-USB keyboards +test_nonusb: + type: test + filter: + device: [keyboard] + bus: ["!USB"] # Note: YAML requires quotes + tests: + - "bus type for this keyboard is XXX" + # This is a test without a filter, so it will show up in the "Generic" # section but not for any specific actor. Good for things you need to # collect only once. diff --git a/uji.py b/uji.py index e5d0b6f..59bf2e4 100755 --- a/uji.py +++ b/uji.py @@ -736,8 +736,19 @@ class UjiNew(object): for key, values in test.filters.items(): if key not in actor.tags: break - if ('__any__' not in values and - actor.tags[key] not in values): + + tag = actor.tags[key] + + excluded = [v[1:] for v in values if v[0] == '!'] + if tag in excluded: + break + + required = [v for v in values if v[0] != '!'] + if not required and excluded: + required = ['__any__'] + + if ('__any__' not in required and + actor.tags[key] not in required): break else: dup = deepcopy(test)
whot/uji
e7d0f65722caeeff5f5ab27d8aea0234a266c693
diff --git a/tests/data/basic-tree.yaml b/tests/data/basic-tree.yaml index 6e0e83a..61f3734 100644 --- a/tests/data/basic-tree.yaml +++ b/tests/data/basic-tree.yaml @@ -13,16 +13,16 @@ actor2: # generic test test1: type: test - test: - - testcase0 + tests: + - testcase1 (generic) logs: - files: [file1] + files: [file01-generic] # generic test test2: type: test logs: - files: [file2] + files: [file02-generic] # all actors but not generic test3: @@ -30,22 +30,38 @@ test3: filter: actor: [__any__] tests: - - testcase1 + - testcase3 (all actors) test4: type: test filter: actor: [one] tests: - - testcase2 + - testcase4 (actor one only) logs: - files: [file3] + files: [file04-actor-one] test5: type: test filter: actor: [two] tests: - - testcase3 + - testcase5 (actor two only) + - testcase5.1 (actor two only) + - testcase5.2 (actor two only) logs: - files: [file4] + files: [file05-actor-two] + +test6: + type: test + filter: + actor: ["!two"] + tests: + - testcase6 (actor one only) + +test7: + type: test + filter: + actor: ["!one", "two"] + tests: + - testcase7 (actor two only) diff --git a/tests/test_uji.py b/tests/test_uji.py index d5381ad..c49040a 100644 --- a/tests/test_uji.py +++ b/tests/test_uji.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 +from typing import Optional + from click.testing import CliRunner import pytest import os @@ -16,6 +18,22 @@ def datadir(): return Path(os.path.realpath(__file__)).parent / 'data' +def find_in_section(markdown: str, section: str, string: str) -> Optional[str]: + prev_line = None + in_section = False + for line in markdown.split('\n'): + if prev_line is not None and prev_line == section and line == '-' * len(section): + in_section = True + elif in_section and line == '': + in_section = False + elif in_section: + if string in line: + return line + prev_line = line + + return None + + def test_uji_example(datadir): args = ['new', os.fspath(Path(datadir) / 'example.yaml')] runner = CliRunner() @@ -49,12 +67,39 @@ def test_uji_tree(datadir): assert 'actor2\n------\n' in markdown assert 'Generic\n-------\n' in markdown - # FIXME: check for the tests to be distributed across the actors + # check for the tests to be distributed across the actors # correctly + assert find_in_section(markdown, 'Generic', 'testcase1') + assert find_in_section(markdown, 'Generic', 'file01') + assert find_in_section(markdown, 'Generic', 'file02') + + assert find_in_section(markdown, 'actor1', 'testcase3') + assert find_in_section(markdown, 'actor2', 'testcase3') + + assert find_in_section(markdown, 'actor1', 'testcase4') + assert find_in_section(markdown, 'actor1', 'file04') + assert not find_in_section(markdown, 'actor2', 'testcase4') + assert not find_in_section(markdown, 'actor2', 'file04') + + assert not find_in_section(markdown, 'actor1', 'testcase5') + assert not find_in_section(markdown, 'actor1', 'testcase5.1') + assert not find_in_section(markdown, 'actor1', 'testcase5.2') + assert not find_in_section(markdown, 'actor1', 'file05') + + assert find_in_section(markdown, 'actor2', 'testcase5') + assert find_in_section(markdown, 'actor2', 'testcase5.1') + assert find_in_section(markdown, 'actor2', 'testcase5.2') + assert find_in_section(markdown, 'actor2', 'file05') + + assert find_in_section(markdown, 'actor1', 'testcase6'), markdown + assert not find_in_section(markdown, 'actor2', 'testcase6'), markdown + + assert not find_in_section(markdown, 'actor1', 'testcase7'), markdown + assert find_in_section(markdown, 'actor2', 'testcase7'), markdown # Check for the 'emtpy' files to be 
created - assert (Path('testdir') / 'generic' / 'test1' / 'file1').exists() - assert (Path('testdir') / 'generic' / 'test2' / 'file2').exists() - assert (Path('testdir') / 'actor1' / 'test4' / 'file3').exists() - assert (Path('testdir') / 'actor2' / 'test5' / 'file4').exists() + assert (Path('testdir') / 'generic' / 'test1' / 'file01-generic').exists() + assert (Path('testdir') / 'generic' / 'test2' / 'file02-generic').exists() + assert (Path('testdir') / 'actor1' / 'test4' / 'file04-actor-one').exists() + assert (Path('testdir') / 'actor2' / 'test5' / 'file05-actor-two').exists()
Allow for specifying a "not" filter. Found this while using uji for the recent xserver CVEs: I needed a way to specify a test for actors that *don't* have a particular tag, e.g. `not rhel`. Probably simple enough to add with `!` support.
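A standalone paraphrase of the matching rule the patch above introduces, so the `!` semantics can be checked in isolation; the function name is illustrative, while the `__any__` sentinel and the exclusion logic follow the patch.

```python
def tag_matches(actor_tag, values):
    """Paraphrase of the filter rule from the patch above, for a single tag."""
    excluded = [v[1:] for v in values if v.startswith("!")]
    if actor_tag in excluded:
        return False

    required = [v for v in values if not v.startswith("!")]
    if not required and excluded:
        required = ["__any__"]  # only negatives given: anything not excluded passes

    return "__any__" in required or actor_tag in required


print(tag_matches("PS/2", ["!USB"]))        # True  -- a non-USB keyboard is kept
print(tag_matches("USB", ["!USB"]))         # False
print(tag_matches("two", ["!one", "two"]))  # True
```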
0.0
e7d0f65722caeeff5f5ab27d8aea0234a266c693
[ "tests/test_uji.py::test_uji_tree" ]
[ "tests/test_uji.py::test_uji_example" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-01-13 07:21:46+00:00
mit
6,245
willnx__iiqtools-27
diff --git a/iiqtools/iiqtools_tar_to_zip.py b/iiqtools/iiqtools_tar_to_zip.py index 9356238..2058091 100644 --- a/iiqtools/iiqtools_tar_to_zip.py +++ b/iiqtools/iiqtools_tar_to_zip.py @@ -10,7 +10,6 @@ the same, it's just a different compression format in InsightIQ 4.1. import os import re import zlib -import struct import tarfile import zipfile import argparse @@ -25,7 +24,7 @@ class BufferedZipFile(zipfile.ZipFile): stream the contents into a new zip file. """ - def writebuffered(self, filename, file_handle): + def writebuffered(self, filename, file_handle, file_size): """Stream write data to the zip archive :param filename: **Required** The name to give the data once added to the zip file @@ -33,35 +32,39 @@ class BufferedZipFile(zipfile.ZipFile): :param file_handle: **Required** The file-like object to read :type file_handle: Anything that supports the `read <https://docs.python.org/2/tutorial/inputoutput.html#methods-of-file-objects>`_ method + + :param file_size: **Required** The size of the file in bytes + :type file_size: Integer """ zinfo = zipfile.ZipInfo(filename=filename) - - zinfo.file_size = file_size = 0 + zinfo.file_size = file_size zinfo.flag_bits = 0x00 - zinfo.header_offset = self.fp.tell() + zinfo.header_offset = self.fp.tell() # Start of header bytes self._writecheck(zinfo) self._didModify = True - + # Must overwrite CRC and sizes with correct data later zinfo.CRC = CRC = 0 zinfo.compress_size = compress_size = 0 - self.fp.write(zinfo.FileHeader()) + # Compressed size can be larger than uncompressed size + zip64 = self._allowZip64 and \ + zinfo.file_size * 1.05 > zipfile.ZIP64_LIMIT + self.fp.write(zinfo.FileHeader(zip64)) if zinfo.compress_type == zipfile.ZIP_DEFLATED: cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15) else: cmpr = None + fsize = 0 while True: buf = file_handle.read(1024 * 8) if not buf: break - - file_size = file_size + len(buf) + fsize = fsize + len(buf) CRC = binascii.crc32(buf, CRC) & 0xffffffff if cmpr: buf = cmpr.compress(buf) compress_size = compress_size + len(buf) - self.fp.write(buf) if cmpr: @@ -70,14 +73,19 @@ class BufferedZipFile(zipfile.ZipFile): self.fp.write(buf) zinfo.compress_size = compress_size else: - zinfo.compress_size = file_size - + zinfo.compress_size = fsize zinfo.CRC = CRC - zinfo.file_size = file_size - - position = self.fp.tell() - self.fp.seek(zinfo.header_offset + 14, 0) - self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size, zinfo.file_size)) + zinfo.file_size = fsize + if not zip64 and self._allowZip64: + if fsize > zipfile.ZIP64_LIMIT: + raise RuntimeError('File size has increased during compressing') + if compress_size > zipfile.ZIP64_LIMIT: + raise RuntimeError('Compressed size larger than uncompressed size') + # Seek backwards and write file header (which will now include + # correct CRC and file sizes) + position = self.fp.tell() # Preserve current position in file + self.fp.seek(zinfo.header_offset, 0) + self.fp.write(zinfo.FileHeader(zip64)) self.fp.seek(position, 0) self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo @@ -189,7 +197,7 @@ def main(the_cli_args): log.info('Converting %s', the_file.name) try: filename = joinname(zip_export_dir, the_file.name) - zip_export.writebuffered(filename=filename, file_handle=file_handle) + zip_export.writebuffered(filename=filename, file_handle=file_handle, file_size=the_file.size) except (IOError, OSError) as doh: log.error(doh) log.error('Deleting zip file') diff --git a/setup.py b/setup.py index 504dd4b..2968bc9 100644 
--- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ setup(name="iiqtools", author="Nicholas Willhite,", author_email="[email protected]", url='https://github.com/willnx/iiqtools', - version='2.1.1', + version='2.1.2', packages=find_packages(), include_package_data=True, scripts=['scripts/iiqtools_gather_info',
willnx/iiqtools
9633167b8232b4776d2ad4823fb6652507c08e8e
diff --git a/iiqtools_tests/test_iiqtools_tar_to_zip.py b/iiqtools_tests/test_iiqtools_tar_to_zip.py index 797cadf..ec64b49 100644 --- a/iiqtools_tests/test_iiqtools_tar_to_zip.py +++ b/iiqtools_tests/test_iiqtools_tar_to_zip.py @@ -26,11 +26,10 @@ class TestBufferedZipFile(unittest.TestCase): """Runs after every tests case""" os.remove(self.filepath) - @patch.object(iiqtools_tar_to_zip, 'struct') @patch.object(iiqtools_tar_to_zip, 'binascii') - def test_basic(self, fake_binascii, fake_struct): + def test_basic(self, fake_binascii): """BufferedZipFile - writebuffered is callable""" - self.zipfile.writebuffered(filename='foo', file_handle=self.fake_file) + self.zipfile.writebuffered(filename='foo', file_handle=self.fake_file, file_size=9000) class TestCheckTar(unittest.TestCase):
struct error export too large. I am getting errors trying to convert to a zip. Looks like the export was too large. ``` [administrator@vcmcinsightiq tmp]$ /opt/rh/python27/root/usr/bin/iiqtools_tar_to_zip -s insightiq_export_1522783418.tar.gz -o /mnt/10.193.6.113/ifs/data/IIQ2/ 2018-04-04 20:13:37,625 - INFO - Converting insightiq_export_1522783418.tar.gz to zip format 2018-04-04 20:34:32,536 - INFO - InsightIQ datastore tar export contained 2 files 2018-04-04 20:34:32,536 - INFO - Converting insightiq_export_1522783418/vcmc-12kisilon_00151b00007a2716ce4a1504000067458b6b_config.json 2018-04-04 20:34:32,560 - INFO - Converting insightiq_export_1522783418/vcmc-12kisilon_00151b00007a2716ce4a1504000067458b6b.dump Traceback (most recent call last): File "/opt/rh/python27/root/usr/bin/iiqtools_tar_to_zip", line 11, in <module> sys.exit(main(sys.argv[1:])) File "/opt/rh/python27/root/usr/lib/python2.7/site-packages/iiqtools/iiqtools_tar_to_zip.py", line 192, in main zip_export.writebuffered(filename=filename, file_handle=file_handle) File "/opt/rh/python27/root/usr/lib/python2.7/site-packages/iiqtools/iiqtools_tar_to_zip.py", line 80, in writebuffered self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size, zinfo.file_size)) struct.error: 'L' format requires 0 <= number <= 4294967295 ```
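The limit in the traceback is the ceiling of an unsigned 32-bit field: the classic zip header packs the CRC and both sizes as `<L` values, so a datastore dump past 4 GiB cannot be written back that way, which is what the ZIP64 handling in the fix above addresses. A quick sketch of that boundary:

```python
import struct

limit = 2 ** 32 - 1
print(limit)                              # 4294967295, the number in the traceback

struct.pack("<LLL", 0, 0, limit)          # still fits in the classic header fields
try:
    struct.pack("<LLL", 0, 0, limit + 1)  # one past the 32-bit ceiling
except struct.error as err:
    print(err)                            # 'L' format requires 0 <= number <= 4294967295
```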
0.0
9633167b8232b4776d2ad4823fb6652507c08e8e
[ "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestBufferedZipFile::test_basic" ]
[ "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestGetTimestampFromExport::test_absolute_path", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestParseCli::test_no_args", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestParseCli::test_returns_namespace", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestParseCli::test_missing_required", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestJoinname::test_absolute_path", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestJoinname::test_relative_path", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestCheckTar::test_not_a_file", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestCheckTar::test_bad_file_name", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestCheckTar::test_valid_tar", "iiqtools_tests/test_iiqtools_tar_to_zip.py::TestCheckTar::test_not_a_tar" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-04-25 02:32:46+00:00
mit
6,246
wireservice__agate-637
diff --git a/agate/aggregations/any.py b/agate/aggregations/any.py index 70fa702..67a9651 100644 --- a/agate/aggregations/any.py +++ b/agate/aggregations/any.py @@ -32,7 +32,7 @@ class Any(Aggregation): column = table.columns[self._column_name] data = column.values() - if isinstance(column.data_type, Boolean): + if isinstance(column.data_type, Boolean) and self._test is None: return any(data) return any(self._test(d) for d in data)
wireservice/agate
0d2671358cdea94c83bd8f28b5a6718a9326b033
diff --git a/tests/test_aggregations.py b/tests/test_aggregations.py index c3c8fbb..11eefe1 100644 --- a/tests/test_aggregations.py +++ b/tests/test_aggregations.py @@ -138,6 +138,7 @@ class TestBooleanAggregation(unittest.TestCase): table = Table(rows, ['test'], [Boolean()]) Any('test').validate(table) self.assertEqual(Any('test').run(table), False) + self.assertEqual(Any('test', lambda r: not r).run(table), True) def test_all(self): rows = [
agate.All cannot test whether all data is False. If the column data type is boolean, the test gets overwritten to search for True values.
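A plain-Python sketch of the pre-fix behaviour described above: when the column is boolean, the old code short-circuits to `any(data)` and never consults the caller's test, so asking "is any value False?" on all-False data could not return True.

```python
def is_false(value):
    return not value


data = (False, False)                  # an all-False boolean column

print(any(data))                       # False -- the old shortcut's answer
print(any(is_false(d) for d in data))  # True  -- what the caller's test asks for
```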
0.0
0d2671358cdea94c83bd8f28b5a6718a9326b033
[ "tests/test_aggregations.py::TestBooleanAggregation::test_any" ]
[ "tests/test_aggregations.py::TestSimpleAggregation::test_all", "tests/test_aggregations.py::TestSimpleAggregation::test_any", "tests/test_aggregations.py::TestSimpleAggregation::test_count", "tests/test_aggregations.py::TestSimpleAggregation::test_count_column", "tests/test_aggregations.py::TestSimpleAggregation::test_count_value", "tests/test_aggregations.py::TestSimpleAggregation::test_has_nulls", "tests/test_aggregations.py::TestSimpleAggregation::test_summary", "tests/test_aggregations.py::TestBooleanAggregation::test_all", "tests/test_aggregations.py::TestDateTimeAggregation::test_max", "tests/test_aggregations.py::TestDateTimeAggregation::test_min", "tests/test_aggregations.py::TestNumberAggregation::test_deciles", "tests/test_aggregations.py::TestNumberAggregation::test_iqr", "tests/test_aggregations.py::TestNumberAggregation::test_mad", "tests/test_aggregations.py::TestNumberAggregation::test_max", "tests/test_aggregations.py::TestNumberAggregation::test_max_precision", "tests/test_aggregations.py::TestNumberAggregation::test_mean", "tests/test_aggregations.py::TestNumberAggregation::test_mean_with_nulls", "tests/test_aggregations.py::TestNumberAggregation::test_median", "tests/test_aggregations.py::TestNumberAggregation::test_min", "tests/test_aggregations.py::TestNumberAggregation::test_mode", "tests/test_aggregations.py::TestNumberAggregation::test_percentiles", "tests/test_aggregations.py::TestNumberAggregation::test_percentiles_locate", "tests/test_aggregations.py::TestNumberAggregation::test_population_stdev", "tests/test_aggregations.py::TestNumberAggregation::test_population_variance", "tests/test_aggregations.py::TestNumberAggregation::test_quartiles", "tests/test_aggregations.py::TestNumberAggregation::test_quartiles_locate", "tests/test_aggregations.py::TestNumberAggregation::test_quintiles", "tests/test_aggregations.py::TestNumberAggregation::test_stdev", "tests/test_aggregations.py::TestNumberAggregation::test_sum", "tests/test_aggregations.py::TestNumberAggregation::test_variance", "tests/test_aggregations.py::TestTextAggregation::test_max_length", "tests/test_aggregations.py::TestTextAggregation::test_max_length_invalid" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2016-10-30 16:11:15+00:00
mit
6,247
wireservice__agate-638
diff --git a/agate/aggregations/__init__.py b/agate/aggregations/__init__.py index e4f40cc..cf82a30 100644 --- a/agate/aggregations/__init__.py +++ b/agate/aggregations/__init__.py @@ -21,6 +21,7 @@ from agate.aggregations.all import All # noqa from agate.aggregations.any import Any # noqa from agate.aggregations.count import Count # noqa from agate.aggregations.deciles import Deciles # noqa +from agate.aggregations.first import First # noqa from agate.aggregations.has_nulls import HasNulls # noqa from agate.aggregations.iqr import IQR # noqa from agate.aggregations.mad import MAD # noqa diff --git a/agate/aggregations/first.py b/agate/aggregations/first.py new file mode 100644 index 0000000..37e1695 --- /dev/null +++ b/agate/aggregations/first.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +from agate.aggregations.base import Aggregation +from agate.data_types import Boolean + + +class First(Aggregation): + """ + Returns the first value that passes a test. + + If the test is omitted, the aggregation will return the first value in the column. + + If no values pass the test, the aggregation will raise an exception. + + :param column_name: + The name of the column to check. + :param test: + A function that takes a value and returns `True` or `False`. Test may be + omitted when checking :class:`.Boolean` data. + """ + def __init__(self, column_name, test=None): + self._column_name = column_name + self._test = test + + def get_aggregate_data_type(self, table): + return table.columns[self._column_name].data_type + + def validate(self, table): + column = table.columns[self._column_name] + data = column.values() + + if self._test is not None and len([d for d in data if self._test(d)]) == 0: + raise ValueError('No values pass the given test.') + + def run(self, table): + column = table.columns[self._column_name] + data = column.values() + + if self._test is None: + return data[0] + + return next((d for d in data if self._test(d)))
wireservice/agate
97cb37f673af480f74fef546ceefd3ba24aff93b
diff --git a/tests/test_aggregations.py b/tests/test_aggregations.py index 11eefe1..e0dc625 100644 --- a/tests/test_aggregations.py +++ b/tests/test_aggregations.py @@ -67,6 +67,17 @@ class TestSimpleAggregation(unittest.TestCase): self.assertEqual(All('one', lambda d: d != 5).run(self.table), True) self.assertEqual(All('one', lambda d: d == 2).run(self.table), False) + def test_first(self): + with self.assertRaises(ValueError): + First('one', lambda d: d == 5).validate(self.table) + + First('one', lambda d: d).validate(self.table) + + self.assertIsInstance(First('one').get_aggregate_data_type(self.table), Number) + self.assertEqual(First('one').run(self.table), 1) + self.assertEqual(First('one', lambda d: d == 2).run(self.table), 2) + self.assertEqual(First('one', lambda d: not d).run(self.table), None) + def test_count(self): rows = ( (1, 2, 'a'),
agate.First aggregation. I end up doing this all the time: ``` def pick_first(c): return c[0] agate.Summary('Serial_Num', agate.Text(), pick_first) ```
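With the `First` aggregation added in the patch above, the hand-rolled `pick_first` summary becomes a one-liner. A small usage sketch, assuming an agate build that includes the patch and re-exports `First` at the top level like the other aggregations; the table contents are made up.

```python
import agate

rows = [("A-100", 3), ("B-200", 5)]
table = agate.Table(rows, ["Serial_Num", "count"], [agate.Text(), agate.Number()])

print(table.aggregate(agate.First("Serial_Num")))                          # A-100
print(table.aggregate(agate.First("Serial_Num", lambda v: v == "B-200")))  # B-200
```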
0.0
97cb37f673af480f74fef546ceefd3ba24aff93b
[ "tests/test_aggregations.py::TestSimpleAggregation::test_first" ]
[ "tests/test_aggregations.py::TestSimpleAggregation::test_all", "tests/test_aggregations.py::TestSimpleAggregation::test_any", "tests/test_aggregations.py::TestSimpleAggregation::test_count", "tests/test_aggregations.py::TestSimpleAggregation::test_count_column", "tests/test_aggregations.py::TestSimpleAggregation::test_count_value", "tests/test_aggregations.py::TestSimpleAggregation::test_has_nulls", "tests/test_aggregations.py::TestSimpleAggregation::test_summary", "tests/test_aggregations.py::TestBooleanAggregation::test_all", "tests/test_aggregations.py::TestBooleanAggregation::test_any", "tests/test_aggregations.py::TestDateTimeAggregation::test_max", "tests/test_aggregations.py::TestDateTimeAggregation::test_min", "tests/test_aggregations.py::TestNumberAggregation::test_deciles", "tests/test_aggregations.py::TestNumberAggregation::test_iqr", "tests/test_aggregations.py::TestNumberAggregation::test_mad", "tests/test_aggregations.py::TestNumberAggregation::test_max", "tests/test_aggregations.py::TestNumberAggregation::test_max_precision", "tests/test_aggregations.py::TestNumberAggregation::test_mean", "tests/test_aggregations.py::TestNumberAggregation::test_mean_with_nulls", "tests/test_aggregations.py::TestNumberAggregation::test_median", "tests/test_aggregations.py::TestNumberAggregation::test_min", "tests/test_aggregations.py::TestNumberAggregation::test_mode", "tests/test_aggregations.py::TestNumberAggregation::test_percentiles", "tests/test_aggregations.py::TestNumberAggregation::test_percentiles_locate", "tests/test_aggregations.py::TestNumberAggregation::test_population_stdev", "tests/test_aggregations.py::TestNumberAggregation::test_population_variance", "tests/test_aggregations.py::TestNumberAggregation::test_quartiles", "tests/test_aggregations.py::TestNumberAggregation::test_quartiles_locate", "tests/test_aggregations.py::TestNumberAggregation::test_quintiles", "tests/test_aggregations.py::TestNumberAggregation::test_stdev", "tests/test_aggregations.py::TestNumberAggregation::test_sum", "tests/test_aggregations.py::TestNumberAggregation::test_variance", "tests/test_aggregations.py::TestTextAggregation::test_max_length", "tests/test_aggregations.py::TestTextAggregation::test_max_length_invalid" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2016-10-30 16:50:31+00:00
mit
6,248
wireservice__agate-excel-25
diff --git a/agateexcel/table_xls.py b/agateexcel/table_xls.py index 19612c6..9fb9c4a 100644 --- a/agateexcel/table_xls.py +++ b/agateexcel/table_xls.py @@ -83,6 +83,11 @@ def from_xls(cls, path, sheet=None, skip_lines=0, header=True, encoding_override for i in range(len(columns[0])): rows.append([c[i] for c in columns]) + if 'column_names' in kwargs: + if not header: + column_names = kwargs.get('column_names', None) + del kwargs['column_names'] + tables[sheet.name] = agate.Table(rows, column_names, **kwargs) if multiple: diff --git a/agateexcel/table_xlsx.py b/agateexcel/table_xlsx.py index 87619e9..37afd71 100644 --- a/agateexcel/table_xlsx.py +++ b/agateexcel/table_xlsx.py @@ -82,6 +82,11 @@ def from_xlsx(cls, path, sheet=None, skip_lines=0, header=True, read_only=True, rows.append(values) + if 'column_names' in kwargs: + if not header: + column_names = kwargs.get('column_names', None) + del kwargs['column_names'] + tables[sheet.title] = agate.Table(rows, column_names, **kwargs) f.close()
wireservice/agate-excel
bb7474e2762099af5d6053e548341c460a47a758
diff --git a/tests/test_table_xls.py b/tests/test_table_xls.py index cc7b3e8..6d02d74 100644 --- a/tests/test_table_xls.py +++ b/tests/test_table_xls.py @@ -19,6 +19,10 @@ class TestXLS(agate.AgateTestCase): 'number', 'text', 'boolean', 'date', 'datetime', ] + self.user_provided_column_names = [ + 'alt number', 'alt text', 'alt boolean', 'alt date', 'alt datetime', + ] + self.column_types = [ agate.Number(), agate.Text(), agate.Boolean(), agate.Date(), agate.DateTime(), @@ -26,6 +30,13 @@ class TestXLS(agate.AgateTestCase): self.table = agate.Table(self.rows, self.column_names, self.column_types) + def test_from_xls_with_column_names(self): + table = agate.Table.from_xls('examples/test.xls', header=False, skip_lines=1, column_names=self.user_provided_column_names ) + + self.assertColumnNames(table, self.user_provided_column_names) + self.assertColumnTypes(table, [agate.Number, agate.Text, agate.Boolean, agate.Date, agate.DateTime]) + self.assertRows(table, [r.values() for r in self.table.rows]) + def test_from_xls(self): table = agate.Table.from_xls('examples/test.xls') diff --git a/tests/test_table_xlsx.py b/tests/test_table_xlsx.py index 6dd4434..9b56b9b 100644 --- a/tests/test_table_xlsx.py +++ b/tests/test_table_xlsx.py @@ -19,6 +19,10 @@ class TestXLSX(agate.AgateTestCase): 'number', 'text', 'boolean', 'date', 'datetime', ] + self.user_provided_column_names = [ + 'number', 'text', 'boolean', 'date', 'datetime', + ] + self.column_types = [ agate.Number(), agate.Text(), agate.Boolean(), agate.Date(), agate.DateTime(), @@ -26,6 +30,13 @@ class TestXLSX(agate.AgateTestCase): self.table = agate.Table(self.rows, self.column_names, self.column_types) + def test_from_xlsx_with_column_names(self): + table = agate.Table.from_xlsx('examples/test.xlsx', header=False, skip_lines=1, column_names=self.user_provided_column_names) + + self.assertColumnNames(table, self.user_provided_column_names) + self.assertColumnTypes(table, [agate.Number, agate.Text, agate.Boolean, agate.Date, agate.DateTime]) + self.assertRows(table, [r.values() for r in self.table.rows]) + def test_from_xlsx(self): table = agate.Table.from_xlsx('examples/test.xlsx')
Passing column_names to from_xls fails with TypeError ``` Traceback (most recent call last): File "<stdin>", line 5, in <module> File "/Users/jani.mikkonen/src/customers/vr/vr/venv/lib/python3.7/site-packages/agateexcel/table_xls.py", line 86, in from_xls tables[sheet.name] = agate.Table(rows, column_names, **kwargs) TypeError: __init__() got multiple values for argument 'column_names' ``` if column_names is present in kwargs, maybe remove column_names from there and copy it to the positional parameter passed to agate.Table() in https://github.com/wireservice/agate-excel/blob/master/agateexcel/table_xlsx.py#L85 & https://github.com/wireservice/agate-excel/blob/master/agateexcel/table_xls.py#L86 ?
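The traceback boils down to passing the same parameter both positionally and via `**kwargs`, which is what the guard in the patch above avoids. A generic reproduction with a stand-in function (the names are illustrative, not agate's API):

```python
def make_table(rows, column_names=None, **kwargs):
    return rows, column_names, kwargs          # stand-in for agate.Table(...)


kwargs = {"column_names": ["alt a", "alt b"]}  # caller-supplied keyword arguments

try:
    make_table([], ["a", "b"], **kwargs)       # column_names arrives twice
except TypeError as err:
    print(err)  # make_table() got multiple values for argument 'column_names'

column_names = kwargs.pop("column_names")      # the pattern the fix uses
print(make_table([], column_names, **kwargs))
```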
0.0
bb7474e2762099af5d6053e548341c460a47a758
[ "tests/test_table_xls.py::TestXLS::test_from_xls_with_column_names" ]
[ "tests/test_table_xls.py::TestXLS::test_ambiguous_date", "tests/test_table_xls.py::TestXLS::test_empty", "tests/test_table_xls.py::TestXLS::test_file_like", "tests/test_table_xls.py::TestXLS::test_from_xls", "tests/test_table_xls.py::TestXLS::test_header", "tests/test_table_xls.py::TestXLS::test_numeric_column_name", "tests/test_table_xls.py::TestXLS::test_sheet_index", "tests/test_table_xls.py::TestXLS::test_sheet_multiple", "tests/test_table_xls.py::TestXLS::test_sheet_name", "tests/test_table_xls.py::TestXLS::test_skip_lines", "tests/test_table_xls.py::TestXLS::test_zeros", "tests/test_table_xlsx.py::TestXLSX::test_header" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-11-30 09:58:46+00:00
mit
6,249
wireservice__agate-sql-23
diff --git a/agatesql/table.py b/agatesql/table.py index c813f30..bad725d 100644 --- a/agatesql/table.py +++ b/agatesql/table.py @@ -11,7 +11,7 @@ import six import agate from sqlalchemy import Column, MetaData, Table, create_engine, dialects from sqlalchemy.engine import Connection -from sqlalchemy.types import BOOLEAN, DECIMAL, DATE, DATETIME, VARCHAR, Interval +from sqlalchemy.types import BOOLEAN, DECIMAL, DATE, TIMESTAMP, VARCHAR, Interval from sqlalchemy.dialects.oracle import INTERVAL as ORACLE_INTERVAL from sqlalchemy.dialects.postgresql import INTERVAL as POSTGRES_INTERVAL from sqlalchemy.schema import CreateTable @@ -21,7 +21,7 @@ SQL_TYPE_MAP = { agate.Boolean: BOOLEAN, agate.Number: DECIMAL, agate.Date: DATE, - agate.DateTime: DATETIME, + agate.DateTime: TIMESTAMP, agate.TimeDelta: None, # See below agate.Text: VARCHAR } @@ -167,7 +167,10 @@ def make_sql_table(table, table_name, dialect=None, db_schema=None, constraints= if isinstance(column.data_type, agate.Text): sql_type_kwargs['length'] = table.aggregate(agate.MaxLength(column_name)) - sql_column_kwargs['nullable'] = table.aggregate(agate.HasNulls(column_name)) + # Avoid errors due to NO_ZERO_DATE. + # @see http://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sqlmode_no_zero_date + if not isinstance(column.data_type, agate.DateTime): + sql_column_kwargs['nullable'] = table.aggregate(agate.HasNulls(column_name)) sql_table.append_column(make_sql_column(column_name, column, sql_type_kwargs, sql_column_kwargs))
wireservice/agate-sql
858bf8824d906a1acd160b239a13ccc17837dd2f
diff --git a/tests/test_agatesql.py b/tests/test_agatesql.py index 46fdd1e..a270222 100644 --- a/tests/test_agatesql.py +++ b/tests/test_agatesql.py @@ -66,7 +66,7 @@ class TestSQL(agate.AgateTestCase): self.assertIn('text VARCHAR(1) NOT NULL', statement) self.assertIn('boolean BOOLEAN', statement) self.assertIn('date DATE', statement) - self.assertIn('datetime DATETIME', statement) + self.assertIn('datetime TIMESTAMP', statement) def test_make_create_table_statement_no_constraints(self): statement = self.table.to_sql_create_statement('test_table', constraints=False) @@ -76,7 +76,7 @@ class TestSQL(agate.AgateTestCase): self.assertIn('text VARCHAR', statement) self.assertIn('boolean BOOLEAN', statement) self.assertIn('date DATE', statement) - self.assertIn('datetime DATETIME', statement) + self.assertIn('datetime TIMESTAMP', statement) def test_make_create_table_statement_with_schema(self): statement = self.table.to_sql_create_statement('test_table', db_schema='test_schema') @@ -86,7 +86,7 @@ class TestSQL(agate.AgateTestCase): self.assertIn('text VARCHAR(1) NOT NULL', statement) self.assertIn('boolean BOOLEAN', statement) self.assertIn('date DATE', statement) - self.assertIn('datetime DATETIME', statement) + self.assertIn('datetime TIMESTAMP', statement) def test_make_create_table_statement_with_dialects(self): for dialect in ['mysql', 'postgresql', 'sqlite']:
DateTime columns cannot be stored as DATETIME in SQLite
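The fix above maps `agate.DateTime` to SQLAlchemy's `TIMESTAMP` instead of `DATETIME`, so the generated DDL is accepted by all of the supported backends. A minimal sketch of what such a column now renders as (the table and column names are made up):

```python
from sqlalchemy import Column, MetaData, Table
from sqlalchemy.schema import CreateTable
from sqlalchemy.types import TIMESTAMP

table = Table("test_table", MetaData(), Column("datetime", TIMESTAMP()))
print(CreateTable(table))  # renders roughly: CREATE TABLE test_table (datetime TIMESTAMP)
```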
0.0
858bf8824d906a1acd160b239a13ccc17837dd2f
[ "tests/test_agatesql.py::TestSQL::test_make_create_table_statement_no_constraints", "tests/test_agatesql.py::TestSQL::test_make_create_table_statement_with_schema", "tests/test_agatesql.py::TestSQL::test_to_sql_create_statement" ]
[ "tests/test_agatesql.py::TestSQL::test_make_create_table_statement_with_dialects" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2017-01-04 21:20:05+00:00
mit
6,250
wireservice__csvkit-1241
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index eebf5bb..0cf6ace 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,10 @@ +2.0.0 - Unreleased +------------------ + +**BACKWARDS-INCOMPATIBLE CHANGES** + +* :doc:`/scripts/csvclean` now writes its output to standard output and its errors to standard error, instead of to ``basename_out.csv`` and ``basename_err.csv`` files. Consequently, it no longer supports a :code:`--dry-run` flag to output summary information like ``No errors.``, ``42 errors logged to basename_err.csv`` or ``42 rows were joined/reduced to 24 rows after eliminating expected internal line breaks.``. + 1.5.0 - March 28, 2024 ---------------------- diff --git a/csvkit/cleanup.py b/csvkit/cleanup.py index aa8359c..818d268 100644 --- a/csvkit/cleanup.py +++ b/csvkit/cleanup.py @@ -5,8 +5,10 @@ from csvkit.exceptions import CSVTestException, LengthMismatchError def join_rows(rows, joiner=' '): """ - Given a series of rows, return them as a single row where the inner edge cells are merged. By default joins with a - single space character, but you can specify new-line, empty string, or anything else with the 'joiner' kwarg. + Given a series of rows, return them as a single row where the inner edge cells are merged. + + :param joiner: + The separator between cells, a single space by default. """ rows = list(rows) fixed_row = rows[0][:] @@ -33,8 +35,6 @@ class RowChecker: except StopIteration: self.column_names = [] self.errors = [] - self.rows_joined = 0 - self.joins = 0 def checked_rows(self): """ @@ -69,9 +69,6 @@ class RowChecker: break if len(fixed_row) == length: - self.rows_joined += len(joinable_row_errors) - self.joins += 1 - yield fixed_row for fixed in joinable_row_errors: diff --git a/csvkit/cli.py b/csvkit/cli.py index f8c3ba4..6dabc6b 100644 --- a/csvkit/cli.py +++ b/csvkit/cli.py @@ -68,19 +68,26 @@ class CSVKitUtility: epilog = '' override_flags = '' - def __init__(self, args=None, output_file=None): + def __init__(self, args=None, output_file=None, error_file=None): """ Perform argument processing and other setup for a CSVKitUtility. """ self._init_common_parser() self.add_arguments() self.args = self.argparser.parse_args(args) + # Output file is only set during testing. if output_file is None: self.output_file = sys.stdout else: self.output_file = output_file + # Error file is only set during testing. + if error_file is None: + self.error_file = sys.stderr + else: + self.error_file = error_file + self.reader_kwargs = self._extract_csv_reader_kwargs() self.writer_kwargs = self._extract_csv_writer_kwargs() diff --git a/csvkit/utilities/csvclean.py b/csvkit/utilities/csvclean.py index 2dc0825..2b92bfd 100644 --- a/csvkit/utilities/csvclean.py +++ b/csvkit/utilities/csvclean.py @@ -1,7 +1,6 @@ #!/usr/bin/env python import sys -from os.path import splitext import agate @@ -14,9 +13,7 @@ class CSVClean(CSVKitUtility): override_flags = ['L', 'blanks', 'date-format', 'datetime-format'] def add_arguments(self): - self.argparser.add_argument( - '-n', '--dry-run', dest='dryrun', action='store_true', - help='Do not create output files. 
Information about what would have been done will be printed to STDERR.') + pass def main(self): if self.additional_input_expected(): @@ -24,65 +21,20 @@ class CSVClean(CSVKitUtility): reader = agate.csv.reader(self.skip_lines(), **self.reader_kwargs) - if self.args.dryrun: - checker = RowChecker(reader) + checker = RowChecker(reader) - for _row in checker.checked_rows(): - pass + output_writer = agate.csv.writer(self.output_file, **self.writer_kwargs) + output_writer.writerow(checker.column_names) + for row in checker.checked_rows(): + output_writer.writerow(row) - if checker.errors: - for e in checker.errors: - self.output_file.write('Line %i: %s\n' % (e.line_number, e.msg)) - else: - self.output_file.write('No errors.\n') + if checker.errors: + error_writer = agate.csv.writer(self.error_file, **self.writer_kwargs) + error_writer.writerow(['line_number', 'msg'] + checker.column_names) + for error in checker.errors: + error_writer.writerow([error.line_number, error.msg] + error.row) - if checker.joins: - self.output_file.write('%i rows would have been joined/reduced to %i rows after eliminating expected ' - 'internal line breaks.\n' % (checker.rows_joined, checker.joins)) - else: - if self.input_file == sys.stdin: - base = 'stdin' # "<stdin>_out.csv" is invalid on Windows - else: - base = splitext(self.input_file.name)[0] - - with open(f'{base}_out.csv', 'w') as f: - clean_writer = agate.csv.writer(f, **self.writer_kwargs) - - checker = RowChecker(reader) - clean_writer.writerow(checker.column_names) - - for row in checker.checked_rows(): - clean_writer.writerow(row) - - if checker.errors: - error_filename = f'{base}_err.csv' - - with open(error_filename, 'w') as f: - error_writer = agate.csv.writer(f, **self.writer_kwargs) - - error_header = ['line_number', 'msg'] - error_header.extend(checker.column_names) - error_writer.writerow(error_header) - - error_count = len(checker.errors) - - for e in checker.errors: - error_writer.writerow(self._format_error_row(e)) - - self.output_file.write('%i error%s logged to %s\n' % ( - error_count, '' if error_count == 1 else 's', error_filename)) - else: - self.output_file.write('No errors.\n') - - if checker.joins: - self.output_file.write('%i rows were joined/reduced to %i rows after eliminating expected internal ' - 'line breaks.\n' % (checker.rows_joined, checker.joins)) - - def _format_error_row(self, error): - row = [error.line_number, error.msg] - row.extend(error.row) - - return row + sys.exit(1) def launch_new_instance(): diff --git a/docs/scripts/csvclean.rst b/docs/scripts/csvclean.rst index 0e3e16b..f94d6a2 100644 --- a/docs/scripts/csvclean.rst +++ b/docs/scripts/csvclean.rst @@ -18,13 +18,13 @@ Note that every csvkit tool does the following: * changes the quote character to a double-quotation mark, if the character is set with the `--quotechar` (`-q`) option * changes the character encoding to UTF-8, if the input encoding is set with the `--encoding` (`-e`) option -Outputs [basename]_out.csv and [basename]_err.csv, the former containing all valid rows and the latter containing all error rows along with line numbers and descriptions: +All valid rows are written to standard output, and all error rows along with line numbers and descriptions are written to standard error. If there are error rows, the exit code will be 1:: .. 
code-block:: none usage: csvclean [-h] [-d DELIMITER] [-t] [-q QUOTECHAR] [-u {0,1,2,3}] [-b] [-p ESCAPECHAR] [-z FIELD_SIZE_LIMIT] [-e ENCODING] [-S] [-H] - [-K SKIP_LINES] [-v] [-l] [--zero] [-V] [-n] + [-K SKIP_LINES] [-v] [-l] [--zero] [-V] [FILE] Fix common errors in a CSV file. @@ -35,8 +35,6 @@ Outputs [basename]_out.csv and [basename]_err.csv, the former containing all val optional arguments: -h, --help show this help message and exit - -n, --dry-run Do not create output files. Information about what - would have been done will be printed to STDERR. See also: :doc:`../common_arguments`. @@ -47,9 +45,13 @@ Test a file with known bad rows: .. code-block:: console - $ csvclean -n examples/bad.csv - Line 1: Expected 3 columns, found 4 columns - Line 2: Expected 3 columns, found 2 columns + $ csvclean examples/bad.csv 2> errors.csv + column_a,column_b,column_c + 0,mixed types.... uh oh,17 + $ cat errors.csv + line_number,msg,column_a,column_b,column_c + 1,"Expected 3 columns, found 4 columns",1,27,,I'm too long! + 2,"Expected 3 columns, found 2 columns",,I'm too short! To change the line ending from line feed (LF or ``\n``) to carriage return and line feed (CRLF or ``\r\n``) use:
wireservice/csvkit
d00ea20b965548299f4724c6ef9f9a6bdb33e02d
diff --git a/tests/test_utilities/test_csvclean.py b/tests/test_utilities/test_csvclean.py index 1d284c9..754f75a 100644 --- a/tests/test_utilities/test_csvclean.py +++ b/tests/test_utilities/test_csvclean.py @@ -3,6 +3,8 @@ import os import sys from unittest.mock import patch +import agate + from csvkit.utilities.csvclean import CSVClean, launch_new_instance from tests.utils import CSVKitTestCase, EmptyFileTests @@ -15,98 +17,89 @@ class TestCSVClean(CSVKitTestCase, EmptyFileTests): if os.path.isfile(output_file): os.remove(output_file) - def assertCleaned(self, basename, output_lines, error_lines, additional_args=[]): - args = [f'examples/{basename}.csv'] + additional_args + def assertCleaned(self, args, output_rows, error_rows=[]): output_file = io.StringIO() + error_file = io.StringIO() - utility = CSVClean(args, output_file) - utility.run() + utility = CSVClean(args, output_file, error_file) - output_file.close() + if error_rows: + with self.assertRaises(SystemExit) as e: + utility.run() + + self.assertEqual(e.exception.code, 1) + else: + utility.run() + + output_file.seek(0) + error_file.seek(0) - output_file = f'examples/{basename}_out.csv' - error_file = f'examples/{basename}_err.csv' - - self.assertEqual(os.path.exists(output_file), bool(output_lines)) - self.assertEqual(os.path.exists(error_file), bool(error_lines)) - - try: - if output_lines: - with open(output_file) as f: - for line in output_lines: - self.assertEqual(next(f), line) - self.assertRaises(StopIteration, next, f) - if error_lines: - with open(error_file) as f: - for line in error_lines: - self.assertEqual(next(f), line) - self.assertRaises(StopIteration, next, f) - finally: - if output_lines: - os.remove(output_file) - if error_lines: - os.remove(error_file) + if output_rows: + reader = agate.csv.reader(output_file) + for row in output_rows: + self.assertEqual(next(reader), row) + self.assertRaises(StopIteration, next, reader) + if error_rows: + reader = agate.csv.reader(error_file) + for row in error_rows: + self.assertEqual(next(reader), row) + self.assertRaises(StopIteration, next, reader) + + output_file.close() + error_file.close() def test_launch_new_instance(self): - with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/bad.csv']): + with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']): launch_new_instance() def test_skip_lines(self): - self.assertCleaned('bad_skip_lines', [ - 'column_a,column_b,column_c\n', - '0,mixed types.... uh oh,17\n', + self.assertCleaned(['--skip-lines', '3', 'examples/bad_skip_lines.csv'], [ + ['column_a', 'column_b', 'column_c'], + ['0', 'mixed types.... uh oh', '17'], ], [ - 'line_number,msg,column_a,column_b,column_c\n', - '1,"Expected 3 columns, found 4 columns",1,27,,I\'m too long!\n', - '2,"Expected 3 columns, found 2 columns",,I\'m too short!\n', - ], ['--skip-lines', '3']) + ['line_number', 'msg', 'column_a', 'column_b', 'column_c'], + ['1', 'Expected 3 columns, found 4 columns', '1', '27', '', "I'm too long!"], + ['2', 'Expected 3 columns, found 2 columns', '', "I'm too short!"], + ]) def test_simple(self): - self.assertCleaned('bad', [ - 'column_a,column_b,column_c\n', - '0,mixed types.... uh oh,17\n', + self.assertCleaned(['examples/bad.csv'], [ + ['column_a', 'column_b', 'column_c'], + ['0', 'mixed types.... 
uh oh', '17'], ], [ - 'line_number,msg,column_a,column_b,column_c\n', - '1,"Expected 3 columns, found 4 columns",1,27,,I\'m too long!\n', - '2,"Expected 3 columns, found 2 columns",,I\'m too short!\n', + ['line_number', 'msg', 'column_a', 'column_b', 'column_c'], + ['1', 'Expected 3 columns, found 4 columns', '1', '27', '', "I'm too long!"], + ['2', 'Expected 3 columns, found 2 columns', '', "I'm too short!"], ]) def test_no_header_row(self): - self.assertCleaned('no_header_row', [ - '1,2,3\n', + self.assertCleaned(['examples/no_header_row.csv'], [ + ['1', '2', '3'], ], []) def test_removes_optional_quote_characters(self): - self.assertCleaned('optional_quote_characters', [ - 'a,b,c\n', - '1,2,3\n', - ], []) + self.assertCleaned(['examples/optional_quote_characters.csv'], [ + ['a', 'b', 'c'], + ['1', '2', '3'], + ]) def test_changes_line_endings(self): - self.assertCleaned('mac_newlines', [ - 'a,b,c\n', - '1,2,3\n', - '"Once upon\n', - 'a time",5,6\n', - ], []) + self.assertCleaned(['examples/mac_newlines.csv'], [ + ['a', 'b', 'c'], + ['1', '2', '3'], + ['Once upon\na time', '5', '6'], + ]) def test_changes_character_encoding(self): - self.assertCleaned('test_latin1', [ - 'a,b,c\n', - '1,2,3\n', - '4,5,©\n', - ], [], ['-e', 'latin1']) + self.assertCleaned(['-e', 'latin1', 'examples/test_latin1.csv'], [ + ['a', 'b', 'c'], + ['1', '2', '3'], + ['4', '5', u'©'], + ]) def test_removes_bom(self): - self.assertCleaned('test_utf8_bom', [ - 'foo,bar,baz\n', - '1,2,3\n', - '4,5,ʤ\n', - ], [], []) - - def test_dry_run(self): - output = self.get_output_as_io(['-n', 'examples/bad.csv']) - self.assertFalse(os.path.exists('examples/bad_err.csv')) - self.assertFalse(os.path.exists('examples/bad_out.csv')) - self.assertEqual(next(output)[:6], 'Line 1') - self.assertEqual(next(output)[:6], 'Line 2') + self.assertCleaned(['examples/test_utf8_bom.csv'], [ + ['foo', 'bar', 'baz'], + ['1', '2', '3'], + ['4', '5', 'ʤ'], + ])
csvclean: add options so that the fixed file can be written to stdout and the error rows to stderr
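A hedged sketch of the behaviour the updated tests above pin down: valid rows go to one stream, error rows to another, and a non-zero exit code signals that errors were found. The three-argument `CSVClean(args, output, error)` constructor and the exit code of 1 come from the test patch; everything else here is illustrative.

```python
import io

from csvkit.utilities.csvclean import CSVClean

output, errors = io.StringIO(), io.StringIO()

try:
    # examples/bad.csv contains rows with the wrong number of columns
    CSVClean(['examples/bad.csv'], output, errors).run()
except SystemExit as e:
    print('exit code:', e.code)  # 1, because error rows were emitted

print(output.getvalue())  # cleaned rows
print(errors.getvalue())  # line_number,msg,... rows
```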
0.0
d00ea20b965548299f4724c6ef9f9a6bdb33e02d
[ "tests/test_utilities/test_csvclean.py::TestCSVClean::test_changes_character_encoding", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_changes_line_endings", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_no_header_row", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_removes_bom", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_removes_optional_quote_characters", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_simple", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_skip_lines" ]
[ "tests/test_utilities/test_csvclean.py::TestCSVClean::test_empty", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_launch_new_instance" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2024-04-27 22:19:01+00:00
mit
6,251
wireservice__csvkit-619
diff --git a/CHANGELOG b/CHANGELOG index a9645ce..fb00cfa 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -11,6 +11,7 @@ Backwards-incompatible changes: * The --doublequote long flag is gone, and the -b short flag is now an alias for --no-doublequote. * When using the --columns or --not-columns options, you must not have spaces around the comma-separated values, unless the column names contain spaces. +* When sorting, null values are now greater than other values instead of less than. * CSVKitReader, CSVKitWriter, CSVKitDictReader, and CSVKitDictWriter have been removed. Use agate.csv.reader, agate.csv.writer, agate.csv.DictReader and agate.csv.DictWriter. * Drop Python 2.6 support. @@ -39,8 +40,10 @@ Fixes: * csvclean with standard input works on Windows. * csvgrep returns the input file's line numbers if the --linenumbers flag is set. * csvgrep can match multiline values. +* csvgrep correctly operates on ragged rows. * csvsql correctly escapes `%` characters in SQL queries. * csvstack supports stacking a single file. +* csvstat always reports frequencies. * FilteringCSVReader's any_match argument works correctly. * All tools handle empty files without error. diff --git a/csvkit/grep.py b/csvkit/grep.py index 975d111..58fc0ee 100644 --- a/csvkit/grep.py +++ b/csvkit/grep.py @@ -64,7 +64,11 @@ class FilteringCSVReader(six.Iterator): def test_row(self, row): for idx, test in self.patterns.items(): - result = test(row[idx]) + try: + value = row[idx] + except IndexError: + value = '' + result = test(value) if self.any_match: if result: return not self.inverse # True diff --git a/csvkit/utilities/csvsort.py b/csvkit/utilities/csvsort.py index 4d043b8..afe439a 100644 --- a/csvkit/utilities/csvsort.py +++ b/csvkit/utilities/csvsort.py @@ -35,7 +35,7 @@ class CSVSort(CSVKitUtility): table = agate.Table.from_csv(self.input_file, sniff_limit=self.args.sniff_limit, header=not self.args.no_header_row, column_types=self.get_column_types(), **self.reader_kwargs) column_ids = parse_column_identifiers(self.args.columns, table.column_names, column_offset=self.get_column_offset()) - table = table.order_by(lambda row: [(row[column_id] is not None, row[column_id]) for column_id in column_ids], reverse=self.args.reverse) + table = table.order_by(column_ids, reverse=self.args.reverse) table.to_csv(self.output_file, **self.writer_kwargs) diff --git a/csvkit/utilities/csvstat.py b/csvkit/utilities/csvstat.py index e6b143c..56e8019 100644 --- a/csvkit/utilities/csvstat.py +++ b/csvkit/utilities/csvstat.py @@ -142,14 +142,13 @@ class CSVStat(CSVKitUtility): self.output_file.write('\tUnique values: %i\n' % len(stats['unique'])) - if len(stats['unique']) != len(values): - self.output_file.write('\t%i most frequent values:\n' % MAX_FREQ) - for value, count in stats['freq']: - self.output_file.write(('\t\t%s:\t%s\n' % (six.text_type(value), count))) - if c.type == six.text_type: self.output_file.write('\tMax length: %i\n' % stats['len']) + self.output_file.write('\t%i most frequent values:\n' % MAX_FREQ) + for value, count in stats['freq']: + self.output_file.write(('\t\t%s:\t%s\n' % (six.text_type(value), count))) + if not operations: self.output_file.write('\n') self.output_file.write('Row count: %s\n' % tab.count_rows())
wireservice/csvkit
106006ba0a1893a7fb8dfb481f73ac242c4e5a30
diff --git a/tests/test_grep.py b/tests/test_grep.py index 4a293f0..ade3c9b 100644 --- a/tests/test_grep.py +++ b/tests/test_grep.py @@ -102,6 +102,16 @@ class TestGrep(unittest.TestCase): except ColumnIdentifierError: pass + def test_index_out_of_range(self): + fcr = FilteringCSVReader(iter(self.tab2), patterns={3: '0'}) + self.assertEqual(self.tab2[0], next(fcr)) + self.assertEqual(self.tab2[4], next(fcr)) + try: + next(fcr) + self.fail("Should be no more rows left.") + except StopIteration: + pass + def test_any_match(self): fcr = FilteringCSVReader(iter(self.tab2), patterns={'age': 'only', 0: '2'}, any_match=True) self.assertEqual(self.tab2[0], next(fcr)) diff --git a/tests/test_utilities/test_csvsort.py b/tests/test_utilities/test_csvsort.py index 796d11f..acd5512 100644 --- a/tests/test_utilities/test_csvsort.py +++ b/tests/test_utilities/test_csvsort.py @@ -29,7 +29,7 @@ class TestCSVSort(CSVKitTestCase, ColumnsTests, EmptyFileTests, NamesTests): def test_sort_date(self): reader = self.get_output_as_reader(['-c', '2', 'examples/testxls_converted.csv']) - test_order = [u'text', u'This row has blanks', u'Unicode! Σ', u'Chicago Tribune', u'Chicago Sun-Times', u'Chicago Reader'] + test_order = [u'text', u'Chicago Tribune', u'Chicago Sun-Times', u'Chicago Reader', u'This row has blanks', u'Unicode! Σ'] new_order = [six.text_type(r[0]) for r in reader] self.assertEqual(test_order, new_order) @@ -45,8 +45,8 @@ class TestCSVSort(CSVKitTestCase, ColumnsTests, EmptyFileTests, NamesTests): new_order = [six.text_type(r[0]) for r in reader] self.assertEqual(test_order, new_order) - def test_sort_ints_and_nulls(self): + def test_sort_t_and_nulls(self): reader = self.get_output_as_reader(['-c', '2', 'examples/sort_ints_nulls.csv']) - test_order = ['b', '', '1', '2'] + test_order = ['b', '1', '2', ''] new_order = [six.text_type(r[1]) for r in reader] self.assertEqual(test_order, new_order)
csvstat doesn't give counts for some columns csvstat is great, and usually gives a sense of the mode of the data, via a count of how many rows have the most frequent values: <pre> 4. LAST_NAME <type 'unicode'> Nulls: True Unique values: 307123 5 most frequent values: SMITH: 28155 JOHNSON: 23713 MARTINEZ: 18465 MILLER: 16916 BROWN: 15428 Max length: 28 </pre> But sometimes it doesn't, perhaps when there are fewer than some predefined number of unique values: <pre> 28. STATUS <type 'unicode'> Nulls: False Values: Active, Inactive </pre> I'd like to get counts of the top 5 most frequent values for all columns, as long as there are any repeated values.
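For context, csvstat builds its frequency list from an agate pivot (the same call appears in a later patch in this document); a rough sketch of the idea, with an illustrative column name:

```python
import agate

table = agate.Table.from_csv('examples/realdata/ks_1033_data.csv')

# Top five most frequent values of a column, regardless of how many distinct
# values it has -- which is what the fix makes csvstat report every time.
freq = table.pivot('county').order_by('Count', reverse=True).limit(5)
freq.print_table()
```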
0.0
106006ba0a1893a7fb8dfb481f73ac242c4e5a30
[ "tests/test_grep.py::TestGrep::test_index_out_of_range", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_sort_date", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_sort_t_and_nulls" ]
[ "tests/test_grep.py::TestGrep::test_any_match", "tests/test_grep.py::TestGrep::test_any_match_and_inverse", "tests/test_grep.py::TestGrep::test_column_names_in_patterns", "tests/test_grep.py::TestGrep::test_duplicate_column_ids_in_patterns", "tests/test_grep.py::TestGrep::test_inverse", "tests/test_grep.py::TestGrep::test_mixed_indices_and_column_names_in_patterns", "tests/test_grep.py::TestGrep::test_multiline", "tests/test_grep.py::TestGrep::test_no_header", "tests/test_grep.py::TestGrep::test_pattern", "tests/test_grep.py::TestGrep::test_regex", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_empty", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_invalid_column", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_invalid_options", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_launch_new_instance", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_names", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_no_header_row", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_no_inference", "tests/test_utilities/test_csvsort.py::TestCSVSort::test_sort_string_reverse" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-06-08 19:25:29+00:00
mit
6,252
wireservice__csvkit-645
diff --git a/CHANGELOG b/CHANGELOG index 0929e08..b5b78f9 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -44,6 +44,7 @@ Fixes: * csvgrep can match multiline values. * csvgrep correctly operates on ragged rows. * csvsql correctly escapes `%` characters in SQL queries. +* csvsql adds standard input only if explicitly requested. * csvstack supports stacking a single file. * csvstat always reports frequencies. * FilteringCSVReader's any_match argument works correctly. diff --git a/csvkit/utilities/csvsql.py b/csvkit/utilities/csvsql.py index 98c5e84..1da023e 100644 --- a/csvkit/utilities/csvsql.py +++ b/csvkit/utilities/csvsql.py @@ -11,7 +11,7 @@ from csvkit.cli import CSVKitUtility class CSVSQL(CSVKitUtility): - description = 'Generate SQL statements for one or more CSV files, create execute those statements directly on a database, and execute one or more SQL queries.' + description = 'Generate SQL statements for one or more CSV files, or execute those statements directly on a database, and execute one or more SQL queries.' override_flags = ['l', 'f'] def add_arguments(self): @@ -56,14 +56,6 @@ class CSVSQL(CSVKitUtility): else: table_names = [] - # If one or more filenames are specified, we need to add stdin ourselves (if available) - if sys.stdin not in self.input_files: - try: - if not sys.stdin.isatty(): - self.input_files.insert(0, sys.stdin) - except: - pass - # Create an SQLite database in memory if no connection string is specified if query and not connection_string: connection_string = "sqlite:///:memory:" diff --git a/docs/scripts/csvsql.rst b/docs/scripts/csvsql.rst index 3a9b4b0..f2dd003 100644 --- a/docs/scripts/csvsql.rst +++ b/docs/scripts/csvsql.rst @@ -16,7 +16,7 @@ Generate SQL statements for a CSV file or execute those statements directly on a [--blanks] [--no-inference] [--db-schema DB_SCHEMA] [FILE [FILE ...]] - Generate SQL statements for one or more CSV files, create execute those + Generate SQL statements for one or more CSV files, or execute those statements directly on a database, and execute one or more SQL queries. positional arguments:
wireservice/csvkit
70d641c60202c8c8d596d1bf90fb03b10a1a4614
diff --git a/tests/test_utilities/test_csvsql.py b/tests/test_utilities/test_csvsql.py index be7d54f..2135fad 100644 --- a/tests/test_utilities/test_csvsql.py +++ b/tests/test_utilities/test_csvsql.py @@ -67,7 +67,7 @@ class TestCSVSQL(CSVKitTestCase, EmptyFileTests): input_file = six.StringIO("a,b,c\n1,2,3\n") with stdin_as_string(input_file): - sql = self.get_output(['examples/dummy.csv']) + sql = self.get_output(['-', 'examples/dummy.csv']) self.assertTrue('CREATE TABLE stdin' in sql) self.assertTrue('CREATE TABLE dummy' in sql)
csvsql without tty always tries to read stdin The following snip works from a terminal but fails in a non-interactive session (we hit it in Jenkins, but I'd guess it also fails in cron) ``` csvsql --table foo --query "select * from foo" foo.csv ``` You get a `StopIteration` exception because csvsql is trying to read from stdin, which has nothing coming (this line: https://github.com/wireservice/csvkit/blob/205175fb70745b80db19acd4c314ad6c774b7fc0/csvkit/utilities/csvsql.py#L57). There's a previous discussion of the issue at https://github.com/wireservice/csvkit/issues/342 and https://github.com/wireservice/csvkit/issues/627, but the linked commit doesn't solve the issue. We're working around it by always sending something in to stdin when running from a job. I think csvsql should require naming stdin with a "-" when you want to read from both files named as arguments and stdin. This is how `cat` works: ``` echo "foo" | cat /tmp/file.csv # just prints file.csv echo "foo" | cat - /tmp/file.csv # prints foo, then file.csv echo "foo" | cat /tmp/file.csv - # prints file.csv, then foo ```
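With the fix, stdin is no longer added implicitly, so the report's command works in a non-interactive job; a rough, test-style sketch (the two-argument `CSVSQL(args, output)` construction is assumed from the other utilities shown in this document, and `foo.csv` is the file from the report):

```python
import io

from csvkit.utilities.csvsql import CSVSQL

output = io.StringIO()

# Only the named file is read; nothing blocks waiting on stdin.
CSVSQL(['--table', 'foo', '--query', 'select * from foo', 'foo.csv'], output).run()
print(output.getvalue())
```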
0.0
70d641c60202c8c8d596d1bf90fb03b10a1a4614
[ "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_create_table" ]
[ "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_empty", "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_launch_new_instance", "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_no_header_row", "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_no_inference", "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_stdin", "tests/test_utilities/test_csvsql.py::TestCSVSQL::test_stdin_and_filename" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-08-01 18:29:57+00:00
mit
6,253
wireservice__csvkit-755
diff --git a/docs/scripts/csvclean.rst b/docs/scripts/csvclean.rst index 281f2d0..8937495 100644 --- a/docs/scripts/csvclean.rst +++ b/docs/scripts/csvclean.rst @@ -5,7 +5,14 @@ csvclean Description =========== -Cleans a CSV file of common syntax errors. Outputs [basename]_out.csv and [basename]_err.csv, the former containing all valid rows and the latter containing all error rows along with line numbers and descriptions:: +Cleans a CSV file of common syntax errors: + +* reports rows that have a different number of columns than the header row +* removes optional quote characters +* changes the record delimiter to a line feed +* changes the character encoding to UTF-8 + +Outputs [basename]_out.csv and [basename]_err.csv, the former containing all valid rows and the latter containing all error rows along with line numbers and descriptions:: usage: csvclean [-h] [-d DELIMITER] [-t] [-q QUOTECHAR] [-u {0,1,2,3}] [-b] [-p ESCAPECHAR] [-z MAXFIELDSIZE] [-e ENCODING] [-S] [-v] [-l] diff --git a/examples/optional_quote_characters.csv b/examples/optional_quote_characters.csv new file mode 100644 index 0000000..bf9fcfb --- /dev/null +++ b/examples/optional_quote_characters.csv @@ -0,0 +1,2 @@ +a,b,c +"1","2","3"
wireservice/csvkit
f1180b3d674e7945bbcba336f541dc3597614918
diff --git a/tests/test_utilities/test_csvclean.py b/tests/test_utilities/test_csvclean.py index 808ec46..3b85ffb 100644 --- a/tests/test_utilities/test_csvclean.py +++ b/tests/test_utilities/test_csvclean.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- import os import sys @@ -17,12 +18,8 @@ from tests.utils import CSVKitTestCase, EmptyFileTests class TestCSVClean(CSVKitTestCase, EmptyFileTests): Utility = CSVClean - def test_launch_new_instance(self): - with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/bad.csv']): - launch_new_instance() - - def test_simple(self): - args = ['examples/bad.csv'] + def assertCleaned(self, basename, output_lines, error_lines, additional_args=[]): + args = ['examples/%s.csv' % basename] + additional_args output_file = six.StringIO() utility = CSVClean(args, output_file) @@ -30,24 +27,64 @@ class TestCSVClean(CSVKitTestCase, EmptyFileTests): output_file.close() - self.assertTrue(os.path.exists('examples/bad_err.csv')) - self.assertTrue(os.path.exists('examples/bad_out.csv')) + output_file = 'examples/%s_out.csv' % basename + error_file = 'examples/%s_err.csv' % basename + + self.assertEqual(os.path.exists(output_file), bool(output_lines)) + self.assertEqual(os.path.exists(error_file), bool(error_lines)) try: - with open('examples/bad_err.csv') as f: - next(f) - self.assertEqual(next(f)[0], '1') - self.assertEqual(next(f)[0], '2') - self.assertRaises(StopIteration, next, f) - - with open('examples/bad_out.csv') as f: - next(f) - self.assertEqual(next(f)[0], '0') - self.assertRaises(StopIteration, next, f) + if output_lines: + with open(output_file) as f: + for line in output_lines: + self.assertEqual(next(f), line) + self.assertRaises(StopIteration, next, f) + if error_lines: + with open(error_file) as f: + for line in error_lines: + self.assertEqual(next(f), line) + self.assertRaises(StopIteration, next, f) finally: - # Cleanup - os.remove('examples/bad_err.csv') - os.remove('examples/bad_out.csv') + if output_lines: + os.remove(output_file) + if error_lines: + os.remove(error_file) + + + def test_launch_new_instance(self): + with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/bad.csv']): + launch_new_instance() + + def test_simple(self): + self.assertCleaned('bad', [ + 'column_a,column_b,column_c\n', + '0,mixed types.... uh oh,17\n', + ], [ + 'line_number,msg,column_a,column_b,column_c\n', + '1,"Expected 3 columns, found 4 columns",1,27,,I\'m too long!\n', + '2,"Expected 3 columns, found 2 columns",,I\'m too short!\n', + ]) + + def test_removes_optional_quote_characters(self): + self.assertCleaned('optional_quote_characters', [ + 'a,b,c\n', + '1,2,3\n', + ], []) + + def test_changes_line_endings(self): + self.assertCleaned('mac_newlines', [ + 'a,b,c\n', + '1,2,3\n', + '"Once upon\n', + 'a time",5,6\n', + ], []) + + def test_changes_character_encoding(self): + self.assertCleaned('test_latin1', [ + 'a,b,c\n', + '1,2,3\n', + '4,5,©\n', + ], [], ['-e', 'latin1']) def test_dry_run(self): output = self.get_output_as_io(['-n', 'examples/bad.csv'])
csvclean documentation is poor Great tool really, but the documentation is very poor. It would be useful to explain each task done by csvclean: - delete every unneeded quote - recode from XXX charset to UTF-8 - replace X delimiter by a comma - replace \r\n by \n This last modification is fine for me, as I can then grep the file without problems, but it is not compatible with the RFC (which recommends \r\n).
0.0
f1180b3d674e7945bbcba336f541dc3597614918
[ "tests/test_utilities/test_csvclean.py::TestCSVClean::test_removes_optional_quote_characters" ]
[ "tests/test_utilities/test_csvclean.py::TestCSVClean::test_changes_character_encoding", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_changes_line_endings", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_dry_run", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_empty", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_launch_new_instance", "tests/test_utilities/test_csvclean.py::TestCSVClean::test_simple" ]
{ "failed_lite_validators": [ "has_added_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-01-17 05:17:22+00:00
mit
6,254
wireservice__csvkit-770
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6ebf3be..06441ce 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,7 +6,8 @@ Improvements: * Add a :code:`--version` (:code:`-V`) flag. * :code:`-I` is the short option for :code:`--no-inference`. * :doc:`/scripts/csvjoin` supports :code:`--snifflimit` and :code:`--no-inference`. -* :doc:`/scripts/in2csv` now supports a :code:`--names` flag to print Excel sheet names. +* :doc:`/scripts/csvstat` adds a :code:`--freq-count` option to set the maximum number of frequent values to display. +* :doc:`/scripts/in2csv` adds a :code:`--names` flag to print Excel sheet names. Fixes: diff --git a/csvkit/utilities/csvstat.py b/csvkit/utilities/csvstat.py index 2292e77..29908d2 100644 --- a/csvkit/utilities/csvstat.py +++ b/csvkit/utilities/csvstat.py @@ -13,9 +13,6 @@ from csvkit.cli import CSVKitUtility, parse_column_identifiers NoneType = type(None) -MAX_UNIQUE = 5 -MAX_FREQ = 5 - OPERATIONS = OrderedDict([ ('type', { 'aggregation': None, @@ -97,8 +94,10 @@ class CSVStat(CSVKitUtility): help='Only output the length of the longest values.') self.argparser.add_argument('--freq', dest='freq_only', action='store_true', help='Only output lists of frequent values.') + self.argparser.add_argument('--freq-count', dest='freq_count', type=int, + help='The maximum number of frequent values to display.') self.argparser.add_argument('--count', dest='count_only', action='store_true', - help='Only output total row count') + help='Only output total row count.') self.argparser.add_argument('-y', '--snifflimit', dest='sniff_limit', type=int, help='Limit CSV dialect sniffing to the specified number of bytes. Specify "0" to disable sniffing entirely.') @@ -144,18 +143,23 @@ class CSVStat(CSVKitUtility): self.get_column_offset() ) + kwargs = {} + + if self.args.freq_count: + kwargs['freq_count'] = self.args.freq_count + # Output a single stat if operations: if len(column_ids) == 1: - self.print_one(table, column_ids[0], operations[0], label=False) + self.print_one(table, column_ids[0], operations[0], label=False, **kwargs) else: for column_id in column_ids: - self.print_one(table, column_id, operations[0]) + self.print_one(table, column_id, operations[0], **kwargs) else: stats = {} for column_id in column_ids: - stats[column_id] = self.calculate_stats(table, column_id) + stats[column_id] = self.calculate_stats(table, column_id, **kwargs) # Output as CSV if self.args.csv_output: @@ -164,7 +168,7 @@ class CSVStat(CSVKitUtility): else: self.print_stats(table, column_ids, stats) - def print_one(self, table, column_id, operation, label=True): + def print_one(self, table, column_id, operation, label=True, **kwargs): """ Print data for a single statistic. """ @@ -178,7 +182,7 @@ class CSVStat(CSVKitUtility): try: if getter: - stat = getter(table, column_id) + stat = getter(table, column_id, **kwargs) else: op = OPERATIONS[op_name]['aggregation'] stat = table.aggregate(op(column_id)) @@ -198,7 +202,7 @@ class CSVStat(CSVKitUtility): else: self.output_file.write(u'%s\n' % stat) - def calculate_stats(self, table, column_id): + def calculate_stats(self, table, column_id, **kwargs): """ Calculate stats for all valid operations. 
""" @@ -212,7 +216,7 @@ class CSVStat(CSVKitUtility): try: if getter: - stats[op_name] = getter(table, column_id) + stats[op_name] = getter(table, column_id, **kwargs) else: op = op_data['aggregation'] v = table.aggregate(op(column_id)) @@ -314,16 +318,16 @@ class CSVStat(CSVKitUtility): writer.writerow(output_row) -def get_type(table, column_id): +def get_type(table, column_id, **kwargs): return '%s' % table.columns[column_id].data_type.__class__.__name__ -def get_unique(table, column_id): +def get_unique(table, column_id, **kwargs): return len(table.columns[column_id].values_distinct()) -def get_freq(table, column_id): - return table.pivot(column_id).order_by('Count', reverse=True).limit(MAX_FREQ) +def get_freq(table, column_id, freq_count=5, **kwargs): + return table.pivot(column_id).order_by('Count', reverse=True).limit(freq_count) def launch_new_instance():
wireservice/csvkit
7c26421a9f7f32318eb96b2649f62ab0192f2f33
diff --git a/tests/test_utilities/test_csvstat.py b/tests/test_utilities/test_csvstat.py index 2f9cec2..875c7dd 100644 --- a/tests/test_utilities/test_csvstat.py +++ b/tests/test_utilities/test_csvstat.py @@ -55,6 +55,14 @@ class TestCSVStat(CSVKitTestCase, ColumnsTests, EmptyFileTests, NamesTests): self.assertIn('SALINE (59x)', output) self.assertNotIn('MIAMI (56x)', output) + + def test_freq_count(self): + output = self.get_output(['examples/realdata/ks_1033_data.csv', '--freq-count', '1']) + + self.assertIn('WYANDOTTE (123x)', output) + self.assertNotIn('SALINE (59x)', output) + self.assertNotIn('MIAMI (56x)', output) + def test_csv(self): output = self.get_output_as_io(['--csv', 'examples/realdata/ks_1033_data.csv'])
csvstat: flag to specify how many frequent values to display csvstat is a very useful tool, but it could still be better: the "frequent values" feature appears to be hard-coded to 5 values. It would be great if 5 were only the default and could be overridden by a new parameter, say "-f ". For example, "csvstat -f 20 -c 3,7 --freq myfile.csv" would return the 20 most frequent values of columns 3 and 7. Today it seems impossible to exceed the limit of 5 values. Best regards, and thanks for this great kit.
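The patch above implements this as a `--freq-count` option rather than `-f`; a hedged sketch of driving it the way the tests do (the `CSVStat(args, output)` constructor is assumed from the other csvkit utilities shown here, and `myfile.csv` is a placeholder):

```python
import io

from csvkit.utilities.csvstat import CSVStat

output = io.StringIO()

# Roughly equivalent to: csvstat --freq --freq-count 20 -c 3,7 myfile.csv
CSVStat(['--freq', '--freq-count', '20', '-c', '3,7', 'myfile.csv'], output).run()
print(output.getvalue())
```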
0.0
7c26421a9f7f32318eb96b2649f62ab0192f2f33
[ "tests/test_utilities/test_csvstat.py::TestCSVStat::test_freq_count" ]
[ "tests/test_utilities/test_csvstat.py::TestCSVStat::test_columns", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_count_only", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_csv", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_csv_columns", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_empty", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_encoding", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_freq_list", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_invalid_column", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_invalid_options", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_launch_new_instance", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_max_length", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_names", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_no_header_row", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_runs", "tests/test_utilities/test_csvstat.py::TestCSVStat::test_unique" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-01-27 15:42:42+00:00
mit
6,255
wireservice__csvkit-776
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8f1617b..9dc61c3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,8 +3,9 @@ Improvements: -* Add a :code:`--version` (:code:`-V`) flag. +* Add a :code:`--version` flag. * Add a :code:`--skip-lines` option to skip initial lines (e.g. comments, copyright notices, empty rows). +* Add a :code:`--locale` option to set the locale of any formatted numbers. * :code:`-I` is the short option for :code:`--no-inference`. * :doc:`/scripts/csvjoin` supports :code:`--snifflimit` and :code:`--no-inference`. * :doc:`/scripts/csvstat` adds a :code:`--freq-count` option to set the maximum number of frequent values to display. diff --git a/csvkit/cli.py b/csvkit/cli.py index cf52724..f69f961 100644 --- a/csvkit/cli.py +++ b/csvkit/cli.py @@ -159,7 +159,10 @@ class CSVKitUtility(object): help='Maximum length of a single field in the input CSV file.') if 'e' not in self.override_flags: self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8', - help='Specify the encoding the input CSV file.') + help='Specify the encoding of the input CSV file.') + if 'L' not in self.override_flags: + self.argparser.add_argument('-L', '--locale', dest='locale', default='en_US', + help='Specify the locale (en_US) of any formatted numbers.') if 'S' not in self.override_flags: self.argparser.add_argument('-S', '--skipinitialspace', dest='skipinitialspace', action='store_true', help='Ignore whitespace immediately following the delimiter.') @@ -283,7 +286,7 @@ class CSVKitUtility(object): else: return agate.TypeTester(types=[ agate.Boolean(), - agate.Number(), + agate.Number(locale=self.args.locale), agate.TimeDelta(), agate.Date(), agate.DateTime(), diff --git a/csvkit/utilities/sql2csv.py b/csvkit/utilities/sql2csv.py index 98bf911..1b56f52 100644 --- a/csvkit/utilities/sql2csv.py +++ b/csvkit/utilities/sql2csv.py @@ -10,7 +10,7 @@ from csvkit.cli import CSVKitUtility class SQL2CSV(CSVKitUtility): description = 'Execute an SQL query on a database and output the result to a CSV file.' - override_flags = 'f,b,d,e,H,K,p,q,S,t,u,z,zero'.split(',') + override_flags = 'f,b,d,e,H,K,L,p,q,S,t,u,z,zero'.split(',') def add_arguments(self): self.argparser.add_argument('--db', dest='connection_string', default='sqlite://', @@ -20,7 +20,7 @@ class SQL2CSV(CSVKitUtility): self.argparser.add_argument('--query', default=None, help="The SQL query to execute. If specified, it overrides FILE and STDIN.") self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8', - help='Specify the encoding the input query file.') + help='Specify the encoding of the input query file.') self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true', help='Do not output column names.') diff --git a/docs/common_arguments.rst b/docs/common_arguments.rst index b4af001..337ac0a 100644 --- a/docs/common_arguments.rst +++ b/docs/common_arguments.rst @@ -24,7 +24,9 @@ All tools which accept CSV as input share a set of common command-line arguments Maximum length of a single field in the input CSV file. -e ENCODING, --encoding ENCODING - Specify the encoding the input CSV file. + Specify the encoding of the input CSV file. + -L LOCALE, --locale LOCALE + Specify the locale (en_US) of any formatted numbers. -S, --skipinitialspace Ignore whitespace immediately following the delimiter. -H, --no-header-row Specify that the input CSV file has no header row. 
diff --git a/docs/scripts/sql2csv.rst b/docs/scripts/sql2csv.rst index 1ea81e2..bfd3439 100644 --- a/docs/scripts/sql2csv.rst +++ b/docs/scripts/sql2csv.rst @@ -30,7 +30,7 @@ Executes arbitrary commands against a SQL database and outputs the results as a --query QUERY The SQL query to execute. If specified, it overrides FILE and STDIN. -e ENCODING, --encoding ENCODING - Specify the encoding the input query file. + Specify the encoding of the input query file. -H, --no-header-row Do not output column names. Examples
wireservice/csvkit
b69d7cd51f0e273564a3209d871bb9af3cfd7f42
diff --git a/examples/test_locale.csv b/examples/test_locale.csv new file mode 100644 index 0000000..4924ddc --- /dev/null +++ b/examples/test_locale.csv @@ -0,0 +1,2 @@ +a,b,c +"1,7","200.000.000", diff --git a/examples/test_locale_converted.csv b/examples/test_locale_converted.csv new file mode 100644 index 0000000..3cd0f59 --- /dev/null +++ b/examples/test_locale_converted.csv @@ -0,0 +1,2 @@ +a,b,c +1.7,200000000, diff --git a/tests/test_utilities/test_in2csv.py b/tests/test_utilities/test_in2csv.py index 5bedf05..ce9382d 100644 --- a/tests/test_utilities/test_in2csv.py +++ b/tests/test_utilities/test_in2csv.py @@ -34,6 +34,9 @@ class TestIn2CSV(CSVKitTestCase, EmptyFileTests): self.assertEqual(e.exception.code, 0) + def test_locale(self): + self.assertConverted('csv', 'examples/test_locale.csv', 'examples/test_locale_converted.csv', ['--locale', 'de_DE']) + def test_convert_csv(self): self.assertConverted('csv', 'examples/testfixed_converted.csv', 'examples/testfixed_converted.csv')
Parse non-US locale numbers Sometimes numeric data contains thousands separators, typically ',' or '_', or in Europe '.'. Europeans also commonly use ',' as the decimal point.
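The patch above wires this through `agate.Number(locale=...)` via a new `-L`/`--locale` option; a small sketch of the underlying parsing, using the German-formatted values from the new test fixture:

```python
from decimal import Decimal

import agate

number = agate.Number(locale='de_DE')

# In de_DE, '.' is the thousands separator and ',' the decimal point.
assert number.cast('1,7') == Decimal('1.7')
assert number.cast('200.000.000') == Decimal('200000000')
```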
0.0
b69d7cd51f0e273564a3209d871bb9af3cfd7f42
[ "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_locale" ]
[ "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_csv", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_csv_with_skip_lines", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_geojson", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_json", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_ndjson", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_nested_json", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_xls", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_xls_with_sheet", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_xls_with_skip_lines", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_xlsx", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_xlsx_with_sheet", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_convert_xlsx_with_skip_lines", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_csv_datetime_inference", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_csv_no_headers", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_csv_no_inference", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_empty", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_geojson_no_inference", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_json_no_inference", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_launch_new_instance", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_names_xls", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_names_xlsx", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_ndjson_no_inference", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_version", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_xls_no_inference", "tests/test_utilities/test_in2csv.py::TestIn2CSV::test_xlsx_no_inference" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-01-28 07:04:06+00:00
mit
6,256
wireservice__csvkit-800
diff --git a/csvkit/utilities/csvstack.py b/csvkit/utilities/csvstack.py index bf1c00b..39d544d 100644 --- a/csvkit/utilities/csvstack.py +++ b/csvkit/utilities/csvstack.py @@ -9,7 +9,7 @@ from csvkit.cli import CSVKitUtility, make_default_headers class CSVStack(CSVKitUtility): description = 'Stack up the rows from multiple CSV files, optionally adding a grouping value.' - override_flags = ['f', 'K', 'L', 'date-format', 'datetime-format'] + override_flags = ['f', 'L', 'date-format', 'datetime-format'] def add_arguments(self): self.argparser.add_argument(metavar="FILE", nargs='+', dest='input_paths', default=['-'], @@ -45,6 +45,14 @@ class CSVStack(CSVKitUtility): output = agate.csv.writer(self.output_file, **self.writer_kwargs) for i, f in enumerate(self.input_files): + if isinstance(self.args.skip_lines, int): + skip_lines = self.args.skip_lines + while skip_lines > 0: + f.readline() + skip_lines -= 1 + else: + raise ValueError('skip_lines argument must be an int') + rows = agate.csv.reader(f, **self.reader_kwargs) # If we have header rows, use them
wireservice/csvkit
3d9438e7ea5db34948ade66b0a4333736990c77a
diff --git a/tests/test_utilities/test_csvstack.py b/tests/test_utilities/test_csvstack.py index abae02d..2921f2f 100644 --- a/tests/test_utilities/test_csvstack.py +++ b/tests/test_utilities/test_csvstack.py @@ -19,6 +19,13 @@ class TestCSVStack(CSVKitTestCase, EmptyFileTests): with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']): launch_new_instance() + def test_skip_lines(self): + self.assertRows(['--skip-lines', '3', 'examples/test_skip_lines.csv', 'examples/test_skip_lines.csv'], [ + ['a', 'b', 'c'], + ['1', '2', '3'], + ['1', '2', '3'], + ]) + def test_single_file_stack(self): self.assertRows(['examples/dummy.csv'], [ ['a', 'b', 'c'],
csvstack to support --skip-lines First of all, great library. It was very handy for searching big CSV files. I needed to search a whole folder of CSV files in which each file has a header that should be ignored; with the latest version 1.0.2 this was possible using --skip-lines, but only file by file. I tried using csvstack, but it did not recognise the --skip-lines parameter: ``` Alis-Mac-mini:sonus shahbour$ csvgrep --skip-lines 1 -c 20 -r "^449" -H 20170219013000.1014D6F.ACT.gz | csvgrep -c 21 -r "^639" | csvcut -c 20,21 t,u 44971506961058,639398219637 44971504921587,639106889971 44971569097874,639291643991 44971568622691,639101981790 44971543461612,639495761895 44971502473650,639287415793 44971543544583,639183191196 44971569097874,639291643991 44971566267135,639293255451 44971507677524,639108700472 ``` ``` Alis-Mac-mini:sonus shahbour$ csvstack -H --skip-lines 1 * | csvgrep -c 20 -r "^449" | csvgrep -c 21 -r "^639" | csvcut -c 20,21 usage: csvstack [-h] [-d DELIMITER] [-t] [-q QUOTECHAR] [-u {0,1,2,3}] [-b] [-p ESCAPECHAR] [-z FIELD_SIZE_LIMIT] [-e ENCODING] [-S] [-H] [-v] [-l] [--zero] [-V] [-g GROUPS] [-n GROUP_NAME] [--filenames] FILE [FILE ...] csvstack: error: unrecognized arguments: --skip-lines ```
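The patch above adds exactly this: csvstack now consumes the first `--skip-lines` lines of each input before handing it to the CSV reader. A standalone paraphrase of that loop (the helper name is made up; the real code inlines it in `main()`):

```python
def skip_leading_lines(f, skip_lines):
    """Throw away the first skip_lines lines of an open file object."""
    if not isinstance(skip_lines, int):
        raise ValueError('skip_lines argument must be an int')
    for _ in range(skip_lines):
        f.readline()
    return f
```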
0.0
3d9438e7ea5db34948ade66b0a4333736990c77a
[ "tests/test_utilities/test_csvstack.py::TestCSVStack::test_skip_lines" ]
[ "tests/test_utilities/test_csvstack.py::TestCSVStack::test_empty", "tests/test_utilities/test_csvstack.py::TestCSVStack::test_explicit_grouping", "tests/test_utilities/test_csvstack.py::TestCSVStack::test_filenames_grouping", "tests/test_utilities/test_csvstack.py::TestCSVStack::test_launch_new_instance", "tests/test_utilities/test_csvstack.py::TestCSVStack::test_multiple_file_stack", "tests/test_utilities/test_csvstack.py::TestCSVStack::test_no_header_row", "tests/test_utilities/test_csvstack.py::TestCSVStack::test_single_file_stack" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2017-02-24 20:00:49+00:00
mit
6,257
wireservice__csvkit-900
diff --git a/.travis.yml b/.travis.yml index ca53492..d5a2d31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,10 +7,8 @@ python: - "3.4" - "3.5" - "3.6" - - "pypy-5.3.1" install: - if [[ $TRAVIS_PYTHON_VERSION == 3* ]]; then pip install -r requirements-py3.txt; else pip install -r requirements-py2.txt; fi - - if [[ $TRAVIS_PYTHON_VERSION == "pypy-5.3.1" ]]; then pip install psycopg2cffi; else pip install psycopg2; fi - pip install coveralls before_script: - psql -U postgres -c 'CREATE DATABASE dummy_test' diff --git a/AUTHORS.rst b/AUTHORS.rst index ce1a210..ad1e207 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -81,3 +81,4 @@ The following individuals have contributed code to csvkit: * kjedamzik * John Vandenberg * Olivier Lacan +* Adrien Delessert diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ea9460b..0cf8633 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,14 +3,19 @@ Improvements: +* :doc:`/scripts/csvgrep` adds a :code:`--any-match` (:code:`-a`) flag to select rows where any column matches instead of all columns. * :doc:`/scripts/csvjson` no longer emits a property if its value is null. * :doc:`/scripts/in2csv` adds a :code:`--encoding-xls` option to specify the encoding of the input XLS file. Fixes: * :doc:`/scripts/csvgrep` accepts utf-8 arguments to the :code:`--match` and :code:`--regex` options in Python 2. +* :doc:`/scripts/csvsql` sets a DECIMAL's precision and scale and a VARCHAR's length to avoid dialect-specific errors. +* :doc:`/scripts/csvstack` no longer opens all files at once. * :doc:`/scripts/in2csv` respects :code:`--no-header-row` when :code:`--no-inference` is set. +csvkit is no longer tested on PyPy. + 1.0.2 - April 28, 2017 ---------------------- diff --git a/csvkit/utilities/csvgrep.py b/csvkit/utilities/csvgrep.py index df18eb8..c0b080b 100644 --- a/csvkit/utilities/csvgrep.py +++ b/csvkit/utilities/csvgrep.py @@ -35,7 +35,8 @@ class CSVGrep(CSVKitUtility): help='If specified, must be the path to a file. For each tested row, if any line in the file (stripped of line separators) is an exact match for the cell value, the row will pass.') self.argparser.add_argument('-i', '--invert-match', dest='inverse', action='store_true', help='If specified, select non-matching instead of matching rows.') - + self.argparser.add_argument('-a', '--any-match', dest='any_match', action='store_true', + help='If specified, select rows where any column matches instead of all columns.') def main(self): if self.args.names_only: self.print_column_names() @@ -67,7 +68,7 @@ class CSVGrep(CSVKitUtility): pattern = self.args.pattern patterns = dict((column_id, pattern) for column_id in column_ids) - filter_reader = FilteringCSVReader(rows, header=False, patterns=patterns, inverse=self.args.inverse) + filter_reader = FilteringCSVReader(rows, header=False, patterns=patterns, inverse=self.args.inverse, any_match=self.args.any_match) output = agate.csv.writer(self.output_file, **writer_kwargs) output.writerow(column_names) diff --git a/csvkit/utilities/csvstack.py b/csvkit/utilities/csvstack.py index 56e2051..cba10af 100644 --- a/csvkit/utilities/csvstack.py +++ b/csvkit/utilities/csvstack.py @@ -23,20 +23,15 @@ class CSVStack(CSVKitUtility): help='Use the filename of each input file as its grouping value. 
When specified, -g will be ignored.') def main(self): - self.input_files = [] - - for path in self.args.input_paths: - self.input_files.append(self._open_input_file(path)) - - if not self.input_files: + if not self.args.input_paths: self.argparser.error('You must specify at least one file to stack.') - if self.args.group_by_filenames: - groups = [os.path.basename(f.name) for f in self.input_files] - elif self.args.groups: + has_groups = self.args.group_by_filenames or self.args.groups + + if self.args.groups and not self.args.group_by_filenames: groups = self.args.groups.split(',') - if len(groups) != len(self.input_files): + if len(groups) != len(self.args.input_paths): self.argparser.error('The number of grouping values must be equal to the number of CSV files being stacked.') else: groups = None @@ -45,7 +40,9 @@ class CSVStack(CSVKitUtility): output = agate.csv.writer(self.output_file, **self.writer_kwargs) - for i, f in enumerate(self.input_files): + for i, path in enumerate(self.args.input_paths): + f = self._open_input_file(path) + if isinstance(self.args.skip_lines, int): skip_lines = self.args.skip_lines while skip_lines > 0: @@ -56,12 +53,18 @@ class CSVStack(CSVKitUtility): rows = agate.csv.reader(f, **self.reader_kwargs) + if has_groups: + if groups: + group = groups[i] + else: + group = os.path.basename(f.name) + # If we have header rows, use them if not self.args.no_header_row: headers = next(rows, []) if i == 0: - if groups: + if has_groups: headers.insert(0, group_name) output.writerow(headers) @@ -72,19 +75,19 @@ class CSVStack(CSVKitUtility): headers = make_default_headers(len(row)) if i == 0: - if groups: + if has_groups: headers.insert(0, group_name) output.writerow(headers) - if groups: - row.insert(0, groups[i]) + if has_groups: + row.insert(0, group) output.writerow(row) for row in rows: - if groups: - row.insert(0, groups[i]) + if has_groups: + row.insert(0, group) output.writerow(row) diff --git a/docs/scripts/csvgrep.rst b/docs/scripts/csvgrep.rst index 7998053..d40b6d1 100644 --- a/docs/scripts/csvgrep.rst +++ b/docs/scripts/csvgrep.rst @@ -38,6 +38,8 @@ Filter tabular data to only those rows where certain columns contain a given val row will pass. -i, --invert-match If specified, select non-matching instead of matching rows. + -a --any-match If specified, select rows where any column matches + instead of all columns. See also: :doc:`../common_arguments`. @@ -53,8 +55,7 @@ Search for the row relating to Illinois:: Search for rows relating to states with names beginning with the letter "I":: csvgrep -c 1 -r "^I" examples/realdata/FY09_EDU_Recipients_by_State.csv - + Search for rows that do not contain an empty state cell:: csvgrep -c 1 -r "^$" -i examples/realdata/FY09_EDU_Recipients_by_State.csv -
wireservice/csvkit
8911438e05b83a78bbee778369e83f9defa1fac9
diff --git a/tests/test_utilities/test_csvgrep.py b/tests/test_utilities/test_csvgrep.py index 08b59d1..a4c8eb2 100644 --- a/tests/test_utilities/test_csvgrep.py +++ b/tests/test_utilities/test_csvgrep.py @@ -33,6 +33,12 @@ class TestCSVGrep(CSVKitTestCase, ColumnsTests, EmptyFileTests, NamesTests): ['1', '2', '3'], ]) + def test_any_match(self): + self.assertRows(['-c', '1,2,3', '-a', '-m', '1', 'examples/dummy.csv'], [ + ['a', 'b', 'c'], + ['1', '2', '3'], + ]) + def test_match_utf8(self): self.assertRows(['-c', '3', '-m', 'ʤ', 'examples/test_utf8.csv'], [ ['foo', 'bar', 'baz'],
csvstack [Errno 24] Too many open files `csvstack` returns "[Errno 24] Too many open files" when many files are added for stacking. For example, `csvstack --filenames *.csv > stacked.csv`. It seems to be a Python error, but can `csvstack` use an intermediate file and close the previous ones?
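The fix in the patch above is to stop opening every input up front and instead open each file lazily inside the stacking loop, so only one handle is held at a time. A simplified sketch of that shape (header de-duplication and the `--groups`/`--filenames` handling are omitted):

```python
import agate

def stack(paths, output):
    writer = agate.csv.writer(output)
    for path in paths:
        # Open each input only when it is needed and close it before moving on.
        with open(path, newline='') as f:
            for row in agate.csv.reader(f):
                writer.writerow(row)
```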
0.0
8911438e05b83a78bbee778369e83f9defa1fac9
[ "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_any_match" ]
[ "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_empty", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_invalid_column", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_invalid_options", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_invert_match", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_kwargs_with_line_numbers", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_launch_new_instance", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_match", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_match_utf8", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_match_with_line_numbers", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_names", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_no_match", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_re_match", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_re_match_utf8", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_skip_lines", "tests/test_utilities/test_csvgrep.py::TestCSVGrep::test_string_match" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-10-26 00:15:59+00:00
mit
6,258
witchard__grole-21
diff --git a/docs/tutorial.rst b/docs/tutorial.rst index e7d4091..dd7d9af 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -51,7 +51,7 @@ Responding In-built python types returned by registered request handlers are automatically converted into 200 OK HTTP responses. The following mappings apply: * bytes: Sent directly with content type text/plain -* string: Encoded as bytes and sent with content type text/plain +* string: Encoded as bytes and sent with content type text/html * others: Encoded as json and sent with content type application/json Finer grained control of the response data can be achieved using :class:`ResponseBody` or one of it's children. These allow for overriding of the content type. The following are available: diff --git a/grole.py b/grole.py index d34ea4d..e8f6506 100755 --- a/grole.py +++ b/grole.py @@ -132,7 +132,7 @@ class ResponseString(ResponseBody): """ Response body from a string """ - def __init__(self, data='', content_type='text/plain'): + def __init__(self, data='', content_type='text/html'): """ Initialise object, data is the data to send
witchard/grole
a766ad29789b27e75f388ef0f7ce8d999d52c4e4
diff --git a/test/test_response.py b/test/test_response.py index fb58ad8..967e050 100644 --- a/test/test_response.py +++ b/test/test_response.py @@ -106,5 +106,29 @@ class TestFile(unittest.TestCase): self.assertEqual(writer.data, b'4\r\nfoo\n\r\n0\r\n\r\n') +class TestAuto(unittest.TestCase): + + def test_empty(self): + res = grole.Response() + self.assertTrue(isinstance(res.data, grole.ResponseBody)) + + def test_bytes(self): + res = grole.Response(b'foo') + self.assertTrue(isinstance(res.data, grole.ResponseBody)) + self.assertEqual(res.data._data, b'foo') + self.assertEqual(res.data._headers['Content-Type'], 'text/plain') + + def test_str(self): + res = grole.Response('foo') + self.assertTrue(isinstance(res.data, grole.ResponseString)) + self.assertEqual(res.data._data, b'foo') + self.assertEqual(res.data._headers['Content-Type'], 'text/html') + + def test_json(self): + res = grole.Response({'foo': 'bar'}) + self.assertTrue(isinstance(res.data, grole.ResponseJSON)) + self.assertEqual(res.data._data, b'{"foo": "bar"}') + self.assertEqual(res.data._headers['Content-Type'], 'application/json') + if __name__ == '__main__': unittest.main()
Change default content type for string to text/html This feels like it makes more sense. Don't forget to update the doc!
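The behaviour pinned down by the new tests, as a quick sketch (reading `_data`/`_headers` is test-style introspection, not public API):

```python
import grole

# A str body now defaults to text/html; bytes remain text/plain.
html = grole.Response('<h1>hi</h1>')
assert html.data._headers['Content-Type'] == 'text/html'

plain = grole.Response(b'hi')
assert plain.data._headers['Content-Type'] == 'text/plain'
```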
0.0
a766ad29789b27e75f388ef0f7ce8d999d52c4e4
[ "test/test_response.py::TestAuto::test_str" ]
[ "test/test_response.py::TestHeader::test_header", "test/test_response.py::TestBody::test_bytes", "test/test_response.py::TestBody::test_data", "test/test_response.py::TestBody::test_file", "test/test_response.py::TestBody::test_headers", "test/test_response.py::TestBody::test_json", "test/test_response.py::TestBody::test_string", "test/test_response.py::TestString::test_data", "test/test_response.py::TestString::test_headers", "test/test_response.py::TestJSON::test_data", "test/test_response.py::TestJSON::test_headers", "test/test_response.py::TestFile::test_data", "test/test_response.py::TestFile::test_headers", "test/test_response.py::TestAuto::test_bytes", "test/test_response.py::TestAuto::test_empty", "test/test_response.py::TestAuto::test_json" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-06-13 08:31:41+00:00
mit
6,259
wkeeling__selenium-wire-509
diff --git a/README.rst b/README.rst index 17e62f7..bb02330 100644 --- a/README.rst +++ b/README.rst @@ -694,21 +694,19 @@ If you wish to take advantage of this make sure you have undetected_chromedriver pip install undetected-chromedriver -Then you can select the version of undetected_chromedriver you want to use by importing ``Chrome`` and ``ChromeOptions`` from the appropriate package. - -For undetected_chromedriver version 1: +Then in your code, import the ``seleniumwire.undetected_chromedriver`` package: .. code:: python - from seleniumwire.undetected_chromedriver import Chrome, ChromeOptions - -For undetected_chromedriver version 2: + import seleniumwire.undetected_chromedriver as uc -.. code:: python + chrome_options = uc.ChromeOptions() - from seleniumwire.undetected_chromedriver.v2 import Chrome, ChromeOptions + driver = uc.Chrome( + options=chrome_options, + seleniumwire_options={} + ) -See the `undetected_chromedriver docs <https://github.com/ultrafunkamsterdam/undetected-chromedriver>`_ for differences between the two versions. Certificates ~~~~~~~~~~~~ diff --git a/seleniumwire/storage.py b/seleniumwire/storage.py index 6d1cd66..1be95da 100644 --- a/seleniumwire/storage.py +++ b/seleniumwire/storage.py @@ -181,12 +181,9 @@ class RequestStorage: request_dir = self._get_request_dir(request_id) with open(os.path.join(request_dir, 'request'), 'rb') as req: - try: - request = pickle.load(req) - except Exception: - # Errors may sometimes occur with unpickling - e.g. - # sometimes data hasn't been fully flushed to disk - # by the OS by the time we come to unpickle it. + request = self._unpickle(req) + + if request is None: return None ws_messages = self._ws_messages.get(request.id) @@ -198,19 +195,37 @@ class RequestStorage: try: # Attach the response if there is one. with open(os.path.join(request_dir, 'response'), 'rb') as res: - response = pickle.load(res) - request.response = response - - # The certificate data has been stored on the response but we make - # it available on the request which is a more logical location. - if hasattr(response, 'cert'): - request.cert = response.cert - del response.cert + response = self._unpickle(res) + + if response is not None: + request.response = response + + # The certificate data has been stored on the response but we make + # it available on the request which is a more logical location. + if hasattr(response, 'cert'): + request.cert = response.cert + del response.cert except (FileNotFoundError, EOFError): pass return request + def _unpickle(self, f): + """Unpickle the object specified by the file f. + + If unpickling fails return None. + """ + try: + return pickle.load(f) + except Exception: + # Errors may sometimes occur with unpickling - e.g. + # sometimes data hasn't been fully flushed to disk + # by the OS by the time we come to unpickle it. + if log.isEnabledFor(logging.DEBUG): + log.exception('Error unpickling object') + + return None + def load_last_request(self) -> Optional[Request]: """Load the last saved request. @@ -240,8 +255,10 @@ class RequestStorage: try: with open(os.path.join(request_dir, 'har_entry'), 'rb') as f: - entry = pickle.load(f) - entries.append(entry) + entry = self._unpickle(f) + + if entry is not None: + entries.append(entry) except FileNotFoundError: # HAR entries aren't necessarily saved with each request. 
pass diff --git a/seleniumwire/undetected_chromedriver/__init__.py b/seleniumwire/undetected_chromedriver/__init__.py index a470664..8eaa1cd 100644 --- a/seleniumwire/undetected_chromedriver/__init__.py +++ b/seleniumwire/undetected_chromedriver/__init__.py @@ -1,12 +1,1 @@ -try: - import undetected_chromedriver as uc -except ImportError as e: - raise ImportError( - 'undetected_chromedriver not found. ' 'Install it with `pip install undetected_chromedriver`.' - ) from e - -from seleniumwire.webdriver import Chrome - -uc._Chrome = Chrome -Chrome = uc.Chrome -ChromeOptions = uc.ChromeOptions # noqa: F811 +from .webdriver import Chrome, ChromeOptions diff --git a/seleniumwire/undetected_chromedriver/v2.py b/seleniumwire/undetected_chromedriver/v2.py index c4a42c9..e0c9dd0 100644 --- a/seleniumwire/undetected_chromedriver/v2.py +++ b/seleniumwire/undetected_chromedriver/v2.py @@ -1,58 +1,1 @@ -import logging - -import undetected_chromedriver.v2 as uc -from selenium.webdriver import DesiredCapabilities - -from seleniumwire.inspect import InspectRequestsMixin -from seleniumwire.utils import urlsafe_address -from seleniumwire.webdriver import DriverCommonMixin - -log = logging.getLogger(__name__) - - -class Chrome(InspectRequestsMixin, DriverCommonMixin, uc.Chrome): - """Extends the undetected_chrome Chrome webdriver to provide additional - methods for inspecting requests.""" - - def __init__(self, *args, seleniumwire_options=None, **kwargs): - """Initialise a new Chrome WebDriver instance. - - Args: - seleniumwire_options: The seleniumwire options dictionary. - """ - if seleniumwire_options is None: - seleniumwire_options = {} - - config = self._setup_backend(seleniumwire_options) - - if seleniumwire_options.get('auto_config', True): - capabilities = kwargs.get('desired_capabilities') - if capabilities is None: - capabilities = DesiredCapabilities.CHROME - capabilities = capabilities.copy() - - capabilities.update(config) - - kwargs['desired_capabilities'] = capabilities - - try: - chrome_options = kwargs['options'] - except KeyError: - chrome_options = ChromeOptions() - - log.info('Using undetected_chromedriver.v2') - - # We need to point Chrome back to Selenium Wire since the executable - # will be started separately by undetected_chromedriver. - addr, port = urlsafe_address(self.backend.address()) - chrome_options.add_argument(f'--proxy-server={addr}:{port}') - chrome_options.add_argument( - f"--proxy-bypass-list={','.join(seleniumwire_options.get('exclude_hosts', ['<-loopback>']))}" - ) - - kwargs['options'] = chrome_options - - super().__init__(*args, **kwargs) - - -ChromeOptions = uc.ChromeOptions # noqa: F811 +from .webdriver import Chrome, ChromeOptions # noqa: F401 diff --git a/seleniumwire/undetected_chromedriver/webdriver.py b/seleniumwire/undetected_chromedriver/webdriver.py new file mode 100644 index 0000000..c261346 --- /dev/null +++ b/seleniumwire/undetected_chromedriver/webdriver.py @@ -0,0 +1,64 @@ +import logging + +from selenium.webdriver import DesiredCapabilities + +try: + import undetected_chromedriver as uc +except ImportError as e: + raise ImportError( + 'undetected_chromedriver not found. ' 'Install it with `pip install undetected_chromedriver`.' 
+ ) from e + +from seleniumwire.inspect import InspectRequestsMixin +from seleniumwire.utils import urlsafe_address +from seleniumwire.webdriver import DriverCommonMixin + +log = logging.getLogger(__name__) + + +class Chrome(InspectRequestsMixin, DriverCommonMixin, uc.Chrome): + """Extends the undetected_chrome Chrome webdriver to provide additional + methods for inspecting requests.""" + + def __init__(self, *args, seleniumwire_options=None, **kwargs): + """Initialise a new Chrome WebDriver instance. + + Args: + seleniumwire_options: The seleniumwire options dictionary. + """ + if seleniumwire_options is None: + seleniumwire_options = {} + + config = self._setup_backend(seleniumwire_options) + + if seleniumwire_options.get('auto_config', True): + capabilities = kwargs.get('desired_capabilities') + if capabilities is None: + capabilities = DesiredCapabilities.CHROME + capabilities = capabilities.copy() + + capabilities.update(config) + + kwargs['desired_capabilities'] = capabilities + + try: + chrome_options = kwargs['options'] + except KeyError: + chrome_options = ChromeOptions() + + log.info('Using undetected_chromedriver') + + # We need to point Chrome back to Selenium Wire since the executable + # will be started separately by undetected_chromedriver. + addr, port = urlsafe_address(self.backend.address()) + chrome_options.add_argument(f'--proxy-server={addr}:{port}') + chrome_options.add_argument( + f"--proxy-bypass-list={','.join(seleniumwire_options.get('exclude_hosts', ['<-loopback>']))}" + ) + + kwargs['options'] = chrome_options + + super().__init__(*args, **kwargs) + + +ChromeOptions = uc.ChromeOptions # noqa: F811 diff --git a/seleniumwire/webdriver.py b/seleniumwire/webdriver.py index cb876e2..115a0ea 100644 --- a/seleniumwire/webdriver.py +++ b/seleniumwire/webdriver.py @@ -26,7 +26,7 @@ SELENIUM_V4 = parse_version(getattr(selenium, '__version__', '0')) >= parse_vers class DriverCommonMixin: - """Operations common to all webdriver types.""" + """Attributes common to all webdriver types.""" def _setup_backend(self, seleniumwire_options: Dict[str, Any]) -> Dict[str, Any]: """Create the backend proxy server and return its configuration @@ -121,7 +121,7 @@ class DriverCommonMixin: class Firefox(InspectRequestsMixin, DriverCommonMixin, _Firefox): - """Extends the Firefox webdriver to provide additional methods for inspecting requests.""" + """Extend the Firefox webdriver to provide additional methods for inspecting requests.""" def __init__(self, *args, seleniumwire_options=None, **kwargs): """Initialise a new Firefox WebDriver instance. @@ -174,7 +174,7 @@ class Firefox(InspectRequestsMixin, DriverCommonMixin, _Firefox): class Chrome(InspectRequestsMixin, DriverCommonMixin, _Chrome): - """Extends the Chrome webdriver to provide additional methods for inspecting requests.""" + """Extend the Chrome webdriver to provide additional methods for inspecting requests.""" def __init__(self, *args, seleniumwire_options=None, **kwargs): """Initialise a new Chrome WebDriver instance. @@ -207,7 +207,7 @@ class Chrome(InspectRequestsMixin, DriverCommonMixin, _Chrome): class Safari(InspectRequestsMixin, DriverCommonMixin, _Safari): - """Extends the Safari webdriver to provide additional methods for inspecting requests.""" + """Extend the Safari webdriver to provide additional methods for inspecting requests.""" def __init__(self, seleniumwire_options=None, *args, **kwargs): """Initialise a new Safari WebDriver instance. 
@@ -230,7 +230,7 @@ class Safari(InspectRequestsMixin, DriverCommonMixin, _Safari): class Edge(InspectRequestsMixin, DriverCommonMixin, _Edge): - """Extends the Edge webdriver to provide additional methods for inspecting requests.""" + """Extend the Edge webdriver to provide additional methods for inspecting requests.""" def __init__(self, seleniumwire_options=None, *args, **kwargs): """Initialise a new Edge WebDriver instance. @@ -253,7 +253,7 @@ class Edge(InspectRequestsMixin, DriverCommonMixin, _Edge): class Remote(InspectRequestsMixin, DriverCommonMixin, _Remote): - """Extends the Remote webdriver to provide additional methods for inspecting requests.""" + """Extend the Remote webdriver to provide additional methods for inspecting requests.""" def __init__(self, *args, seleniumwire_options=None, **kwargs): """Initialise a new Firefox WebDriver instance. diff --git a/setup.py b/setup.py index 40a722f..013c914 100644 --- a/setup.py +++ b/setup.py @@ -61,7 +61,7 @@ setup( 'gunicorn', 'httpbin', 'isort', - 'mitmproxy', # Needed for end2end tests + "mitmproxy>5.3.0; python_version>='3.8.0'", # Needed for end2end tests 'mypy', 'pre-commit', 'pytest',
wkeeling/selenium-wire
5a5a83c0189e0a10fbcf100d619148d6c1bc7dad
diff --git a/tests/seleniumwire/test_storage.py b/tests/seleniumwire/test_storage.py index a31ab9b..67f83a8 100644 --- a/tests/seleniumwire/test_storage.py +++ b/tests/seleniumwire/test_storage.py @@ -212,6 +212,18 @@ class RequestStorageTest(TestCase): self.assertIsNotNone(requests[0].response) + @patch('seleniumwire.storage.pickle') + def test_load_response_unpickle_error(self, mock_pickle): + request = self._create_request() + self.storage.save_request(request) + response = self._create_response() + self.storage.save_response(request.id, response) + mock_pickle.load.side_effect = [request, Exception] + + requests = self.storage.load_requests() + + self.assertIsNone(requests[0].response) + def test_load_last_request(self): request_1 = self._create_request() request_2 = self._create_request()
importing uc from seleniumwire ignores proxy option On newer versions of undetected chromedriver, I need to use the arg ``use_subprocess=True``, otherwise I can't run it without getting a RuntimeError. (https://github.com/ultrafunkamsterdam/undetected-chromedriver/issues/432) This argument seems to break the usage of proxies with selenium wire, though. ```python import seleniumwire.undetected_chromedriver.v2 as uc from seleniumwire import webdriver chrome_options = webdriver.ChromeOptions() chrome_options.headless = False seleniumwire_options = {'proxy': {'https': 'type://host:port',}} driver = uc.Chrome(options=chrome_options, seleniumwire_options=seleniumwire_options, use_subprocess=True) driver.get('https://whatismyipaddress.com/') ``` The passed proxy option is ignored and the website loads without a proxy.
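For context, the patch recorded above fixes this by funneling both `seleniumwire.undetected_chromedriver` and its `v2` module through a single `webdriver.Chrome` subclass that points Chrome back at the Selenium Wire proxy. The sketch below is not the project's code; it only illustrates that core step, and `build_proxy_options` is a hypothetical helper name.

```python
# Illustrative sketch only: undetected_chromedriver launches chromedriver itself,
# so Chrome has to be told (via command-line switches) to route traffic through
# the local Selenium Wire backend; otherwise upstream proxy settings are ignored.
import undetected_chromedriver as uc  # assumed installed

from seleniumwire.utils import urlsafe_address


def build_proxy_options(backend_address, exclude_hosts=None):
    """Return ChromeOptions that send traffic through the Selenium Wire backend."""
    options = uc.ChromeOptions()
    addr, port = urlsafe_address(backend_address)
    options.add_argument(f"--proxy-server={addr}:{port}")
    bypass = ",".join(exclude_hosts or ["<-loopback>"])
    options.add_argument(f"--proxy-bypass-list={bypass}")
    return options
```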
0.0
5a5a83c0189e0a10fbcf100d619148d6c1bc7dad
[ "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_response_unpickle_error" ]
[ "tests/seleniumwire/test_storage.py::CreateTest::test_create_default_storage", "tests/seleniumwire/test_storage.py::CreateTest::test_create_in_memory_storage", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_cleanup_does_not_remove_parent_folder", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_cleanup_removes_storage", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_clear_requests", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_find", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_find_similar_urls", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_get_home_dir", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_get_session_dir", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_initialise", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_initialise_clears_old_folders", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_iter_requests", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_last_request", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_last_request_none", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_request_cert_data", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_request_with_ws_messages", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_requests", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_requests_unpickle_error", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_load_response", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_har_entry", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_har_entry_no_request", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_request", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_request_with_body", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_response", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_response_no_request", "tests/seleniumwire/test_storage.py::RequestStorageTest::test_save_response_with_body", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_cleanup", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_clear_requests", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_find", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_find_similar_urls", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_get_home_dir", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_iter_requests", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_load_last_request", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_load_last_request_none", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_load_request_with_ws_messages", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_load_requests", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_har_entry", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_har_entry_no_request", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_request", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_request_max_size", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_request_max_size_zero", 
"tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_response", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_response_cert_data", "tests/seleniumwire/test_storage.py::InMemoryRequestStorageTest::test_save_response_no_request" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-02-16 20:12:56+00:00
mit
6,260
wookayin__expt-5
diff --git a/expt/plot.py b/expt/plot.py index 657d7e5..5a09b36 100644 --- a/expt/plot.py +++ b/expt/plot.py @@ -21,6 +21,11 @@ warnings.filterwarnings("ignore", category=UserWarning, message='Creating legend with loc="best"') # yapf: enable +HypothesisSummaryFn = Callable[ # see HypothesisPlotter + [Hypothesis], pd.DataFrame] +HypothesisSummaryErrFn = Callable[ # see HypothesisPlotter + [Hypothesis], Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]] + class GridPlot: """Multi-plot grid subplots. @@ -269,7 +274,8 @@ class HypothesisPlotter: *args, subplots=True, err_style="runs", - err_fn: Optional[Callable[[Hypothesis], pd.DataFrame]] = None, + err_fn: Optional[HypothesisSummaryFn] = None, + representative_fn: Optional[HypothesisSummaryErrFn] = None, std_alpha=0.2, runs_alpha=0.2, n_samples=None, @@ -309,13 +315,31 @@ class HypothesisPlotter: (i) runs, unit_traces: Show individual runs/traces (see runs_alpha) (ii) band, fill: Show as shaded area (see std_alpha) (iii) None or False: do not display any errors - - err_fn (Callable: Hypothesis -> pd.DataFrame): - A strategy to compute the standard error or deviation. This should - return the standard err results as a DataFrame, having the same - column and index as the hypothesis. - Defaults to "standard deviation.", i.e. `hypothoses.grouped.std()`. - To use standard error, use `err_fn=lambda h: h.grouped.sem()`. - - std_alpha (float): If not None, will show the 1-std range as a + - err_fn (Callable: Hypothesis -> pd.DataFrame | Tuple): + A strategy to compute the error range when err_style is band or fill. + Defaults to "standard deviation.", i.e. `hypothosis.grouped.std()`. + This function may return either: + (i) a single DataFrame, representing the standard error, + which must have the same column and index as the hypothesis; or + (ii) a tuple of two DataFrames, representing the error range + (lower, upper). Both DataFrames must also have the same + column and index as the hypothesis. + In the case of (i), we assume that a custom `representative_fn` is + NOT being used, but the representative value of the hypothesis is + the grouped mean of the Hypothesis, i.e., `hypothesis.mean()`. + (Example) To use standard error for the bands, you can use either + `err_fn=lambda h: h.grouped.sem()` or + `err_fn=lambda h: (h.grouped.mean() - h.grouped.sem(), + h.grouped.mean() + h.grouped.sem())`. + - representative_fn (Callable: Hypothesis -> pd.DataFrame): + A strategy to compute the representative value (usually drawn + in a thicker line) when plotting. + This function should return a DataFrame that has the same column + and index as the hypothesis. + Defaults to "sample mean.", i.e., `hypothesis.mean()` + For instance, to use median instead of mean, use + `representative_fn=lambda h: h.grouped.median()` + - std_alpha (float): If not None, will show the error band as a shaded area. Defaults 0.2, - runs_alpha (float): If not None, will draw an individual line for each run. Defaults 0.2. 
@@ -339,17 +363,50 @@ class HypothesisPlotter: # nothing to draw (no rows) raise ValueError("No data to plot, all runs have empty DataFrame.") - mean, std = None, None - _h_interpolated = None + def _representative_and_err(h: Hypothesis) -> Tuple[ + pd.DataFrame, # representative (mean) + Tuple[pd.DataFrame, pd.DataFrame] # error band range (stderr) + ]: # yapf: disable + """Evaluate representative_fn and err_fn.""" - def _mean_and_err(h: Hypothesis): # type: ignore - mean = h.grouped.mean() + representative = representative_fn(h) if representative_fn \ + else h.grouped.mean() # noqa: E127 + err_range: Tuple[pd.DataFrame, pd.DataFrame] std = err_fn(h) if err_fn else h.grouped.std() - return mean, std + + # Condition check: when representative_fn is given, + # err_fn should return a range (i.e., tuple) + if representative_fn and err_fn and not isinstance(std, tuple): + raise ValueError( + "When representative_fn is given, err_fn must return a range " + "(tuple of pd.DataFrame) representing the lower and upper value " + "of the error band. Pass err_fn=None to use the default one, " + "or try: lambda h: (h.mean() + h.std(), h.mean() - h.std()). " + f"err_fn returned: {std}") + + if isinstance(std, pd.DataFrame): + mean = h.grouped.mean() + err_range = (mean - std, mean + std) + return representative, err_range + + elif (isinstance(std, tuple) and len(std) == 2 and + isinstance(std[0], pd.DataFrame) and + isinstance(std[1], pd.DataFrame)): + err_range = (std[0], std[1]) + return representative, err_range # type: ignore + + raise ValueError("err_fn must return either a tuple of " + "two DataFrames or a single DataFrame, but " + f"got {type(std)}") + + NULL = pd.DataFrame() + representative = NULL + err = (NULL, NULL) + _h_interpolated = None if 'x' not in kwargs: # index (same across runs) being x value, so we can simply average - mean, std = _mean_and_err(self._parent) + representative, err = _representative_and_err(self._parent) else: # might have different x values --- we need to interpolate. # (i) check if the x-column is consistent? @@ -363,31 +420,33 @@ class HypothesisPlotter: "recommended.", UserWarning) n_samples = 10000 else: - mean, std = _mean_and_err(self._parent) + representative, err = _representative_and_err(self._parent) if n_samples is not None: # subsample by interpolation, then average. _h_interpolated = self._parent.interpolate( x_column=kwargs.get('x', None), n_samples=n_samples) - mean, std = _mean_and_err(_h_interpolated) + representative, err = _representative_and_err(_h_interpolated) # Now that the index of group-averaged dataframes are the x samples # we interpolated on, we can let DataFrame.plot use them as index if 'x' in kwargs: del kwargs['x'] - if not isinstance(std, pd.DataFrame): - raise TypeError(f"err_fn should return a pd.DataFrame, got {type(std)}") + if not isinstance(representative, pd.DataFrame): + raise TypeError("representative_fn should return a pd.DataFrame, " + f"but got {type(err)}") # there might be many NaN values if each column is being logged # at a different period. We fill in the missing values. - mean = mean.interpolate() # type: ignore - std = std.interpolate() # type: ignore - assert mean is not None and std is not None + representative = representative.interpolate() + assert representative is not None + err = (err[0].interpolate(), err[1].interpolate()) + assert err[0] is not None and err[1] is not None # determine which columns to draw (i.e. y) before smoothing. 
# should only include numerical values - y: Iterable[str] = kwargs.get('y', None) or mean.columns + y: Iterable[str] = kwargs.get('y', None) or representative.columns if isinstance(y, str): y = [y] if 'x' in kwargs: @@ -397,24 +456,25 @@ class HypothesisPlotter: # TODO(remove): this is hack to handle homogeneous column names # over different hypotheses in a single of experiment, because it # will end up adding dummy columns instead of ignoring unknowns. - extra_y = set(y) - set(mean.columns) + extra_y = set(y) - set(representative.columns) for yi in extra_y: - mean[yi] = np.nan + representative[yi] = np.nan def _should_include_column(col_name: str) -> bool: if not col_name: # empty name return False # unknown column in the DataFrame - assert mean is not None - dtypes = mean.dtypes.to_dict() # type: ignore + assert representative is not None + dtypes = representative.dtypes.to_dict() # type: ignore if col_name not in dtypes: if ignore_unknown: return False # just ignore, no error else: - raise ValueError(f"Unknown column name '{col_name}'. " + - f"Available columns: {list(mean.columns)}; " + - "Use ignore_unknown=True to ignore unknown columns.") + raise ValueError( + f"Unknown column name '{col_name}'. " + + f"Available columns: {list(representative.columns)}; " + + "Use ignore_unknown=True to ignore unknown columns.") # include only numeric values (integer or float) if not (dtypes[col_name].kind in ('i', 'f')): @@ -424,8 +484,10 @@ class HypothesisPlotter: y = [yi for yi in y if _should_include_column(yi)] if rolling: - mean = mean.rolling(rolling, min_periods=1, center=True).mean() - std = std.rolling(rolling, min_periods=1, center=True).mean() + representative = representative.rolling( + rolling, min_periods=1, center=True).mean() + err = (err[0].rolling(rolling, min_periods=1, center=True).mean(), + err[1].rolling(rolling, min_periods=1, center=True).mean()) # suptitle: defaults to hypothesis name if ax/grid was not given if suptitle is None and (ax is None and grid is None): @@ -433,8 +495,8 @@ class HypothesisPlotter: return self._do_plot( y, - mean, # type: ignore - std, # type: ignore + representative, # type: ignore + err, # type: ignore _h_interpolated=_h_interpolated, n_samples=n_samples, subplots=subplots, @@ -465,8 +527,8 @@ class HypothesisPlotter: def _do_plot( self, y: List[str], - mean: pd.DataFrame, - std: pd.DataFrame, + representative: pd.DataFrame, # usually mean + err_range: Tuple[pd.DataFrame, pd.DataFrame], # usually mean ± stderr *, _h_interpolated: Optional[Hypothesis] = None, # type: ignore n_samples: Optional[int], @@ -544,7 +606,7 @@ class HypothesisPlotter: else: kwargs['legend'] = bool(legend) - axes = mean.plot(*args, subplots=subplots, ax=ax, **kwargs) + axes = representative.plot(*args, subplots=subplots, ax=ax, **kwargs) if err_style not in self.KNOWN_ERR_STYLES: raise ValueError(f"Unknown err_style '{err_style}', " @@ -556,10 +618,10 @@ class HypothesisPlotter: ax = cast(Axes, ax) mean_line = ax.get_lines()[-1] x = kwargs.get('x', None) - x_values = mean[x].values if x else mean[yi].index + x_values = representative[x].values if x else representative[yi].index ax.fill_between(x_values, - (mean - std)[yi].values, - (mean + std)[yi].values, + err_range[0][yi].values, + err_range[1][yi].values, color=mean_line.get_color(), alpha=std_alpha) # yapf: disable @@ -623,8 +685,8 @@ class HypothesisHvPlotter(HypothesisPlotter): def _do_plot( self, y: List[str], - mean: pd.DataFrame, - std: pd.DataFrame, + representative: pd.DataFrame, + err_range: 
Tuple[pd.DataFrame, pd.DataFrame], # usually mean ± stderr *, _h_interpolated: Optional[Hypothesis] = None, n_samples: Optional[int], @@ -642,7 +704,7 @@ class HypothesisHvPlotter(HypothesisPlotter): args: List, kwargs: Dict, ): - if not hasattr(mean, 'hvplot'): + if not hasattr(representative, 'hvplot'): import hvplot.pandas if subplots: @@ -650,7 +712,7 @@ class HypothesisHvPlotter(HypothesisPlotter): # TODO implement various options for hvplot. kwargs.update(dict(y=y)) - p = mean.hvplot(shared_axes=False, subplots=True, **kwargs) + p = representative.hvplot(shared_axes=False, subplots=True, **kwargs) # Display a single legend without duplication if legend and isinstance(p.data, dict): @@ -674,9 +736,9 @@ class HypothesisHvPlotter(HypothesisPlotter): raise NotImplementedError if err_style in ('band', 'fill') and std_alpha: - band_lower = mean - std + # TODO + band_lower, band_upper = err_range band_lower['_facet'] = 'lower' - band_upper = mean + std band_upper['_facet'] = 'upper' band = pd.concat([band_lower.add_suffix('.min'), band_upper.add_suffix('.max')], axis=1) # yapf: disable
wookayin/expt
39cb29ab535082f8c209cf993708016245fe977a
diff --git a/expt/plot_test.py b/expt/plot_test.py index f10c603..985f08d 100644 --- a/expt/plot_test.py +++ b/expt/plot_test.py @@ -1,7 +1,7 @@ """Tests for expt.plot""" import contextlib import sys -from typing import List, cast +from typing import List, Tuple, cast import matplotlib import matplotlib.pyplot as plt @@ -43,6 +43,7 @@ def matplotlib_rcparams(kwargs: dict): # ----------------------------------------------------------------------------- # Fixtures +# pylint: disable=redefined-outer-name @pytest.fixture @@ -247,8 +248,12 @@ class TestHypothesisPlot: def test_error_range_custom_fn(self, hypothesis: Hypothesis): """Tests plot(err_fn=...)""" + # Case 1: err_fn returns a single DataFrame. + # ------------------------------------------ def err_fn(h: Hypothesis) -> pd.DataFrame: - return cast(pd.DataFrame, h.grouped.std()).applymap(lambda x: 5000) + df: pd.DataFrame = h.grouped.mean() + df['loss'][:] = 5000 + return df # without interpolation g = hypothesis.plot(x='step', y='loss', err_style='fill', err_fn=err_fn) @@ -264,6 +269,52 @@ class TestHypothesisPlot: x='step', y='loss', err_style='runs', n_samples=100, err_fn=err_fn) # Note: with err_style='runs', err_fn is not useful..? + # Case 2: err_fn returns a tuple of two DataFrames. + # ------------------------------------------------- + def err_fn2(h: Hypothesis) -> Tuple[pd.DataFrame, pd.DataFrame]: + df: pd.DataFrame = h.grouped.mean() + std: pd.DataFrame = h.grouped.sem() + return (df - std, df + std * 100000) + + # without interpolation + g = hypothesis.plot(x='step', y='loss', err_style='fill', err_fn=err_fn2) + band = g['loss'].collections[0].get_paths()[0].vertices + + # std is approximately 0.25 (0.25 * 100_000 ~= 25000) + assert -1 <= np.min(band[:, 1]) <= 0 + assert 20000 <= np.max(band[:, 1]) <= 30000 + + def test_representative_custom_fn(self, hypothesis: Hypothesis): + """Tests plot(representative_fn=...)""" + + def repr_fn(h: Hypothesis) -> pd.DataFrame: + # A dummy function that manipulates the representative value ('mean') + df: pd.DataFrame = h.grouped.mean() + df['loss'] = np.asarray(df.reset_index()['step']) * -1.0 + return df + + def _ensure_representative_curve(line): + assert line.get_alpha() is None + return line + + # without interpolation + g = hypothesis.plot(x='step', y='loss', representative_fn=repr_fn) + line = _ensure_representative_curve(g['loss'].get_lines()[0]) + np.testing.assert_array_equal(line.get_xdata() * -1, line.get_ydata()) + + # with interpolation + # yapf: disable + g = hypothesis.plot(x='step', y='loss', n_samples=100, + representative_fn=repr_fn, err_style='fill') # fill + line = _ensure_representative_curve(g['loss'].get_lines()[0]) + np.testing.assert_array_equal(line.get_xdata() * -1, line.get_ydata()) + + g = hypothesis.plot(x='step', y='loss', n_samples=100, + representative_fn=repr_fn, err_style='runs') # runs + line = _ensure_representative_curve(g['loss'].get_lines()[0]) + np.testing.assert_array_equal(line.get_xdata() * -1, line.get_ydata()) + # yapf: enable + class TestExperimentPlot:
Feature Request: Plotting the median Hi @wookayin, I have been using the `expt` package to do plotting, and the experience has been great. Any chance you could add support for plotting the **median**? I am trying to plot the human normalized score used in classical Atari literature, so it would be useful to plot the median instead of mean. As an example, the snippet below plots the mean ```python # Costa: Note the data is not the same as Mnih et al., 2015 # Note the random agent score on Video Pinball is sometimes greater than the # human score under other evaluation methods. atari_human_normalized_scores = { 'Alien-v5': (227.8, 7127.7), 'Amidar-v5': (5.8, 1719.5), 'Assault-v5': (222.4, 742.0), 'Asterix-v5': (210.0, 8503.3), 'Asteroids-v5': (719.1, 47388.7), 'Atlantis-v5': (12850.0, 29028.1), 'BankHeist-v5': (14.2, 753.1), 'BattleZone-v5': (2360.0, 37187.5), 'BeamRider-v5': (363.9, 16926.5), 'Berzerk-v5': (123.7, 2630.4), 'Bowling-v5': (23.1, 160.7), 'Boxing-v5': (0.1, 12.1), 'Breakout-v5': (1.7, 30.5), 'Centipede-v5': (2090.9, 12017.0), 'ChopperCommand-v5': (811.0, 7387.8), 'CrazyClimber-v5': (10780.5, 35829.4), 'Defender-v5': (2874.5, 18688.9), # 'DemonAttack-v5': (152.1, 1971.0), # 'DoubleDunk-v5': (-18.6, -16.4), # 'Enduro-v5': (0.0, 860.5), # 'FishingDerby-v5': (-91.7, -38.7), # 'Freeway-v5': (0.0, 29.6), # 'Frostbite-v5': (65.2, 4334.7), # 'Gopher-v5': (257.6, 2412.5), # 'Gravitar-v5': (173.0, 3351.4), # 'Hero-v5': (1027.0, 30826.4), # 'IceHockey-v5': (-11.2, 0.9), # 'Jamesbond-v5': (29.0, 302.8), # 'Kangaroo-v5': (52.0, 3035.0), # 'Krull-v5': (1598.0, 2665.5), # 'KungFuMaster-v5': (258.5, 22736.3), # 'MontezumaRevenge-v5': (0.0, 4753.3), # 'MsPacman-v5': (307.3, 6951.6), # 'NameThisGame-v5': (2292.3, 8049.0), # 'Phoenix-v5': (761.4, 7242.6), # 'Pitfall-v5': (-229.4, 6463.7), # 'Pong-v5': (-20.7, 14.6), # 'PrivateEye-v5': (24.9, 69571.3), # 'Qbert-v5': (163.9, 13455.0), # 'Riverraid-v5': (1338.5, 17118.0), # 'RoadRunner-v5': (11.5, 7845.0), # 'Robotank-v5': (2.2, 11.9), # 'Seaquest-v5': (68.4, 42054.7), # 'Skiing-v5': (-17098.1, -4336.9), # 'Solaris-v5': (1236.3, 12326.7), # 'SpaceInvaders-v5': (148.0, 1668.7), # 'StarGunner-v5': (664.0, 10250.0), # 'Surround-v5': (-10.0, 6.5), # 'Tennis-v5': (-23.8, -8.3), # 'TimePilot-v5': (3568.0, 5229.2), # 'Tutankham-v5': (11.4, 167.6), # 'UpNDown-v5': (533.4, 11693.2), # 'Venture-v5': (0.0, 1187.5), # 'VideoPinball-v5': (16256.9, 17667.9), # 'WizardOfWor-v5': (563.5, 4756.5), # 'YarsRevenge-v5': (3092.9, 54576.9), # 'Zaxxon-v5': (32.5, 9173.3), } import enum from matplotlib import axis import numpy as np import expt from expt import Run, Hypothesis, Experiment import pandas as pd import matplotlib.pyplot as plt import wandb import wandb.apis.reports as wb # noqa from expt.plot import GridPlot def create_expt_runs(wandb_runs): runs = [] for idx, run in enumerate(wandb_runs): wandb_run = run.history() if 'videos' in wandb_run: wandb_run = wandb_run.drop(columns=['videos'], axis=1) runs += [Run(f"seed{idx}", wandb_run)] return runs api = wandb.Api() env_ids = atari_human_normalized_scores.keys() NUM_FRAME_STACK = 4 runss = [] for env_id in env_ids: api = wandb.Api() wandb_runs = api.runs( path="costa-huang/envpool-atari", filters={'$and': [{'config.env_id.value': env_id}, {'config.exp_name.value': 'ppo_atari_envpool_xla_jax'}]} ) expt_runs = create_expt_runs(wandb_runs) # normalize scores and adjust x-axis from steps to frames for expt_run in expt_runs: expt_run.df["charts/avg_episodic_return"] = ( expt_run.df["charts/avg_episodic_return"] - 
atari_human_normalized_scores[env_id][0]) / \ (atari_human_normalized_scores[env_id][1] - atari_human_normalized_scores[env_id][0] ) expt_run.df["global_step"] *= NUM_FRAME_STACK runss.extend(expt_runs) h = Hypothesis("CleanRL's PPO + Envpool", runss) fig, ax = plt.subplots(figsize=(4, 4)) g = h.plot(x='global_step', y="charts/avg_episodic_return", rolling=50, n_samples=400, legend=False, err_fn=lambda h: h.grouped.sem(), err_style="fill", suptitle="", ax=ax,) ax.set_title("CleanRL's PPO + Envpool") ax.yaxis.set_label_text("Human normalized score") ax.xaxis.set_label_text("Frames") plt.savefig("test.png", bbox_inches='tight') ``` <img width="401" alt="image" src="https://user-images.githubusercontent.com/5555347/179360171-1acfa6c0-55ea-48d5-8031-f03ae867ee55.png">
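With the `representative_fn`/`err_fn` hooks added by this record's patch, the median request above can be expressed directly. A hedged usage sketch, assuming a populated `Hypothesis` object `h` and that pandas' `GroupBy.quantile` is available for the band:

```python
# Hypothetical call: draw the median as the representative curve and shade the
# interquartile range; when representative_fn is given, err_fn must return the
# (lower, upper) band as a tuple of DataFrames.
g = h.plot(
    x="global_step",
    y="charts/avg_episodic_return",
    n_samples=400,
    representative_fn=lambda h: h.grouped.median(),
    err_fn=lambda h: (h.grouped.quantile(0.25), h.grouped.quantile(0.75)),
    err_style="fill",
)
```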
0.0
39cb29ab535082f8c209cf993708016245fe977a
[ "expt/plot_test.py::TestHypothesisPlot::test_error_range_custom_fn", "expt/plot_test.py::TestHypothesisPlot::test_representative_custom_fn" ]
[ "expt/plot_test.py::TestGridPlot::test_layout", "expt/plot_test.py::TestHypothesisPlot::test_grid_spec", "expt/plot_test.py::TestHypothesisPlot::test_when_fig_axes_are_given", "expt/plot_test.py::TestHypothesisPlot::test_suptitle", "expt/plot_test.py::TestHypothesisPlot::test_single_hypothesis_legend", "expt/plot_test.py::TestHypothesisPlot::test_error_range_averaging", "expt/plot_test.py::TestExperimentPlot::test_gridplot_basic", "expt/plot_test.py::TestExperimentPlot::test_when_fig_axes_are_given", "expt/plot_test.py::TestExperimentPlot::test_suptitle", "expt/plot_test.py::TestExperimentPlot::test_multi_hypothesis_legend", "expt/plot_test.py::TestExperimentPlot::test_multi_hypothesis_legend_presets", "expt/plot_test.py::TestExperimentPlot::test_color_kwargs" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-17 11:54:53+00:00
mit
6,261
wookayin__gpustat-28
diff --git a/gpustat.py b/gpustat.py index 4ec8a41..df936c4 100755 --- a/gpustat.py +++ b/gpustat.py @@ -113,6 +113,24 @@ class GPUStat(object): v = self.entry['utilization.gpu'] return int(v) if v is not None else None + @property + def power_draw(self): + """ + Returns the GPU power usage in Watts, + or None if the information is not available. + """ + v = self.entry['power.draw'] + return int(v) if v is not None else None + + @property + def power_limit(self): + """ + Returns the (enforced) GPU power limit in Watts, + or None if the information is not available. + """ + v = self.entry['enforced.power.limit'] + return int(v) if v is not None else None + @property def processes(self): """ @@ -126,6 +144,7 @@ class GPUStat(object): show_cmd=False, show_user=False, show_pid=False, + show_power=False, gpuname_width=16, term=Terminal(), ): @@ -150,6 +169,8 @@ class GPUStat(object): colors['CUser'] = term.bold_black # gray colors['CUtil'] = _conditional(lambda: int(self.entry['utilization.gpu']) < 30, term.green, term.bold_green) + colors['CPowU'] = term.bold_red + colors['CPowL'] = term.red if not with_colors: for k in list(colors.keys()): @@ -160,10 +181,14 @@ class GPUStat(object): else: return str(v) # build one-line display information - reps = ("%(C1)s[{entry[index]}]%(C0)s %(CName)s{entry[name]:{gpuname_width}}%(C0)s |" + - "%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, %(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s | " + - "%(C1)s%(CMemU)s{entry[memory.used]:>5}%(C0)s / %(CMemT)s{entry[memory.total]:>5}%(C0)s MB" - ) % colors + # we want power use optional, but if deserves being grouped with temperature and utilization + reps = "%(C1)s[{entry[index]}]%(C0)s %(CName)s{entry[name]:{gpuname_width}}%(C0)s |" \ + "%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, %(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s" + + if show_power: + reps += ", %(CPowU)s{entry[power.draw]:>3}%(C0)s / %(CPowL)s{entry[enforced.power.limit]:>3}%(C0)s W" + reps += " | %(C1)s%(CMemU)s{entry[memory.used]:>5}%(C0)s / %(CMemT)s{entry[memory.total]:>5}%(C0)s MB" + reps = (reps) % colors reps = reps.format(entry={k: _repr(v) for (k, v) in self.entry.items()}, gpuname_width=gpuname_width) reps += " |" @@ -252,6 +277,16 @@ class GPUStatCollection(object): except N.NVMLError: utilization = None # Not supported + try: + power = N.nvmlDeviceGetPowerUsage(handle) + except: + power = None + + try: + power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle) + except: + power_limit = None + processes = [] try: nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle) @@ -284,6 +319,8 @@ class GPUStatCollection(object): 'name': name, 'temperature.gpu': temperature, 'utilization.gpu': utilization.gpu if utilization else None, + 'power.draw': int(power / 1000) if power is not None else None, + 'enforced.power.limit': int(power_limit / 1000) if power is not None else None, # Convert bytes into MBytes 'memory.used': int(memory.used / 1024 / 1024) if memory else None, 'memory.total': int(memory.total / 1024 / 1024) if memory else None, @@ -323,7 +360,7 @@ class GPUStatCollection(object): def print_formatted(self, fp=sys.stdout, force_color=False, no_color=False, show_cmd=False, show_user=False, show_pid=False, - gpuname_width=16, + show_power=False, gpuname_width=16, ): # ANSI color configuration if force_color and no_color: @@ -355,6 +392,7 @@ class GPUStatCollection(object): show_cmd=show_cmd, show_user=show_user, show_pid=show_pid, + show_power=show_power, gpuname_width=gpuname_width, term=t_color) fp.write('\n') @@ -430,6 +468,8 
@@ def main(): help='Display username of running process') parser.add_argument('-p', '--show-pid', action='store_true', help='Display PID of running process') + parser.add_argument('-P', '--show-power', action='store_true', + help='Show GPU power usage (and limit)') parser.add_argument('--gpuname-width', type=int, default=16, help='The minimum column width of GPU names, defaults to 16') parser.add_argument('--json', action='store_true', default=False,
wookayin/gpustat
a38bc5fd11add4a8ab805f5b327020196ce558d0
diff --git a/test_gpustat.py b/test_gpustat.py index 0ac0279..4b81978 100644 --- a/test_gpustat.py +++ b/test_gpustat.py @@ -72,6 +72,18 @@ def _configure_mock(N, Process, mock_handles[2]: 71, }.get(handle, RuntimeError)) + N.nvmlDeviceGetPowerUsage = _raise_ex(lambda handle: { + mock_handles[0]: 125000, + mock_handles[1]: 100000, + mock_handles[2]: 250000, + }.get(handle, RuntimeError)) + + N.nvmlDeviceGetEnforcedPowerLimit = _raise_ex(lambda handle: { + mock_handles[0]: 250000, + mock_handles[1]: 250000, + mock_handles[2]: 250000, + }.get(handle, RuntimeError)) + mock_memory_t = namedtuple("Memory_t", ['total', 'used']) N.nvmlDeviceGetMemoryInfo.side_effect = _raise_ex(lambda handle: { mock_handles[0]: mock_memory_t(total=12883853312, used=8000*MB), @@ -147,7 +159,7 @@ class TestGPUStat(unittest.TestCase): gpustats = gpustat.new_query() fp = StringIO() - gpustats.print_formatted(fp=fp, no_color=False, show_user=True, show_cmd=True, show_pid=True) + gpustats.print_formatted(fp=fp, no_color=False, show_user=True, show_cmd=True, show_pid=True, show_power=True) result = fp.getvalue() print(result) @@ -157,9 +169,9 @@ class TestGPUStat(unittest.TestCase): unescaped = '\n'.join(unescaped.split('\n')[1:]) expected = """\ -[0] GeForce GTX TITAN 0 | 80'C, 76 % | 8000 / 12287 MB | user1:python/48448(4000M) user2:python/153223(4000M) -[1] GeForce GTX TITAN 1 | 36'C, 0 % | 9000 / 12189 MB | user1:torch/192453(3000M) user3:caffe/194826(6000M) -[2] GeForce GTX TITAN 2 | 71'C, ?? % | 0 / 12189 MB | (Not Supported) +[0] GeForce GTX TITAN 0 | 80'C, 76 %, 125 / 250 W | 8000 / 12287 MB | user1:python/48448(4000M) user2:python/153223(4000M) +[1] GeForce GTX TITAN 1 | 36'C, 0 %, 100 / 250 W | 9000 / 12189 MB | user1:torch/192453(3000M) user3:caffe/194826(6000M) +[2] GeForce GTX TITAN 2 | 71'C, ?? %, 250 / 250 W | 0 / 12189 MB | (Not Supported) """ self.maxDiff = 4096 self.assertEqual(unescaped, expected)
Power usage Hi, how can I add power usage and efficiency information?
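The patch above reads both values through NVML. As a rough standalone sketch (not gpustat's code), the same readings can be taken with `pynvml`; the values come back in milliwatts:

```python
# Minimal sketch: query power draw and the enforced power limit via NVML.
import pynvml as N

N.nvmlInit()
handle = N.nvmlDeviceGetHandleByIndex(0)
try:
    draw_w = N.nvmlDeviceGetPowerUsage(handle) // 1000
    limit_w = N.nvmlDeviceGetEnforcedPowerLimit(handle) // 1000
    print(f"power: {draw_w} / {limit_w} W")
except N.NVMLError:
    print("power readings not supported on this GPU")
finally:
    N.nvmlShutdown()
```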
0.0
a38bc5fd11add4a8ab805f5b327020196ce558d0
[ "test_gpustat.py::TestGPUStat::test_new_query_mocked" ]
[ "test_gpustat.py::TestGPUStat::test_attributes_and_items", "test_gpustat.py::TestGPUStat::test_new_query_mocked_nonexistent_pid" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-09-19 17:20:15+00:00
mit
6,262
wookayin__gpustat-63
diff --git a/README.md b/README.md index 4b9d24c..33bbad4 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ Options: * `-u`, `--show-user` : Display username of the process owner * `-c`, `--show-cmd` : Display the process name * `-p`, `--show-pid` : Display PID of the process +* `-F`, `--show-fan` : Display GPU fan speed * `-P`, `--show-power` : Display GPU power usage and/or limit (`draw` or `draw,limit`) * `--watch`, `-i`, `--interval` : Run in watch mode (equivalent to `watch gpustat`) if given. Denotes interval between updates. ([#41][gh-issue-41]) * `--json` : JSON Output (Experimental, [#10][gh-issue-10]) diff --git a/gpustat/__main__.py b/gpustat/__main__.py index dab7954..8de332f 100644 --- a/gpustat/__main__.py +++ b/gpustat/__main__.py @@ -79,6 +79,8 @@ def main(*argv): help='Display username of running process') parser.add_argument('-p', '--show-pid', action='store_true', help='Display PID of running process') + parser.add_argument('-F', '--show-fan', action='store_true', + help='Display GPU fan speed') parser.add_argument('--json', action='store_true', default=False, help='Print all the information in JSON format') parser.add_argument('-v', '--version', action='version', diff --git a/gpustat/core.py b/gpustat/core.py index 85c85cf..dad85a3 100644 --- a/gpustat/core.py +++ b/gpustat/core.py @@ -107,6 +107,15 @@ class GPUStat(object): v = self.entry['temperature.gpu'] return int(v) if v is not None else None + @property + def fan(self): + """ + Returns the fan percentage of GPU as an integer, + or None if the information is not available. + """ + v = self.entry['fan.speed'] + return int(v) if v is not None else None + @property def utilization(self): """ @@ -147,6 +156,7 @@ class GPUStat(object): show_user=False, show_pid=False, show_power=None, + show_fan=None, gpuname_width=16, term=Terminal(), ): @@ -165,6 +175,8 @@ class GPUStat(object): colors['CName'] = term.blue colors['CTemp'] = _conditional(lambda: self.temperature < 50, term.red, term.bold_red) + colors['FSpeed'] = _conditional(lambda: self.fan < 50, + term.yellow, term.bold_yellow) colors['CMemU'] = term.bold_yellow colors['CMemT'] = term.yellow colors['CMemP'] = term.yellow @@ -189,8 +201,12 @@ class GPUStat(object): # temperature and utilization reps = "%(C1)s[{entry[index]}]%(C0)s " \ "%(CName)s{entry[name]:{gpuname_width}}%(C0)s |" \ - "%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, " \ - "%(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s" + "%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, " + + if show_fan: + reps += "%(FSpeed)s{entry[fan.speed]:>3} %%%(C0)s, " + + reps += "%(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s" if show_power: reps += ", %(CPowU)s{entry[power.draw]:>3}%(C0)s " @@ -300,6 +316,11 @@ class GPUStatCollection(object): except N.NVMLError: temperature = None # Not supported + try: + fan_speed = N.nvmlDeviceGetFanSpeed(handle) + except N.NVMLError: + fan_speed = None # Not supported + try: memory = N.nvmlDeviceGetMemoryInfo(handle) # in Bytes except N.NVMLError: @@ -354,6 +375,7 @@ class GPUStatCollection(object): 'uuid': uuid, 'name': name, 'temperature.gpu': temperature, + 'fan.speed': fan_speed, 'utilization.gpu': utilization.gpu if utilization else None, 'power.draw': power // 1000 if power is not None else None, 'enforced.power.limit': power_limit // 1000 @@ -403,7 +425,7 @@ class GPUStatCollection(object): def print_formatted(self, fp=sys.stdout, force_color=False, no_color=False, show_cmd=False, show_user=False, show_pid=False, - show_power=None, gpuname_width=16, + show_power=None, 
show_fan=None, gpuname_width=16, show_header=True, eol_char=os.linesep, **kwargs @@ -453,6 +475,7 @@ class GPUStatCollection(object): show_user=show_user, show_pid=show_pid, show_power=show_power, + show_fan=show_fan, gpuname_width=gpuname_width, term=t_color) fp.write(eol_char)
wookayin/gpustat
28299cdcf55dd627fdd9800cf344988b43188ee8
diff --git a/gpustat/test_gpustat.py b/gpustat/test_gpustat.py index d41dd6b..088b6bd 100644 --- a/gpustat/test_gpustat.py +++ b/gpustat/test_gpustat.py @@ -81,6 +81,12 @@ def _configure_mock(N, Process, mock_handles[2]: 71, }.get(handle, RuntimeError)) + N.nvmlDeviceGetFanSpeed = _raise_ex(lambda handle: { + mock_handles[0]: 16, + mock_handles[1]: 53, + mock_handles[2]: 100, + }.get(handle, RuntimeError)) + N.nvmlDeviceGetPowerUsage = _raise_ex(lambda handle: { mock_handles[0]: 125000, mock_handles[1]: N.NVMLError_NotSupported(), # Not Supported @@ -154,9 +160,9 @@ MOCK_EXPECTED_OUTPUT_DEFAULT = """\ """ # noqa: E501 MOCK_EXPECTED_OUTPUT_FULL = """\ -[0] GeForce GTX TITAN 0 | 80'C, 76 %, 125 / 250 W | 8000 / 12287 MB | user1:python/48448(4000M) user2:python/153223(4000M) -[1] GeForce GTX TITAN 1 | 36'C, 0 %, ?? / 250 W | 9000 / 12189 MB | user1:torch/192453(3000M) user3:caffe/194826(6000M) -[2] GeForce GTX TITAN 2 | 71'C, ?? %, 250 / ?? W | 0 / 12189 MB | (Not Supported) +[0] GeForce GTX TITAN 0 | 80'C, 16 %, 76 %, 125 / 250 W | 8000 / 12287 MB | user1:python/48448(4000M) user2:python/153223(4000M) +[1] GeForce GTX TITAN 1 | 36'C, 53 %, 0 %, ?? / 250 W | 9000 / 12189 MB | user1:torch/192453(3000M) user3:caffe/194826(6000M) +[2] GeForce GTX TITAN 2 | 71'C, 100 %, ?? %, 250 / ?? W | 0 / 12189 MB | (Not Supported) """ # noqa: E501 @@ -195,7 +201,7 @@ class TestGPUStat(unittest.TestCase): fp = StringIO() gpustats.print_formatted( fp=fp, no_color=False, show_user=True, - show_cmd=True, show_pid=True, show_power=True + show_cmd=True, show_pid=True, show_power=True, show_fan=True ) result = fp.getvalue()
Show fan speed Command `nvidia-smi` shows also fan speed: ```bash $ nvidia-smi Wed Apr 3 14:09:10 2019 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 396.37 Driver Version: 396.37 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | |===============================+======================+======================| | 0 GeForce GTX 108... On | 00000000:03:00.0 On | N/A | | 30% 42C P8 16W / 250W | 53MiB / 11177MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 1 GeForce GTX 108... On | 00000000:04:00.0 Off | N/A | | 31% 43C P8 16W / 250W | 2MiB / 11178MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 2 GeForce GTX 108... On | 00000000:81:00.0 Off | N/A | | 51% 68C P2 76W / 250W | 10781MiB / 11178MiB | 17% Default | +-------------------------------+----------------------+----------------------+ | 3 GeForce GTX 108... On | 00000000:82:00.0 Off | N/A | | 29% 34C P8 16W / 250W | 2MiB / 11178MiB | 0% Default | ``` Could gpustat show this information too?
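The corresponding patch exposes this through a `-F/--show-fan` flag backed by NVML's fan-speed query. A minimal sketch of that call (assuming `pynvml` is installed; the value is a percentage and unsupported boards raise `NVMLError`):

```python
import pynvml as N

N.nvmlInit()
handle = N.nvmlDeviceGetHandleByIndex(0)
try:
    print(f"fan: {N.nvmlDeviceGetFanSpeed(handle)} %")
except N.NVMLError:
    print("fan: ??")
finally:
    N.nvmlShutdown()
```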
0.0
28299cdcf55dd627fdd9800cf344988b43188ee8
[ "gpustat/test_gpustat.py::TestGPUStat::test_new_query_mocked" ]
[ "gpustat/test_gpustat.py::TestGPUStat::test_args_endtoend", "gpustat/test_gpustat.py::TestGPUStat::test_attributes_and_items", "gpustat/test_gpustat.py::TestGPUStat::test_json_mocked", "gpustat/test_gpustat.py::TestGPUStat::test_main", "gpustat/test_gpustat.py::TestGPUStat::test_new_query_mocked_nonexistent_pid" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-04-18 19:26:46+00:00
mit
6,263
wright-group__WrightTools-1075
diff --git a/CHANGELOG.md b/CHANGELOG.md index cd3e6c7..b8410e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/). ## [Unreleased] +### Fixed +- `kit.fft`: fixed bug where Fourier coefficients were off by a scalar factor. + ## [3.4.4] ### Added diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py index a538aa9..3e3057c 100644 --- a/WrightTools/kit/_array.py +++ b/WrightTools/kit/_array.py @@ -8,6 +8,8 @@ import numpy as np from .. import exceptions as wt_exceptions +from typing import Tuple + # --- define -------------------------------------------------------------------------------------- @@ -120,35 +122,40 @@ def diff(xi, yi, order=1) -> np.ndarray: return yi -def fft(xi, yi, axis=0) -> tuple: - """Take the 1D FFT of an N-dimensional array and return "sensible" properly shifted arrays. +def fft(xi, yi, axis=0) -> Tuple[np.ndarray, np.ndarray]: + """Compute a discrete Fourier Transform along one axis of an N-dimensional + array and also compute the 1D frequency coordinates of the transform. The + Fourier coefficients and frequency coordinates are ordered so that the + coordinates are monotonic (i.e. uses `numpy.fft.fftshift`). Parameters ---------- - xi : numpy.ndarray - 1D array over which the points to be FFT'ed are defined - yi : numpy.ndarray - ND array with values to FFT + ti : 1D numpy.ndarray + Independent variable specifying data coordinates. Must be monotonic, + linearly spaced data. `ti.size` must be equal to `yi.shape[axis]` + yi : n-dimensional numpy.ndarray + Dependent variable. ND array with values to FFT. axis : int axis of yi to perform FFT over Returns ------- xi : 1D numpy.ndarray - 1D array. Conjugate to input xi. Example: if input xi is in the time - domain, output xi is in frequency domain. - yi : ND numpy.ndarray - FFT. Has the same shape as the input array (yi). + 1D array. Conjugate coordinates to input xi. Example: if input `xi` + is time coordinates, output `xi` is (cyclic) frequency coordinates. + yi : complex numpy.ndarray + Transformed data. Has the same shape as the input array (yi). """ # xi must be 1D if xi.ndim != 1: raise wt_exceptions.DimensionalityError(1, xi.ndim) # xi must be evenly spaced spacing = np.diff(xi) - if not np.allclose(spacing, spacing.mean()): + spacing_mean = spacing.mean() + if not np.allclose(spacing, spacing_mean): raise RuntimeError("WrightTools.kit.fft: argument xi must be evenly spaced") # fft - yi = np.fft.fft(yi, axis=axis) + yi = np.fft.fft(yi, axis=axis) * spacing_mean d = (xi.max() - xi.min()) / (xi.size - 1) xi = np.fft.fftfreq(xi.size, d=d) # shift
wright-group/WrightTools
91554ccfe3a2b288e7277d52f34a0220412cc0cd
diff --git a/tests/kit/fft.py b/tests/kit/fft.py index a03ab9c..5fb4973 100644 --- a/tests/kit/fft.py +++ b/tests/kit/fft.py @@ -14,12 +14,24 @@ import WrightTools as wt # --- test ---------------------------------------------------------------------------------------- -def test_1_sin(): +def test_analytic_fft(): + a = 1 - 1j + t = np.linspace(0, 10, 10000) + z = np.heaviside(t, 0.5) * np.exp(-a * t) + wi, zi = wt.kit.fft(t, z) + zi_analytical = 1 / (a + 1j * 2 * np.pi * wi) + assert np.allclose(zi.real, zi_analytical.real, atol=1e-3) + assert np.allclose(zi.imag, zi_analytical.imag, atol=1e-3) + + +def test_plancherel(): t = np.linspace(-10, 10, 10000) z = np.sin(2 * np.pi * t) wi, zi = wt.kit.fft(t, z) - freq = np.abs(wi[np.argmax(zi)]) - assert np.isclose(freq, 1, rtol=1e-3, atol=1e-3) + intensity_time = (z**2).sum() * (t[1] - t[0]) + intensity_freq = (zi * zi.conjugate()).real.sum() * (wi[1] - wi[0]) + rel_error = np.abs(intensity_time - intensity_freq) / (intensity_time + intensity_freq) + assert rel_error < 1e-12 def test_5_sines(): @@ -28,7 +40,7 @@ def test_5_sines(): z = np.sin(2 * np.pi * freqs[None, :] * t[:, None]) wi, zi = wt.kit.fft(t, z, axis=0) freq = np.abs(wi[np.argmax(zi, axis=0)]) - assert np.all(np.isclose(freq, freqs, rtol=1e-3, atol=1e-3)) + assert np.allclose(freq, freqs, rtol=1e-3, atol=1e-3) def test_dimensionality_error():
kit.fft: Fourier coefficients ignore x-axis spacing kit.fft expands on np.fft.fft by accepting an x-axis as an argument to give convenient units for Fourier space. It neglects to use these units to calculate the Fourier transform itself (e.g. we calculate `array.sum()` instead of `array.sum() * dt`). Since our method explicitly asks for x-units, we should use them to calculate the Fourier coefficients as well. This comes up when you want to relate spectral amplitudes to temporal features, e.g. ``` t = np.linspace(-10, 10, 10000) z = np.sin(2 * np.pi * t) wi, zi = wt.kit.fft(t, z) intensity_time = (z**2).sum() * (t[1] - t[0]) intensity_freq = (np.abs(zi)**2).sum() * (wi[1] - wi[0]) ``` By Parseval's (or Plancherel's) theorem, both `intensity_time` and `intensity_freq` should be equal to within calculation error. This is not currently the case. In general, all our FT coefficients are off by a constant that depends on the spacing between points.
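The fix in this record multiplies the raw FFT by the sample spacing. A self-contained sketch of why that restores Plancherel's identity, written in plain NumPy to mirror the test added here:

```python
import numpy as np

t = np.linspace(-10, 10, 10000)
dt = t[1] - t[0]
z = np.sin(2 * np.pi * t)

# "* dt" is the missing scalar factor described above.
zi = np.fft.fftshift(np.fft.fft(z)) * dt
wi = np.fft.fftshift(np.fft.fftfreq(t.size, d=dt))

intensity_time = (z ** 2).sum() * dt
intensity_freq = (np.abs(zi) ** 2).sum() * (wi[1] - wi[0])
assert np.isclose(intensity_time, intensity_freq)
```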
0.0
91554ccfe3a2b288e7277d52f34a0220412cc0cd
[ "tests/kit/fft.py::test_analytic_fft", "tests/kit/fft.py::test_plancherel" ]
[ "tests/kit/fft.py::test_5_sines", "tests/kit/fft.py::test_dimensionality_error", "tests/kit/fft.py::test_even_spacing_error" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-06-10 22:12:55+00:00
mit
6,264
wright-group__WrightTools-534
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py index 66cfb11..16136f1 100644 --- a/WrightTools/kit/_array.py +++ b/WrightTools/kit/_array.py @@ -210,17 +210,10 @@ def remove_nans_1D(*args): tuple Tuple of 1D arrays in same order as given, with nan indicies removed. """ - # find all indicies to keep - bads = np.array([]) - for arr in args: - bad = np.array(np.where(np.isnan(arr))).flatten() - bads = np.hstack((bad, bads)) - if hasattr(args, 'shape') and len(args.shape) == 1: - goods = [i for i in np.arange(args.shape[0]) if i not in bads] - else: - goods = [i for i in np.arange(len(args[0])) if i not in bads] - # apply - return tuple(a[goods] for a in args) + vals = np.isnan(args[0]) + for a in args: + vals |= np.isnan(a) + return tuple(np.array(a)[vals == False] for a in args) def share_nans(*arrs):
wright-group/WrightTools
a11e47d7786f63dcc595c8e9ccf121e73a16407b
diff --git a/tests/kit/remove_nans_1D.py b/tests/kit/remove_nans_1D.py old mode 100644 new mode 100755 index 31d15ab..8c09a16 --- a/tests/kit/remove_nans_1D.py +++ b/tests/kit/remove_nans_1D.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python3 """Test remove_nans_1D.""" @@ -18,10 +19,20 @@ def test_simple(): assert wt.kit.remove_nans_1D(arr)[0].all() == np.arange(0, 6, dtype=float).all() -def test_list(): +def test_multiple(): arrs = [np.random.random(21) for _ in range(5)] arrs[0][0] = np.nan arrs[1][-1] = np.nan arrs = wt.kit.remove_nans_1D(*arrs) for arr in arrs: assert arr.size == 19 + + +def test_list(): + assert np.all(wt.kit.remove_nans_1D([np.nan, 1, 2, 3])[0] == np.array([1, 2, 3])) + + +if __name__ == "__main__": + test_simple() + test_multiple() + test_list()
remove_nans_1D fails for list ``` >>> wt.kit.remove_nans_1D([np.nan, 1, 2, 2]) Traceback (most recent call last): File "<input>", line 1, in <module> wt.kit.remove_nans_1D([np.nan, 1, 2, 2]) File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in rem ove_nans_1D return tuple(a[goods] for a in args) File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in <ge nexpr> return tuple(a[goods] for a in args) TypeError: list indices must be integers or slices, not list >>> wt.kit.remove_nans_1D(np.array([np.nan, 1, 2, 2])) (array([1., 2., 2.]),) ```
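The fix replaces index bookkeeping with a boolean NaN mask, which handles lists because each argument is coerced to an array before indexing. A standalone sketch of that approach (hypothetical lowercase name, not the library function itself):

```python
import numpy as np

def remove_nans_1d(*args):
    # Combined mask: keep positions where no argument is NaN.
    keep = ~np.isnan(args[0])
    for a in args[1:]:
        keep &= ~np.isnan(a)
    return tuple(np.asarray(a)[keep] for a in args)

print(remove_nans_1d([np.nan, 1, 2, 3]))  # (array([1., 2., 3.]),)
```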
0.0
a11e47d7786f63dcc595c8e9ccf121e73a16407b
[ "tests/kit/remove_nans_1D.py::test_list" ]
[ "tests/kit/remove_nans_1D.py::test_simple", "tests/kit/remove_nans_1D.py::test_multiple" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2018-03-06 22:01:33+00:00
mit
6,265
wright-group__WrightTools-746
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py index dec8f19..e9ae20f 100644 --- a/WrightTools/kit/_array.py +++ b/WrightTools/kit/_array.py @@ -243,18 +243,52 @@ def share_nans(*arrs) -> tuple: return tuple([a + nans for a in arrs]) -def smooth_1D(arr, n=10) -> np.ndarray: - """Smooth 1D data by 'running average'. +def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray: + """Smooth 1D data using a window function. + + Edge effects will be present. Parameters ---------- - n : int - number of points to average + arr : array_like + Input array, 1D. + n : int (optional) + Window length. + smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional) + Type of window function to convolve data with. + 'flat' window will produce a moving average smoothing. + + Returns + ------- + array_like + Smoothed 1D array. """ - for i in range(n, len(arr) - n): - window = arr[i - n : i + n].copy() - arr[i] = window.mean() - return arr + + # check array input + if arr.ndim != 1: + raise wt_exceptions.DimensionalityError(1, arr.ndim) + if arr.size < n: + message = "Input array size must be larger than window size." + raise wt_exceptions.ValueError(message) + if n < 3: + return arr + # construct window array + if smooth_type == "flat": + w = np.ones(n, dtype=arr.dtype) + elif smooth_type == "hanning": + w = np.hanning(n) + elif smooth_type == "hamming": + w = np.hamming(n) + elif smooth_type == "bartlett": + w = np.bartlett(n) + elif smooth_type == "blackman": + w = np.blackman(n) + else: + message = "Given smooth_type, {0}, not available.".format(str(smooth_type)) + raise wt_exceptions.ValueError(message) + # convolve reflected array with window function + out = np.convolve(w / w.sum(), arr, mode="same") + return out def svd(a, i=None) -> tuple:
wright-group/WrightTools
4cf127e9d431265dad6f42c48b5be05bc36e3cb7
diff --git a/tests/kit/smooth_1D.py b/tests/kit/smooth_1D.py new file mode 100644 index 0000000..5e4e9b4 --- /dev/null +++ b/tests/kit/smooth_1D.py @@ -0,0 +1,35 @@ +"""Test kit.smooth_1D.""" + + +# --- import -------------------------------------------------------------------------------------- + + +import numpy as np + +import WrightTools as wt + + +# --- test ---------------------------------------------------------------------------------------- + + +def test_basic_smoothing_functionality(): + # create arrays + x = np.linspace(0, 10, 1000) + y = np.sin(x) + np.random.seed(seed=12) + r = np.random.rand(1000) - .5 + yr = y + r + # iterate through window types + windows = ["flat", "hanning", "hamming", "bartlett", "blackman"] + for w in windows: + out = wt.kit.smooth_1D(yr, n=101, smooth_type=w) + check_arr = out - y + check_arr = check_arr[50:-50] # get rid of edge effects + assert np.allclose(check_arr, 0, rtol=.2, atol=.2) + + +# --- run ----------------------------------------------------------------------------------------- + + +if __name__ == "__main__": + test_basic_smoothing_functionality()
test kit.smooth_1D write tests for `wt.kit.smooth_1D`
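The accompanying patch reimplements `smooth_1D` as a window convolution, which is what the new test exercises against a noisy sine. A rough standalone sketch of that smoothing idea (not WrightTools' exact code):

```python
import numpy as np

def smooth(arr, n=11, window=np.hanning):
    # Normalized window convolution; edge effects remain at the boundaries.
    w = window(n)
    return np.convolve(w / w.sum(), arr, mode="same")

x = np.linspace(0, 10, 1000)
noisy = np.sin(x) + 0.3 * (np.random.default_rng(0).random(1000) - 0.5)
smoothed = smooth(noisy, n=51)
```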
0.0
4cf127e9d431265dad6f42c48b5be05bc36e3cb7
[ "tests/kit/smooth_1D.py::test_basic_smoothing_functionality" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2018-09-17 15:10:30+00:00
mit
6,266
wright-group__WrightTools-886
diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py index 91a04ed..03d4973 100644 --- a/WrightTools/data/_channel.py +++ b/WrightTools/data/_channel.py @@ -152,14 +152,17 @@ class Channel(Dataset): factor : number (optional) Tolerance factor. Default is 3. - replace : {'nan', 'mean', number} (optional) + replace : {'nan', 'mean', 'exclusive_mean', number} (optional) Behavior of outlier replacement. Default is nan. nan Outliers are replaced by numpy nans. mean - Outliers are replaced by the mean of its neighborhood. + Outliers are replaced by the mean of its neighborhood, including itself. + + exclusive_mean + Outilers are replaced by the mean of its neighborhood, not including itself. number Array becomes given number. @@ -177,6 +180,7 @@ class Channel(Dataset): warnings.warn("trim", category=wt_exceptions.EntireDatasetInMemoryWarning) outliers = [] means = [] + ex_means = [] # find outliers for idx in np.ndindex(self.shape): slices = [] @@ -186,26 +190,33 @@ class Channel(Dataset): slices.append(slice(start, stop, 1)) neighbors = self[slices] mean = np.nanmean(neighbors) + sum_ = np.nansum(neighbors) limit = np.nanstd(neighbors) * factor if np.abs(self[idx] - mean) > limit: outliers.append(idx) means.append(mean) + # Note, "- 1" is to exclude the point itself, which is not nan, in order + # to enter this if block, as `np.abs(nan - mean)` is nan, which would + # evaluate to False + ex_means.append((sum_ - self[idx]) / (np.sum(~np.isnan(neighbors)) - 1)) + # replace outliers i = tuple(zip(*outliers)) - if replace == "nan": - arr = self[:] - arr[i] = np.nan - self[:] = arr - elif replace == "mean": - arr = self[:] - arr[i] = means - self[:] = arr - elif isinstance(replace, numbers.Number): - arr = self[:] - arr[i] = replace - self[:] = arr - else: - raise KeyError("replace must be one of {nan, mean} or some number") + + if len(i) == 0: + if verbose: + print("No outliers found") + return [] + + replace = {"nan": np.nan, "mean": means, "exclusive_mean": ex_means}.get(replace, replace) + + # This may someday be available in h5py directly, but seems that day is not yet. + # This is annoying because it is the only reason we hold the whole set in memory. + # KFS 2019-03-21 + arr = self[:] + arr[i] = replace + self[:] = arr + # finish if verbose: print("%i outliers removed" % len(outliers))
wright-group/WrightTools
dc02147913c603792e8a7c12228dc292334d8084
diff --git a/tests/data/trim.py b/tests/data/trim.py index 2a94167..f096904 100644 --- a/tests/data/trim.py +++ b/tests/data/trim.py @@ -32,18 +32,23 @@ def test_trim_2Dgauss(): d.create_channel("damaged1", arr2) d.create_channel("damaged2", arr2) d.create_channel("damaged3", arr2) + d.create_channel("damaged4", arr2) d.transform("x", "y") # trim + d.original.trim([2, 2], factor=2) d.damaged1.trim([2, 2], factor=2) d.damaged2.trim([2, 2], factor=2, replace="mean") d.damaged3.trim([2, 2], factor=2, replace=0.5) + d.damaged4.trim([2, 2], factor=2, replace="exclusive_mean") # now heal d.create_channel("healed_linear", d.damaged1[:]) d.heal(channel="healed_linear", fill_value=0, method="linear") # check - assert np.allclose(d.original[:], d.healed_linear[:], rtol=1e-1, atol=1e-1) - assert np.allclose(d.original[:], d.damaged2[:], rtol=1e-1, atol=9e-1) - assert np.allclose(d.original[:], d.damaged3[:], rtol=1e-1, atol=5e-1) + np.testing.assert_allclose(d.original[:], d.original[:], rtol=1e-1, atol=1e-1) + np.testing.assert_allclose(d.original[:], d.healed_linear[:], rtol=1e-1, atol=1e-1) + np.testing.assert_allclose(d.original[:], d.damaged2[:], rtol=1e-1, atol=9e-1) + np.testing.assert_allclose(d.original[:], d.damaged3[:], rtol=1e-1, atol=5e-1) + np.testing.assert_allclose(d.original[:], d.damaged4[:], rtol=1e-1, atol=3e-1) def test_trim_3Dgauss(): @@ -67,7 +72,7 @@ def test_trim_3Dgauss(): # trim d.damaged.trim([2, 2, 2], factor=2, replace="mean") # check - assert np.allclose(d.original[:], d.damaged[:], rtol=1e-1, atol=9e-1) + np.testing.assert_allclose(d.original[:], d.damaged[:], rtol=1e-1, atol=9e-1) if __name__ == "__main__":
BUG: Trim writes channel to all NaNs when no outliers are present Example code: ``` # import import numpy as np from matplotlib import pyplot as plt import WrightTools as wt # create arrays x = np.linspace(-3, 3, 31)[:, None] y = np.linspace(-3, 3, 31)[None, :] arr = np.exp(-1 * (x ** 2 + y ** 2)) # create damaged array arr2 = arr.copy() arr2[15,15] = 20 # create data object d = wt.data.Data() d.create_variable("x", values=x) d.create_variable("y", values=y) d.create_channel("original", arr) d.create_channel("damaged", arr2) d.create_channel("trimmed", arr2) d.create_channel("broken", arr) d.transform("x", "y") # now trim d.trimmed.trim([2,2]) # this one works as expected d.broken.trim([2,2]) # this one will write the channel to all NaNs # create figure fig, gs = wt.artists.create_figure(cols=[1, 1, 1, 1]) for i in range(4): ax = plt.subplot(gs[i]) ax.pcolor(d, channel=i) ax.set_title(d.channel_names[i]) # pretty up ticks = [-2, 0, 2] wt.artists.set_fig_labels( xlabel=d.axes[0].label, ylabel=d.axes[1].label, xticks=ticks, yticks=ticks ) ``` stdout from execution is ``` 1 outliers removed 0 outliers removed /home/darien/source/WrightTools/WrightTools/_dataset.py:373: RuntimeWarning: All-NaN slice encountered return np.nanmax(dataset[s]) /home/darien/source/WrightTools/WrightTools/_dataset.py:375: RuntimeWarning: All-NaN axis encountered self.attrs["max"] = np.nanmax(list(self.chunkwise(f).values())) ``` ![image](https://user-images.githubusercontent.com/15792657/54384984-8c3c9d00-4663-11e9-971b-e911f95489d3.png)
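The failure mode comes down to NumPy's empty-tuple indexing: when no outliers are found, the index built from the empty list selects the whole array, so the NaN replacement wipes the channel. A minimal reproduction of just that step; the recorded fix returns early when the outlier list is empty.

```python
import numpy as np

arr = np.arange(6.0).reshape(2, 3)
outliers = []                   # trim found nothing to replace
idx = tuple(zip(*outliers))     # == ()
arr[idx] = np.nan               # arr[()] addresses every element -> all NaN
print(arr)
```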
0.0
dc02147913c603792e8a7c12228dc292334d8084
[ "tests/data/trim.py::test_trim_2Dgauss" ]
[ "tests/data/trim.py::test_trim_3Dgauss" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false }
2019-03-21 16:59:48+00:00
mit
6,267
wright-group__WrightTools-938
diff --git a/WrightTools/kit/_calculate.py b/WrightTools/kit/_calculate.py index 50adda6..2aaa11e 100644 --- a/WrightTools/kit/_calculate.py +++ b/WrightTools/kit/_calculate.py @@ -137,8 +137,8 @@ def nm_width(center, width, units="wn") -> float: number Width in nm. """ - red = wt_units.converter(center - width / 2., units, "nm") - blue = wt_units.converter(center + width / 2., units, "nm") + red = wt_units.converter(center - width / 2.0, units, "nm") + blue = wt_units.converter(center + width / 2.0, units, "nm") return red - blue @@ -162,4 +162,5 @@ def symmetric_sqrt(x, out=None): """ factor = np.sign(x) out = np.sqrt(np.abs(x), out=out) - return out * factor + out *= factor + return out diff --git a/WrightTools/kit/_interpolate.py b/WrightTools/kit/_interpolate.py index aaf5438..86667ea 100644 --- a/WrightTools/kit/_interpolate.py +++ b/WrightTools/kit/_interpolate.py @@ -21,7 +21,7 @@ __all__ = ["zoom2D", "Spline"] # --- functions ----------------------------------------------------------------------------------- -def zoom2D(xi, yi, zi, xi_zoom=3., yi_zoom=3., order=3, mode="nearest", cval=0.): +def zoom2D(xi, yi, zi, xi_zoom=3.0, yi_zoom=3.0, order=3, mode="nearest", cval=0.0): """Zoom a 2D array, with axes. Parameters diff --git a/WrightTools/kit/_timestamp.py b/WrightTools/kit/_timestamp.py index 6ef2355..8a9c01e 100644 --- a/WrightTools/kit/_timestamp.py +++ b/WrightTools/kit/_timestamp.py @@ -159,7 +159,7 @@ class TimeStamp: format_string = "%Y-%m-%dT%H:%M:%S.%f" out = self.datetime.strftime(format_string) # timezone - if delta_sec == 0.: + if delta_sec == 0.0: out += "Z" else: if delta_sec > 0:
wright-group/WrightTools
e8966a3807c27c60ec23639601f4db276588d25f
diff --git a/tests/kit/symmetric_sqrt.py b/tests/kit/symmetric_sqrt.py index 2cf5c93..55112d0 100644 --- a/tests/kit/symmetric_sqrt.py +++ b/tests/kit/symmetric_sqrt.py @@ -17,3 +17,11 @@ def test_numbers(): for number in numbers: answer = wt.kit.symmetric_sqrt(number) assert answer == np.sign(number) * np.sqrt(np.abs(number)) + + +def test_no_reallocation(): + a = np.linspace(-9, 9, 3) + out = np.empty_like(a) + ret = wt.kit.symmetric_sqrt(a, out=out) + assert out is ret + assert np.allclose(ret, [-3, 0, 3])
Symmetric square root does not correctly multiply by the sign factor when the output array is supplied https://github.com/wright-group/WrightTools/blob/e875360573e8375f94ed4ed70a7d8dc02ab92bb5/WrightTools/kit/_calculate.py#L145-L165
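A standalone sketch of the fix in the patch above: multiplying into `out` in place keeps writing to the caller-supplied buffer, whereas `out * factor` allocates a fresh array. The function body mirrors the patched `symmetric_sqrt`; the surrounding script is illustrative.

```python
import numpy as np

def symmetric_sqrt(x, out=None):
    """Signed square root: sign(x) * sqrt(|x|)."""
    factor = np.sign(x)
    out = np.sqrt(np.abs(x), out=out)
    out *= factor        # in place, so the caller's buffer receives the result
    return out

a = np.linspace(-9, 9, 3)
buf = np.empty_like(a)
ret = symmetric_sqrt(a, out=buf)
assert ret is buf                       # the supplied buffer is returned
assert np.allclose(ret, [-3, 0, 3])
```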
0.0
e8966a3807c27c60ec23639601f4db276588d25f
[ "tests/kit/symmetric_sqrt.py::test_no_reallocation" ]
[ "tests/kit/symmetric_sqrt.py::test_numbers" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-03-09 22:39:35+00:00
mit
6,268
wright-group__attune-122
diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..28c3b65 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,17 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/). + +## [Unreleased] + +### Fixed +- Writing NDarrays to `instrument.json` files + +## [0.4.0] + +### Added +- initial release after a major rewrite + +[Unreleased]: https://github.com/wright-group/attune/compare/0.4.0...master +[0.4.0]: https://github.com/wright-group/attune/releases/tag/0.4.0 diff --git a/attune/_arrangement.py b/attune/_arrangement.py index bab449f..d4d867e 100644 --- a/attune/_arrangement.py +++ b/attune/_arrangement.py @@ -31,8 +31,6 @@ class Arrangement: k: Tune(**v) if isinstance(v, dict) else v for k, v in tunes.items() } self._ind_units: str = "nm" - self._ind_max: float = min([t.ind_max for t in self._tunes.values()]) - self._ind_min: float = max([t.ind_min for t in self._tunes.values()]) def __repr__(self): return f"Arrangement({repr(self.name)}, {repr(self.tunes)})" @@ -81,11 +79,11 @@ class Arrangement: @property def ind_max(self): - return self._ind_max + return min([t.ind_max for t in self._tunes.values()]) @property def ind_min(self): - return self._ind_min + return max([t.ind_min for t in self._tunes.values()]) @property def name(self): diff --git a/attune/_instrument.py b/attune/_instrument.py index a3260eb..7bf50db 100644 --- a/attune/_instrument.py +++ b/attune/_instrument.py @@ -133,4 +133,11 @@ class Instrument(object): def save(self, file): """Save the JSON representation into an open file.""" - json.dump(self.as_dict(), file) + + class NdarrayEncoder(json.JSONEncoder): + def default(self, obj): + if hasattr(obj, "tolist"): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + json.dump(self.as_dict(), file, cls=NdarrayEncoder) diff --git a/attune/_tune.py b/attune/_tune.py index 259e389..9e9e50c 100644 --- a/attune/_tune.py +++ b/attune/_tune.py @@ -31,8 +31,6 @@ class Tune: dependent = np.asarray(dependent) assert independent.size == dependent.size assert independent.ndim == dependent.ndim == 1 - self._ind_max = max(independent) - self._ind_min = min(independent) self._ind_units = "nm" self._dep_units = dep_units self._interp = scipy.interpolate.interp1d(independent, dependent, fill_value="extrapolate") @@ -79,11 +77,11 @@ class Tune: @property def ind_max(self): - return self._ind_max + return self.independent.max() @property def ind_min(self): - return self._ind_min + return self.independent.min() @property def ind_units(self):
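The patch above also teaches `Instrument.save` to serialize NumPy arrays by falling back to `tolist()`. A self-contained sketch of that encoder pattern, with a made-up payload for illustration:

```python
import json
import numpy as np

class NdarrayEncoder(json.JSONEncoder):
    """Serialize anything exposing `tolist()` (e.g. NumPy arrays) as a plain list."""
    def default(self, obj):
        if hasattr(obj, "tolist"):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)

payload = {"independent": np.linspace(0.25, 1, 5), "units": "nm"}
print(json.dumps(payload, cls=NdarrayEncoder))
```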
wright-group/attune
4e98eec7d3a3e917fc1364a80e4b46017370f595
diff --git a/tests/map.py b/tests/map.py index d3d3887..351ca18 100644 --- a/tests/map.py +++ b/tests/map.py @@ -16,6 +16,8 @@ def test_map_ind_points(): inst1["test_map"]["test"](test_points), inst0["test_map"]["test"](test_points) ) assert len(inst1["test_map"]["test"]) == 25 + assert inst1["test_map"].ind_min == 1310 + assert inst1["test_map"].ind_max == 1450 def test_map_ind_limits(): diff --git a/tests/store/test_store.py b/tests/store/test_store.py index 97c32e7..8109c30 100644 --- a/tests/store/test_store.py +++ b/tests/store/test_store.py @@ -4,6 +4,7 @@ import shutil import tempfile import attune +import numpy as np import pytest here = pathlib.Path(__file__).parent @@ -46,3 +47,12 @@ def test_load_store(): instr = attune.load("test") with pytest.warns(UserWarning, match="Attempted to store instrument equivalent"): attune.store(instr) + + +@temp_store +def test_store_ndarray(): + instr = attune.load("test") + instr = attune.map_ind_points(instr, "arr", "tune", np.linspace(0.25, 1, 5)) + # Would raise here because it is trying to serialize the ndarray in metadata + # prior to bug fix + attune.store(instr)
Incorrect ind_min and ind_max on arrangement after map ``` tune = attune.Tune(np.linspace(1300, 1400, 20), np.linspace(-5, 5, 20)) arr = attune.Arrangement("map", {"test": tune}) inst0 = attune.Instrument({"map": arr}, {"test": attune.Setable("tune")}) inst1 = attune.map_ind_points(inst0, "map", "test", np.linspace(1310, 1450, 25)) print(inst0(1373)) print(inst1(1373)) print(inst0["map"].ind_min, inst0["map"].ind_max) print(inst1["map"].ind_min, inst1["map"].ind_max) ```
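A toy illustration of the stale-cache problem described above and of the property-based fix in the patch: recomputing the limits from the current data keeps them correct after the independent values are remapped. The class below is a stand-in, not `attune.Tune`.

```python
import numpy as np

class Tune:
    """Toy stand-in for attune.Tune, only to illustrate the caching pitfall."""
    def __init__(self, independent, dependent):
        self.independent = np.asarray(independent)
        self.dependent = np.asarray(dependent)

    @property
    def ind_min(self):
        # Recomputing from the current data keeps the limits correct even
        # after `independent` is replaced (e.g. by a map operation).
        return self.independent.min()

    @property
    def ind_max(self):
        return self.independent.max()

tune = Tune(np.linspace(1300, 1400, 20), np.linspace(-5, 5, 20))
tune.independent = np.linspace(1310, 1450, 25)  # analogous to map_ind_points
assert (tune.ind_min, tune.ind_max) == (1310, 1450)
```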
0.0
4e98eec7d3a3e917fc1364a80e4b46017370f595
[ "tests/map.py::test_map_ind_points", "tests/store/test_store.py::test_store_ndarray" ]
[ "tests/map.py::test_map_ind_limits", "tests/store/test_store.py::test_normal_load_store", "tests/store/test_store.py::test_load_old", "tests/store/test_store.py::test_load_store" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-12-15 22:46:48+00:00
mit
6,269
wright-group__attune-123
diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..28c3b65 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,17 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/). + +## [Unreleased] + +### Fixed +- Writing NDarrays to `instrument.json` files + +## [0.4.0] + +### Added +- initial release after a major rewrite + +[Unreleased]: https://github.com/wright-group/attune/compare/0.4.0...master +[0.4.0]: https://github.com/wright-group/attune/releases/tag/0.4.0 diff --git a/attune/_arrangement.py b/attune/_arrangement.py index bab449f..d4d867e 100644 --- a/attune/_arrangement.py +++ b/attune/_arrangement.py @@ -31,8 +31,6 @@ class Arrangement: k: Tune(**v) if isinstance(v, dict) else v for k, v in tunes.items() } self._ind_units: str = "nm" - self._ind_max: float = min([t.ind_max for t in self._tunes.values()]) - self._ind_min: float = max([t.ind_min for t in self._tunes.values()]) def __repr__(self): return f"Arrangement({repr(self.name)}, {repr(self.tunes)})" @@ -81,11 +79,11 @@ class Arrangement: @property def ind_max(self): - return self._ind_max + return min([t.ind_max for t in self._tunes.values()]) @property def ind_min(self): - return self._ind_min + return max([t.ind_min for t in self._tunes.values()]) @property def name(self): diff --git a/attune/_instrument.py b/attune/_instrument.py index a3260eb..39b8ae6 100644 --- a/attune/_instrument.py +++ b/attune/_instrument.py @@ -15,7 +15,7 @@ class Instrument(object): def __init__( self, arrangements: Dict["str", Union[Arrangement, dict]], - setables: Dict["str", Union[Setable, dict]], + setables: Dict["str", Optional[Union[Setable, dict]]] = None, *, name: Optional[str] = None, transition: Optional[Union[Transition, dict]] = None, @@ -25,6 +25,8 @@ class Instrument(object): self._arrangements: Dict["str", Arrangement] = { k: Arrangement(**v) if isinstance(v, dict) else v for k, v in arrangements.items() } + if setables is None: + setables = {} self._setables: Dict["str", Setable] = { k: Setable(**v) if isinstance(v, dict) else v for k, v in setables.items() } @@ -77,20 +79,20 @@ class Instrument(object): raise ValueError("There are multiple valid arrangements! 
You must specify one.") # call arrangement setable_positions = {} + setables = self._setables.copy() todo = [(ind_value, tune) for tune in arrangement.tunes.items()] while todo: v, t = todo.pop(0) tune_name, tune = t - if tune_name in self._setables: - assert tune_name not in setable_positions - setable_positions[tune_name] = tune(v) - elif tune_name in self._arrangements: + if tune_name in self._arrangements: new = [ (tune(v), subtune) for subtune in self._arrangements[tune_name].tunes.items() ] todo += new else: - raise ValueError(f"Unrecognized name {tune_name}") + assert tune_name not in setable_positions + setable_positions[tune_name] = tune(v) + setables[tune_name] = Setable(tune_name) # finish note = Note( setables=self._setables, @@ -133,4 +135,11 @@ class Instrument(object): def save(self, file): """Save the JSON representation into an open file.""" - json.dump(self.as_dict(), file) + + class NdarrayEncoder(json.JSONEncoder): + def default(self, obj): + if hasattr(obj, "tolist"): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + json.dump(self.as_dict(), file, cls=NdarrayEncoder) diff --git a/attune/_tune.py b/attune/_tune.py index 259e389..9e9e50c 100644 --- a/attune/_tune.py +++ b/attune/_tune.py @@ -31,8 +31,6 @@ class Tune: dependent = np.asarray(dependent) assert independent.size == dependent.size assert independent.ndim == dependent.ndim == 1 - self._ind_max = max(independent) - self._ind_min = min(independent) self._ind_units = "nm" self._dep_units = dep_units self._interp = scipy.interpolate.interp1d(independent, dependent, fill_value="extrapolate") @@ -79,11 +77,11 @@ class Tune: @property def ind_max(self): - return self._ind_max + return self.independent.max() @property def ind_min(self): - return self._ind_min + return self.independent.min() @property def ind_units(self):
wright-group/attune
4e98eec7d3a3e917fc1364a80e4b46017370f595
diff --git a/tests/instrument/test_call.py b/tests/instrument/test_call.py index 9ae5426..ba2c911 100644 --- a/tests/instrument/test_call.py +++ b/tests/instrument/test_call.py @@ -30,3 +30,12 @@ def test_nested(): second = attune.Arrangement("second", {"first": tune1}) inst = attune.Instrument({"first": first, "second": second}, {"tune": attune.Setable("tune")}) assert math.isclose(inst(0.75, "second")["tune"], 0.25) + + +def test_implicit_setable(): + tune = attune.Tune([0, 1], [0, 1]) + tune1 = attune.Tune([0.5, 1.5], [0, 1]) + first = attune.Arrangement("first", {"tune": tune}) + second = attune.Arrangement("second", {"first": tune1}) + inst = attune.Instrument({"first": first, "second": second}) + assert math.isclose(inst(0.75, "second")["tune"], 0.25) diff --git a/tests/map.py b/tests/map.py index d3d3887..351ca18 100644 --- a/tests/map.py +++ b/tests/map.py @@ -16,6 +16,8 @@ def test_map_ind_points(): inst1["test_map"]["test"](test_points), inst0["test_map"]["test"](test_points) ) assert len(inst1["test_map"]["test"]) == 25 + assert inst1["test_map"].ind_min == 1310 + assert inst1["test_map"].ind_max == 1450 def test_map_ind_limits(): diff --git a/tests/store/test_store.py b/tests/store/test_store.py index 97c32e7..8109c30 100644 --- a/tests/store/test_store.py +++ b/tests/store/test_store.py @@ -4,6 +4,7 @@ import shutil import tempfile import attune +import numpy as np import pytest here = pathlib.Path(__file__).parent @@ -46,3 +47,12 @@ def test_load_store(): instr = attune.load("test") with pytest.warns(UserWarning, match="Attempted to store instrument equivalent"): attune.store(instr) + + +@temp_store +def test_store_ndarray(): + instr = attune.load("test") + instr = attune.map_ind_points(instr, "arr", "tune", np.linspace(0.25, 1, 5)) + # Would raise here because it is trying to serialize the ndarray in metadata + # prior to bug fix + attune.store(instr)
Settables robustness Setables are not really anything other than a string for their name at this point... which raises the question: do they need to exist at all? Currently, if there is a tune that is not an arrangement or a setable, `__call__` fails. I think it should fall back to assuming it is a setable if it is not an arrangement. We made setables objects such that they can have null positions (for arrangements which do not use them), but that has not been implemented. I propose that we make setables completely optional, and basically unused until we have such null behavior.
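A toy version of the fallback proposed above, mirroring the patched `__call__` loop: names that are arrangements are recursed into, and everything else is treated as an implicit setable. Plain callables stand in for tunes here.

```python
def resolve(arrangements, arrangement, value):
    """Walk an arrangement, treating any tune that is not itself an
    arrangement as an implicit setable (toy version of the fallback)."""
    positions = {}
    todo = [(value, name, tune) for name, tune in arrangements[arrangement].items()]
    while todo:
        v, name, tune = todo.pop(0)
        if name in arrangements:                     # nested arrangement: recurse
            todo += [(tune(v), n, t) for n, t in arrangements[name].items()]
        else:                                        # anything else: implicit setable
            positions[name] = tune(v)
    return positions

# tunes are just callables in this sketch
arrangements = {
    "first": {"tune": lambda v: v},
    "second": {"first": lambda v: v - 0.5},
}
print(resolve(arrangements, "second", 0.75))   # {'tune': 0.25}
```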
0.0
4e98eec7d3a3e917fc1364a80e4b46017370f595
[ "tests/instrument/test_call.py::test_implicit_setable", "tests/map.py::test_map_ind_points", "tests/store/test_store.py::test_store_ndarray" ]
[ "tests/instrument/test_call.py::test_overlap", "tests/instrument/test_call.py::test_nested", "tests/map.py::test_map_ind_limits", "tests/store/test_store.py::test_normal_load_store", "tests/store/test_store.py::test_load_old", "tests/store/test_store.py::test_load_store" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-12-15 22:53:36+00:00
mit
6,270
wtbarnes__fiasco-223
diff --git a/fiasco/ions.py b/fiasco/ions.py index 3a97dd5..6dd30db 100644 --- a/fiasco/ions.py +++ b/fiasco/ions.py @@ -6,7 +6,7 @@ import astropy.units as u import numpy as np from functools import cached_property -from scipy.interpolate import interp1d, splev, splrep +from scipy.interpolate import interp1d, PchipInterpolator, splev, splrep from scipy.ndimage import map_coordinates from fiasco import proton_electron_ratio @@ -173,18 +173,34 @@ Using Datasets: ionization equilibrium outside of this temperature range, it is better to use the ionization and recombination rates. + Note + ---- + The cubic interpolation is performed in log-log spaceusing a Piecewise Cubic Hermite + Interpolating Polynomial with `~scipy.interpolate.PchipInterpolator`. This helps to + ensure smoothness while reducing oscillations in the interpolated ionization fractions. + See Also -------- fiasco.Element.equilibrium_ionization """ - f = interp1d(self._ioneq[self._dset_names['ioneq_filename']]['temperature'].to('MK').value, - self._ioneq[self._dset_names['ioneq_filename']]['ionization_fraction'], - kind='linear', - bounds_error=False, - fill_value=np.nan) - ioneq = f(self.temperature.to('MK').value) - isfinite = np.isfinite(ioneq) - ioneq[isfinite] = np.where(ioneq[isfinite] < 0., 0., ioneq[isfinite]) + temperature = self.temperature.to_value('K') + temperature_data = self._ioneq[self._dset_names['ioneq_filename']]['temperature'].to_value('K') + ioneq_data = self._ioneq[self._dset_names['ioneq_filename']]['ionization_fraction'].value + # Perform PCHIP interpolation in log-space on only the non-zero ionization fractions. + # See https://github.com/wtbarnes/fiasco/pull/223 for additional discussion. + is_nonzero = ioneq_data > 0.0 + f_interp = PchipInterpolator(np.log10(temperature_data[is_nonzero]), + np.log10(ioneq_data[is_nonzero]), + extrapolate=False) + ioneq = f_interp(np.log10(temperature)) + ioneq = 10**ioneq + # This sets all entries that would have interpolated to zero ionization fraction to zero + ioneq = np.where(np.isnan(ioneq), 0.0, ioneq) + # Set entries that are truly out of bounds of the original temperature data back to NaN + out_of_bounds = np.logical_or(temperature<temperature_data.min(), temperature>temperature_data.max()) + ioneq = np.where(out_of_bounds, np.nan, ioneq) + is_finite = np.isfinite(ioneq) + ioneq[is_finite] = np.where(ioneq[is_finite] < 0., 0., ioneq[is_finite]) return u.Quantity(ioneq) @property @@ -339,6 +355,7 @@ Using Datasets: See Also -------- + proton_collision_deexcitation_rate electron_collision_excitation_rate """ # Create scaled temperature--these are not stored in the file @@ -389,6 +406,7 @@ Using Datasets: def level_populations(self, density: u.cm**(-3), include_protons=True, + include_level_resolved_rate_correction=True, couple_density_to_temperature=False) -> u.dimensionless_unscaled: """ Energy level populations as a function of temperature and density. @@ -507,10 +525,136 @@ Using Datasets: # positivity np.fabs(pop, out=pop) np.divide(pop, pop.sum(axis=1)[:, np.newaxis], out=pop) + # Apply ionization/recombination correction + if include_level_resolved_rate_correction: + correction = self._population_correction(pop, d, c_matrix) + pop *= correction + np.divide(pop, pop.sum(axis=1)[:, np.newaxis], out=pop) populations[:, i, :] = pop return u.Quantity(populations) + def _level_resolved_rates_interpolation(self, temperature_table, rate_table, + extrapolate_above=False, + extrapolate_below=False): + # NOTE: According to CHIANTI Technical Report No. 
20, Section 5, + # the interpolation of the level resolved recombination, + # the rates should be zero below the temperature range and above + # the temperature range, the last two points should be used to perform + # a linear extrapolation. For the ionization rates, the rates should be + # zero above the temperature range and below the temperature range, the + # last two points should be used. Thus, we need to perform two interpolations + # for each level. + # NOTE: In the CHIANTI IDL code, the interpolation is done using a cubic spline. + # Here, the rates are interpolated using a Piecewise Cubic Hermite Interpolating + # Polynomial (PCHIP) which balances smoothness and also reduces the oscillations + # that occur with higher order spline fits. This is needed mostly due to the wide + # range over which this data is fit. + temperature = self.temperature.to_value('K') + rates = [] + for t, r in zip(temperature_table.to_value('K'), rate_table.to_value('cm3 s-1')): + rate_interp = PchipInterpolator(t, r, extrapolate=False)(temperature) + # NOTE: Anything outside of the temperature range will be set to NaN by the + # interpolation but we want these to be 0. + rate_interp = np.where(np.isnan(rate_interp), 0, rate_interp) + if extrapolate_above: + f_extrapolate = interp1d(t[-2:], r[-2:], kind='linear', fill_value='extrapolate') + i_extrapolate = np.where(temperature > t[-1]) + rate_interp[i_extrapolate] = f_extrapolate(temperature[i_extrapolate]) + if extrapolate_below: + f_extrapolate = interp1d(t[:2], r[:2], kind='linear', fill_value='extrapolate') + i_extrapolate = np.where(temperature < t[0]) + rate_interp[i_extrapolate] = f_extrapolate(temperature[i_extrapolate]) + rates.append(rate_interp) + # NOTE: Take transpose to maintain consistent ordering of temperature in the leading + # dimension and levels in the last dimension + rates = u.Quantity(rates, 'cm3 s-1').T + # NOTE: The linear extrapolation at either end may return rates < 0 so we set these + # to zero. + rates = np.where(rates<0, 0, rates) + return rates + + @cached_property + @needs_dataset('cilvl') + @u.quantity_input + def _level_resolved_ionization_rate(self): + ionization_rates = self._level_resolved_rates_interpolation( + self._cilvl['temperature'], + self._cilvl['ionization_rate'], + extrapolate_below=True, + extrapolate_above=False, + ) + return self._cilvl['upper_level'], ionization_rates + + @cached_property + @needs_dataset('reclvl') + @u.quantity_input + def _level_resolved_recombination_rate(self): + recombination_rates = self._level_resolved_rates_interpolation( + self._reclvl['temperature'], + self._reclvl['recombination_rate'], + extrapolate_below=False, + extrapolate_above=True, + ) + return self._reclvl['upper_level'], recombination_rates + + @u.quantity_input + def _population_correction(self, population, density, rate_matrix): + """ + Correct level population to account for ionization and + recombination processes. + + Parameters + ---------- + population: `np.ndarray` + density: `~astropy.units.Quantity` + rate_matrix: `~astropy.units.Quantity` + + Returns + ------- + correction: `np.ndarray` + Correction factor to multiply populations by + """ + # NOTE: These are done in separate try/except blocks because some ions have just a cilvl file, + # some have just a reclvl file, and some have both. + # NOTE: Ioneq values for surrounding ions are retrieved afterwards because first and last ions do + # not have previous or next ions but also do not have reclvl or cilvl files. 
+ # NOTE: stripping the units off and adding them at the end because of some strange astropy + # Quantity behavior that does not allow for adding these two compatible shapes together. + numerator = np.zeros(population.shape) + try: + upper_level_ionization, ionization_rate = self._level_resolved_ionization_rate + ioneq_previous = self.previous_ion().ioneq.value[:, np.newaxis] + numerator[:, upper_level_ionization-1] += (ionization_rate * ioneq_previous).to_value('cm3 s-1') + except MissingDatasetException: + pass + try: + upper_level_recombination, recombination_rate = self._level_resolved_recombination_rate + ioneq_next = self.next_ion().ioneq.value[:, np.newaxis] + numerator[:, upper_level_recombination-1] += (recombination_rate * ioneq_next).to_value('cm3 s-1') + except MissingDatasetException: + pass + numerator *= density.to_value('cm-3') + + c = rate_matrix.to_value('s-1').copy() + # This excludes processes that depopulate the level + i_diag, j_diag = np.diag_indices(c.shape[1]) + c[:, i_diag, j_diag] = 0.0 + # Sum of the population-weighted excitations from lower levels + # and cascades from higher levels + denominator = np.einsum('ijk,ik->ij', c, population) + denominator *= self.ioneq.value[:, np.newaxis] + # Set any zero entries to NaN to avoid divide by zero warnings + denominator = np.where(denominator==0.0, np.nan, denominator) + + ratio = numerator / denominator + # Set ratio to zero where denominator is zero. This also covers the + # case of out-of-bounds ionization fractions (which will be NaN) + ratio = np.where(np.isfinite(ratio), ratio, 0.0) + # NOTE: Correction should not affect the ground state populations + ratio[:, 0] = 0.0 + return 1.0 + ratio + @needs_dataset('abundance', 'elvlc') @u.quantity_input def contribution_function(self, density: u.cm**(-3), **kwargs) -> u.cm**3 * u.erg / u.s:
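The patch above replaces linear interpolation of the ionization fractions with PCHIP in log-log space. A self-contained sketch of that interpolation pattern; the tabulated values below are made up for illustration, not CHIANTI data.

```python
import numpy as np
from scipy.interpolate import PchipInterpolator

# Hypothetical ionization-equilibrium table: temperature nodes and fractions.
temperature_data = np.logspace(4, 9, 101)  # K
ioneq_data = np.exp(-0.5 * ((np.log10(temperature_data) - 7.0) / 0.3) ** 2)

def interpolate_ioneq(temperature, t_data, frac_data):
    # Interpolate in log-log space over the non-zero fractions only.
    nonzero = frac_data > 0.0
    f = PchipInterpolator(np.log10(t_data[nonzero]),
                          np.log10(frac_data[nonzero]),
                          extrapolate=False)
    ioneq = 10 ** f(np.log10(temperature))
    # Points that fall outside the fitted (non-zero) nodes come back as NaN;
    # treat those as zero fraction, but keep NaN for temperatures outside
    # the tabulated range entirely.
    ioneq = np.where(np.isnan(ioneq), 0.0, ioneq)
    out_of_bounds = (temperature < t_data.min()) | (temperature > t_data.max())
    return np.where(out_of_bounds, np.nan, ioneq)

print(interpolate_ioneq(np.array([1e6, 1e7, 1e10]), temperature_data, ioneq_data))
```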
wtbarnes/fiasco
c674d97fc88262d1ad2afe29edafadb8e24674bb
diff --git a/fiasco/conftest.py b/fiasco/conftest.py index ce8c437..15519c7 100644 --- a/fiasco/conftest.py +++ b/fiasco/conftest.py @@ -84,6 +84,11 @@ TEST_FILES = { 'fe_27.rrparams': '75383b0f1b167f862cfd26bbadd2a029', 'fe_10.psplups': 'dd34363f6daa81dbf106fbeb211b457d', 'fe_10.elvlc': 'f221d4c7167336556d57378ac368afc1', + 'fe_20.elvlc': 'bbddcf958dd41311ea24bf177c2b62de', + 'fe_20.wgfa': 'c991c30b98b03c9152ba5a2c71877149', + 'fe_20.scups': 'f0e375cad2ec8296efb2abcb8f02705e', + 'fe_20.cilvl': 'b71833c51a03c7073f1657ce60afcdbb', + 'fe_20.reclvl': 'cf28869709acef521fb6a1c9a2b59530', } diff --git a/fiasco/tests/idl/test_idl_ioneq.py b/fiasco/tests/idl/test_idl_ioneq.py index 6061370..ac542c8 100644 --- a/fiasco/tests/idl/test_idl_ioneq.py +++ b/fiasco/tests/idl/test_idl_ioneq.py @@ -32,6 +32,7 @@ def ioneq_from_idl(idl_env, ascii_dbase_root): 'C 2', 'C 3', 'Ca 2', + 'Fe 20', ]) def test_ioneq_from_idl(ion_name, ioneq_from_idl, hdf5_dbase_root): temperature = 10**ioneq_from_idl['ioneq_logt'] * u.K diff --git a/fiasco/tests/test_collections.py b/fiasco/tests/test_collections.py index ae8575b..507ae1f 100644 --- a/fiasco/tests/test_collections.py +++ b/fiasco/tests/test_collections.py @@ -93,16 +93,18 @@ def test_length(collection): def test_free_free(another_collection, wavelength): ff = another_collection.free_free(wavelength) assert ff.shape == temperature.shape + wavelength.shape if wavelength.shape else (1,) - index = 50 if wavelength.shape else 0 - assert u.allclose(ff[50, index], 3.19877384e-35 * u.Unit('erg cm3 s-1 Angstrom-1')) + index_w = 50 if wavelength.shape else 0 + index_t = 24 # This is approximately where the ioneq for Fe V peaks + assert u.allclose(ff[index_t, index_w], 3.2914969734961024e-42 * u.Unit('erg cm3 s-1 Angstrom-1')) @pytest.mark.parametrize('wavelength', [wavelength, wavelength[50]]) def test_free_bound(another_collection, wavelength): fb = another_collection.free_bound(wavelength) assert fb.shape == temperature.shape + wavelength.shape if wavelength.shape else (1,) - index = 50 if wavelength.shape else 0 - assert u.allclose(fb[50, index], 3.2653516e-29 * u.Unit('erg cm3 s-1 Angstrom-1')) + index_w = 50 if wavelength.shape else 0 + index_t = 24 # This is approximately where the ioneq for Fe V peaks + assert u.allclose(fb[index_t, index_w], 1.1573022245197259e-35 * u.Unit('erg cm3 s-1 Angstrom-1')) def test_radiative_los(collection): diff --git a/fiasco/tests/test_ion.py b/fiasco/tests/test_ion.py index 04abc5d..fa7ea95 100644 --- a/fiasco/tests/test_ion.py +++ b/fiasco/tests/test_ion.py @@ -32,6 +32,13 @@ def c6(hdf5_dbase_root): return fiasco.Ion('C VI', temperature, hdf5_dbase_root=hdf5_dbase_root) [email protected] +def fe20(hdf5_dbase_root): + # NOTE: This ion was added because it has reclvl and cilvl files which + # we need to test the level-resolved rate correction factor + return fiasco.Ion('Fe XX', temperature, hdf5_dbase_root=hdf5_dbase_root) + + def test_new_instance(ion): abundance_filename = ion._instance_kwargs['abundance_filename'] new_ion = ion._new_instance() @@ -99,15 +106,7 @@ def test_scalar_temperature(hdf5_dbase_root): t_data = ion._ioneq[ion._dset_names['ioneq_filename']]['temperature'] ioneq_data = ion._ioneq[ion._dset_names['ioneq_filename']]['ionization_fraction'] i_t = np.where(t_data == ion.temperature) - np.testing.assert_allclose(ioneq, ioneq_data[i_t]) - - -def test_scalar_density(hdf5_dbase_root): - ion = fiasco.Ion('H 1', temperature, hdf5_dbase_root=hdf5_dbase_root) - pop = ion.level_populations(1e8 * u.cm**-3) - assert pop.shape 
== ion.temperature.shape + (1,) + ion._elvlc['level'].shape - # This value has not been checked for correctness - np.testing.assert_allclose(pop[0, 0, 0], 0.9965048292729177) + assert u.allclose(ioneq, ioneq_data[i_t]) def test_no_elvlc_raises_index_error(hdf5_dbase_root): @@ -116,13 +115,21 @@ def test_no_elvlc_raises_index_error(hdf5_dbase_root): def test_ioneq(ion): - assert ion.ioneq.shape == temperature.shape t_data = ion._ioneq[ion._dset_names['ioneq_filename']]['temperature'] ioneq_data = ion._ioneq[ion._dset_names['ioneq_filename']]['ionization_fraction'] - i_t = np.where(t_data == ion.temperature[0]) - # Essentially test that we've done the interpolation to the data correctly - # for a single value - np.testing.assert_allclose(ion.ioneq[0], ioneq_data[i_t]) + ion_at_nodes = ion._new_instance(temperature=t_data) + assert u.allclose(ion_at_nodes.ioneq, ioneq_data, rtol=1e-6) + + +def test_ioneq_positive(ion): + assert np.all(ion.ioneq >= 0) + + +def test_ioneq_out_bounds_is_nan(ion): + t_data = ion._ioneq[ion._dset_names['ioneq_filename']]['temperature'] + t_out_of_bounds = t_data[[0,-1]] + [-100, 1e6] * u.K + ion_out_of_bounds = ion._new_instance(temperature=t_out_of_bounds) + assert np.isnan(ion_out_of_bounds.ioneq).all() def test_formation_temeprature(ion): @@ -132,7 +139,7 @@ def test_formation_temeprature(ion): def test_abundance(ion): assert ion.abundance.dtype == np.dtype('float64') # This value has not been tested for correctness - np.testing.assert_allclose(ion.abundance, 0.0001258925411794166) + assert u.allclose(ion.abundance, 0.0001258925411794166) def test_proton_collision(fe10): @@ -164,6 +171,15 @@ def test_missing_ip(hdf5_dbase_root): _ = ion.ip +def test_level_populations(ion): + pop = ion.level_populations(1e8 * u.cm**-3) + assert pop.shape == ion.temperature.shape + (1,) + ion._elvlc['level'].shape + # This value has not been checked for correctness + assert u.allclose(pop[0, 0, 0], 0.011643747849652244) + # Check that the total populations are normalized to 1 for all temperatures + assert u.allclose(pop.squeeze().sum(axis=1), 1, atol=None, rtol=1e-15) + + def test_contribution_function(ion): cont_func = ion.contribution_function(1e7 * u.cm**-3) assert cont_func.shape == ion.temperature.shape + (1, ) + ion._wgfa['wavelength'].shape @@ -204,6 +220,39 @@ def test_coupling_unequal_dimensions_exception(ion): _ = ion.level_populations([1e7, 1e8]*u.cm**(-3), couple_density_to_temperature=True) [email protected] +def pops_with_correction(fe20): + return fe20.level_populations(1e9*u.cm**(-3)).squeeze() + + [email protected] +def pops_no_correction(fe20): + return fe20.level_populations(1e9*u.cm**(-3), + include_level_resolved_rate_correction=False).squeeze() + + +def test_level_populations_normalized(pops_no_correction, pops_with_correction): + assert u.allclose(pops_with_correction.sum(axis=1), 1, atol=None, rtol=1e-15) + assert u.allclose(pops_no_correction.sum(axis=1), 1, atol=None, rtol=1e-15) + + +def test_level_populations_correction(fe20, pops_no_correction, pops_with_correction): + # Test level-resolved correction applied to correct levels + i_corrected = np.unique(np.concatenate([fe20._cilvl['upper_level'], fe20._reclvl['upper_level']])) + i_corrected -= 1 + # This tests that, for at least some portion of the temperature axis, the populations are + # significantly different for each corrected level + pops_equal = u.isclose(pops_with_correction[:, i_corrected], pops_no_correction[:, i_corrected], + atol=0.0, rtol=1e-5) + assert ~np.all(np.all(pops_equal, axis=0)) 
+ # All other levels should be unchanged (with some tolerance for renormalization) + is_uncorrected = np.ones(pops_no_correction.shape[-1], dtype=bool) + is_uncorrected[i_corrected] = False + i_uncorrected = np.where(is_uncorrected) + assert u.allclose(pops_with_correction[:, i_uncorrected], pops_no_correction[:, i_uncorrected], + atol=0.0, rtol=1e-5) + + def test_emissivity(ion): emm = ion.emissivity(1e7 * u.cm**-3) assert emm.shape == ion.temperature.shape + (1, ) + ion._wgfa['wavelength'].shape
Add correction for ionization and recombination in level populations calculation There should be a correction for ionization and recombination processes in the level population calculation. Currently, this is not included. See [section 2.3 of Landi et al. (2006)](http://adsabs.harvard.edu/abs/2006ApJS..162..261L) for more details as well as [section 6 of Dere et al. (2009)](http://adsabs.harvard.edu/abs/2009A%26A...498..915D).
0.0
c674d97fc88262d1ad2afe29edafadb8e24674bb
[ "fiasco/tests/test_collections.py::test_free_free[wavelength0]", "fiasco/tests/test_collections.py::test_free_free[wavelength1]", "fiasco/tests/test_collections.py::test_free_bound[wavelength0]", "fiasco/tests/test_collections.py::test_free_bound[wavelength1]", "fiasco/tests/test_ion.py::test_level_populations_normalized", "fiasco/tests/test_ion.py::test_level_populations_correction" ]
[ "fiasco/tests/test_collections.py::test_create_collection_from_ions", "fiasco/tests/test_collections.py::test_create_collection_from_elements", "fiasco/tests/test_collections.py::test_create_collection_from_mixture", "fiasco/tests/test_collections.py::test_create_collection_from_collection", "fiasco/tests/test_collections.py::test_getitem", "fiasco/tests/test_collections.py::test_contains", "fiasco/tests/test_collections.py::test_length", "fiasco/tests/test_collections.py::test_radiative_los", "fiasco/tests/test_collections.py::test_spectrum", "fiasco/tests/test_collections.py::test_spectrum_no_valid_ions", "fiasco/tests/test_collections.py::test_unequal_temperatures_raise_value_error", "fiasco/tests/test_collections.py::test_create_with_wrong_type_raise_type_error", "fiasco/tests/test_collections.py::test_collections_repr", "fiasco/tests/test_ion.py::test_new_instance", "fiasco/tests/test_ion.py::test_level_indexing", "fiasco/tests/test_ion.py::test_level", "fiasco/tests/test_ion.py::test_repr", "fiasco/tests/test_ion.py::test_repr_scalar_temp", "fiasco/tests/test_ion.py::test_ion_properties", "fiasco/tests/test_ion.py::test_level_properties", "fiasco/tests/test_ion.py::test_scalar_temperature", "fiasco/tests/test_ion.py::test_no_elvlc_raises_index_error", "fiasco/tests/test_ion.py::test_ioneq", "fiasco/tests/test_ion.py::test_ioneq_positive", "fiasco/tests/test_ion.py::test_ioneq_out_bounds_is_nan", "fiasco/tests/test_ion.py::test_formation_temeprature", "fiasco/tests/test_ion.py::test_abundance", "fiasco/tests/test_ion.py::test_proton_collision", "fiasco/tests/test_ion.py::test_missing_abundance", "fiasco/tests/test_ion.py::test_ip", "fiasco/tests/test_ion.py::test_missing_ip", "fiasco/tests/test_ion.py::test_level_populations", "fiasco/tests/test_ion.py::test_contribution_function", "fiasco/tests/test_ion.py::test_emissivity_shape", "fiasco/tests/test_ion.py::test_coupling_unequal_dimensions_exception", "fiasco/tests/test_ion.py::test_emissivity", "fiasco/tests/test_ion.py::test_intensity[em0]", "fiasco/tests/test_ion.py::test_intensity[em1]", "fiasco/tests/test_ion.py::test_intensity[em2]", "fiasco/tests/test_ion.py::test_excitation_autoionization_rate", "fiasco/tests/test_ion.py::test_dielectronic_recombination_rate", "fiasco/tests/test_ion.py::test_free_free", "fiasco/tests/test_ion.py::test_free_bound", "fiasco/tests/test_ion.py::test_add_ions", "fiasco/tests/test_ion.py::test_radd_ions", "fiasco/tests/test_ion.py::test_transitions", "fiasco/tests/test_ion.py::test_create_ion_without_units_raises_units_error", "fiasco/tests/test_ion.py::test_create_ion_with_wrong_units_raises_unit_conversion_error", "fiasco/tests/test_ion.py::test_indexing_no_levels", "fiasco/tests/test_ion.py::test_repr_no_levels", "fiasco/tests/test_ion.py::test_next_ion", "fiasco/tests/test_ion.py::test_previous_ion" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-02-24 00:25:39+00:00
bsd-3-clause
6,271
wwkimball__yamlpath-155
diff --git a/CHANGES b/CHANGES index 1e09b97..afa9061 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,8 @@ +3.6.4 +Bug Fixes: +* Refactored single-star wildcard segment (*) handling to enable filtering + matches when subsequent segments exist; this fixes Issue #154. + 3.6.3 Bug Fixes: * The eyaml-rotate-keys command-line tool failed to preserve block-style EYAML diff --git a/yamlpath/__init__.py b/yamlpath/__init__.py index 17f456c..a47c822 100644 --- a/yamlpath/__init__.py +++ b/yamlpath/__init__.py @@ -1,6 +1,6 @@ """Core YAML Path classes.""" # Establish the version number common to all components -__version__ = "3.6.3" +__version__ = "3.6.4" from yamlpath.yamlpath import YAMLPath from yamlpath.processor import Processor diff --git a/yamlpath/enums/pathsegmenttypes.py b/yamlpath/enums/pathsegmenttypes.py index 489d9e0..4c9a402 100644 --- a/yamlpath/enums/pathsegmenttypes.py +++ b/yamlpath/enums/pathsegmenttypes.py @@ -36,6 +36,9 @@ class PathSegmentTypes(Enum): Traverses the document tree deeply. If there is a next segment, it must match or no data is matched. When there is no next segment, every leaf node matches. + + `MATCH_ALL` + Matches every immediate child node. """ ANCHOR = auto() @@ -45,3 +48,4 @@ class PathSegmentTypes(Enum): SEARCH = auto() TRAVERSE = auto() KEYWORD_SEARCH = auto() + MATCH_ALL = auto() diff --git a/yamlpath/processor.py b/yamlpath/processor.py index 7c97027..38e301c 100644 --- a/yamlpath/processor.py +++ b/yamlpath/processor.py @@ -839,6 +839,11 @@ class Processor: node_coords = self._get_nodes_by_index( data, yaml_path, segment_index, translated_path=translated_path, ancestry=ancestry) + elif segment_type == PathSegmentTypes.MATCH_ALL: + node_coords = self._get_nodes_by_match_all( + data, yaml_path, segment_index, parent=parent, + parentref=parentref, translated_path=translated_path, + ancestry=ancestry) elif segment_type == PathSegmentTypes.ANCHOR: node_coords = self._get_nodes_by_anchor( data, yaml_path, segment_index, @@ -1894,6 +1899,244 @@ class Processor: data=node_coord) yield node_coord + def _get_nodes_by_match_all_unfiltered( + self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any + ) -> Generator[Any, None, None]: + """ + Yield every immediate, non-leaf child node. + + Parameters: + 1. data (ruamel.yaml data) The parsed YAML data to process + 2. yaml_path (yamlpath.Path) The YAML Path being processed + 3. segment_index (int) Segment index of the YAML Path to process + + Keyword Arguments: + * parent (ruamel.yaml node) The parent node from which this query + originates + * parentref (Any) The Index or Key of data within parent + * translated_path (YAMLPath) YAML Path indicating precisely which node + is being evaluated + * ancestry (List[AncestryEntry]) Stack of ancestors preceding the + present node under evaluation + + Returns: (Generator[Any, None, None]) Each node coordinate as they are + matched. 
+ """ + dbg_prefix="Processor::_get_nodes_by_match_all_unfiltered: " + parent: Any = kwargs.pop("parent", None) + parentref: Any = kwargs.pop("parentref", None) + translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath("")) + ancestry: List[AncestryEntry] = kwargs.pop("ancestry", []) + segments = yaml_path.escaped + pathseg: PathSegment = segments[segment_index] + + self.logger.debug( + "Gathering ALL immediate children in the tree at parentref," + f" {parentref}, in data:", + prefix=dbg_prefix, data=data) + + if isinstance(data, (CommentedMap, dict)): + self.logger.debug( + "Iterating over all keys to find ANY matches in data:", + prefix=dbg_prefix, data=data) + for key, val in data.items(): + next_translated_path = ( + translated_path + YAMLPath.escape_path_section( + key, translated_path.seperator)) + next_ancestry = ancestry + [(data, key)] + self.logger.debug( + f"Yielding dict value at key, {key} from data:", + prefix=dbg_prefix, data={'VAL': val, 'OF_DATA': data}) + yield NodeCoords(val, data, key, next_translated_path, + next_ancestry, pathseg) + return + + if isinstance(data, (CommentedSeq, list)): + for idx, ele in enumerate(data): + next_translated_path = translated_path + f"[{idx}]" + next_ancestry = ancestry + [(data, idx)] + self.logger.debug( + f"Yielding list element at index, {idx}:", + prefix=dbg_prefix, data=ele) + yield NodeCoords(ele, data, idx, next_translated_path, + next_ancestry, pathseg) + return + + if isinstance(data, (CommentedSet, set)): + for ele in data: + next_translated_path = ( + translated_path + YAMLPath.escape_path_section( + ele, translated_path.seperator)) + self.logger.debug( + "Yielding set element:", + prefix=dbg_prefix, data=ele) + yield NodeCoords( + ele, parent, ele, next_translated_path, ancestry, pathseg) + return + + self.logger.debug( + "NOT yielding Scalar node (* excludes scalars):", + prefix=dbg_prefix, data=data) + return + + def _get_nodes_by_match_all_filtered( + self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any + ) -> Generator[Any, None, None]: + """ + Yield immediate child nodes whose children match additional filters. + + Parameters: + 1. data (ruamel.yaml data) The parsed YAML data to process + 2. yaml_path (yamlpath.Path) The YAML Path being processed + 3. segment_index (int) Segment index of the YAML Path to process + + Keyword Arguments: + * parent (ruamel.yaml node) The parent node from which this query + originates + * parentref (Any) The Index or Key of data within parent + * translated_path (YAMLPath) YAML Path indicating precisely which node + is being evaluated + * ancestry (List[AncestryEntry]) Stack of ancestors preceding the + present node under evaluation + + Returns: (Generator[Any, None, None]) Each node coordinate as they are + matched. + """ + dbg_prefix="Processor::_get_nodes_by_match_all_filtered: " + parentref: Any = kwargs.pop("parentref", None) + translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath("")) + ancestry: List[AncestryEntry] = kwargs.pop("ancestry", []) + segments = yaml_path.escaped + pathseg: PathSegment = segments[segment_index] + next_segment_idx: int = segment_index + 1 + + self.logger.debug( + "FILTERING children in the tree at parentref," + f" {parentref}, of data:", + prefix=dbg_prefix, data=data) + + # There is a filter on this segment. Return nodes from the present + # data if-and-only-if any of their immediate children will match the + # filter. 
Do not return the child nodes; the caller will continue to + # process subsequent path segments to yield them. + if isinstance(data, dict): + self.logger.debug( + "Iterating over all keys to find ANY matches in data:", + prefix=dbg_prefix, data=data) + for key, val in data.items(): + next_translated_path = ( + translated_path + YAMLPath.escape_path_section( + key, translated_path.seperator)) + next_ancestry = ancestry + [(data, key)] + for filtered_nc in self._get_nodes_by_path_segment( + val, yaml_path, next_segment_idx, parent=data, + parentref=key, translated_path=next_translated_path, + ancestry=next_ancestry + ): + self.logger.debug( + "Ignoring yielded child node coordinate to yield its" + " successfully matched, filtered dict val parent for" + f" key, {key}:" + , prefix=dbg_prefix + , data={ + 'VAL': val + , 'OF_DATA': data + , 'IGNORING': filtered_nc + }) + yield NodeCoords( + val, data, key, next_translated_path, next_ancestry, + pathseg + ) + break # because we need only the matching parent + return + + if isinstance(data, list): + for idx, ele in enumerate(data): + self.logger.debug( + f"Recursing into INDEX '{idx}' at ref '{parentref}' for" + " next-segment matches...", prefix=dbg_prefix) + next_translated_path = translated_path + f"[{idx}]" + next_ancestry = ancestry + [(data, idx)] + for filtered_nc in self._get_nodes_by_path_segment( + ele, yaml_path, next_segment_idx, parent=data, + parentref=idx, translated_path=next_translated_path, + ancestry=next_ancestry + ): + self.logger.debug( + "Ignoring yielded child node coordinate to yield its" + " successfully matched, filtered list ele parent for" + f" idx, {idx}:" + , prefix=dbg_prefix + , data={ + 'ELE': ele + , 'OF_DATA': data + , 'IGNORING': filtered_nc + }) + yield NodeCoords( + ele, data, idx, next_translated_path, next_ancestry, + pathseg + ) + break # because we need only the matching parent + return + + def _get_nodes_by_match_all( + self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any + ) -> Generator[Any, None, None]: + """ + Yield every immediate child node. + + Parameters: + 1. data (ruamel.yaml data) The parsed YAML data to process + 2. yaml_path (yamlpath.Path) The YAML Path being processed + 3. segment_index (int) Segment index of the YAML Path to process + + Keyword Arguments: + * parent (ruamel.yaml node) The parent node from which this query + originates + * parentref (Any) The Index or Key of data within parent + * translated_path (YAMLPath) YAML Path indicating precisely which node + is being evaluated + * ancestry (List[AncestryEntry]) Stack of ancestors preceding the + present node under evaluation + + Returns: (Generator[Any, None, None]) Each node coordinate as they are + matched. 
+ """ + dbg_prefix="Processor::_get_nodes_by_match_all: " + parent: Any = kwargs.pop("parent", None) + parentref: Any = kwargs.pop("parentref", None) + translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath("")) + ancestry: List[AncestryEntry] = kwargs.pop("ancestry", []) + + segments = yaml_path.escaped + next_segment_idx: int = segment_index + 1 + filter_results = next_segment_idx < len(segments) + + self.logger.debug( + "Processing either FILTERED or UNFILTERED nodes from data:" + , prefix=dbg_prefix, data=data) + + if filter_results: + # Of data, yield every node which has children matching next seg + all_coords = self._get_nodes_by_match_all_filtered( + data, yaml_path, segment_index, + parent=parent, parentref=parentref, + translated_path=translated_path, ancestry=ancestry + ) + else: + # Of data, yield every node + all_coords = self._get_nodes_by_match_all_unfiltered( + data, yaml_path, segment_index, + parent=parent, parentref=parentref, + translated_path=translated_path, ancestry=ancestry + ) + + for all_coord in all_coords: + self.logger.debug( + "Yielding matched child node of source data:" + , prefix=dbg_prefix, data={'NODE': all_coord, 'DATA': data}) + yield all_coord + def _get_required_nodes( self, data: Any, yaml_path: YAMLPath, depth: int = 0, **kwargs: Any ) -> Generator[NodeCoords, None, None]: diff --git a/yamlpath/yamlpath.py b/yamlpath/yamlpath.py index 759bafd..132e8e1 100644 --- a/yamlpath/yamlpath.py +++ b/yamlpath/yamlpath.py @@ -798,10 +798,9 @@ class YAMLPath: segment_len = len(segment_id) if splat_count == 1: if segment_len == 1: - # /*/ -> [.=~/.*/] - coal_type = PathSegmentTypes.SEARCH - coal_value = SearchTerms( - False, PathSearchMethods.REGEX, ".", ".*") + # /*/ -> MATCH_ALL + coal_type = PathSegmentTypes.MATCH_ALL + coal_value = None elif splat_pos == 0: # /*text/ -> [.$text] coal_type = PathSegmentTypes.SEARCH @@ -877,6 +876,10 @@ class YAMLPath: ) elif segment_type == PathSegmentTypes.INDEX: ppath += "[{}]".format(segment_attrs) + elif segment_type == PathSegmentTypes.MATCH_ALL: + if add_sep: + ppath += pathsep + ppath += "*" elif segment_type == PathSegmentTypes.ANCHOR: if add_sep: ppath += "[&{}]".format(segment_attrs) @@ -886,17 +889,7 @@ class YAMLPath: ppath += str(segment_attrs) elif (segment_type == PathSegmentTypes.SEARCH and isinstance(segment_attrs, SearchTerms)): - terms: SearchTerms = segment_attrs - if (terms.method == PathSearchMethods.REGEX - and terms.attribute == "." - and terms.term == ".*" - and not terms.inverted - ): - if add_sep: - ppath += pathsep - ppath += "*" - else: - ppath += str(segment_attrs) + ppath += str(segment_attrs) elif segment_type == PathSegmentTypes.COLLECTOR: ppath += str(segment_attrs) elif segment_type == PathSegmentTypes.TRAVERSE:
wwkimball/yamlpath
d2b693ca756638122697288ea25cc02310b00842
diff --git a/tests/test_processor.py b/tests/test_processor.py index a205d18..34e6ebd 100644 --- a/tests/test_processor.py +++ b/tests/test_processor.py @@ -82,7 +82,11 @@ class Test_Processor(): ("/array_of_hashes/**", [1, "one", 2, "two"], True, None), ("products_hash.*[dimensions.weight==4].(availability.start.date)+(availability.stop.date)", [[date(2020, 8, 1), date(2020, 9, 25)], [date(2020, 1, 1), date(2020, 1, 1)]], True, None), ("products_array[dimensions.weight==4].product", ["doohickey", "widget"], True, None), - ("(products_hash.*.dimensions.weight)[max()][parent(2)].dimensions.weight", [10], True, None) + ("(products_hash.*.dimensions.weight)[max()][parent(2)].dimensions.weight", [10], True, None), + ("/Locations/*/*", ["ny", "bstn"], True, None), + ("/AoH_Locations/*/*/*", ["nyc", "bo"], True, None), + ("/Weird_AoH_Locations/*/*/*", ["nyc", "bstn"], True, None), + ("/Set_Locations/*/*", ["New York", "Boston"], True, None), ]) def test_get_nodes(self, quiet_logger, yamlpath, results, mustexist, default): yamldata = """--- @@ -222,7 +226,35 @@ products_array: height: 10 depth: 1 weight: 4 + ############################################################################### +# For wildcard matching (#154) +Locations: + United States: + New York: ny + Boston: bstn + Canada: cnd + +AoH_Locations: + - United States: us + New York: + New York City: nyc + Massachussets: + Boston: bo + - Canada: ca + +# Weird Array-of-Hashes +Weird_AoH_Locations: + - United States: + New York: nyc + Boston: bstn + - Canada: cnd + +Set_Locations: + United States: !!set + ? New York + ? Boston + Canada: """ yaml = YAML() processor = Processor(quiet_logger, yaml.load(yamldata))
Unexpected nodes returned for grandchild query /Locations/*/* ## Operating System 1. Name/Distribution: Windows 10 Home 2. Version: 10.0.19043 Build 19043 ## Version of Python and packages in use at the time of the issue. 1. [Distribution](https://wiki.python.org/moin/PythonDistributions): CPython (for Windows) from python.org 2. Python Version: 3.7 3. Version of yamlpath installed: 3.6.3 4. Version of ruamel.yaml installed: 0.17.10 ## Minimum sample of YAML (or compatible) data necessary to trigger the issue ```yaml --- Locations: United States: New York: Boston: Canada: ``` ## Complete steps to reproduce the issue when triggered via: 1. Command-Line Tools (yaml-get, yaml-set, or eyaml-rotate-keys): Precise command-line arguments which trigger the defect. 2. Libraries (yamlpath.*): Minimum amount of code necessary to trigger the defect. #I thought that a complete unittest might be the most helpful way to demonstrate my issue. Please let me know if another format would be more helpful. ```python import unittest import yamlpath from yamlpath.wrappers import ConsolePrinter from yamlpath.common import Parsers from yamlpath import Processor from yamlpath.exceptions.yamlpathexception import YAMLPathException from types import SimpleNamespace class IssueReportTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_retrieveGrandChildren_OnlyGrandChildrenAreReturned(self): yamlTagHierarchy = '''--- Locations: United States: New York: Boston: Canada: ''' logging_args = SimpleNamespace(quiet=True, verbose=False, debug=False) self._log = ConsolePrinter(logging_args) self._editor = Parsers.get_yaml_editor() (yaml_data, doc_loaded) = Parsers.get_yaml_data(self._editor, self._log, yamlTagHierarchy, literal=True) self._processor = Processor(self._log, yaml_data) nodes = list(self._processor.get_nodes("/Locations/*/*")) self.assertEqual(nodes[0].parentref, "New York") self.assertEqual(nodes[1].parentref, "Boston") self.assertEqual(len(nodes), 2, f"Node '{nodes[2].parentref}' should not be part of this list, or?") ``` ## Expected Outcome When I try to select a specific level of descendant nodes using child and wildcard operators I expect to receive only nodes at the requested level. For example, in the above sample I expect "/Locations/*/*" to return "New York" and "Boston" (grandchildren of "Locations") ## Actual Outcome If another branch of the yaml tree ends above the requested level, the query returns the last leaf on that branch. The above example returns "Canada" in addition to "New York" and "Boston", which is surprising to me as "Canada" is merely a child of "Location", while "New York" and "Boston" are grandchildren. I haven't been able to identify an easy way to distinguish the child from the grandchild nodes. ## Thank you Thanks so much for considering this. I was thrilled to find yamlpath for a hobby project and really appreciate the library. I hope that I'm actually reporting a real issue rather than flaunting my ignorance of how the wildcard operator should work.
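A dict-only toy resolver (not the yamlpath API) that shows the behaviour the report expects: a `*` segment should only follow children that can still satisfy the remaining segments, so a branch that ends early, like `Canada`, is filtered out rather than returned.

```python
def get_nodes(node, segments):
    """Resolve a list of path segments against nested dicts.

    "*" matches every immediate child, but only children whose own subtree
    can satisfy the remaining segments are followed.
    """
    if not segments:
        yield node
        return
    head, *rest = segments
    if not isinstance(node, dict):
        return
    if head == "*":
        for child in node.values():
            yield from get_nodes(child, rest)
    elif head in node:
        yield from get_nodes(node[head], rest)

data = {
    "Locations": {
        "United States": {"New York": "ny", "Boston": "bstn"},
        "Canada": "cnd",
    }
}
print(list(get_nodes(data, ["Locations", "*", "*"])))  # ['ny', 'bstn']
```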
0.0
d2b693ca756638122697288ea25cc02310b00842
[ "tests/test_processor.py::Test_Processor::test_get_nodes[/Locations/*/*-results38-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[/AoH_Locations/*/*/*-results39-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[/Weird_AoH_Locations/*/*/*-results40-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[/Set_Locations/*/*-results41-True-None]" ]
[ "tests/test_processor.py::Test_Processor::test_get_none_data_nodes", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[&aliasAnchorOne]-results0-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[&newAlias]-results1-False-Not", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[0]-results2-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases.0-results3-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[(array_of_hashes.name)+(rollback_hashes.on_condition.failure.name)-results4-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[/array_of_hashes/name-results5-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[1:2]-results6-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[1:1]-results7-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[bravo:charlie]-results8-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[/&arrayOfHashes/1/step-results9-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[&arrayOfHashes[step=1].name-results10-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.!=][.=1.1]-results11-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.!=][.>1.1][.<3.3]-results12-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[.^Hey]-results13-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[.$Value]-results14-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[aliases[.%Value]-results15-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[&arrayOfHashes[step>1].name-results16-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[&arrayOfHashes[step<2].name-results17-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.>charlie]-results18-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.>=charlie]-results19-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.<bravo]-results20-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.<=bravo]-results21-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[.=~/^\\\\w{6,}$/]-results22-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[squads[alpha=1.1]-results23-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[(&arrayOfHashes.step)+(/rollback_hashes/on_condition/failure/step)-(disabled_steps)-results24-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[(&arrayOfHashes.step)+((/rollback_hashes/on_condition/failure/step)-(disabled_steps))-results25-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[(disabled_steps)+(&arrayOfHashes.step)-results26-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[(&arrayOfHashes.step)+(disabled_steps)[1]-results27-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[((&arrayOfHashes.step)[1])[0]-results28-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[does.not.previously.exist[7]-results29-False-Huzzah!]", "tests/test_processor.py::Test_Processor::test_get_nodes[/number_keys/1-results30-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[**.[.^Hey]-results31-True-None]", 
"tests/test_processor.py::Test_Processor::test_get_nodes[/**/Hey*-results32-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[lots_of_names.**.name-results33-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[/array_of_hashes/**-results34-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[products_hash.*[dimensions.weight==4].(availability.start.date)+(availability.stop.date)-results35-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[products_array[dimensions.weight==4].product-results36-True-None]", "tests/test_processor.py::Test_Processor::test_get_nodes[(products_hash.*.dimensions.weight)[max()][parent(2)].dimensions.weight-results37-True-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends-results0-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends.*bb-results1-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends[A:S]-results2-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends[2]-results3-Array", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends[&bl_anchor]-results4-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends([A:M])+([T:Z])-results5-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends([A:Z])-([S:Z])-results6-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[True-**-results7-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends-results8-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends.*bb-results9-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends[A:S]-results10-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends[2]-results11-Array", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends[&bl_anchor]-results12-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends([A:M])+([T:Z])-results13-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends([A:Z])-([S:Z])-results14-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-**-results15-None]", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends(rbi)+(errate)-results16-Cannot", "tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends.Ted\\\\", "tests/test_processor.py::Test_Processor::test_change_values_in_sets[aliases[&bl_anchor]-REPLACEMENT-**.&bl_anchor-2]", "tests/test_processor.py::Test_Processor::test_change_values_in_sets[baseball_legends.Sammy\\\\", "tests/test_processor.py::Test_Processor::test_delete_from_sets[**[&bl_anchor]-old_deleted_nodes0-new_flat_data0]", "tests/test_processor.py::Test_Processor::test_delete_from_sets[/baseball_legends/Ken\\\\", "tests/test_processor.py::Test_Processor::test_enforce_pathsep", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[abc-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.=4F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.>4F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.<4F]-True]", 
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.>=4F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.<=4F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.=4.F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.>4.F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.<4.F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.>=4.F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.<=4.F]-True]", "tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[abc.**-True]", "tests/test_processor.py::Test_Processor::test_illegal_traversal_recursion", "tests/test_processor.py::Test_Processor::test_set_value_in_empty_data", "tests/test_processor.py::Test_Processor::test_set_value_in_none_data", "tests/test_processor.py::Test_Processor::test_set_value[aliases[&testAnchor]-Updated", "tests/test_processor.py::Test_Processor::test_set_value[yamlpath1-New", "tests/test_processor.py::Test_Processor::test_set_value[/top_array/2-42-1-False-YAMLValueFormats.INT-/]", "tests/test_processor.py::Test_Processor::test_set_value[/top_hash/positive_float-0.009-1-True-YAMLValueFormats.FLOAT-/]", "tests/test_processor.py::Test_Processor::test_set_value[/top_hash/negative_float--0.009-1-True-YAMLValueFormats.FLOAT-/]", "tests/test_processor.py::Test_Processor::test_set_value[/top_hash/positive_float--2.71828-1-True-YAMLValueFormats.FLOAT-/]", "tests/test_processor.py::Test_Processor::test_set_value[/top_hash/negative_float-5283.4-1-True-YAMLValueFormats.FLOAT-/]", "tests/test_processor.py::Test_Processor::test_set_value[/null_value-No", "tests/test_processor.py::Test_Processor::test_set_value[(top_array[0])+(top_hash.negative_float)+(/null_value)-REPLACEMENT-3-True-YAMLValueFormats.DEFAULT-/]", "tests/test_processor.py::Test_Processor::test_set_value[(((top_array[0])+(top_hash.negative_float))+(/null_value))-REPLACEMENT-3-False-YAMLValueFormats.DEFAULT-/]", "tests/test_processor.py::Test_Processor::test_cannot_set_nonexistent_required_node_error", "tests/test_processor.py::Test_Processor::test_none_data_to_get_nodes_by_path_segment", "tests/test_processor.py::Test_Processor::test_bad_segment_index_for_get_nodes_by_path_segment", "tests/test_processor.py::Test_Processor::test_get_nodes_by_unknown_path_segment_error", "tests/test_processor.py::Test_Processor::test_non_int_slice_error", "tests/test_processor.py::Test_Processor::test_non_int_array_index_error", "tests/test_processor.py::Test_Processor::test_nonexistant_path_search_method_error", "tests/test_processor.py::Test_Processor::test_adjoined_collectors_error", "tests/test_processor.py::Test_Processor::test_no_attrs_to_arrays_error", "tests/test_processor.py::Test_Processor::test_no_index_to_hashes_error", "tests/test_processor.py::Test_Processor::test_get_nodes_array_impossible_type_error", "tests/test_processor.py::Test_Processor::test_no_attrs_to_scalars_errors", "tests/test_processor.py::Test_Processor::test_key_anchor_changes[/anchorKeys[&keyOne]-Set", "tests/test_processor.py::Test_Processor::test_key_anchor_changes[/hash[&keyTwo]-Confirm-1-True-YAMLValueFormats.DEFAULT-.]", "tests/test_processor.py::Test_Processor::test_key_anchor_changes[/anchorKeys[&recursiveAnchorKey]-Recurse", 
"tests/test_processor.py::Test_Processor::test_key_anchor_changes[/hash[&recursiveAnchorKey]-Recurse", "tests/test_processor.py::Test_Processor::test_key_anchor_children", "tests/test_processor.py::Test_Processor::test_cannot_add_novel_alias_keys", "tests/test_processor.py::Test_Processor::test_set_nonunique_values[number-5280-verifications0]", "tests/test_processor.py::Test_Processor::test_set_nonunique_values[aliases[&alias_number]-5280-verifications1]", "tests/test_processor.py::Test_Processor::test_set_nonunique_values[bool-False-verifications2]", "tests/test_processor.py::Test_Processor::test_set_nonunique_values[aliases[&alias_bool]-False-verifications3]", "tests/test_processor.py::Test_Processor::test_get_singular_collectors[(temps[.", "tests/test_processor.py::Test_Processor::test_scalar_collectors[(/list1)", "tests/test_processor.py::Test_Processor::test_scalar_collectors[(/list2)", "tests/test_processor.py::Test_Processor::test_scalar_collectors[((/list1)", "tests/test_processor.py::Test_Processor::test_scalar_collectors[(((/list1)", "tests/test_processor.py::Test_Processor::test_collector_math[(hash.*)-(array[1])-results0]", "tests/test_processor.py::Test_Processor::test_collector_math[(hash)-(hoh.two.*)-results1]", "tests/test_processor.py::Test_Processor::test_collector_math[(aoa)-(hoa.two)-results2]", "tests/test_processor.py::Test_Processor::test_collector_math[(aoh)-(aoh[max(key1)])-results3]", "tests/test_processor.py::Test_Processor::test_get_every_data_type", "tests/test_processor.py::Test_Processor::test_delete_nodes[delete_yamlpath0-/-old_deleted_nodes0-new_flat_data0]", "tests/test_processor.py::Test_Processor::test_delete_nodes[records[1]-.-old_deleted_nodes1-new_flat_data1]", "tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_delete", "tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_gather_and_alias", "tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_alias", "tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_gather_and_ymk", "tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_ymk", "tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_tag", "tests/test_processor.py::Test_Processor::test_anchor_nodes[alias_path0-anchor_path0--/]", "tests/test_processor.py::Test_Processor::test_anchor_nodes[a_hash.a_key-some_key--.]", "tests/test_processor.py::Test_Processor::test_ymk_nodes[target-source--.-validations0]", "tests/test_processor.py::Test_Processor::test_ymk_nodes[change_path1-ymk_path1--.-validations1]", "tests/test_processor.py::Test_Processor::test_ymk_nodes[/target-/source--/-validations2]", "tests/test_processor.py::Test_Processor::test_ymk_nodes[target-source-custom_name-.-validations3]", "tests/test_processor.py::Test_Processor::test_tag_nodes[yaml_path0-!taggidy-/]", "tests/test_processor.py::Test_Processor::test_tag_nodes[key-taggidy-.]", "tests/test_processor.py::Test_Processor::test_rename_dict_key[yaml_path0-renamed_key-old_data0-new_data0]", "tests/test_processor.py::Test_Processor::test_rename_dict_key_cannot_overwrite[yaml_path0-renamed_key-old_data0]", "tests/test_processor.py::Test_Processor::test_traverse_with_null", "tests/test_processor.py::Test_Processor::test_yaml_merge_keys_access[reuse1.key12-results0]", "tests/test_processor.py::Test_Processor::test_yaml_merge_keys_access[reuse1.&alias_name1.key12-results1]", "tests/test_processor.py::Test_Processor::test_yaml_merge_keys_access[reuse1[&alias_name1].key12-results2]", 
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/list*[has_child(&anchored_value)][name()]-results0]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/list*[!has_child(&anchored_value)][name()]-results1]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[has_child(&anchored_hash)][name()]-results2]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[!has_child(&anchored_hash)][name()]-results3]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[has_child(&anchored_key)][name()]-results4]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[!has_child(&anchored_key)][name()]-results5]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[has_child(&anchored_value)][name()]-results6]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[!has_child(&anchored_value)][name()]-results7]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoh[has_child(&anchored_hash)]/intent-results8]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoh[!has_child(&anchored_hash)]/intent-results9]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoa/*[has_child(&anchored_value)][name()]-results10]", "tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoa/*[!has_child(&anchored_value)][name()]-results11]", "tests/test_processor.py::Test_Processor::test_wiki_array_element_searches[temperature[.", "tests/test_processor.py::Test_Processor::test_wiki_collectors[consoles[.", "tests/test_processor.py::Test_Processor::test_wiki_collector_math[(/standard/setup/action)", "tests/test_processor.py::Test_Processor::test_wiki_collector_math[(/standard[.!='']/action)", "tests/test_processor.py::Test_Processor::test_wiki_collector_math[(/standard[.!='']/id)", "tests/test_processor.py::Test_Processor::test_wiki_collector_order_of_ops[(/list1)", "tests/test_processor.py::Test_Processor::test_wiki_collector_order_of_ops[(/list2)", "tests/test_processor.py::Test_Processor::test_wiki_collector_order_of_ops[((/list1)", "tests/test_processor.py::Test_Processor::test_wiki_search_array_of_hashes[warriors[power_level", "tests/test_processor.py::Test_Processor::test_wiki_search_key_names[contrast_ct[.", "tests/test_processor.py::Test_Processor::test_wiki_has_child[hash_of_hashes.*[!has_child(child_two)]-results0]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[/array_of_hashes/*[!has_child(child_two)]-results1]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[/hash_of_hashes/*[!has_child(child_two)][name()]-results2]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[array_of_hashes.*[!has_child(child_two)].id-results3]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[/array_of_arrays/*[!has_child(value2.1)]-results4]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[array_of_arrays[*!=value2.1]-results5]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[array_of_arrays.*[!has_child(value2.1)][name()]-results6]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[/array_of_arrays[*!=value2.1][name()]-results7]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[(/array_of_arrays/*[!has_child(value2.1)][name()])[0]-results8]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[(array_of_arrays[*!=value2.1][name()])[0]-results9]", 
"tests/test_processor.py::Test_Processor::test_wiki_has_child[(array_of_arrays.*[!has_child(value2.1)][name()])[-1]-results10]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[(/array_of_arrays[*!=value2.1][name()])[-1]-results11]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[/simple_array[has_child(value1.1)]-results12]", "tests/test_processor.py::Test_Processor::test_wiki_has_child[/simple_array[!has_child(value1.3)]-results13]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[max(price)]-results0]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[max(price)]-results1]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[max(price)]/price-results2]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[max(price)]/price-results3]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[max(price)]/product-results4]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[max(price)][name()]-results5]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[prices_array[max()]-results6]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_aoh[max(price)]-results7]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_hash[max(price)]-results8]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_array[max()]-results9]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bare[max()]-results10]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_aoh[!max(price)])[max(price)]-results11]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_hash[!max(price)])[max(price)]-results12]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_aoh)-(prices_aoh[max(price)])[max(price)]-results13]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_hash)-(prices_hash[max(price)]).*[max(price)]-results14]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_aoh[!max(price)])[max(price)])[0]-results15]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_hash[!max(price)])[max(price)])[0]-results16]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_aoh[!max(price)])[max(price)])[0].price-results17]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_hash[!max(price)])[max(price)])[0].price-results18]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[min(price)]-results19]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[min(price)]-results20]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[min(price)]/price-results21]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[min(price)]/price-results22]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[min(price)]/product-results23]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[min(price)][name()]-results24]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[prices_array[min()]-results25]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_aoh[min(price)]-results26]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_hash[min(price)]-results27]", "tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_array[min()]-results28]", 
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bare[min()]-results29]", "tests/test_processor.py::Test_Processor::test_wiki_parent[**.Opal[parent()][name()]-results0]", "tests/test_processor.py::Test_Processor::test_wiki_parent[minerals.*.*.mohs_hardness[.>7][parent(2)][name()]-results1]", "tests/test_processor.py::Test_Processor::test_wiki_parent[minerals.*.*.[mohs_hardness[1]>7][name()]-results2]", "tests/test_processor.py::Test_Processor::test_wiki_parent[minerals.*.*(([mohs_hardness[0]>=4])-([mohs_hardness[1]>5]))[name()]-results3]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-12-06 19:50:45+00:00
isc
6,272
wwkimball__yamlpath-225
diff --git a/yamlpath/merger/mergerconfig.py b/yamlpath/merger/mergerconfig.py index cafc0c3..394abac 100644 --- a/yamlpath/merger/mergerconfig.py +++ b/yamlpath/merger/mergerconfig.py @@ -4,7 +4,7 @@ Implement MergerConfig. Copyright 2020, 2021 William W. Kimball, Jr. MBA MSIS """ import configparser -from typing import Any, Dict, Union +from typing import Any, Dict, Optional from argparse import Namespace from yamlpath.exceptions import YAMLPathException @@ -24,23 +24,35 @@ from yamlpath.wrappers import ConsolePrinter, NodeCoords class MergerConfig: """Config file processor for the Merger.""" - def __init__(self, logger: ConsolePrinter, args: Namespace) -> None: + def __init__( + self, + logger: ConsolePrinter, + args: Namespace, + **kwargs: Any, + ) -> None: """ Instantiate this class into an object. Parameters: 1. logger (ConsolePrinter) Instance of ConsoleWriter or subclass 2. args (dict) Default options for merge rules + 3. kwargs (dict) Overrides for config values Returns: N/A """ self.log = logger self.args = args - self.config: Union[None, configparser.ConfigParser] = None + self.config: Optional[configparser.ConfigParser] = None self.rules: Dict[NodeCoords, str] = {} self.keys: Dict[NodeCoords, str] = {} + config_overrides: Dict[str, Any] = {} + + if "keys" in kwargs: + config_overrides["keys"] = kwargs.pop("keys") + if "rules" in kwargs: + config_overrides["rules"] = kwargs.pop("rules") - self._load_config() + self._load_config(config_overrides) def anchor_merge_mode(self) -> AnchorConflictResolutions: """ @@ -322,7 +334,7 @@ class MergerConfig: "... NODE:", data=node_coord, prefix="MergerConfig::_prepare_user_rules: ") - def _load_config(self) -> None: + def _load_config(self, config_overrides: Dict[str, Any]) -> None: """Load the external configuration file.""" config = configparser.ConfigParser() @@ -334,8 +346,15 @@ class MergerConfig: if config_file: config.read(config_file) - if config.sections(): - self.config = config + + if "keys" in config_overrides: + config["keys"] = config_overrides["keys"] + + if "rules" in config_overrides: + config["rules"] = config_overrides["rules"] + + if config.sections(): + self.config = config def _get_config_for(self, node_coord: NodeCoords, section: dict) -> str: """
wwkimball/yamlpath
a80a36c73912ca69ba388ef2f05369c3243bc1c5
diff --git a/tests/test_merger_mergerconfig.py b/tests/test_merger_mergerconfig.py index 96aaf93..a40196f 100644 --- a/tests/test_merger_mergerconfig.py +++ b/tests/test_merger_mergerconfig.py @@ -20,6 +20,7 @@ from tests.conftest import ( create_temp_yaml_file ) + class Test_merger_MergerConfig(): """Tests for the MergerConfig class.""" @@ -207,6 +208,83 @@ class Test_merger_MergerConfig(): assert mc.hash_merge_mode( NodeCoords(node, parent, parentref)) == mode + @pytest.mark.parametrize("ini_rule, override_rule, mode", [ + ("left", "right", HashMergeOpts.RIGHT), + ("right", "deep", HashMergeOpts.DEEP), + ("deep", "left", HashMergeOpts.LEFT), + ]) + def test_hash_merge_mode_override_rule_overrides_ini_rule( + self, quiet_logger, tmp_path_factory, ini_rule, override_rule, mode + ): + config_file = create_temp_yaml_file(tmp_path_factory, """ + [rules] + /hash = {} + """.format(ini_rule)) + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig(quiet_logger, SimpleNamespace(config=config_file), rules={"/hash": override_rule}) + mc.prepare(lhs_data) + + node = lhs_data["hash"] + parent = lhs_data + parentref = "hash" + + assert mc.hash_merge_mode( + NodeCoords(node, parent, parentref)) == mode + + @pytest.mark.parametrize("arg_rule, override_rule, mode", [ + ("left", "right", HashMergeOpts.RIGHT), + ("right", "deep", HashMergeOpts.DEEP), + ("deep", "left", HashMergeOpts.LEFT), + ]) + def test_hash_merge_mode_override_rule_overrides_arg_rule( + self, quiet_logger, tmp_path_factory, arg_rule, override_rule, mode + ): + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig(quiet_logger, SimpleNamespace(hashes=arg_rule), rules={"/hash": override_rule}) + mc.prepare(lhs_data) + + node = lhs_data["hash"] + parent = lhs_data + parentref = "hash" + + assert mc.hash_merge_mode( + NodeCoords(node, parent, parentref)) == mode ### # array_merge_mode @@ -311,6 +389,93 @@ class Test_merger_MergerConfig(): assert mc.array_merge_mode( NodeCoords(node, parent, parentref)) == mode + @pytest.mark.parametrize("ini_rule, override_rule, mode", [ + ("left", "right", ArrayMergeOpts.RIGHT), + ("right", "unique", ArrayMergeOpts.UNIQUE), + ("unique", "all", ArrayMergeOpts.ALL), + ("all", "left", ArrayMergeOpts.LEFT), + ]) + def test_array_merge_mode_override_rule_overrides_ini_rule( + self, quiet_logger, tmp_path_factory, ini_rule, override_rule, mode + ): + config_file = create_temp_yaml_file(tmp_path_factory, """ + [rules] + /hash/merge_targets/subarray = {} + """.format(ini_rule)) + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value 
AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig( + quiet_logger, + SimpleNamespace(config=config_file), + rules={"/hash/merge_targets/subarray": override_rule} + ) + mc.prepare(lhs_data) + + node = lhs_data["hash"]["merge_targets"]["subarray"] + parent = lhs_data["hash"]["merge_targets"] + parentref = "subarray" + + assert mc.array_merge_mode( + NodeCoords(node, parent, parentref)) == mode + + @pytest.mark.parametrize("arg_rule, override_rule, mode", [ + ("left", "right", ArrayMergeOpts.RIGHT), + ("right", "unique", ArrayMergeOpts.UNIQUE), + ("unique", "all", ArrayMergeOpts.ALL), + ("all", "left", ArrayMergeOpts.LEFT), + ]) + def test_array_merge_mode_override_rule_overrides_arg_rule( + self, quiet_logger, tmp_path_factory, arg_rule, override_rule, mode + ): + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig( + quiet_logger, + SimpleNamespace(arrays=arg_rule), + rules={"/hash/merge_targets/subarray": override_rule} + ) + mc.prepare(lhs_data) + + node = lhs_data["hash"]["merge_targets"]["subarray"] + parent = lhs_data["hash"]["merge_targets"] + parentref = "subarray" + + assert mc.array_merge_mode( + NodeCoords(node, parent, parentref)) == mode ### # aoh_merge_mode @@ -419,6 +584,95 @@ class Test_merger_MergerConfig(): assert mc.aoh_merge_mode( NodeCoords(node, parent, parentref)) == mode + @pytest.mark.parametrize("ini_rule, override_rule, mode", [ + ("deep", "left", AoHMergeOpts.LEFT), + ("left", "right", AoHMergeOpts.RIGHT), + ("right", "unique", AoHMergeOpts.UNIQUE), + ("unique", "all", AoHMergeOpts.ALL), + ("all", "deep", AoHMergeOpts.DEEP), + ]) + def test_array_merge_mode_override_rule_overrides_ini_rule( + self, quiet_logger, tmp_path_factory, ini_rule, override_rule, mode + ): + config_file = create_temp_yaml_file(tmp_path_factory, """ + [rules] + /array_of_hashes = {} + """.format(ini_rule)) + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig( + quiet_logger, + SimpleNamespace(config=config_file), + rules={"/array_of_hashes": override_rule} + ) + mc.prepare(lhs_data) + + node = lhs_data["array_of_hashes"] + parent = lhs_data + parentref = "array_of_hashes" + + assert mc.aoh_merge_mode( + NodeCoords(node, parent, parentref)) == mode + + @pytest.mark.parametrize("arg_rule, override_rule, mode", [ + ("deep", "left", AoHMergeOpts.LEFT), + ("left", "right", AoHMergeOpts.RIGHT), + ("right", "unique", AoHMergeOpts.UNIQUE), + ("unique", "all", AoHMergeOpts.ALL), + ("all", "deep", AoHMergeOpts.DEEP), + ]) + def test_array_merge_mode_override_rule_overrides_arg_rule( + self, quiet_logger, tmp_path_factory, arg_rule, override_rule, mode + ): + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs 
value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig( + quiet_logger, + SimpleNamespace(aoh=arg_rule), + rules={"/array_of_hashes": override_rule} + ) + mc.prepare(lhs_data) + + node = lhs_data["array_of_hashes"] + parent = lhs_data + parentref = "array_of_hashes" + + assert mc.aoh_merge_mode( + NodeCoords(node, parent, parentref)) == mode ### # aoh_merge_key @@ -526,6 +780,40 @@ class Test_merger_MergerConfig(): assert mc.aoh_merge_key( NodeCoords(node, parent, parentref), record) == "prop" + def test_aoh_merge_key_override_rule_overrides_ini(self, quiet_logger, tmp_path_factory): + config_file = create_temp_yaml_file(tmp_path_factory, """ + [keys] + /array_of_hashes = name + """) + lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """--- + hash: + lhs_exclusive: lhs value 1 + merge_targets: + subkey: lhs value 2 + subarray: + - one + - two + array_of_hashes: + - name: LHS Record 1 + id: 1 + prop: LHS value AoH 1 + - name: LHS Record 2 + id: 2 + prop: LHS value AoH 2 + """) + lhs_yaml = get_yaml_editor() + (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file) + + mc = MergerConfig(quiet_logger, SimpleNamespace(config=config_file), keys={"/array_of_hashes": "id"}) + mc.prepare(lhs_data) + + node = lhs_data["array_of_hashes"] + parent = lhs_data + parentref = "array_of_hashes" + record = node[0] + + assert mc.aoh_merge_key( + NodeCoords(node, parent, parentref), record) == "id" ### # set_merge_mode
Difficulty using MergerConfig Hey, first off, thanks for the awesome library. This project solves a huge recurring problem for me when trying to parse config files. I plan to use this in a lot of projects moving forward. From my experience, it seems like this project is CLI-first rather than API-first. This makes the library a huge pain to integrate into other projects. For example, I'm trying to make a CLI program that merges some YAML files, overwriting only specific nodes. It seems like the only way to specify these exceptions using this project is to write out a config file with a list of rules (which in my case changes on every run). This adds unnecessary complexity, as I should be able to just pass these options to the `MergerConfig` class directly. Additionally, the YAML path rules I'm writing into the config include the `:` character, which `ConfigParser` likes to convert to `=`. Then the code in `MergerConfig` assumes that the `=` symbol is part of the value, not the key. I've solved this problem by using the following nasty hack: ```python
old_init = configparser.ConfigParser.__init__

def new_init(self):
    old_init(self, delimiters="=")

configparser.ConfigParser.__init__ = new_init
``` Both of these problems can be solved by allowing rules, defaults, and keys to be specified in the class constructor, and only loading from a config file in the event these values are absent.
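A minimal sketch of how the constructor-override API added by the patch above could be used, mirroring the calls exercised in the test patch; the YAML paths, rule values, and `SimpleNamespace` contents here are illustrative, not authoritative:

```python
# Sketch only: assumes MergerConfig accepts `rules` and `keys` keyword
# arguments, as the accompanying patch adds. Paths and values are examples.
from types import SimpleNamespace

from yamlpath.merger import MergerConfig
from yamlpath.wrappers import ConsolePrinter

# ConsolePrinter takes an args-like object with quiet/verbose/debug flags.
log = ConsolePrinter(SimpleNamespace(quiet=True, verbose=False, debug=False))

# Pass merge rules and Array-of-Hash identity keys directly instead of
# writing an INI file; overrides take precedence over any [rules]/[keys]
# sections loaded from a config file.
mc = MergerConfig(
    log,
    SimpleNamespace(config=None),                       # no INI file needed
    rules={"/hash/merge_targets/subarray": "unique"},   # per-path merge rule
    keys={"/array_of_hashes": "id"},                    # AoH identity key
)
```

Passing the dictionaries as keyword arguments also sidesteps the `ConfigParser` delimiter issue above, since YAML paths containing `:` never pass through an INI parser.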
0.0
a80a36c73912ca69ba388ef2f05369c3243bc1c5
[ "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_override_rule_overrides_ini_rule[left-right-HashMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_override_rule_overrides_ini_rule[right-deep-HashMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_override_rule_overrides_ini_rule[deep-left-HashMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_override_rule_overrides_arg_rule[left-right-HashMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_override_rule_overrides_arg_rule[right-deep-HashMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_override_rule_overrides_arg_rule[deep-left-HashMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_ini_rule[deep-left-AoHMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_ini_rule[left-right-AoHMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_ini_rule[right-unique-AoHMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_ini_rule[unique-all-AoHMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_ini_rule[all-deep-AoHMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_arg_rule[deep-left-AoHMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_arg_rule[left-right-AoHMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_arg_rule[right-unique-AoHMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_arg_rule[unique-all-AoHMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_override_rule_overrides_arg_rule[all-deep-AoHMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_key_override_rule_overrides_ini" ]
[ "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_get_insertion_point_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_get_insertion_point_cli", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_get_document_format", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_get_multidoc_mode_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_get_multidoc_mode_cli", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli[left-AnchorConflictResolutions.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli[rename-AnchorConflictResolutions.RENAME]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli[right-AnchorConflictResolutions.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli[stop-AnchorConflictResolutions.STOP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_ini[left-AnchorConflictResolutions.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_ini[rename-AnchorConflictResolutions.RENAME]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_ini[right-AnchorConflictResolutions.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_ini[stop-AnchorConflictResolutions.STOP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli_overrides_ini[left-stop-AnchorConflictResolutions.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli_overrides_ini[rename-stop-AnchorConflictResolutions.RENAME]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli_overrides_ini[right-stop-AnchorConflictResolutions.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_anchor_merge_mode_cli_overrides_ini[stop-rename-AnchorConflictResolutions.STOP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_cli[deep-HashMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_cli[left-HashMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_cli[right-HashMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_ini[deep-HashMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_ini[left-HashMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_ini[right-HashMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_cli_overrides_ini_defaults[deep-left-HashMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_cli_overrides_ini_defaults[left-right-HashMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_cli_overrides_ini_defaults[right-deep-HashMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_ini_rule_overrides_cli[deep-left-right-HashMergeOpts.RIGHT]", 
"tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_ini_rule_overrides_cli[left-right-deep-HashMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_hash_merge_mode_ini_rule_overrides_cli[right-deep-left-HashMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli[all-ArrayMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli[left-ArrayMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli[right-ArrayMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli[unique-ArrayMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini[all-ArrayMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini[left-ArrayMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini[right-ArrayMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini[unique-ArrayMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli_overrides_ini_defaults[all-left-ArrayMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli_overrides_ini_defaults[left-right-ArrayMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli_overrides_ini_defaults[right-unique-ArrayMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_cli_overrides_ini_defaults[unique-all-ArrayMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini_rule_overrides_cli[all-left-right-ArrayMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini_rule_overrides_cli[left-right-unique-ArrayMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini_rule_overrides_cli[right-unique-all-ArrayMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_array_merge_mode_ini_rule_overrides_cli[unique-all-left-ArrayMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli[all-AoHMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli[deep-AoHMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli[left-AoHMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli[right-AoHMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli[unique-AoHMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini[all-AoHMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini[deep-AoHMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini[left-AoHMergeOpts.LEFT]", 
"tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini[right-AoHMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini[unique-AoHMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli_overrides_ini_defaults[all-deep-AoHMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli_overrides_ini_defaults[deep-left-AoHMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli_overrides_ini_defaults[left-right-AoHMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli_overrides_ini_defaults[right-unique-AoHMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_cli_overrides_ini_defaults[unique-all-AoHMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini_rule_overrides_cli[all-deep-left-AoHMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini_rule_overrides_cli[deep-left-right-AoHMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini_rule_overrides_cli[left-right-unique-AoHMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini_rule_overrides_cli[right-unique-all-AoHMergeOpts.ALL]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_mode_ini_rule_overrides_cli[unique-all-deep-AoHMergeOpts.DEEP]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_key_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_key_ini", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_aoh_merge_key_ini_inferred_parent", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_default", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_cli[left-SetMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_cli[right-SetMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_cli[unique-SetMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_ini[left-SetMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_ini[right-SetMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_ini[unique-SetMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_cli_overrides_ini_defaults[left-right-SetMergeOpts.LEFT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_cli_overrides_ini_defaults[right-unique-SetMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_cli_overrides_ini_defaults[unique-all-SetMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_ini_rule_overrides_cli[left-right-unique-SetMergeOpts.UNIQUE]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_ini_rule_overrides_cli[right-unique-left-SetMergeOpts.LEFT]", 
"tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_set_merge_mode_ini_rule_overrides_cli[unique-left-right-SetMergeOpts.RIGHT]", "tests/test_merger_mergerconfig.py::Test_merger_MergerConfig::test_warn_when_rules_matches_zero_nodes" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-10-23 01:40:37+00:00
isc
6,273
xCDAT__xcdat-226
diff --git a/xcdat/spatial.py b/xcdat/spatial.py index 97d2b55..65c0581 100644 --- a/xcdat/spatial.py +++ b/xcdat/spatial.py @@ -17,18 +17,13 @@ import numpy as np import xarray as xr from dask.array.core import Array -from xcdat.axis import ( - GENERIC_AXIS_MAP, - GenericAxis, - _align_lon_bounds_to_360, - _get_prime_meridian_index, -) +from xcdat.axis import _align_lon_bounds_to_360, _get_prime_meridian_index from xcdat.dataset import get_data_var #: Type alias for a dictionary of axis keys mapped to their bounds. AxisWeights = Dict[Hashable, xr.DataArray] #: Type alias for supported spatial axis keys. -SpatialAxis = Literal["lat", "lon"] +SpatialAxis = Literal["X", "Y"] SPATIAL_AXES: Tuple[SpatialAxis, ...] = get_args(SpatialAxis) #: Type alias for a tuple of floats/ints for the regional selection bounds. RegionAxisBounds = Tuple[float, float] @@ -44,13 +39,13 @@ class SpatialAccessor: def average( self, data_var: str, - axis: Union[List[SpatialAxis], SpatialAxis] = ["lat", "lon"], + axis: List[SpatialAxis] = ["X", "Y"], weights: Union[Literal["generate"], xr.DataArray] = "generate", lat_bounds: Optional[RegionAxisBounds] = None, lon_bounds: Optional[RegionAxisBounds] = None, ) -> xr.Dataset: """ - Calculate the spatial average for a rectilinear grid over a (optional) + Calculates the spatial average for a rectilinear grid over an optionally specified regional domain. Operations include: @@ -62,14 +57,21 @@ class SpatialAccessor: - Adjust weights to conform to the specified regional boundary. - Compute spatial weighted average. + This method requires that the dataset's coordinates have the 'axis' + attribute set to the keys in ``axis``. For example, the latitude + coordinates should have its 'axis' attribute set to 'Y' (which is also + CF-compliant). This 'axis' attribute is used to retrieve the related + coordinates via `cf_xarray`. Refer to this method's examples for more + information. + Parameters ---------- data_var: str The name of the data variable inside the dataset to spatially average. - axis : Union[List[SpatialAxis], SpatialAxis] - List of axis dimensions or single axis dimension to average over. - For example, ["lat", "lon"] or "lat", by default ["lat", "lon"]. + axis : List[SpatialAxis] + List of axis dimensions to average over, by default ["X", "Y"]. + Valid axis keys include "X" and "Y". weights : Union[Literal["generate"], xr.DataArray], optional If "generate", then weights are generated. Otherwise, pass a DataArray containing the regional weights used for weighted @@ -104,30 +106,36 @@ class SpatialAccessor: >>> import xcdat - Open a dataset and limit to a single variable: + Check the 'axis' attribute is set on the required coordinates: + + >>> ds.lat.attrs["axis"] + >>> Y + >>> + >>> ds.lon.attrs["axis"] + >>> X + + Set the 'axis' attribute for the required coordinates if it isn't: - >>> ds = xcdat.open_dataset("path/to/file.nc", var="tas") + >>> ds.lat.attrs["axis"] = "Y" + >>> ds.lon.attrs["axis"] = "X" Call spatial averaging method: - >>> # First option >>> ds.spatial.average(...) - >>> # Second option - >>> ds.xcdat.average(...) 
Get global average time series: - >>> ts_global = ds.spatial.average("tas", axis=["lat", "lon"])["tas"] + >>> ts_global = ds.spatial.average("tas", axis=["X", "Y"])["tas"] Get time series in Nino 3.4 domain: - >>> ts_n34 = ds.spatial.average("ts", axis=["lat", "lon"], + >>> ts_n34 = ds.spatial.average("ts", axis=["X", "Y"], >>> lat_bounds=(-5, 5), >>> lon_bounds=(-170, -120))["ts"] Get zonal mean time series: - >>> ts_zonal = ds.spatial.average("tas", axis=['lon'])["tas"] + >>> ts_zonal = ds.spatial.average("tas", axis=["X"])["tas"] Using custom weights for averaging: @@ -138,18 +146,18 @@ class SpatialAccessor: >>> dims=["lat", "lon"], >>> ) >>> - >>> ts_global = ds.spatial.average("tas", axis=["lat","lon"], + >>> ts_global = ds.spatial.average("tas", axis=["X", "Y"], >>> weights=weights)["tas"] """ dataset = self._dataset.copy() dv = get_data_var(dataset, data_var) - axis = self._validate_axis(dv, axis) + self._validate_axis_arg(axis) if isinstance(weights, str) and weights == "generate": if lat_bounds is not None: - self._validate_region_bounds("lat", lat_bounds) + self._validate_region_bounds("Y", lat_bounds) if lon_bounds is not None: - self._validate_region_bounds("lon", lon_bounds) + self._validate_region_bounds("X", lon_bounds) dv_weights = self._get_weights(axis, lat_bounds, lon_bounds) elif isinstance(weights, xr.DataArray): dv_weights = weights @@ -158,51 +166,39 @@ class SpatialAccessor: dataset[dv.name] = self._averager(dv, axis, dv_weights) return dataset - def _validate_axis( - self, data_var: xr.DataArray, axis: Union[List[SpatialAxis], SpatialAxis] - ) -> List[SpatialAxis]: - """Validates if ``axis`` arg is supported and exists in the data var. + def _validate_axis_arg(self, axis: List[SpatialAxis]): + """ + Validates that the ``axis`` dimension(s) exists in the dataset. Parameters ---------- - data_var : xr.DataArray - The data variable. - axis : Union[List[SpatialAxis], SpatialAxis] - List of axis dimensions or single axis dimension to average over. - - Returns - ------- - List[SpatialAxis] - List of axis dimensions or single axis dimension to average over. + axis : List[SpatialAxis] + List of axis dimensions to average over. Raises ------ ValueError - If any key in ``axis`` is not supported for spatial averaging. + If a key in ``axis`` is not a supported value. KeyError - If any key in ``axis`` does not exist in the ``data_var``. + If the dataset does not have coordinates for the ``axis`` dimension, + or the `axis` attribute is not set for those coordinates. """ - if isinstance(axis, str): - axis = [axis] - for key in axis: if key not in SPATIAL_AXES: raise ValueError( - "Incorrect `axis` argument. Supported axes include: " + "Incorrect `axis` argument value. Supported values include: " f"{', '.join(SPATIAL_AXES)}." ) - generic_axis_key = GENERIC_AXIS_MAP[key] try: - data_var.cf.axes[generic_axis_key] + self._dataset.cf.axes[key] except KeyError: raise KeyError( - f"The data variable '{data_var.name}' is missing the '{axis}' " - "dimension, which is required for spatial averaging." + f"A '{key}' axis dimension was not found in the dataset. Make sure " + f"the dataset has '{key}' axis coordinates and the coordinates' " + f"'axis' attribute is set to '{key}'." ) - return axis - def _validate_domain_bounds(self, domain_bounds: xr.DataArray): """Validates the ``domain_bounds`` arg based on a set of criteria. @@ -244,7 +240,7 @@ class SpatialAccessor: TypeError If the ``bounds`` upper bound is not a float or integer. 
ValueError - If the ``axis`` is "lat" and the ``bounds`` lower value is larger + If the ``axis`` is "Y" and the ``bounds`` lower value is larger than the upper value. """ if not isinstance(bounds, tuple): @@ -269,12 +265,12 @@ class SpatialAccessor: f"The regional {axis} upper bound is not a float or an integer." ) - # For latitude, require that the upper bound be larger than the lower - # bound. Note that this does not apply to longitude (since it is - # a circular axis). - if axis == "lat" and lower >= upper: + # For the "Y" axis (latitude), require that the upper bound be larger + # than the lower bound. Note that this does not apply to the "X" axis + # (longitude) since it is circular. + if axis == "Y" and lower >= upper: raise ValueError( - f"The regional {axis} lower bound is greater than the upper. " + "The regional latitude lower bound is greater than the upper. " "Pass a tuple with the format (lower, upper)." ) @@ -299,9 +295,8 @@ class SpatialAccessor: Parameters ---------- - axis : Union[List[SpatialAxis], SpatialAxis] - List of axis dimensions or single axis dimension to average over. - For example, ["lat", "lon"] or "lat". + axis : List[SpatialAxis] + List of axis dimensions to average over. lat_bounds : Optional[RegionAxisBounds] Tuple of latitude boundaries for regional selection. lon_bounds : Optional[RegionAxisBounds] @@ -326,17 +321,18 @@ class SpatialAccessor: {"domain": xr.DataArray, "region": Optional[RegionAxisBounds]}, ) axis_bounds: Dict[SpatialAxis, Bounds] = { - "lat": { - "domain": self._dataset.bounds.get_bounds("lat").copy(), - "region": lat_bounds, - }, - "lon": { + "X": { "domain": self._dataset.bounds.get_bounds("lon").copy(), "region": lon_bounds, }, + "Y": { + "domain": self._dataset.bounds.get_bounds("lat").copy(), + "region": lat_bounds, + }, } axis_weights: AxisWeights = {} + for key in axis: d_bounds = axis_bounds[key]["domain"] self._validate_domain_bounds(d_bounds) @@ -347,9 +343,9 @@ class SpatialAccessor: if r_bounds is not None: r_bounds = np.array(r_bounds, dtype="float") - if key == "lon": + if key == "X": weights = self._get_longitude_weights(d_bounds, r_bounds) - elif key == "lat": + elif key == "Y": weights = self._get_latitude_weights(d_bounds, r_bounds) weights.attrs = d_bounds.attrs @@ -357,6 +353,7 @@ class SpatialAccessor: axis_weights[key] = weights weights = self._combine_weights(axis_weights) + return weights def _get_longitude_weights( @@ -386,9 +383,9 @@ class SpatialAccessor: Parameters ---------- domain_bounds : xr.DataArray - The array of bounds for the latitude domain. + The array of bounds for the longitude domain. region_bounds : Optional[np.ndarray] - The array of bounds for latitude regional selection. + The array of bounds for longitude regional selection. Returns ------- @@ -655,14 +652,22 @@ class SpatialAccessor: If the axis dimension sizes between ``weights`` and ``data_var`` are misaligned. """ - # Check that the supplied weights include lat and lon dimensions. - lat_key = data_var.cf.axes["Y"][0] - lon_key = data_var.cf.axes["X"][0] - - if "lat" in axis and lat_key not in weights.dims: - raise KeyError(f"Check weights DataArray includes {lat_key} dimension.") - if "lon" in axis and lon_key not in weights.dims: - raise KeyError(f"Check weights DataArray includes {lon_key} dimension.") + # Check that the supplied weights include x and y dimensions. 
+ x_key = data_var.cf.axes["X"][0] + y_key = data_var.cf.axes["Y"][0] + + if "X" in axis and x_key not in weights.dims: + raise KeyError( + "The weights DataArray either does not include an X axis, " + "or the X axis coordinates does not have the 'axis' attribute " + "set to 'X'." + ) + if "Y" in axis and y_key not in weights.dims: + raise KeyError( + "The weights DataArray either does not include an Y axis, " + "or the Y axis coordinates does not have the 'axis' attribute " + "set to 'Y'." + ) # Check the weight dim sizes equal data var dim sizes. dim_sizes = {key: data_var.sizes[key] for key in weights.sizes.keys()} @@ -692,8 +697,7 @@ class SpatialAccessor: data_var : xr.DataArray Data variable inside a Dataset. axis : List[SpatialAxis] - List of axis dimensions or single axis dimension to average over. - For example, ["lat", "lon"] or "lat". + List of axis dimensions to average over. weights : xr.DataArray A DataArray containing the region area weights for averaging. ``weights`` must include the same spatial axis dimensions and have @@ -710,34 +714,8 @@ class SpatialAccessor: Missing values are replaced with 0 using ``weights.fillna(0)``. """ weights = weights.fillna(0) - with xr.set_options(keep_attrs=True): - weighted_mean = data_var.cf.weighted(weights).mean( - self._get_generic_axis_keys(axis) - ) - return weighted_mean - def _get_generic_axis_keys(self, axis: List[SpatialAxis]) -> List[GenericAxis]: - """Converts supported axis keys to their generic CF representations. - - Since xCDAT's spatial averaging accepts the CF short version of axes - keys, attempting to index a Dataset/DataArray on the short key through - cf_xarray might fail for cases where the long key is used instead (e.g., - "latitude" instead of "lat"). This method handles this edge case by - converting the list of axis keys to their generic representations (e.g., - "Y" instead of "lat") for indexing operations. - - Parameters - ---------- - axis_keys : List[SpatialAxis] - List of axis dimension(s) to average over. - - Returns - ------- - List[GenericAxis] - List of axis dimension(s) to average over. - """ - generic_axis_keys = [] - for key in axis: - generic_axis_keys.append(GENERIC_AXIS_MAP[key]) + with xr.set_options(keep_attrs=True): + weighted_mean = data_var.cf.weighted(weights).mean(axis) - return generic_axis_keys + return weighted_mean diff --git a/xcdat/temporal.py b/xcdat/temporal.py index 702b15e..ad13d93 100644 --- a/xcdat/temporal.py +++ b/xcdat/temporal.py @@ -106,8 +106,9 @@ class TemporalAccessor: dataset.cf["T"] except KeyError: raise KeyError( - "This dataset does not have a time dimension, which is required for " - "using the methods in the TemporalAccessor class." + "A 'T' axis dimension was not found in the dataset. Make sure the " + "dataset has time axis coordinates and its 'axis' attribute is set to " + "'T'." ) self._dataset: xr.Dataset = dataset @@ -210,6 +211,15 @@ class TemporalAccessor: >>> import xcdat + Check the 'axis' attribute is set on the time coordinates: + + >>> ds.time.attrs["axis"] + >>> T + + Set the 'axis' attribute for the time coordinates if it isn't: + + >>> ds.time.attrs["axis"] = "T" + Call ``average()`` method: >>> ds.temporal.average(...)
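A short sketch of the call pattern the patch above moves to, taken from the updated docstring examples: spatial axis keys become the CF generic "X"/"Y", and the coordinates must carry a matching `axis` attribute so `cf_xarray` can resolve them. The file path and variable name are placeholders.

```python
# Sketch of the post-patch API; "path/to/file.nc" and "tas" are placeholders.
import xcdat

ds = xcdat.open_dataset("path/to/file.nc")

# Ensure the coordinates advertise their CF axis (required by the new API).
ds["lat"].attrs["axis"] = "Y"
ds["lon"].attrs["axis"] = "X"

# Global mean time series with generated area weights.
ts_global = ds.spatial.average("tas", axis=["X", "Y"])["tas"]

# Zonal mean: average over longitude only.
ts_zonal = ds.spatial.average("tas", axis=["X"])["tas"]
```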
xCDAT/xcdat
4e582e54e564d6d69339b2218027c6cd1affd957
diff --git a/tests/test_spatial.py b/tests/test_spatial.py index dd0f9fb..49e01b8 100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -41,7 +41,7 @@ class TestAverage: with pytest.raises(KeyError): self.ds.spatial.average( "not_a_data_var", - axis=["lat", "incorrect_axis"], + axis=["Y", "incorrect_axis"], ) def test_spatial_average_for_lat_and_lon_region_using_custom_weights(self): @@ -53,7 +53,7 @@ class TestAverage: dims=["lat", "lon"], ) result = ds.spatial.average( - axis=["lat", "lon"], + axis=["X", "Y"], lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1), weights=weights, @@ -72,7 +72,7 @@ class TestAverage: def test_spatial_average_for_lat_and_lon_region(self): ds = self.ds.copy() result = ds.spatial.average( - "ts", axis=["lat", "lon"], lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1) + "ts", axis=["X", "Y"], lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1) ) expected = self.ds.copy() @@ -89,7 +89,7 @@ class TestAverage: # Specifying axis as a str instead of list of str. result = ds.spatial.average( - "ts", axis="lat", lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1) + "ts", axis=["Y"], lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1) ) expected = self.ds.copy() @@ -109,7 +109,7 @@ class TestAverage: # Specifying axis as a str instead of list of str. result = ds.spatial.average( - "ts", axis="lat", lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1) + "ts", axis=["Y"], lat_bounds=(-5.0, 5), lon_bounds=(-170, -120.1) ) expected = self.ds.copy() @@ -124,32 +124,26 @@ class TestAverage: assert result.identical(expected) -class TestValidateAxis: +class TestValidateAxisArg: @pytest.fixture(autouse=True) def setup(self): self.ds = generate_dataset(cf_compliant=True, has_bounds=True) def test_raises_error_if_axis_list_contains_unsupported_axis(self): with pytest.raises(ValueError): - self.ds.spatial._validate_axis(self.ds.ts, axis=["lat", "incorrect_axis"]) + self.ds.spatial._validate_axis_arg(axis=["Y", "incorrect_axis"]) def test_raises_error_if_lat_axis_does_not_exist(self): ds = self.ds.copy() - ds["ts"] = xr.DataArray(data=None, coords={"lon": ds.lon}, dims=["lon"]) + ds.lat.attrs["axis"] = None with pytest.raises(KeyError): - ds.spatial._validate_axis(ds.ts, axis=["lat", "lon"]) + ds.spatial._validate_axis_arg(axis=["X", "Y"]) def test_raises_error_if_lon_axis_does_not_exist(self): ds = self.ds.copy() - ds["ts"] = xr.DataArray(data=None, coords={"lat": ds.lat}, dims=["lat"]) + ds.lon.attrs["axis"] = None with pytest.raises(KeyError): - ds.spatial._validate_axis(ds.ts, axis=["lat", "lon"]) - - def test_returns_list_of_str_if_axis_is_a_single_supported_str_input(self): - result = self.ds.spatial._validate_axis(self.ds.ts, axis="lat") - expected = ["lat"] - - assert result == expected + ds.spatial._validate_axis_arg(axis=["X", "Y"]) class TestValidateRegionBounds: @@ -178,18 +172,18 @@ class TestValidateRegionBounds: def test_raises_error_if_lower_bound_is_not_a_float_or_int(self): with pytest.raises(TypeError): - self.ds.spatial._validate_region_bounds("lat", ("invalid", 1)) + self.ds.spatial._validate_region_bounds("Y", ("invalid", 1)) def test_raises_error_if_upper_bound_is_not_a_float_or_int(self): with pytest.raises(TypeError): - self.ds.spatial._validate_region_bounds("lon", (1, "invalid")) + self.ds.spatial._validate_region_bounds("X", (1, "invalid")) def test_raises_error_if_lower_lat_bound_is_bigger_than_upper(self): with pytest.raises(ValueError): - self.ds.spatial._validate_region_bounds("lat", (2, 1)) + self.ds.spatial._validate_region_bounds("Y", (2, 1)) def 
test_does_not_raise_error_if_lon_lower_bound_is_larger_than_upper(self): - self.ds.spatial._validate_region_bounds("lon", (2, 1)) + self.ds.spatial._validate_region_bounds("X", (2, 1)) class TestValidateWeights: @@ -209,7 +203,7 @@ class TestValidateWeights: coords={"lat": self.ds.lat, "lon": self.ds.lon}, dims=["lat", "lon"], ) - self.ds.spatial._validate_weights(self.ds["ts"], axis="lat", weights=weights) + self.ds.spatial._validate_weights(self.ds["ts"], axis=["Y"], weights=weights) def test_error_is_raised_when_lat_axis_is_specified_but_lat_is_not_in_weights_dims( self, @@ -219,7 +213,7 @@ class TestValidateWeights: ) with pytest.raises(KeyError): self.ds.spatial._validate_weights( - self.ds["ts"], axis=["lon", "lat"], weights=weights + self.ds["ts"], axis=["X", "Y"], weights=weights ) def test_error_is_raised_when_lon_axis_is_specified_but_lon_is_not_in_weights_dims( @@ -230,7 +224,7 @@ class TestValidateWeights: ) with pytest.raises(KeyError): self.ds.spatial._validate_weights( - self.ds["ts"], axis=["lon", "lat"], weights=weights + self.ds["ts"], axis=["X", "Y"], weights=weights ) def test_error_is_raised_when_weights_lat_and_lon_dims_dont_align_with_data_var_dims( @@ -247,7 +241,7 @@ class TestValidateWeights: with pytest.raises(ValueError): self.ds.spatial._validate_weights( - self.ds["ts"], axis=["lat", "lon"], weights=weights + self.ds["ts"], axis=["X", "Y"], weights=weights ) @@ -404,7 +398,7 @@ class TestGetWeights: def test_weights_for_region_in_lat_and_lon_domains(self): result = self.ds.spatial._get_weights( - axis=["lat", "lon"], lat_bounds=(-5, 5), lon_bounds=(-170, -120) + axis=["Y", "X"], lat_bounds=(-5, 5), lon_bounds=(-170, -120) ) expected = xr.DataArray( data=np.array( @@ -423,7 +417,7 @@ class TestGetWeights: def test_area_weights_for_region_in_lat_domain(self): result = self.ds.spatial._get_weights( - axis=["lat", "lon"], lat_bounds=(-5, 5), lon_bounds=None + axis=["Y", "X"], lat_bounds=(-5, 5), lon_bounds=None ) expected = xr.DataArray( data=np.array( @@ -454,7 +448,7 @@ class TestGetWeights: dims=["lat", "lon"], ) result = self.ds.spatial._get_weights( - axis=["lat", "lon"], lat_bounds=None, lon_bounds=(-170, -120) + axis=["Y", "X"], lat_bounds=None, lon_bounds=(-170, -120) ) xr.testing.assert_allclose(result, expected) @@ -828,7 +822,7 @@ class TestAverager: dims=["lat", "lon"], ) - result = ds.spatial._averager(ds.ts, axis=["lat", "lon"], weights=weights) + result = ds.spatial._averager(ds.ts, axis=["X", "Y"], weights=weights) expected = xr.DataArray( name="ts", data=np.ones(15), coords={"time": ds.time}, dims=["time"] ) @@ -843,7 +837,7 @@ class TestAverager: dims=["lat"], ) - result = self.ds.spatial._averager(self.ds.ts, axis=["lat"], weights=weights) + result = self.ds.spatial._averager(self.ds.ts, axis=["Y"], weights=weights) expected = xr.DataArray( name="ts", data=np.ones((15, 4)), @@ -861,7 +855,7 @@ class TestAverager: dims=["lon"], ) - result = self.ds.spatial._averager(self.ds.ts, axis=["lon"], weights=weights) + result = self.ds.spatial._averager(self.ds.ts, axis=["X"], weights=weights) expected = xr.DataArray( name="ts", data=np.ones((15, 4)), @@ -878,22 +872,9 @@ class TestAverager: dims=["lat", "lon"], ) - result = self.ds.spatial._averager( - self.ds.ts, axis=["lat", "lon"], weights=weights - ) + result = self.ds.spatial._averager(self.ds.ts, axis=["X", "Y"], weights=weights) expected = xr.DataArray( name="ts", data=np.ones(15), coords={"time": self.ds.time}, dims=["time"] ) assert result.identical(expected) - - -class TestGetGenericAxisKeys: - 
@pytest.fixture(autouse=True) - def setup(self): - self.ds = generate_dataset(cf_compliant=True, has_bounds=True) - - def test_generic_keys(self): - result = self.ds.spatial._get_generic_axis_keys(["lat", "lon"]) - expected = ["Y", "X"] - assert result == expected
[FEATURE]: Update supported spatial `axis` arg keys to the generic format ### Is your feature request related to a problem? The valid `axis` arg values of `"lat"` and `"lon"` do not follow the axis naming convention that we adopted with our APIs. https://github.com/XCDAT/xcdat/blob/79c488ea890febc422fe2e38b85c4e9dc7c72565/xcdat/spatial.py#L47 We implemented a mapping system for axis and coordinate names to their generic axis names. https://github.com/XCDAT/xcdat/blob/79c488ea890febc422fe2e38b85c4e9dc7c72565/xcdat/axis.py#L14-L25 ### Describe the solution you'd like 1. Update the valid `axis` arg values from `"lat"` and `"lon"` to `"Y"`/`"y"` and `"X"`/`"x"` 2. Convert the `axis` arg value to `.upper()` 3. Map the `axis` arg to the dimension/coordinates in the `xr.Dataset` 4. Update the `KeyError` raised when the CF `axis` attribute is not set for the X and Y axis coordinates * Related to https://github.com/XCDAT/xcdat/issues/166#issuecomment-1099382979 > We can probably improve the KeyError so that it says something like: > KeyError: "Could not find an X and/or Y axis for spatial averaging. Make sure the data variable 'tas' has X and Y axis coordinates and the 'axis' attribute is set for both." * A reusable convenience function/method might be useful for checking whether the `axis` attribute is set for the desired axis in the Dataset. If it isn't, then raise a `KeyError`. `cf_xarray` already throws an error if we try something like `ds.cf["X"]` when the `"X"` axis attr is not set, but the error is ambiguous. ### Describe alternatives you've considered ### Additional context * `cdutil.averager()`'s `axis` arg accepts lowercase generic axis names (`"x"`, `"y"`, `"t"`, etc.)
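To make the proposed mapping concrete, a minimal standalone sketch is shown below. The helper name `_validate_axis_arg`, the error text, and the attrs-based lookup are illustrative assumptions, not the final xCDAT implementation:

```python
import xarray as xr

# Hypothetical helper illustrating the proposal: accept generic axis keys
# ("X"/"x", "Y"/"y"), upper-case them, and map each one to the coordinate
# whose CF "axis" attribute matches, raising a clearer KeyError otherwise.
def _validate_axis_arg(ds: xr.Dataset, axis):
    keys = [key.upper() for key in axis]
    for key in keys:
        if key not in ("X", "Y"):
            raise ValueError(f"Unsupported axis key: {key!r}")
        matches = [c for c in ds.coords.values() if c.attrs.get("axis") == key]
        if not matches:
            raise KeyError(
                f"Could not find a {key} axis for spatial averaging. Make sure "
                f"the coordinate exists and its 'axis' attribute is set to {key!r}."
            )
    return keys

# Example usage (assumes `ds` has lat/lon coords with axis attrs set):
#   _validate_axis_arg(ds, ["x", "Y"])  # -> ["X", "Y"]
```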
0.0
4e582e54e564d6d69339b2218027c6cd1affd957
[ "tests/test_spatial.py::TestAverage::test_spatial_average_for_lat_and_lon_region_using_custom_weights", "tests/test_spatial.py::TestAverage::test_spatial_average_for_lat_and_lon_region", "tests/test_spatial.py::TestAverage::test_spatial_average_for_lat_region", "tests/test_spatial.py::TestAverage::test_chunked_spatial_average_for_lat_region", "tests/test_spatial.py::TestValidateAxisArg::test_raises_error_if_axis_list_contains_unsupported_axis", "tests/test_spatial.py::TestValidateAxisArg::test_raises_error_if_lat_axis_does_not_exist", "tests/test_spatial.py::TestValidateAxisArg::test_raises_error_if_lon_axis_does_not_exist", "tests/test_spatial.py::TestValidateRegionBounds::test_raises_error_if_lower_lat_bound_is_bigger_than_upper", "tests/test_spatial.py::TestGetWeights::test_weights_for_region_in_lat_and_lon_domains", "tests/test_spatial.py::TestGetWeights::test_area_weights_for_region_in_lat_domain", "tests/test_spatial.py::TestGetWeights::test_weights_for_region_in_lon_domain" ]
[ "tests/test_spatial.py::TestSpatialAccessor::test__init__", "tests/test_spatial.py::TestSpatialAccessor::test_decorator_call", "tests/test_spatial.py::TestAverage::test_raises_error_if_data_var_not_in_dataset", "tests/test_spatial.py::TestValidateRegionBounds::test_raises_error_if_bounds_type_is_not_a_tuple", "tests/test_spatial.py::TestValidateRegionBounds::test_raises_error_if_there_are_0_elements_in_the_bounds", "tests/test_spatial.py::TestValidateRegionBounds::test_raises_error_if_there_are_more_than_two_elements_in_the_bounds", "tests/test_spatial.py::TestValidateRegionBounds::test_does_not_raise_error_if_lower_and_upper_bounds_are_floats_or_ints", "tests/test_spatial.py::TestValidateRegionBounds::test_raises_error_if_lower_bound_is_not_a_float_or_int", "tests/test_spatial.py::TestValidateRegionBounds::test_raises_error_if_upper_bound_is_not_a_float_or_int", "tests/test_spatial.py::TestValidateRegionBounds::test_does_not_raise_error_if_lon_lower_bound_is_larger_than_upper", "tests/test_spatial.py::TestSwapLonAxis::test_raises_error_with_incorrect_orientation_to_swap_to", "tests/test_spatial.py::TestSwapLonAxis::test_swap_chunked_domain_dataarray_from_180_to_360", "tests/test_spatial.py::TestSwapLonAxis::test_swap_chunked_domain_dataarray_from_360_to_180", "tests/test_spatial.py::TestSwapLonAxis::test_swap_domain_dataarray_from_180_to_360", "tests/test_spatial.py::TestSwapLonAxis::test_swap_domain_dataarray_from_360_to_180", "tests/test_spatial.py::TestSwapLonAxis::test_swap_region_ndarray_from_180_to_360", "tests/test_spatial.py::TestSwapLonAxis::test_swap_region_ndarray_from_360_to_180", "tests/test_spatial.py::TestGetLongitudeWeights::test_weights_for_region_in_lon_domain", "tests/test_spatial.py::TestGetLongitudeWeights::test_weights_for_region_in_lon_domain_with_both_spanning_p_meridian", "tests/test_spatial.py::TestGetLongitudeWeights::test_weights_for_region_in_lon_domain_with_domain_spanning_p_meridian", "tests/test_spatial.py::TestGetLongitudeWeights::test_weights_for_region_in_lon_domain_with_region_spanning_p_meridian", "tests/test_spatial.py::TestGetLongitudeWeights::test_weights_all_longitudes_for_equal_region_bounds", "tests/test_spatial.py::TestGetLongitudeWeights::test_weights_for_equal_region_bounds_representing_entire_lon_domain", "tests/test_spatial.py::TestGetLatitudeWeights::test_weights_for_region_in_lat_domain", "tests/test_spatial.py::TestValidateDomainBounds::test_raises_error_if_low_bounds_exceeds_high_bound", "tests/test_spatial.py::TestCalculateWeights::test_returns_weights_as_the_absolute_difference_of_upper_and_lower_bounds", "tests/test_spatial.py::TestScaleDimToRegion::test_scales_chunked_lat_bounds_when_not_wrapping_around_prime_meridian", "tests/test_spatial.py::TestScaleDimToRegion::test_scales_chunked_lon_bounds_when_not_wrapping_around_prime_meridian", "tests/test_spatial.py::TestScaleDimToRegion::test_scales_lat_bounds_when_not_wrapping_around_prime_meridian", "tests/test_spatial.py::TestScaleDimToRegion::test_scales_lon_bounds_when_not_wrapping_around_prime_meridian", "tests/test_spatial.py::TestScaleDimToRegion::test_scales_lon_bounds_when_wrapping_around_prime_meridian", "tests/test_spatial.py::TestCombineWeights::test_weights_for_single_axis_are_identical", "tests/test_spatial.py::TestCombineWeights::test_weights_for_multiple_axis_is_the_product_of_matrix_multiplication", "tests/test_spatial.py::TestAverager::test_chunked_weighted_avg_over_lat_and_lon_axes", "tests/test_spatial.py::TestAverager::test_weighted_avg_over_lat_axis", 
"tests/test_spatial.py::TestAverager::test_weighted_avg_over_lon_axis", "tests/test_spatial.py::TestAverager::test_weighted_avg_over_lat_and_lon_axis" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-04-25 23:03:09+00:00
apache-2.0
6,274
xCDAT__xcdat-241
diff --git a/xcdat/bounds.py b/xcdat/bounds.py index 0d81742..0b011fe 100644 --- a/xcdat/bounds.py +++ b/xcdat/bounds.py @@ -1,9 +1,12 @@ """Bounds module for functions related to coordinate bounds.""" import collections +import warnings from typing import Dict, List, Literal, Optional import cf_xarray as cfxr # noqa: F401 +import cftime import numpy as np +import pandas as pd import xarray as xr from xcdat.axis import GENERIC_AXIS_MAP @@ -253,13 +256,32 @@ class BoundsAccessor: diffs = da_coord.diff(dim).values # Add beginning and end points to account for lower and upper bounds. + # np.array of string values with `dtype="timedelta64[ns]"` diffs = np.insert(diffs, 0, diffs[0]) diffs = np.append(diffs, diffs[-1]) - # Get lower and upper bounds by using the width relative to nearest point. + # In xarray and xCDAT, time coordinates with non-CF compliant calendars + # (360-day, noleap) and/or units ("months", "years") are decoded using + # `cftime` objects instead of `datetime` objects. `cftime` objects only + # support arithmetic using `timedelta` objects, so the values of `diffs` + # must be casted from `dtype="timedelta64[ns]"` to `timedelta`. + if da_coord.name in ("T", "time") and issubclass( + type(da_coord.values[0]), cftime.datetime + ): + diffs = pd.to_timedelta(diffs) + + # FIXME: These lines produces the warning: `PerformanceWarning: + # Adding/subtracting object-dtype array to TimedeltaArray not + # vectorized` after converting diffs to `timedelta`. I (Tom) was not + # able to find an alternative, vectorized solution at the time of this + # implementation. + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning) + # Get lower and upper bounds by using the width relative to nearest point. + lower_bounds = da_coord - diffs[:-1] * width + upper_bounds = da_coord + diffs[1:] * (1 - width) + # Transpose both bound arrays into a 2D array. - lower_bounds = da_coord - diffs[:-1] * width - upper_bounds = da_coord + diffs[1:] * (1 - width) bounds = np.array([lower_bounds, upper_bounds]).transpose() # Clip latitude bounds at (-90, 90) diff --git a/xcdat/dataset.py b/xcdat/dataset.py index eddc9bb..0c7e3c5 100644 --- a/xcdat/dataset.py +++ b/xcdat/dataset.py @@ -457,7 +457,7 @@ def _postprocess_dataset( if center_times: if dataset.cf.dims.get("T") is not None: - dataset = dataset.temporal.center_times(dataset) + dataset = dataset.temporal.center_times() else: raise ValueError("This dataset does not have a time coordinates to center.") diff --git a/xcdat/temporal.py b/xcdat/temporal.py index 20baec6..4a6078c 100644 --- a/xcdat/temporal.py +++ b/xcdat/temporal.py @@ -677,7 +677,7 @@ class TemporalAccessor: return ds_departs - def center_times(self, dataset: xr.Dataset) -> xr.Dataset: + def center_times(self) -> xr.Dataset: """Centers the time coordinates using the midpoint between time bounds. Time coordinates can be recorded using different intervals, including @@ -695,12 +695,9 @@ class TemporalAccessor: xr.Dataset The Dataset with centered time coordinates. 
""" - ds = dataset.copy() - - if hasattr(self, "_time_bounds") is False: - self._time_bounds = ds.bounds.get_bounds("time") + ds = self._dataset.copy() + time_bounds = ds.bounds.get_bounds("time") - time_bounds = self._time_bounds.copy() lower_bounds, upper_bounds = (time_bounds[:, 0].data, time_bounds[:, 1].data) bounds_diffs: np.timedelta64 = (upper_bounds - lower_bounds) / 2 bounds_mids: np.ndarray = lower_bounds + bounds_diffs @@ -842,7 +839,7 @@ class TemporalAccessor: ds = self._dataset.copy() if self._center_times: - ds = self.center_times(ds) + ds = self.center_times() if ( self._freq == "season" @@ -1393,14 +1390,14 @@ class TemporalAccessor: self._time_bounds[:, 1] - self._time_bounds[:, 0] ) - # Must be convert dtype from timedelta64[ns] to float64, specifically - # when chunking DataArrays using Dask. Otherwise, the numpy warning - # below is thrown: `DeprecationWarning: The `dtype` and `signature` - # arguments to ufuncs only select the general DType and not details such - # as the byte order or time unit (with rare exceptions see release - # notes). To avoid this warning please use the scalar types - # `np.float64`, or string notation.` + # Must be cast dtype from "timedelta64[ns]" to "float64", specifically + # when using Dask arrays. Otherwise, the numpy warning below is thrown: + # `DeprecationWarning: The `dtype` and `signature` arguments to ufuncs + # only select the general DType and not details such as the byte order + # or time unit (with rare exceptions see release notes). To avoid this + # warning please use the scalar types `np.float64`, or string notation.` time_lengths = time_lengths.astype(np.float64) + grouped_time_lengths = self._group_data(time_lengths) weights: xr.DataArray = grouped_time_lengths / grouped_time_lengths.sum() # type: ignore
xCDAT/xcdat
112eb58f797821f14af2934b7b2551b39912c291
diff --git a/tests/test_bounds.py b/tests/test_bounds.py index 2c1bfcb..92698ba 100644 --- a/tests/test_bounds.py +++ b/tests/test_bounds.py @@ -1,3 +1,4 @@ +import cftime import numpy as np import pytest import xarray as xr @@ -115,7 +116,7 @@ class TestAddBounds: with pytest.raises(ValueError): ds.bounds.add_bounds("lat") - def test__add_bounds_raises_errors_for_data_dim_and_length(self): + def test_add_bounds_raises_errors_for_data_dim_and_length(self): # Multidimensional lat = xr.DataArray( data=np.array([[0, 1, 2], [3, 4, 5]]), @@ -132,23 +133,23 @@ class TestAddBounds: # If coords dimensions does not equal 1. with pytest.raises(ValueError): - ds.bounds._add_bounds("lat") + ds.bounds.add_bounds("lat") # If coords are length of <=1. with pytest.raises(ValueError): - ds.bounds._add_bounds("lon") + ds.bounds.add_bounds("lon") - def test__add_bounds_returns_dataset_with_bounds_added(self): + def test_add_bounds_for_dataset_with_coords_as_datetime_objects(self): ds = self.ds.copy() - ds = ds.bounds._add_bounds("lat") - assert ds.lat_bnds.equals(lat_bnds) - assert ds.lat_bnds.is_generated == "True" + result = ds.bounds.add_bounds("lat") + assert result.lat_bnds.equals(lat_bnds) + assert result.lat_bnds.is_generated == "True" - ds = ds.bounds._add_bounds("lon") - assert ds.lon_bnds.equals(lon_bnds) - assert ds.lon_bnds.is_generated == "True" + result = result.bounds.add_bounds("lon") + assert result.lon_bnds.equals(lon_bnds) + assert result.lon_bnds.is_generated == "True" - ds = ds.bounds._add_bounds("time") + result = ds.bounds.add_bounds("time") # NOTE: The algorithm for generating time bounds doesn't extend the # upper bound into the next month. expected_time_bnds = xr.DataArray( @@ -173,16 +174,61 @@ class TestAddBounds: ], dtype="datetime64[ns]", ), - coords={"time": ds.time}, + coords={"time": ds.time.assign_attrs({"bounds": "time_bnds"})}, + dims=["time", "bnds"], + attrs={"is_generated": "True"}, + ) + + assert result.time_bnds.identical(expected_time_bnds) + + def test_returns_bounds_for_dataset_with_coords_as_cftime_objects(self): + ds = self.ds.copy() + ds = ds.drop_dims("time") + ds["time"] = xr.DataArray( + name="time", + data=np.array( + [ + cftime.DatetimeNoLeap(1850, 1, 1), + cftime.DatetimeNoLeap(1850, 2, 1), + cftime.DatetimeNoLeap(1850, 3, 1), + ], + ), + dims=["time"], + attrs={ + "axis": "T", + "long_name": "time", + "standard_name": "time", + }, + ) + + result = ds.bounds.add_bounds("time") + expected_time_bnds = xr.DataArray( + name="time_bnds", + data=np.array( + [ + [ + cftime.DatetimeNoLeap(1849, 12, 16, 12), + cftime.DatetimeNoLeap(1850, 1, 16, 12), + ], + [ + cftime.DatetimeNoLeap(1850, 1, 16, 12), + cftime.DatetimeNoLeap(1850, 2, 15, 0), + ], + [ + cftime.DatetimeNoLeap(1850, 2, 15, 0), + cftime.DatetimeNoLeap(1850, 3, 15, 0), + ], + ], + ), + coords={"time": ds.time.assign_attrs({"bounds": "time_bnds"})}, dims=["time", "bnds"], - attrs=ds.time_bnds.attrs, + attrs={"is_generated": "True"}, ) - assert ds.time_bnds.equals(expected_time_bnds) - assert ds.time_bnds.is_generated == "True" + assert result.time_bnds.identical(expected_time_bnds) -class TestGetCoord: +class Test_GetCoord: @pytest.fixture(autouse=True) def setup(self): self.ds = generate_dataset(cf_compliant=True, has_bounds=False) diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 4ca3438..d8e8c4d 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -264,7 +264,7 @@ class TestOpenMfDataset: assert result.identical(expected) -class TestHasCFCompliantTime: +class 
Test_HasCFCompliantTime: @pytest.fixture(autouse=True) def setUp(self, tmp_path): # Create temporary directory to save files. @@ -668,7 +668,7 @@ class TestDecodeNonCFTimeUnits: assert result.time_bnds.encoding == expected.time_bnds.encoding -class TestPostProcessDataset: +class Test_PostProcessDataset: @pytest.fixture(autouse=True) def setup(self): self.ds = generate_dataset(cf_compliant=True, has_bounds=True) @@ -868,7 +868,7 @@ class TestPostProcessDataset: _postprocess_dataset(ds, lon_orient=(0, 360)) -class TestKeepSingleVar: +class Test_KeepSingleVar: @pytest.fixture(autouse=True) def setup(self): self.ds = generate_dataset(cf_compliant=True, has_bounds=True) @@ -909,7 +909,7 @@ class TestKeepSingleVar: assert ds.get("time_bnds") is not None -class TestPreProcessNonCFDataset: +class Test_PreProcessNonCFDataset: @pytest.fixture(autouse=True) def setup(self): self.ds = generate_dataset(cf_compliant=False, has_bounds=True) @@ -944,7 +944,7 @@ class TestPreProcessNonCFDataset: assert result.identical(expected) -class TestSplitTimeUnitsAttr: +class Test_SplitTimeUnitsAttr: def test_raises_error_if_units_attr_is_none(self): with pytest.raises(KeyError): _split_time_units_attr(None) # type: ignore diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 15f4c48..a5d5ef2 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -64,7 +64,7 @@ class TestAverage: attrs={"is_generated": "True"}, ) ds["ts"] = xr.DataArray( - data=np.array([[[2]], [[1]], [[1]], [[1]], [[2]]]), + data=np.array([[[2]], [[np.nan]], [[1]], [[1]], [[2]]]), coords={"lat": ds.lat, "lon": ds.lon, "time": ds.time}, dims=["time", "lat", "lon"], ) @@ -74,7 +74,7 @@ class TestAverage: expected = ds.copy() expected = expected.drop_dims("time") expected["ts"] = xr.DataArray( - data=np.array([[1.4]]), + data=np.array([[1.5]]), coords={"lat": expected.lat, "lon": expected.lon}, dims=["lat", "lon"], attrs={ @@ -93,7 +93,7 @@ class TestAverage: expected = ds.copy() expected = expected.drop_dims("time") expected["ts"] = xr.DataArray( - data=np.array([[1.4]]), + data=np.array([[1.5]]), coords={"lat": expected.lat, "lon": expected.lon}, dims=["lat", "lon"], attrs={ @@ -120,7 +120,7 @@ class TestAverage: "2000-02-01T00:00:00.000000000", "2000-03-01T00:00:00.000000000", "2000-04-01T00:00:00.000000000", - "2000-05-01T00:00:00.000000000", + "2001-02-01T00:00:00.000000000", ], dtype="datetime64[ns]", ), @@ -142,7 +142,7 @@ class TestAverage: ["2000-02-01T00:00:00.000000000", "2000-03-01T00:00:00.000000000"], ["2000-03-01T00:00:00.000000000", "2000-04-01T00:00:00.000000000"], ["2000-04-01T00:00:00.000000000", "2000-05-01T00:00:00.000000000"], - ["2000-05-01T00:00:00.000000000", "2000-06-01T00:00:00.000000000"], + ["2001-01-01T00:00:00.000000000", "2000-03-01T00:00:00.000000000"], ], dtype="datetime64[ns]", ), @@ -151,7 +151,7 @@ class TestAverage: attrs={"is_generated": "True"}, ) ds["ts"] = xr.DataArray( - data=np.array([[[2]], [[1]], [[1]], [[1]], [[1]]]), + data=np.array([[[2]], [[np.nan]], [[1]], [[1]], [[1]]]), coords={"lat": ds.lat, "lon": ds.lon, "time": ds.time}, dims=["time", "lat", "lon"], ) @@ -161,7 +161,7 @@ class TestAverage: expected = ds.copy() expected = expected.drop_dims("time") expected["ts"] = xr.DataArray( - data=np.array([[1.2]]), + data=np.array([[1.24362357]]), coords={"lat": expected.lat, "lon": expected.lon}, dims=["lat", "lon"], attrs={ @@ -173,14 +173,14 @@ class TestAverage: }, ) - assert result.identical(expected) + xr.testing.assert_allclose(result, expected) # Test unweighted averages 
result = ds.temporal.average("ts", weighted=False) expected = ds.copy() expected = expected.drop_dims("time") expected["ts"] = xr.DataArray( - data=np.array([[1.2]]), + data=np.array([[1.25]]), coords={"lat": expected.lat, "lon": expected.lon}, dims=["lat", "lon"], attrs={ @@ -191,7 +191,7 @@ class TestAverage: "center_times": "False", }, ) - assert result.identical(expected) + xr.testing.assert_allclose(result, expected) def test_averages_for_daily_time_series(self): ds = xr.Dataset( @@ -826,6 +826,57 @@ class TestGroupAverage: assert result.identical(expected) + def test_weighted_monthly_averages_with_masked_data(self): + ds = self.ds.copy() + ds["ts"] = xr.DataArray( + data=np.array( + [[[2.0]], [[np.nan]], [[1.0]], [[1.0]], [[2.0]]], dtype="float64" + ), + coords={"time": self.ds.time, "lat": self.ds.lat, "lon": self.ds.lon}, + dims=["time", "lat", "lon"], + ) + + result = ds.temporal.group_average("ts", "month") + expected = ds.copy() + expected = expected.drop_dims("time") + expected["ts"] = xr.DataArray( + name="ts", + data=np.array([[[2.0]], [[0.0]], [[1.0]], [[1.0]], [[2.0]]]), + coords={ + "lat": expected.lat, + "lon": expected.lon, + "time": xr.DataArray( + data=np.array( + [ + "2000-01-01T00:00:00.000000000", + "2000-03-01T00:00:00.000000000", + "2000-06-01T00:00:00.000000000", + "2000-09-01T00:00:00.000000000", + "2001-02-01T00:00:00.000000000", + ], + dtype="datetime64[ns]", + ), + dims=["time"], + attrs={ + "axis": "T", + "long_name": "time", + "standard_name": "time", + "bounds": "time_bnds", + }, + ), + }, + dims=["time", "lat", "lon"], + attrs={ + "operation": "temporal_avg", + "mode": "group_average", + "freq": "month", + "weighted": "True", + "center_times": "False", + }, + ) + + assert result.identical(expected) + def test_weighted_daily_averages(self): ds = self.ds.copy() @@ -1584,7 +1635,7 @@ class TestCenterTimes: ds = ds.drop_dims("time") with pytest.raises(KeyError): - ds.temporal.center_times(ds) + ds.temporal.center_times() def test_gets_time_as_the_midpoint_between_time_bounds(self): ds = self.ds.copy() @@ -1658,7 +1709,7 @@ class TestCenterTimes: time_bounds["time"] = expected.time expected["time_bnds"] = time_bounds - result = ds.temporal.center_times(ds) + result = ds.temporal.center_times() assert result.identical(expected)
[Bug]: `add_bounds()` breaks when time coordinates are in `cftime` objects instead of `datetime` ### Bug Report Criteria - [X] Bug is not related to a data quality issue(s) beyond the scope of xCDAT - [X] Bug is not related to core xarray APIs (please open an issue in the xarray repo if it is) ### What happened? `cftime` datetime objects are used to represent time coordinates for non-cf compliant calendars (360-day, noleap) and units ("months", "years"). Unlike `datetime` datetime objects, `cftime` datetime objects (e.g., `cftime.Datetime`, `cftime.DatetimeNoLeap`) don't support arithmetic involving `timedelta64[ns]`, ints, floats, etc. In the formula to calculate the lower and upper bounds for each coordinate point, a subtraction and addition operation is performed respectively (example below). The `diffs` array consists of `timedelta64[ns]`, so it breaks (refer to MCV example and log outputs). https://github.com/xCDAT/xcdat/blob/112eb58f797821f14af2934b7b2551b39912c291/xcdat/bounds.py#L255-L263 Instead of subtracting `diffs` as a `np.array` of strings with a dtype of `timedelta64[ns]`, we have to subtract using `timedelta` objects. This can be achieved by using `pd.to_timedelta(diffs)`. ```python # Add beginning and end points to account for lower and upper bounds. # np.array of string values with dtype "timedelta64[ns]"" diffs = np.insert(diffs, 0, diffs[0]) diffs = np.append(diffs, diffs[-1]) # In xarray and xCDAT, `cftime` objects are used to represent time # coordinates for non-Cf compliant calendars (360-day, noleap) and # units ("months", "years"), instead of `datetime` objects. `cftime` # objects only support arithmetic using `timedelta`` objects, so # the values of `diffs` must be casted to `timedelta`. # FIXME: This line produces the warning: python3.9/site-packages/pandas # /core/arrays/datetimelike.py:1189: PerformanceWarning: # Adding/subtracting object-dtype array to TimedeltaArray not # vectorized.warnings.warn( diffs = pd.to_timedelta(diffs) ``` Related issue: https://github.com/Unidata/cftime/issues/198 ### What did you expect to happen? 
Bounds are generated regardless of the datetime object type used to represent time coordinates ### Minimal Complete Verifiable Example ```python import xcdat dataset_links = [ "https://esgf-data2.llnl.gov/thredds/dodsC/user_pub_work/E3SM/1_0/amip_1850_aeroF/1deg_atm_60-30km_ocean/atmos/180x360/time-series/mon/ens2/v3/TS_187001_189412.nc", "https://esgf-data2.llnl.gov/thredds/dodsC/user_pub_work/E3SM/1_0/amip_1850_aeroF/1deg_atm_60-30km_ocean/atmos/180x360/time-series/mon/ens2/v3/TS_189501_191912.nc", ] ds = xcdat.open_mfdataset(dataset_links) # Drop the existing time bounds to demonstrate adding new bounds ds = ds.drop_vars("time_bnds") # Breaks here dataset_links = [ "https://esgf-data2.llnl.gov/thredds/dodsC/user_pub_work/E3SM/1_0/amip_1850_aeroF/1deg_atm_60-30km_ocean/atmos/180x360/time-series/mon/ens2/v3/TS_187001_189412.nc", "https://esgf-data2.llnl.gov/thredds/dodsC/user_pub_work/E3SM/1_0/amip_1850_aeroF/1deg_atm_60-30km_ocean/atmos/180x360/time-series/mon/ens2/v3/TS_189501_191912.nc", ] ds = ds.bounds.add_bounds("time") ``` ### Relevant log output ```python During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/vo13/miniconda3/envs/xcdat_dev/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 3397, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "/tmp/ipykernel_9974/1848296045.py", line 1, in <cell line: 1> ds_new.bounds.add_bounds("time") File "/home/vo13/XCDAT/xcdat/xcdat/bounds.py", line 207, in add_bounds dataset = self._add_bounds(axis, width) File "/home/vo13/XCDAT/xcdat/xcdat/bounds.py", line 262, in _add_bounds lower_bounds = da_coord - diffs[:-1] * width File "/home/vo13/miniconda3/envs/xcdat_dev/lib/python3.9/site-packages/xarray/core/_typed_ops.py", line 209, in __sub__ return self._binary_op(other, operator.sub) File "/home/vo13/miniconda3/envs/xcdat_dev/lib/python3.9/site-packages/xarray/core/dataarray.py", line 3098, in _binary_op f(self.variable, other_variable) File "/home/vo13/miniconda3/envs/xcdat_dev/lib/python3.9/site-packages/xarray/core/_typed_ops.py", line 399, in __sub__ return self._binary_op(other, operator.sub) File "/home/vo13/miniconda3/envs/xcdat_dev/lib/python3.9/site-packages/xarray/core/variable.py", line 2467, in _binary_op f(self_data, other_data) if not reflexive else f(other_data, self_data) numpy.core._exceptions._UFuncBinaryResolutionError: ufunc 'subtract' cannot use operands with types dtype('O') and dtype('<m8[ns]') ``` ### Anything else we need to know? 
Related code: https://github.com/Unidata/cftime/blob/dc75368cd02bbcd1352dbecfef10404a58683f94/src/cftime/_cftime.pyx#L1020-L1021 https://github.com/Unidata/cftime/blob/dc75368cd02bbcd1352dbecfef10404a58683f94/src/cftime/_cftime.pyx#L439-L472 ### Environment INSTALLED VERSIONS ------------------ commit: None python: 3.9.12 | packaged by conda-forge | (main, Mar 24 2022, 23:22:55) [GCC 10.3.0] python-bits: 64 OS: Linux OS-release: 3.10.0-1160.45.1.el7.x86_64 machine: x86_64 processor: x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 LOCALE: ('en_US', 'UTF-8') libhdf5: 1.12.1 libnetcdf: 4.8.1 xarray: 2022.3.0 pandas: 1.4.1 numpy: 1.22.3 scipy: 1.8.1 netCDF4: 1.5.8 pydap: None h5netcdf: None h5py: None Nio: None zarr: None cftime: 1.6.0 nc_time_axis: None PseudoNetCDF: None rasterio: None cfgrib: None iris: None bottleneck: 1.3.2 dask: 2022.03.0 distributed: 2022.3.0 matplotlib: 3.5.1 cartopy: 0.20.1 seaborn: None numbagg: None fsspec: 2022.3.0 cupy: None pint: None sparse: None setuptools: 61.2.0 pip: 22.0.4 conda: None pytest: 7.1.1 IPython: 8.3.0 sphinx: 4.4.0
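A small standalone sketch of the arithmetic problem and the `pd.to_timedelta` workaround described above. It assumes `cftime`, `numpy`, and `pandas` are installed and mirrors the idea rather than the exact xCDAT code:

```python
import cftime
import numpy as np
import pandas as pd

t = cftime.DatetimeNoLeap(1850, 2, 1)
step = np.timedelta64(31, "D").astype("timedelta64[ns]")

# cftime datetimes only implement arithmetic with datetime.timedelta-like
# objects, so subtracting a raw numpy timedelta64 is expected to fail.
try:
    print(t - step)
except Exception as err:
    print("cftime - timedelta64 failed:", err)

# Casting through pandas gives a Timedelta (a datetime.timedelta subclass),
# which cftime accepts -- the same idea as `pd.to_timedelta(diffs)` above.
print(t - pd.to_timedelta(step))  # -> 1850-01-01 00:00:00 in the noleap calendar
```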
0.0
112eb58f797821f14af2934b7b2551b39912c291
[ "tests/test_bounds.py::TestAddBounds::test_returns_bounds_for_dataset_with_coords_as_cftime_objects", "tests/test_temporal.py::TestCenterTimes::test_gets_time_as_the_midpoint_between_time_bounds" ]
[ "tests/test_bounds.py::TestBoundsAccessor::test__init__", "tests/test_bounds.py::TestBoundsAccessor::test_decorator_call", "tests/test_bounds.py::TestBoundsAccessor::test_map_property_returns_map_of_axis_and_coordinate_keys_to_bounds_dataarray", "tests/test_bounds.py::TestBoundsAccessor::test_keys_property_returns_a_list_of_sorted_bounds_keys", "tests/test_bounds.py::TestAddMissingBounds::test_adds_bounds_in_dataset", "tests/test_bounds.py::TestAddMissingBounds::test_does_not_fill_bounds_for_coord_of_len_less_than_2", "tests/test_bounds.py::TestGetBounds::test_raises_error_when_bounds_dont_exist", "tests/test_bounds.py::TestGetBounds::test_getting_existing_bounds_in_dataset", "tests/test_bounds.py::TestGetBounds::test_get_nonexistent_bounds_in_dataset", "tests/test_bounds.py::TestGetBounds::test_raises_error_with_incorrect_coord_arg", "tests/test_bounds.py::TestAddBounds::test_add_bounds_raises_error_if_bounds_exist", "tests/test_bounds.py::TestAddBounds::test_add_bounds_raises_errors_for_data_dim_and_length", "tests/test_bounds.py::TestAddBounds::test_add_bounds_for_dataset_with_coords_as_datetime_objects", "tests/test_bounds.py::Test_GetCoord::test_gets_coords", "tests/test_bounds.py::Test_GetCoord::test_raises_error_if_coord_does_not_exist", "tests/test_dataset.py::TestOpenDataset::test_non_cf_compliant_time_is_not_decoded", "tests/test_dataset.py::TestOpenDataset::test_non_cf_compliant_time_is_decoded", "tests/test_dataset.py::TestOpenDataset::test_preserves_lat_and_lon_bounds_if_they_exist", "tests/test_dataset.py::TestOpenDataset::test_keeps_specified_var", "tests/test_dataset.py::TestOpenMfDataset::test_non_cf_compliant_time_is_not_decoded", "tests/test_dataset.py::TestOpenMfDataset::test_non_cf_compliant_time_is_decoded", "tests/test_dataset.py::TestOpenMfDataset::test_keeps_specified_var", "tests/test_dataset.py::Test_HasCFCompliantTime::test_non_cf_compliant_time", "tests/test_dataset.py::Test_HasCFCompliantTime::test_no_time_axis", "tests/test_dataset.py::Test_HasCFCompliantTime::test_glob_cf_compliant_time", "tests/test_dataset.py::Test_HasCFCompliantTime::test_list_cf_compliant_time", "tests/test_dataset.py::Test_HasCFCompliantTime::test_cf_compliant_time_with_string_path", "tests/test_dataset.py::Test_HasCFCompliantTime::test_cf_compliant_time_with_pathlib_path", "tests/test_dataset.py::Test_HasCFCompliantTime::test_cf_compliant_time_with_list_of_list_of_strings", "tests/test_dataset.py::Test_HasCFCompliantTime::test_cf_compliant_time_with_list_of_list_of_pathlib_paths", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_raises_error_if_function_is_called_on_already_decoded_cf_compliant_dataset", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_decodes_months_with_a_reference_date_at_the_start_of_the_month", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_decodes_months_with_a_reference_date_at_the_middle_of_the_month", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_decodes_months_with_a_reference_date_at_the_end_of_the_month", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_decodes_months_with_a_reference_date_on_a_leap_year", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_decodes_years_with_a_reference_date_at_the_middle_of_the_year", "tests/test_dataset.py::TestDecodeNonCFTimeUnits::test_decodes_years_with_a_reference_date_on_a_leap_year", "tests/test_dataset.py::Test_PostProcessDataset::test_keeps_specified_var", "tests/test_dataset.py::Test_PostProcessDataset::test_centers_time", 
"tests/test_dataset.py::Test_PostProcessDataset::test_raises_error_if_dataset_has_no_time_coords_but_center_times_is_true", "tests/test_dataset.py::Test_PostProcessDataset::test_adds_missing_lat_and_lon_bounds", "tests/test_dataset.py::Test_PostProcessDataset::test_orients_longitude_bounds_from_180_to_360_and_sorts_with_prime_meridian_cell", "tests/test_dataset.py::Test_PostProcessDataset::test_raises_error_if_dataset_has_no_longitude_coords_but_lon_orient_is_specified", "tests/test_dataset.py::Test_KeepSingleVar::tests_raises_error_if_only_bounds_data_variables_exist", "tests/test_dataset.py::Test_KeepSingleVar::test_raises_error_if_specified_data_var_does_not_exist", "tests/test_dataset.py::Test_KeepSingleVar::test_raises_error_if_specified_data_var_is_a_bounds_var", "tests/test_dataset.py::Test_KeepSingleVar::test_returns_dataset_with_specified_data_var", "tests/test_dataset.py::Test_KeepSingleVar::test_bounds_always_persist", "tests/test_dataset.py::Test_PreProcessNonCFDataset::test_user_specified_callable_results_in_subsetting_dataset_on_time_slice", "tests/test_dataset.py::Test_SplitTimeUnitsAttr::test_raises_error_if_units_attr_is_none", "tests/test_dataset.py::Test_SplitTimeUnitsAttr::test_splits_units_attr_to_unit_and_reference_date", "tests/test_temporal.py::TestTemporalAccessor::test__init__", "tests/test_temporal.py::TestTemporalAccessor::test_decorator", "tests/test_temporal.py::TestAverage::test_averages_for_yearly_time_series", "tests/test_temporal.py::TestAverage::test_averages_for_monthly_time_series", "tests/test_temporal.py::TestAverage::test_averages_for_daily_time_series", "tests/test_temporal.py::TestAverage::test_averages_for_hourly_time_series", "tests/test_temporal.py::TestGroupAverage::test_weighted_annual_averages", "tests/test_temporal.py::TestGroupAverage::test_weighted_annual_averages_with_chunking", "tests/test_temporal.py::TestGroupAverage::test_weighted_seasonal_averages_with_DJF_and_drop_incomplete_seasons", "tests/test_temporal.py::TestGroupAverage::test_weighted_seasonal_averages_with_DJF_without_dropping_incomplete_seasons", "tests/test_temporal.py::TestGroupAverage::test_weighted_seasonal_averages_with_JFD", "tests/test_temporal.py::TestGroupAverage::test_weighted_custom_seasonal_averages", "tests/test_temporal.py::TestGroupAverage::test_raises_error_with_incorrect_custom_seasons_argument", "tests/test_temporal.py::TestGroupAverage::test_weighted_monthly_averages", "tests/test_temporal.py::TestGroupAverage::test_weighted_monthly_averages_with_masked_data", "tests/test_temporal.py::TestGroupAverage::test_weighted_daily_averages", "tests/test_temporal.py::TestGroupAverage::test_weighted_daily_averages_and_center_times", "tests/test_temporal.py::TestGroupAverage::test_weighted_hourly_averages", "tests/test_temporal.py::TestClimatology::test_weighted_seasonal_climatology_with_DJF", "tests/test_temporal.py::TestClimatology::test_chunked_weighted_seasonal_climatology_with_DJF", "tests/test_temporal.py::TestClimatology::test_weighted_seasonal_climatology_with_JFD", "tests/test_temporal.py::TestClimatology::test_weighted_custom_seasonal_climatology", "tests/test_temporal.py::TestClimatology::test_weighted_monthly_climatology", "tests/test_temporal.py::TestClimatology::test_unweighted_monthly_climatology", "tests/test_temporal.py::TestClimatology::test_weighted_daily_climatology", "tests/test_temporal.py::TestClimatology::test_unweighted_daily_climatology", "tests/test_temporal.py::TestDepartures::test_weighted_seasonal_departures_with_DJF", 
"tests/test_temporal.py::TestDepartures::test_unweighted_seasonal_departures_with_DJF", "tests/test_temporal.py::TestDepartures::test_unweighted_seasonal_departures_with_JFD", "tests/test_temporal.py::TestCenterTimes::test_raises_error_if_time_dimension_does_not_exist_in_dataset", "tests/test_temporal.py::Test_SetObjAttrs::test_raises_error_if_operation_is_not_supported", "tests/test_temporal.py::Test_SetObjAttrs::test_raises_error_if_freq_arg_is_not_supported_by_operation", "tests/test_temporal.py::Test_SetObjAttrs::test_does_not_raise_error_if_freq_arg_is_supported_by_operation", "tests/test_temporal.py::Test_SetObjAttrs::test_raises_error_if_season_config_key_is_not_supported", "tests/test_temporal.py::Test_SetObjAttrs::test_raises_error_if_december_mode_is_not_supported", "tests/test_temporal.py::Test_GetWeights::TestWeightsForAverageMode::test_weights_for_yearly_averages", "tests/test_temporal.py::Test_GetWeights::TestWeightsForAverageMode::test_weights_for_monthly_averages", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_weights_for_yearly_averages", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_weights_for_monthly_averages", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_weights_for_seasonal_averages_with_DJF_and_drop_incomplete_seasons", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_weights_for_seasonal_averages_with_JFD", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_custom_season_time_series_weights", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_weights_for_daily_averages", "tests/test_temporal.py::Test_GetWeights::TestWeightsForGroupAverageMode::test_weights_for_hourly_averages", "tests/test_temporal.py::Test_GetWeights::TestWeightsForClimatologyMode::test_weights_for_seasonal_climatology_with_DJF", "tests/test_temporal.py::Test_GetWeights::TestWeightsForClimatologyMode::test_weights_for_seasonal_climatology_with_JFD", "tests/test_temporal.py::Test_GetWeights::TestWeightsForClimatologyMode::test_weights_for_annual_climatology", "tests/test_temporal.py::Test_GetWeights::TestWeightsForClimatologyMode::test_weights_for_daily_climatology" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-05-26 23:39:23+00:00
apache-2.0
6,275
xCDAT__xcdat-257
diff --git a/xcdat/axis.py b/xcdat/axis.py index 55eac02..5d1a256 100644 --- a/xcdat/axis.py +++ b/xcdat/axis.py @@ -100,22 +100,22 @@ def swap_lon_axis( The Dataset with swapped lon axes orientation. """ ds = dataset.copy() - lon: xr.DataArray = dataset.bounds._get_coords("lon").copy() + lon: xr.DataArray = _get_coord_var(ds, "X").copy() lon_bounds: xr.DataArray = dataset.bounds.get_bounds("lon").copy() with xr.set_options(keep_attrs=True): if to == (-180, 180): - lon = ((lon + 180) % 360) - 180 - lon_bounds = ((lon_bounds + 180) % 360) - 180 - ds = _reassign_lon(ds, lon, lon_bounds) + new_lon = ((lon + 180) % 360) - 180 + new_lon_bounds = ((lon_bounds + 180) % 360) - 180 + ds = _reassign_lon(ds, new_lon, new_lon_bounds) elif to == (0, 360): - lon = lon % 360 - lon_bounds = lon_bounds % 360 - ds = _reassign_lon(ds, lon, lon_bounds) + new_lon = lon % 360 + new_lon_bounds = lon_bounds % 360 + ds = _reassign_lon(ds, new_lon, new_lon_bounds) # Handle cases where a prime meridian cell exists, which can occur # after swapping to (0, 360). - p_meridian_index = _get_prime_meridian_index(lon_bounds) + p_meridian_index = _get_prime_meridian_index(new_lon_bounds) if p_meridian_index is not None: ds = _align_lon_to_360(ds, p_meridian_index) else: @@ -124,8 +124,13 @@ def swap_lon_axis( "orientations." ) + # If the swapped axis orientation is the same as the existing axis + # orientation, return the original Dataset. + if new_lon.identical(lon): + return dataset + if sort_ascending: - ds = ds.sortby(lon.name, ascending=True) + ds = ds.sortby(new_lon.name, ascending=True) return ds
xCDAT/xcdat
092854ac8327ebce6d9581e773a7f837f6dbc170
diff --git a/tests/test_axis.py b/tests/test_axis.py index 1263ed8..abf0943 100644 --- a/tests/test_axis.py +++ b/tests/test_axis.py @@ -143,6 +143,32 @@ class TestSwapLonAxis: with pytest.raises(ValueError): swap_lon_axis(ds_180, to=(0, 360)) + def test_does_not_swap_if_desired_orientation_is_the_same_as_the_existing_orientation( + self, + ): + ds_360 = xr.Dataset( + coords={ + "lon": xr.DataArray( + name="lon", + data=np.array([60, 150, 271]), + dims=["lon"], + attrs={"units": "degrees_east", "axis": "X", "bounds": "lon_bnds"}, + ) + }, + data_vars={ + "lon_bnds": xr.DataArray( + name="lon_bnds", + data=np.array([[0, 120], [120, 181], [181, 360]]), + dims=["lon", "bnds"], + attrs={"is_generated": "True"}, + ) + }, + ) + + result = swap_lon_axis(ds_360, to=(0, 360)) + + assert result.identical(ds_360) + def test_swap_from_360_to_180_and_sorts(self): ds_360 = xr.Dataset( coords={
[Bug]: Converting the longitude axis orientation to the same system results in odd behaviors ### What happened? > As a side note, there is some weird behavior here: the longitude axis goes from size 360 to 361 (and one set of lon_bnds goes from 0 to 0). I'm not sure if this is specific to converting from one longitude coordinate system to the same system (something people wouldn't normally do) or a more generic issue. This doesn't happen when converting to (-180, 180). -- @pochedls from https://github.com/xCDAT/xcdat/pull/239#issuecomment-1146235781 ### What did you expect to happen? The coordinates and coordinate bounds should remain the same when converting to the same axis system. Solution: detect that the desired axis system is the same as the existing system and skip the swap, returning the dataset unchanged. ### Minimal Complete Verifiable Example _No response_ ### Relevant log output _No response_ ### Anything else we need to know? _No response_ ### Environment `main` branch of xcdat
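A minimal sketch of the "same orientation" check, mirroring the `new_lon.identical(lon)` comparison in the patch above; the toy coordinate values are illustrative:

```python
import numpy as np
import xarray as xr

lon = xr.DataArray(
    name="lon",
    data=np.array([60.0, 150.0, 271.0]),
    dims="lon",
    attrs={"units": "degrees_east", "axis": "X"},
)

# Converting (0, 360) data to (0, 360) is numerically a no-op, so comparing
# the converted coordinate against the original detects the "same system"
# case and lets the function return the dataset untouched.
with xr.set_options(keep_attrs=True):
    new_lon = lon % 360

print(new_lon.identical(lon))  # True when the orientation already matches
```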
0.0
092854ac8327ebce6d9581e773a7f837f6dbc170
[ "tests/test_axis.py::TestSwapLonAxis::test_does_not_swap_if_desired_orientation_is_the_same_as_the_existing_orientation" ]
[ "tests/test_axis.py::TestCenterTimes::test_raises_error_if_time_coord_var_does_not_exist_in_dataset", "tests/test_axis.py::TestCenterTimes::test_raises_error_if_time_bounds_does_not_exist_in_the_dataset", "tests/test_axis.py::TestCenterTimes::test_gets_time_as_the_midpoint_between_time_bounds", "tests/test_axis.py::TestSwapLonAxis::test_raises_error_with_incorrect_lon_orientation_for_swapping", "tests/test_axis.py::TestSwapLonAxis::test_raises_error_if_lon_bounds_contains_more_than_one_prime_meridian_cell", "tests/test_axis.py::TestSwapLonAxis::test_swap_from_360_to_180_and_sorts", "tests/test_axis.py::TestSwapLonAxis::test_swap_from_180_to_360_and_sorts_with_prime_meridian_cell_in_lon_bnds" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-06-10 23:02:55+00:00
apache-2.0
6,276
xarg__kuku-4
diff --git a/kuku/dump.py b/kuku/dump.py index b9366c0..614fec6 100644 --- a/kuku/dump.py +++ b/kuku/dump.py @@ -48,11 +48,13 @@ def dump(rendering: Rendering) -> str: template_output = [] template_header = "# Source: {}\n".format(template_path) for k8s_object in k8s_objects: - # Override the default to_dict method so we can update the k8s keys if not k8s_object: + if k8s_object is None: + continue raise ValueError( "Template '{}' returned {} object".format(template_path, k8s_object) ) + # Override the default to_dict method so we can update the k8s keys k8s_object.to_dict = MethodType(_camelized_to_dict, k8s_object) k8s_object = k8s_object.to_dict()
xarg/kuku
be65d1b83cc3725cf7fbce16069559bd2e5cef93
diff --git a/kuku/tests/test_dump.py b/kuku/tests/test_dump.py index e69de29..0ccafa8 100644 --- a/kuku/tests/test_dump.py +++ b/kuku/tests/test_dump.py @@ -0,0 +1,6 @@ +from kuku.dump import dump + + +def test_dump_with_none_object(): + output = dump({"dir1": [None, ]}) + assert output == "# Source: dir1\n"
Feature request: ability to enable/disable a whole K8s resource

I want to do something like this:

```python
def template(context):
    if not context["deployment"]["enabled"]:
        return
    context_name = context["name"]
    labels = {"app": context_name}
    ...
```

This fails with

```
Traceback (most recent call last):
  File ".../bin/kuku", line 10, in <module>
    sys.exit(cli())
  File ".../kuku/cli.py", line 59, in cli
    output = dump(rendering)
  File ".../kuku/dump.py", line 54, in dump
    "Template '{}' returned {} object".format(template_path, k8s_object)
ValueError: Template 'deployment/k8s/templates/cronjob.py' returned None object
```

I am going to submit a PR that will skip the serialization/dumping of the `k8s_object` if it's `None`.
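A minimal standalone sketch of the intended behaviour -- templates that return `None` are skipped instead of raising. The function and variable names are illustrative only, not kuku's actual internals:

```python
# Illustrative rendering loop: a template can "disable" its resource by
# returning None, and the renderer simply skips it.
def render_templates(templates, context):
    objects = []
    for template in templates:
        result = template(context)
        if result is None:  # template opted out, e.g. disabled via values
            continue
        objects.append(result)
    return objects


def disabled(ctx):
    return None


def enabled(ctx):
    return {"kind": "CronJob", "metadata": {"name": ctx["name"]}}


print(render_templates([disabled, enabled], {"name": "demo"}))
# -> [{'kind': 'CronJob', 'metadata': {'name': 'demo'}}]
```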
0.0
be65d1b83cc3725cf7fbce16069559bd2e5cef93
[ "kuku/tests/test_dump.py::test_dump_with_none_object" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-10-03 16:13:45+00:00
apache-2.0
6,277
xarray-contrib__xskillscore-230
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1be599e..8c0f2fe 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,6 +13,6 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.8 - name: Lint via pre-commit checks uses: pre-commit/[email protected] diff --git a/.github/workflows/xskillscore_installs.yml b/.github/workflows/xskillscore_installs.yml index 68dfc61..4dc3d9d 100644 --- a/.github/workflows/xskillscore_installs.yml +++ b/.github/workflows/xskillscore_installs.yml @@ -17,7 +17,7 @@ jobs: - name: Setup python uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.8 - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e0b8757..211bb5b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -34,6 +34,8 @@ Internal Changes - Added Python 3.7 and Python 3.8 to the CI. Use the latest version of Python 3 for development. (:issue:`21`, :pr:`189`). `Aaron Spring`_ - Lint with the latest black. (:issue:`179`, :pr:`191`). `Ray Bell`_ +- Update mape algorithm from scikit-learn v0.24.0 and test against it. + (:issue:`160`, :pr:`230`) `Ray Bell`_ xskillscore v0.0.18 (2020-09-23) diff --git a/ci/doc.yml b/ci/doc.yml index afbdf91..ee45765 100644 --- a/ci/doc.yml +++ b/ci/doc.yml @@ -2,7 +2,7 @@ name: xskillscore-docs channels: - conda-forge dependencies: - - python=3.6 + - python=3.8 - bottleneck - doc8 - importlib_metadata diff --git a/xskillscore/core/deterministic.py b/xskillscore/core/deterministic.py index aafe46a..a8958eb 100644 --- a/xskillscore/core/deterministic.py +++ b/xskillscore/core/deterministic.py @@ -1046,12 +1046,13 @@ def mape(a, b, dim=None, weights=None, skipna=False, keep_attrs=False): .. math:: \\mathrm{MAPE} = \\frac{1}{n} \\sum_{i=1}^{n} \\frac{\\vert a_{i} - b_{i} \\vert} - {\\vert a_{i} \\vert} + {max(\epsilon, \\vert a_{i} \\vert)} .. note:: The percent error is calculated in reference to ``a``. Percent error is reported as decimal percent. I.e., a value of 1 is - 100%. + 100%. :math:`\epsilon` is an arbitrary small yet strictly positive + number to avoid undefined results when ``a`` is zero. Parameters ---------- @@ -1078,6 +1079,10 @@ def mape(a, b, dim=None, weights=None, skipna=False, keep_attrs=False): xarray.Dataset or xarray.DataArray Mean Absolute Percentage Error. + See Also + -------- + sklearn.metrics.mean_absolute_percentage_error + References ---------- https://en.wikipedia.org/wiki/Mean_absolute_percentage_error diff --git a/xskillscore/core/np_deterministic.py b/xskillscore/core/np_deterministic.py index e6fb19a..ae736ac 100644 --- a/xskillscore/core/np_deterministic.py +++ b/xskillscore/core/np_deterministic.py @@ -652,7 +652,7 @@ def _mape(a, b, weights, axis, skipna): .. math:: \\mathrm{MAPE} = \\frac{1}{n} \\sum_{i=1}^{n} \\frac{\\vert a_{i} - b_{i} \\vert} - {\\vert a_{i} \\vert} + {max(\epsilon, \\vert a_{i} \\vert)} Parameters ---------- @@ -679,6 +679,13 @@ def _mape(a, b, weights, axis, skipna): Percent error is reported as decimal percent. I.e., a value of 1 is 100%. + \epsilon is an arbitrary small yet strictly positive number to avoid + undefined results when ``a`` is zero. 
+ + See Also + -------- + sklearn.metrics.mean_absolute_percentage_error + References ---------- https://en.wikipedia.org/wiki/Mean_absolute_percentage_error @@ -687,8 +694,8 @@ def _mape(a, b, weights, axis, skipna): if skipna: a, b, weights = _match_nans(a, b, weights) weights = _check_weights(weights) - # replace divided by 0 with nan - mape = np.absolute(a - b) / np.absolute(np.where(a != 0, a, np.nan)) + epsilon = np.finfo(np.float64).eps + mape = np.absolute(a - b) / np.maximum(np.absolute(a), epsilon) if weights is not None: return sumfunc(mape * weights, axis=axis) / sumfunc(weights, axis=axis) else:
xarray-contrib/xskillscore
6783decc906adeefca05ef54f04461c326634677
diff --git a/.github/workflows/xskillscore_testing.yml b/.github/workflows/xskillscore_testing.yml index 4ed6d27..0508ea9 100644 --- a/.github/workflows/xskillscore_testing.yml +++ b/.github/workflows/xskillscore_testing.yml @@ -60,7 +60,7 @@ jobs: channels: conda-forge mamba-version: '*' activate-environment: xskillscore-docs-notebooks - python-version: 3.6 + python-version: 3.8 - name: Set up conda environment run: | mamba env update -f ci/docs_notebooks.yml diff --git a/xskillscore/tests/conftest.py b/xskillscore/tests/conftest.py index 059f3e2..9998ae9 100644 --- a/xskillscore/tests/conftest.py +++ b/xskillscore/tests/conftest.py @@ -68,6 +68,13 @@ def b_nan(b): return b.copy().where(b < 0.5) +# with zeros [email protected] +def a_with_zeros(a): + """Zeros""" + return a.copy().where(a < 0.5, 0) + + # dask @pytest.fixture def a_dask(a): @@ -116,6 +123,12 @@ def b_1d_nan(a_1d_nan): return b [email protected] +def a_1d_with_zeros(a_with_zeros): + """Timeseries of a with zeros""" + return a_with_zeros.isel(lon=0, lat=0, drop=True) + + # weights @pytest.fixture def weights(a): diff --git a/xskillscore/tests/test_metric_results_accurate.py b/xskillscore/tests/test_metric_results_accurate.py index a00e3d4..54f1481 100644 --- a/xskillscore/tests/test_metric_results_accurate.py +++ b/xskillscore/tests/test_metric_results_accurate.py @@ -2,7 +2,12 @@ import numpy as np import pytest import sklearn.metrics from scipy.stats import pearsonr, spearmanr -from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score +from sklearn.metrics import ( + mean_absolute_error, + mean_absolute_percentage_error, + mean_squared_error, + r2_score, +) import xskillscore as xs from xskillscore.core.deterministic import ( @@ -23,6 +28,11 @@ xs_skl_metrics = [ (r2, r2_score), (mse, mean_squared_error), (mae, mean_absolute_error), + (mape, mean_absolute_percentage_error), +] + +xs_skl_metrics_with_zeros = [ + (mape, mean_absolute_percentage_error), ] xs_scipy_metrics = [ @@ -34,7 +44,6 @@ xs_scipy_metrics = [ xs_np_metrics = [ - (mape, lambda x, y: np.mean(np.abs((x - y) / x))), (me, lambda x, y: np.mean(x - y)), (smape, lambda x, y: 1 / len(x) * np.sum(np.abs(y - x) / (np.abs(x) + np.abs(y)))), ] @@ -69,6 +78,15 @@ def test_xs_same_as_skl_same_name(a_1d, b_1d, request): assert np.allclose(actual, expected) [email protected]("xs_skl_metrics", xs_skl_metrics_with_zeros) +def test_xs_same_as_skl_with_zeros(a_1d_with_zeros, b_1d, xs_skl_metrics): + """Tests xskillscore metric is same as scikit-learn metric.""" + xs_metric, skl_metric = xs_skl_metrics + actual = xs_metric(a_1d_with_zeros, b_1d, "time") + expected = skl_metric(a_1d_with_zeros, b_1d) + assert np.allclose(actual, expected) + + @pytest.mark.parametrize("xs_scipy_metrics", xs_scipy_metrics) def test_xs_same_as_scipy(a_1d, b_1d, xs_scipy_metrics): """Tests xskillscore metric is same as scipy metric."""
Add MAPE to "See Also" and test against sklearn `sklearn` now has MAPE in its master branch (https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_regression.py#L197). Once released, we can test against it and add it to the "See Also" section for MAPE.
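A short plain-NumPy sketch of the zero-safe MAPE that scikit-learn computes and that the patch above adopts (the small epsilon guards against a zero denominator when the reference value is zero):

```python
import numpy as np

def mape(a, b):
    """Mean absolute percentage error with a zero-safe denominator,
    mirroring sklearn.metrics.mean_absolute_percentage_error."""
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    eps = np.finfo(np.float64).eps
    return np.mean(np.abs(a - b) / np.maximum(np.abs(a), eps))

print(mape([1.0, 2.0, 0.0], [1.1, 1.8, 0.1]))  # finite even with a zero truth value
```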
0.0
6783decc906adeefca05ef54f04461c326634677
[ "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_skl_with_zeros[xs_skl_metrics0]" ]
[ "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_skl[xs_skl_metrics0]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_skl[xs_skl_metrics1]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_skl[xs_skl_metrics2]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_skl[xs_skl_metrics3]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_skl_rmse[False]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_scipy[xs_scipy_metrics0]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_scipy[xs_scipy_metrics1]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_scipy[xs_scipy_metrics2]", "xskillscore/tests/test_metric_results_accurate.py::test_xs_same_as_scipy[xs_scipy_metrics3]", "xskillscore/tests/test_metric_results_accurate.py::test_mape_same_as_numpy[xs_np_metrics0]", "xskillscore/tests/test_metric_results_accurate.py::test_mape_same_as_numpy[xs_np_metrics1]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-01-05 03:58:59+00:00
apache-2.0
6,278
xarray-contrib__xskillscore-339
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4a955fd..40f2270 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,11 @@ Changelog History xskillscore v0.0.23 (2021-xx-xx) -------------------------------- +Internal Changes +~~~~~~~~~~~~~~~~ +- :py:func:`~xskillscore.resampling.resample_iterations_idx` do not break when ``dim`` is + not coordinate. (:issue:`303`, :pr:`339`) `Aaron Spring`_ + xskillscore v0.0.22 (2021-06-29) -------------------------------- diff --git a/xskillscore/core/resampling.py b/xskillscore/core/resampling.py index e4f1096..5481029 100644 --- a/xskillscore/core/resampling.py +++ b/xskillscore/core/resampling.py @@ -107,6 +107,8 @@ def resample_iterations(forecast, iterations, dim="member", dim_max=None, replac forecast_smp.append(forecast.isel({dim: idx}).assign_coords({dim: new_dim})) forecast_smp = xr.concat(forecast_smp, dim="iteration", **CONCAT_KWARGS) forecast_smp["iteration"] = np.arange(iterations) + if dim not in forecast.coords: + del forecast_smp.coords[dim] return forecast_smp.transpose(..., "iteration") @@ -172,7 +174,12 @@ def resample_iterations_idx( for interannual-to-decadal predictions experiments. Climate Dynamics, 40(1–2), 245–272. https://doi.org/10/f4jjvf """ - # equivalent to above + if dim not in forecast.coords: + forecast.coords[dim] = np.arange(0, forecast[dim].size) + dim_coord_set = True + else: + dim_coord_set = False + select_dim_items = forecast[dim].size new_dim = forecast[dim] @@ -205,4 +212,6 @@ def resample_iterations_idx( # return only dim_max members if dim_max is not None and dim_max <= forecast[dim].size: forecast_smp = forecast_smp.isel({dim: slice(None, dim_max)}) + if dim_coord_set: + del forecast_smp.coords[dim] return forecast_smp
xarray-contrib/xskillscore
ef0c0fd34add126eb88a0334b3da348b9eef971b
diff --git a/xskillscore/tests/test_resampling.py b/xskillscore/tests/test_resampling.py index 6d09fe3..572a55c 100644 --- a/xskillscore/tests/test_resampling.py +++ b/xskillscore/tests/test_resampling.py @@ -154,3 +154,14 @@ def test_resample_inputs(a_1d, func, input, chunk, replace): assert is_dask_collection(actual) if chunk else not is_dask_collection(actual) # input type preserved assert type(actual) == type(a_1d) + + [email protected]("func", resample_iterations_funcs) +def test_resample_dim_no_coord(func): + """resample_iterations doesnt fail when no dim coords""" + da = xr.DataArray( + np.random.rand(100, 3, 3), + coords=[("time", np.arange(100)), ("x", np.arange(3)), ("y", np.arange(3))], + ) + del da.coords["time"] + assert "time" not in func(da, 2, dim="time").coords
raise error if forecast doesn't contain coords in resample_iterations_idx `a = xr.DataArray(np.random.rand(1000, 3, 3), dims=['time', 'x', 'y'])` doesn't work in `xs.resample_iterations_idx(a, 500, 'time')` ``` xr.DataArray( np.random.rand(1000, 3, 3), coords=[("time", np.arange(1000)), ("x", np.arange(3)), ("y", np.arange(3))], ) ``` does. Taken from https://github.com/xarray-contrib/xskillscore/pull/302#issuecomment-832863346
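A small sketch of the failure mode described above and the manual workaround of assigning an index coordinate before resampling; this is illustrative only, since the patch recorded for this instance adds and removes the coordinate internally:

```python
# Sketch: before the fix, resampling along a dimension without a coordinate
# failed; assigning a dummy index coordinate first (and dropping it afterwards)
# works around it. Sizes mirror the example in the issue.
import numpy as np
import xarray as xr
import xskillscore as xs

a = xr.DataArray(np.random.rand(1000, 3, 3), dims=["time", "x", "y"])

a = a.assign_coords(time=np.arange(a.sizes["time"]))   # workaround
resampled = xs.resample_iterations_idx(a, 500, "time")
resampled = resampled.drop_vars("time")                # restore coord-less dim
print(resampled.sizes)                                 # time, x, y, iteration
```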
0.0
ef0c0fd34add126eb88a0334b3da348b9eef971b
[ "xskillscore/tests/test_resampling.py::test_resample_dim_no_coord[resample_iterations]", "xskillscore/tests/test_resampling.py::test_resample_dim_no_coord[resample_iterations_idx]" ]
[ "xskillscore/tests/test_resampling.py::test_resampling_roughly_identical_mean", "xskillscore/tests/test_resampling.py::test_gen_idx_replace[True]", "xskillscore/tests/test_resampling.py::test_gen_idx_replace[False]", "xskillscore/tests/test_resampling.py::test_resample_replace_False_once_same_mean[resample_iterations]", "xskillscore/tests/test_resampling.py::test_resample_replace_False_once_same_mean[resample_iterations_idx]", "xskillscore/tests/test_resampling.py::test_resample_dim_max[None-resample_iterations]", "xskillscore/tests/test_resampling.py::test_resample_dim_max[None-resample_iterations_idx]", "xskillscore/tests/test_resampling.py::test_resample_dim_max[5-resample_iterations]", "xskillscore/tests/test_resampling.py::test_resample_dim_max[5-resample_iterations_idx]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-True-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-True-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-False-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-False-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-multidim", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-True-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-True-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-False-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-False-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-True-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-True-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-False-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-False-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-multidim", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-True-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-True-False]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-False-True]", "xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-False-False]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-07-24 22:26:25+00:00
apache-2.0
6,279
xgcm__xhistogram-17
diff --git a/xhistogram/core.py b/xhistogram/core.py index c14f977..9f992c2 100644 --- a/xhistogram/core.py +++ b/xhistogram/core.py @@ -88,7 +88,6 @@ def _histogram_2d_vectorized(*args, bins=None, weights=None, density=False, """Calculate the histogram independently on each row of a 2D array""" N_inputs = len(args) - bins = _ensure_bins_is_a_list_of_arrays(bins, N_inputs) a0 = args[0] # consistency checks for inputa @@ -128,7 +127,9 @@ def _histogram_2d_vectorized(*args, bins=None, weights=None, density=False, # just throw out everything outside of the bins, as np.histogram does # TODO: make this optional? slices = (slice(None),) + (N_inputs * (slice(1, -1),)) - return bin_counts[slices] + bin_counts = bin_counts[slices] + + return bin_counts def histogram(*args, bins=None, axis=None, weights=None, density=False, @@ -242,9 +243,29 @@ def histogram(*args, bins=None, axis=None, weights=None, density=False, else: weights_reshaped = None - h = _histogram_2d_vectorized(*all_args_reshaped, bins=bins, - weights=weights_reshaped, - density=density, block_size=block_size) + n_inputs = len(all_args_reshaped) + bins = _ensure_bins_is_a_list_of_arrays(bins, n_inputs) + + bin_counts = _histogram_2d_vectorized(*all_args_reshaped, bins=bins, + weights=weights_reshaped, + density=density, + block_size=block_size) + + if density: + # Normalise by dividing by bin counts and areas such that all the + # histogram data integrated over all dimensions = 1 + bin_widths = [np.diff(b) for b in bins] + if n_inputs == 1: + bin_areas = bin_widths[0] + elif n_inputs == 2: + bin_areas = np.outer(*bin_widths) + else: + # Slower, but N-dimensional logic + bin_areas = np.prod(np.ix_(*bin_widths)) + + h = bin_counts / bin_areas / bin_counts.sum() + else: + h = bin_counts if h.shape[0] == 1: assert do_full_array diff --git a/xhistogram/xarray.py b/xhistogram/xarray.py index c4d41d8..8b9af1f 100644 --- a/xhistogram/xarray.py +++ b/xhistogram/xarray.py @@ -126,7 +126,7 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, axis = None h_data = _histogram(*args_data, weights=weights_data, bins=bins, axis=axis, - block_size=block_size) + density=density, block_size=block_size) # create output dims new_dims = [a.name + bin_dim_suffix for a in args[:N_args]] @@ -155,6 +155,12 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords, name=output_name) + + if density: + # correct for overcounting the bins which weren't histogrammed along + n_bins_bystander_dims = da_out.isel(**{bd: 0 for bd in new_dims}).size + da_out = da_out * n_bins_bystander_dims + return da_out # we need weights to be passed through apply_func's alignment algorithm,
xgcm/xhistogram
c53fea67ab8ed4cb47dac625301454433c9eab09
diff --git a/xhistogram/test/test_core.py b/xhistogram/test/test_core.py index 1ba4f88..a5b0a05 100644 --- a/xhistogram/test/test_core.py +++ b/xhistogram/test/test_core.py @@ -27,6 +27,33 @@ def test_histogram_results_1d(block_size): np.testing.assert_array_equal(hist, h_na) [email protected]('block_size', [None, 1, 2]) +def test_histogram_results_1d_density(block_size): + nrows, ncols = 5, 20 + data = np.random.randn(nrows, ncols) + bins = np.linspace(-4, 4, 10) + + h = histogram(data, bins=bins, axis=1, block_size=block_size, density=True) + assert h.shape == (nrows, len(bins)-1) + + # make sure we get the same thing as histogram + hist, _ = np.histogram(data, bins=bins, density=True) + np.testing.assert_allclose(hist, h.sum(axis=0)) + + # check integral is 1 + widths = np.diff(bins) + integral = np.sum(hist * widths) + np.testing.assert_allclose(integral, 1.0) + + # now try with no axis + h_na = histogram(data, bins=bins, block_size=block_size, density=True) + np.testing.assert_array_equal(hist, h_na) + + # check integral is 1 + integral = np.sum(h_na * widths) + np.testing.assert_allclose(integral, 1.0) + + @pytest.mark.parametrize('block_size', [None, 1, 2]) def test_histogram_results_1d_weighted(block_size): nrows, ncols = 5, 20 @@ -52,7 +79,6 @@ def test_histogram_results_1d_weighted_broadcasting(block_size): np.testing.assert_array_equal(2*h, h_w) - def test_histogram_results_2d(): nrows, ncols = 5, 20 data_a = np.random.randn(nrows, ncols) @@ -70,6 +96,61 @@ def test_histogram_results_2d(): np.testing.assert_array_equal(hist, h) +def test_histogram_results_2d_density(): + nrows, ncols = 5, 20 + data_a = np.random.randn(nrows, ncols) + data_b = np.random.randn(nrows, ncols) + nbins_a = 9 + bins_a = np.linspace(-4, 4, nbins_a + 1) + nbins_b = 10 + bins_b = np.linspace(-4, 4, nbins_b + 1) + + h = histogram(data_a, data_b, bins=[bins_a, bins_b], density=True) + assert h.shape == (nbins_a, nbins_b) + + hist, _, _ = np.histogram2d(data_a.ravel(), data_b.ravel(), + bins=[bins_a, bins_b], density=True) + np.testing.assert_allclose(hist, h) + + # check integral is 1 + widths_a = np.diff(bins_a) + widths_b = np.diff(bins_b) + areas = np.outer(widths_a, widths_b) + integral = np.sum(hist * areas) + np.testing.assert_allclose(integral, 1.0) + + +def test_histogram_results_3d_density(): + nrows, ncols = 5, 20 + data_a = np.random.randn(nrows, ncols) + data_b = np.random.randn(nrows, ncols) + data_c = np.random.randn(nrows, ncols) + nbins_a = 9 + bins_a = np.linspace(-4, 4, nbins_a + 1) + nbins_b = 10 + bins_b = np.linspace(-4, 4, nbins_b + 1) + nbins_c = 9 + bins_c = np.linspace(-4, 4, nbins_c + 1) + + h = histogram(data_a, data_b, data_c, bins=[bins_a, bins_b, bins_c], + density=True) + + assert h.shape == (nbins_a, nbins_b, nbins_c) + + hist, _ = np.histogramdd((data_a.ravel(), data_b.ravel(), data_c.ravel()), + bins=[bins_a, bins_b, bins_c], density=True) + + np.testing.assert_allclose(hist, h) + + # check integral is 1 + widths_a = np.diff(bins_a) + widths_b = np.diff(bins_b) + widths_c = np.diff(bins_c) + areas = np.einsum('i,j,k', widths_a, widths_b, widths_c) + integral = np.sum(hist * areas) + np.testing.assert_allclose(integral, 1.0) + + @pytest.mark.parametrize('block_size', [None, 5, 'auto']) @pytest.mark.parametrize('use_dask', [False, True]) def test_histogram_shape(use_dask, block_size): diff --git a/xhistogram/test/test_xarray.py b/xhistogram/test/test_xarray.py index 0018f0d..7be7449 100644 --- a/xhistogram/test/test_xarray.py +++ b/xhistogram/test/test_xarray.py @@ -59,6 
+59,32 @@ def test_histogram_ones(ones, ndims): _check_result(h, d) [email protected]('ndims', [1, 2, 3, 4]) +def test_histogram_ones_density(ones, ndims): + dims = ones.dims + if ones.ndim < ndims: + pytest.skip( + "Don't need to test when number of dimension combinations " + "exceeds the number of array dimensions") + + # everything should be in the middle bin (index 1) + bins = np.array([0, 0.9, 1.1, 2]) + bin_area = 0.2 + + def _check_result(h_density, d): + other_dims = [dim for dim in ones.dims if dim not in d] + if len(other_dims) > 0: + assert set(other_dims) <= set(h_density.dims) + + # check that all integrals over pdfs at different locations are = 1 + h_integrals = (h_density * bin_area).sum(dim='ones_bin') + np.testing.assert_allclose(h_integrals.values, 1.0) + + for d in combinations(dims, ndims): + h_density = histogram(ones, bins=[bins], dim=d, density=True) + _check_result(h_density, d) + + # TODO: refactor this test to use better fixtures # (it currently has a ton of loops) @pytest.mark.parametrize('ndims', [1, 2, 3, 4])
Support for density=True keyword Numpy histogram has the `density` keyword, which normalizes the outputs by the bin spacing. https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html We can easily support this in xhistogram, but it has to be plugged in. Just opening this issue to make a note of it.
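For reference, a short standalone illustration of the `density` normalisation that `numpy.histogram` applies (counts divided by the bin widths and the total in-range count, so the histogram integrates to one); this is a sketch of the behaviour being requested, not code from the patch:

```python
# Sketch: numpy's density normalisation divides the counts by the bin widths
# and the total in-range count so that the histogram integrates to one.
import numpy as np

data = np.random.randn(1000)
bins = np.linspace(-4, 4, 11)

counts, _ = np.histogram(data, bins=bins)
widths = np.diff(bins)
density = counts / widths / counts.sum()

np.testing.assert_allclose(density, np.histogram(data, bins=bins, density=True)[0])
np.testing.assert_allclose(np.sum(density * widths), 1.0)
```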
0.0
c53fea67ab8ed4cb47dac625301454433c9eab09
[ "xhistogram/test/test_core.py::test_histogram_results_1d_density[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_density[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_density[2]", "xhistogram/test/test_core.py::test_histogram_results_2d_density", "xhistogram/test/test_xarray.py::test_histogram_ones_density[1D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[2D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[2D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[3D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[3D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[3D-3]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[4D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[4D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[4D-3]", "xhistogram/test/test_xarray.py::test_histogram_ones_density[4D-4]" ]
[ "xhistogram/test/test_core.py::test_histogram_results_1d[None]", "xhistogram/test/test_core.py::test_histogram_results_1d[1]", "xhistogram/test/test_core.py::test_histogram_results_1d[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[auto]", "xhistogram/test/test_core.py::test_histogram_results_2d", "xhistogram/test/test_core.py::test_histogram_shape[False-None]", "xhistogram/test/test_core.py::test_histogram_shape[False-5]", "xhistogram/test/test_core.py::test_histogram_shape[False-auto]", "xhistogram/test/test_core.py::test_histogram_shape[True-None]", "xhistogram/test/test_core.py::test_histogram_shape[True-5]", "xhistogram/test/test_core.py::test_histogram_shape[True-auto]", "xhistogram/test/test_xarray.py::test_histogram_ones[1D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[2D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[2D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones[3D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[3D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones[3D-3]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-3]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-4]", "xhistogram/test/test_xarray.py::test_weights[1D-1]", "xhistogram/test/test_xarray.py::test_weights[2D-1]", "xhistogram/test/test_xarray.py::test_weights[2D-2]", "xhistogram/test/test_xarray.py::test_weights[3D-1]", "xhistogram/test/test_xarray.py::test_weights[3D-2]", "xhistogram/test/test_xarray.py::test_weights[3D-3]", "xhistogram/test/test_xarray.py::test_weights[4D-1]", "xhistogram/test/test_xarray.py::test_weights[4D-2]", "xhistogram/test/test_xarray.py::test_weights[4D-3]", "xhistogram/test/test_xarray.py::test_weights[4D-4]", "xhistogram/test/test_xarray.py::test_dims_and_coords" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-02-12 15:42:06+00:00
mit
6,280
xgcm__xhistogram-20
diff --git a/doc/contributing.rst b/doc/contributing.rst index cc351d3..80841b5 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -8,6 +8,12 @@ GitHub repo: `https://github.com/xgcm/xhistogram <https://github.com/xgcm/xhisto Release History --------------- +v0.1.? +~~~~~ + +- Aligned definition of `bins` with `numpy.histogram` (:pr:`???`) + By `Dougie Squire <https://github.com/dougiesquire>`_. + v0.1.1 ~~~~~~ diff --git a/xhistogram/core.py b/xhistogram/core.py index 9f992c2..559d1a2 100644 --- a/xhistogram/core.py +++ b/xhistogram/core.py @@ -107,6 +107,15 @@ def _histogram_2d_vectorized(*args, bins=None, weights=None, density=False, # https://github.com/numpy/numpy/blob/9c98662ee2f7daca3f9fae9d5144a9a8d3cabe8c/numpy/lib/histograms.py#L864-L882 # for now we stick with `digitize` because it's easy to understand how it works + # Add small increment to the last bin edge to make the final bin right-edge inclusive + # Note, this is the approach taken by sklearn, e.g. + # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py#L592 + # but a better approach would be to use something like _search_sorted_inclusive() in + # numpy histogram. This is an additional motivation for moving to searchsorted + bins = [np.concatenate(( + b[:-1], + b[-1:] + 1e-8)) for b in bins] + # the maximum possible value of of digitize is nbins # for right=False: # - 0 corresponds to a < b[0] @@ -154,6 +163,9 @@ def histogram(*args, bins=None, axis=None, weights=None, density=False, * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. + When bin edges are specified, all but the last (righthand-most) bin include + the left edge and exclude the right edge. The last bin includes both edges. + A ``TypeError`` will be raised if ``args`` contains dask arrays and ``bins`` are not specified explicitly as a list of arrays. axis : None or int or tuple of ints, optional diff --git a/xhistogram/xarray.py b/xhistogram/xarray.py index cd9b65f..2dc8ba9 100644 --- a/xhistogram/xarray.py +++ b/xhistogram/xarray.py @@ -31,6 +31,9 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. + When bin edges are specified, all but the last (righthand-most) bin include + the left edge and exclude the right edge. The last bin includes both edges. + A ``TypeError`` will be raised if ``args`` contains dask arrays and ``bins`` are not specified explicitly as a list of arrays. dim : tuple of strings, optional
xgcm/xhistogram
4ae9f0a5afd50bf2c143828e6114bfcbbcce905b
diff --git a/xhistogram/test/test_core.py b/xhistogram/test/test_core.py index a5b0a05..e4802fb 100644 --- a/xhistogram/test/test_core.py +++ b/xhistogram/test/test_core.py @@ -79,6 +79,27 @@ def test_histogram_results_1d_weighted_broadcasting(block_size): np.testing.assert_array_equal(2*h, h_w) [email protected]('block_size', [None, 1, 2]) +def test_histogram_right_edge(block_size): + """Test that last bin is both left- and right-edge inclusive as it + is for numpy.histogram + """ + nrows, ncols = 5, 20 + data = np.ones((nrows, ncols)) + bins = np.array([0, 0.5, 1]) # All data at rightmost edge + + h = histogram(data, bins=bins, axis=1, block_size=block_size) + assert h.shape == (nrows, len(bins)-1) + + # make sure we get the same thing as histogram (all data in the last bin) + hist, _ = np.histogram(data, bins=bins) + np.testing.assert_array_equal(hist, h.sum(axis=0)) + + # now try with no axis + h_na = histogram(data, bins=bins, block_size=block_size) + np.testing.assert_array_equal(hist, h_na) + + def test_histogram_results_2d(): nrows, ncols = 5, 20 data_a = np.random.randn(nrows, ncols)
Align definition of `bins` with numpy.histogram When bin edges are specified in `numpy.histogram`, the last bin is closed on both sides. From the `numpy` docs (https://numpy.org/doc/stable/reference/generated/numpy.histogram.html): > All but the last (righthand-most) bin is half-open. In other words, if bins is: > > [1, 2, 3, 4] > then the first bin is [1, 2) (including 1, but excluding 2) and the second [2, 3). The last bin, however, is [3, 4], which includes 4. This is not the case for `xhistogram`, e.g.: ```python import numpy as np import xarray as xr from xhistogram.xarray import histogram as xhist data = np.ones(10) da = xr.DataArray(data, coords=[range(10)], dims=['x']).rename('test') bins = np.array([0, 0.5, 1]) print(f'xhistogram: {xhist(da, bins=[bins]).values}') print(f'numpy histogram: {np.histogram(data, bins=bins)[0]}') ``` ``` xhistogram: [0 0] numpy histogram: [ 0 10] ``` Could we make it the case?
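The patch recorded above makes the final bin right-edge inclusive by nudging the last bin edge before calling `digitize`; a minimal standalone sketch of that trick (the `1e-8` increment is illustrative):

```python
# Sketch: np.digitize treats a value equal to the last edge as out of range;
# nudging the last edge makes the final bin right-edge inclusive, matching
# np.histogram, which counts all the ones in the last bin.
import numpy as np

data = np.ones(10)
bins = np.array([0, 0.5, 1.0])

print(np.digitize(data, bins))           # -> all 3: beyond the last edge
bins_adj = np.concatenate((bins[:-1], bins[-1:] + 1e-8))
print(np.digitize(data, bins_adj))       # -> all 2: the last valid bin
print(np.histogram(data, bins=bins)[0])  # -> [ 0 10]
```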
0.0
4ae9f0a5afd50bf2c143828e6114bfcbbcce905b
[ "xhistogram/test/test_core.py::test_histogram_right_edge[None]", "xhistogram/test/test_core.py::test_histogram_right_edge[1]", "xhistogram/test/test_core.py::test_histogram_right_edge[2]" ]
[ "xhistogram/test/test_core.py::test_histogram_results_1d[None]", "xhistogram/test/test_core.py::test_histogram_results_1d[1]", "xhistogram/test/test_core.py::test_histogram_results_1d[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_density[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_density[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_density[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[auto]", "xhistogram/test/test_core.py::test_histogram_results_2d", "xhistogram/test/test_core.py::test_histogram_results_2d_density", "xhistogram/test/test_core.py::test_histogram_shape[False-None]", "xhistogram/test/test_core.py::test_histogram_shape[False-5]", "xhistogram/test/test_core.py::test_histogram_shape[False-auto]", "xhistogram/test/test_core.py::test_histogram_shape[True-None]", "xhistogram/test/test_core.py::test_histogram_shape[True-5]", "xhistogram/test/test_core.py::test_histogram_shape[True-auto]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-09-18 06:19:48+00:00
mit
6,281
xgcm__xhistogram-45
diff --git a/doc/contributing.rst b/doc/contributing.rst index 67aa942..e2a8407 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -140,6 +140,13 @@ Preparing Pull Requests Release History --------------- +v0.2.1 (not yet released) +~~~~~~~~~~~~~~~~~~~~~~~~~ +- Implemented various options for users for providing bins to + xhistogram that mimic the numpy histogram API. This included + adding a range argument to the xhistogram API :issue:`13`. + By `Dougie Squire <https://github.com/dougiesquire>`_. + v0.2.0 ~~~~~~ diff --git a/xhistogram/core.py b/xhistogram/core.py index 9275e3b..3470506 100644 --- a/xhistogram/core.py +++ b/xhistogram/core.py @@ -3,8 +3,10 @@ Numpy API for xhistogram. """ +import dask import numpy as np from functools import reduce +from collections.abc import Iterable from .duck_array_ops import ( digitize, bincount, @@ -13,16 +15,45 @@ from .duck_array_ops import ( concatenate, broadcast_arrays, ) -import warnings +# range is a keyword so save the builtin so they can use it. +_range = range -def _ensure_bins_is_a_list_of_arrays(bins, N_expected): + +def _ensure_correctly_formatted_bins(bins, N_expected): + # TODO: This could be done better / more robustly + if bins is None: + raise ValueError("bins must be provided") + if isinstance(bins, (int, str, np.ndarray)): + bins = N_expected * [bins] if len(bins) == N_expected: return bins - elif N_expected == 1: - return [bins] else: - raise ValueError("Can't figure out what to do with bins.") + raise ValueError( + "The number of bin definitions doesn't match the number of args" + ) + + +def _ensure_correctly_formatted_range(range_, N_expected): + # TODO: This could be done better / more robustly + def _iterable_nested(x): + return all(isinstance(i, Iterable) for i in x) + + if range_ is not None: + if (len(range_) == 2) & (not _iterable_nested(range_)): + return N_expected * [range_] + elif N_expected == len(range_): + if all(len(x) == 2 for x in range_): + return range_ + else: + raise ValueError( + "range should be provided as (lower_range, upper_range). In the " + + "case of multiple args, range should be a list of such tuples" + ) + else: + raise ValueError("The number of ranges doesn't match the number of args") + else: + return N_expected * [range_] def _bincount_2d(bin_indices, weights, N, hist_shapes): @@ -148,7 +179,13 @@ def _histogram_2d_vectorized( def histogram( - *args, bins=None, axis=None, weights=None, density=False, block_size="auto" + *args, + bins=None, + range=None, + axis=None, + weights=None, + density=False, + block_size="auto", ): """Histogram applied along specified axis / axes. @@ -158,23 +195,38 @@ def histogram( Input data. The number of input arguments determines the dimensonality of the histogram. For example, two arguments prodocue a 2D histogram. All args must have the same size. - bins : int or array_like or a list of ints or arrays, optional + bins : int, str or numpy array or a list of ints, strs and/or arrays, optional If a list, there should be one entry for each item in ``args``. - The bin specification: + The bin specifications are as follows: - * If int, the number of bins for all arguments in ``args``. - * If array_like, the bin edges for all arguments in ``args``. - * If a list of ints, the number of bins for every argument in ``args``. - * If a list arrays, the bin edges for each argument in ``args`` - (required format for Dask inputs). - * A combination [int, array] or [array, int], where int - is the number of bins and array is the bin edges. 
+ * If int; the number of bins for all arguments in ``args``. + * If str; the method used to automatically calculate the optimal bin width + for all arguments in ``args``, as defined by numpy `histogram_bin_edges`. + * If numpy array; the bin edges for all arguments in ``args``. + * If a list of ints, strs and/or arrays; the bin specification as + above for every argument in ``args``. When bin edges are specified, all but the last (righthand-most) bin include the left edge and exclude the right edge. The last bin includes both edges. - A ``TypeError`` will be raised if ``args`` contains dask arrays and - ``bins`` are not specified explicitly as a list of arrays. + A TypeError will be raised if args contains dask arrays and bins are not + specified explicitly as an array or list of arrays. This is because other + bin specifications trigger computation. + range : (float, float) or a list of (float, float), optional + If a list, there should be one entry for each item in ``args``. + The range specifications are as follows: + + * If (float, float); the lower and upper range(s) of the bins for all + arguments in ``args``. Values outside the range are ignored. The first + element of the range must be less than or equal to the second. `range` + affects the automatic bin computation as well. In this case, while bin + width is computed to be optimal based on the actual data within `range`, + the bin count will fill the entire range including portions containing + no data. + * If a list of (float, float); the ranges as above for every argument in + ``args``. + * If not provided, range is simply ``(arg.min(), arg.max())`` for each + arg. axis : None or int or tuple of ints, optional Axis or axes along which the histogram is computed. The default is to compute the histogram of the flattened array @@ -203,25 +255,19 @@ def histogram( ------- hist : array The values of the histogram. + bin_edges : list of arrays + Return the bin edges for each input array. See Also -------- numpy.histogram, numpy.bincount, numpy.digitize """ - # Future warning for https://github.com/xgcm/xhistogram/pull/45 - warnings.warn( - "Future versions of xhistogram.core.histogram will return a " - + "tuple containing arrays of the the histogram bins and the " - + "histogram values, rather than just an array of the histogram " - + "values. This API change will only affect users of " - + "xhistogram.core. 
Users of xhistogram.xarray can ignore this " - + "message.", - FutureWarning, - ) - a0 = args[0] ndim = a0.ndim + n_inputs = len(args) + + is_dask_array = any([dask.is_dask_collection(a) for a in args]) if axis is not None: axis = np.atleast_1d(axis) @@ -236,11 +282,11 @@ def histogram( axis_normed.append(ax_positive) axis = np.atleast_1d(axis_normed) - do_full_array = (axis is None) or (set(axis) == set(range(a0.ndim))) + do_full_array = (axis is None) or (set(axis) == set(_range(a0.ndim))) if do_full_array: kept_axes_shape = None else: - kept_axes_shape = tuple([a0.shape[i] for i in range(a0.ndim) if i not in axis]) + kept_axes_shape = tuple([a0.shape[i] for i in _range(a0.ndim) if i not in axis]) all_args = list(args) if weights is not None: @@ -254,7 +300,7 @@ def histogram( # reshape the array to 2D # axis 0: preserved axis after histogram # axis 1: calculate histogram along this axis - new_pos = tuple(range(-len(axis), 0)) + new_pos = tuple(_range(-len(axis), 0)) c = np.moveaxis(a, axis, new_pos) split_idx = c.ndim - len(axis) dims_0 = c.shape[:split_idx] @@ -272,8 +318,23 @@ def histogram( else: weights_reshaped = None - n_inputs = len(all_args_reshaped) - bins = _ensure_bins_is_a_list_of_arrays(bins, n_inputs) + # Some sanity checks and format bins and range correctly + bins = _ensure_correctly_formatted_bins(bins, n_inputs) + range = _ensure_correctly_formatted_range(range, n_inputs) + + # histogram_bin_edges trigges computation on dask arrays. It would be possible + # to write a version of this that doesn't trigger when `range` is provided, but + # for now let's just use np.histogram_bin_edges + if is_dask_array: + if not all([isinstance(b, np.ndarray) for b in bins]): + raise TypeError( + "When using dask arrays, bins must be provided as numpy array(s) of edges" + ) + else: + bins = [ + np.histogram_bin_edges(a, b, r, weights_reshaped) + for a, b, r in zip(all_args_reshaped, bins, range) + ] bin_counts = _histogram_2d_vectorized( *all_args_reshaped, @@ -306,4 +367,4 @@ def histogram( final_shape = kept_axes_shape + h.shape[1:] h = reshape(h, final_shape) - return h + return h, bins diff --git a/xhistogram/xarray.py b/xhistogram/xarray.py index 8c5e944..aea7a1f 100644 --- a/xhistogram/xarray.py +++ b/xhistogram/xarray.py @@ -3,22 +3,23 @@ Xarray API for xhistogram. """ import xarray as xr -import numpy as np from collections import OrderedDict from .core import histogram as _histogram -import warnings + +# range is a keyword so save the builtin so they can use it. +_range = range def histogram( *args, bins=None, + range=None, dim=None, weights=None, density=False, block_size="auto", keep_coords=False, - bin_dim_suffix="_bin", - bin_edge_suffix="_bin_edge" + bin_dim_suffix="_bin" ): """Histogram applied along specified dimensions. @@ -28,23 +29,38 @@ def histogram( Input data. The number of input arguments determines the dimensonality of the histogram. For example, two arguments prodocue a 2D histogram. All args must be aligned and have the same dimensions. - bins : int or array_like or a list of ints or arrays, optional + bins : int, str or numpy array or a list of ints, strs and/or arrays, optional If a list, there should be one entry for each item in ``args``. - The bin specification: + The bin specifications are as follows: - * If int, the number of bins for all arguments in ``args``. - * If array_like, the bin edges for all arguments in ``args``. - * If a list of ints, the number of bins for every argument in ``args``. 
- * If a list arrays, the bin edges for each argument in ``args`` - (required format for Dask inputs). - * A combination [int, array] or [array, int], where int - is the number of bins and array is the bin edges. + * If int; the number of bins for all arguments in ``args``. + * If str; the method used to automatically calculate the optimal bin width + for all arguments in ``args``, as defined by numpy `histogram_bin_edges`. + * If numpy array; the bin edges for all arguments in ``args``. + * If a list of ints, strs and/or arrays; the bin specification as + above for every argument in ``args``. When bin edges are specified, all but the last (righthand-most) bin include the left edge and exclude the right edge. The last bin includes both edges. - A ``TypeError`` will be raised if ``args`` contains dask arrays and - ``bins`` are not specified explicitly as a list of arrays. + A TypeError will be raised if args contains dask arrays and bins are not + specified explicitly as an array or list of arrays. This is because other + bin specifications trigger computation. + range : (float, float) or a list of (float, float), optional + If a list, there should be one entry for each item in ``args``. + The range specifications are as follows: + + * If (float, float); the lower and upper range(s) of the bins for all + arguments in ``args``. Values outside the range are ignored. The first + element of the range must be less than or equal to the second. `range` + affects the automatic bin computation as well. In this case, while bin + width is computed to be optimal based on the actual data within `range`, + the bin count will fill the entire range including portions containing + no data. + * If a list of (float, float); the ranges as above for every argument in + ``args``. + * If not provided, range is simply ``(arg.min(), arg.max())`` for each + arg. dim : tuple of strings, optional Dimensions over which which the histogram is computed. The default is to compute the histogram of the flattened array. @@ -72,11 +88,15 @@ def histogram( chunks (dask inputs) or an experimental built-in heuristic (numpy inputs). keep_coords : bool, optional If ``True``, keep all coordinates. Default: ``False`` + bin_dim_suffix : str, optional + Suffix to append to input arg names to define names of output bin + dimensions Returns ------- - hist : array - The values of the histogram. + hist : xarray.DataArray + The values of the histogram. For each bin, the midpoint of the bin edges + is given along the bin coordinates. 
""" @@ -85,12 +105,6 @@ def histogram( # TODO: allow list of weights as well N_weights = 1 if weights is not None else 0 - # some sanity checks - # TODO: replace this with a more robust function - assert len(bins) == N_args - for bin in bins: - assert isinstance(bin, np.ndarray), "all bins must be numpy arrays" - for a in args: # TODO: make this a more robust check assert a.name is not None, "all arrays must have a name" @@ -140,21 +154,15 @@ def histogram( dims_to_keep = [] axis = None - # Allow future warning for https://github.com/xgcm/xhistogram/pull/45 - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - message="Future versions of xhistogram\\.core\\.histogram will return", - category=FutureWarning, - ) - h_data = _histogram( - *args_data, - weights=weights_data, - bins=bins, - axis=axis, - density=density, - block_size=block_size - ) + h_data, bins = _histogram( + *args_data, + weights=weights_data, + bins=bins, + range=range, + axis=axis, + density=density, + block_size=block_size + ) # create output dims new_dims = [a.name + bin_dim_suffix for a in args[:N_args]] @@ -195,7 +203,7 @@ def histogram( # this feels like a hack # def _histogram_wrapped(*args, **kwargs): # alist = list(args) - # weights = [alist.pop() for n in range(N_weights)] + # weights = [alist.pop() for n in _range(N_weights)] # if N_weights == 0: # weights = None # elif N_weights == 1:
xgcm/xhistogram
c62cf9fdfae95fe52638448dddaee797c3fc283f
diff --git a/xhistogram/test/test_core.py b/xhistogram/test/test_core.py index eef22dd..e0df0ee 100644 --- a/xhistogram/test/test_core.py +++ b/xhistogram/test/test_core.py @@ -3,40 +3,61 @@ import numpy as np from itertools import combinations import dask.array as dsa -from ..core import histogram +from ..core import ( + histogram, + _ensure_correctly_formatted_bins, + _ensure_correctly_formatted_range, +) from .fixtures import empty_dask_array import pytest +bins_int = 10 +bins_str = "auto" +bins_arr = np.linspace(-4, 4, 10) +range_ = (0, 1) + + @pytest.mark.parametrize("density", [False, True]) @pytest.mark.parametrize("block_size", [None, 1, 2]) @pytest.mark.parametrize("axis", [1, None]) -def test_histogram_results_1d(block_size, density, axis): [email protected]("bins", [10, np.linspace(-4, 4, 10), "auto"]) [email protected]("range_", [None, (-4, 4)]) +def test_histogram_results_1d(block_size, density, axis, bins, range_): nrows, ncols = 5, 20 # Setting the random seed here prevents np.testing.assert_allclose # from failing beow. We should investigate this further. np.random.seed(2) data = np.random.randn(nrows, ncols) - bins = np.linspace(-4, 4, 10) - h = histogram(data, bins=bins, axis=axis, block_size=block_size, density=density) + h, bin_edges = histogram( + data, bins=bins, range=range_, axis=axis, block_size=block_size, density=density + ) - expected_shape = (nrows, len(bins) - 1) if axis == 1 else (len(bins) - 1,) + expected_shape = ( + (nrows, len(bin_edges[0]) - 1) if axis == 1 else (len(bin_edges[0]) - 1,) + ) assert h.shape == expected_shape # make sure we get the same thing as numpy.histogram if axis: + bins_np = np.histogram_bin_edges( + data, bins=bins, range=range_ + ) # Use same bins for all slices below expected = np.stack( - [np.histogram(data[i], bins=bins, density=density)[0] for i in range(nrows)] + [ + np.histogram(data[i], bins=bins_np, range=range_, density=density)[0] + for i in range(nrows) + ] ) else: - expected = np.histogram(data, bins=bins, density=density)[0] + expected = np.histogram(data, bins=bins, range=range_, density=density)[0] norm = nrows if (density and axis) else 1 np.testing.assert_allclose(h, expected / norm) if density: - widths = np.diff(bins) + widths = np.diff(bin_edges) integral = np.sum(h * widths) np.testing.assert_allclose(integral, 1.0) @@ -46,9 +67,9 @@ def test_histogram_results_1d_weighted(block_size): nrows, ncols = 5, 20 data = np.random.randn(nrows, ncols) bins = np.linspace(-4, 4, 10) - h = histogram(data, bins=bins, axis=1, block_size=block_size) + h, _ = histogram(data, bins=bins, axis=1, block_size=block_size) weights = 2 * np.ones_like(data) - h_w = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size) + h_w, _ = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size) np.testing.assert_array_equal(2 * h, h_w) @@ -58,9 +79,9 @@ def test_histogram_results_1d_weighted_broadcasting(block_size): nrows, ncols = 5, 20 data = np.random.randn(nrows, ncols) bins = np.linspace(-4, 4, 10) - h = histogram(data, bins=bins, axis=1, block_size=block_size) + h, _ = histogram(data, bins=bins, axis=1, block_size=block_size) weights = 2 * np.ones((1, ncols)) - h_w = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size) + h_w, _ = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size) np.testing.assert_array_equal(2 * h, h_w) @@ -73,7 +94,7 @@ def test_histogram_right_edge(block_size): data = np.ones((nrows, ncols)) bins = np.array([0, 0.5, 1]) # All data at 
rightmost edge - h = histogram(data, bins=bins, axis=1, block_size=block_size) + h, _ = histogram(data, bins=bins, axis=1, block_size=block_size) assert h.shape == (nrows, len(bins) - 1) # make sure we get the same thing as histogram (all data in the last bin) @@ -81,7 +102,7 @@ def test_histogram_right_edge(block_size): np.testing.assert_array_equal(hist, h.sum(axis=0)) # now try with no axis - h_na = histogram(data, bins=bins, block_size=block_size) + h_na, _ = histogram(data, bins=bins, block_size=block_size) np.testing.assert_array_equal(hist, h_na) @@ -94,7 +115,7 @@ def test_histogram_results_2d(): nbins_b = 10 bins_b = np.linspace(-4, 4, nbins_b + 1) - h = histogram(data_a, data_b, bins=[bins_a, bins_b]) + h, _ = histogram(data_a, data_b, bins=[bins_a, bins_b]) assert h.shape == (nbins_a, nbins_b) hist, _, _ = np.histogram2d(data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b]) @@ -110,7 +131,7 @@ def test_histogram_results_2d_density(): nbins_b = 10 bins_b = np.linspace(-4, 4, nbins_b + 1) - h = histogram(data_a, data_b, bins=[bins_a, bins_b], density=True) + h, _ = histogram(data_a, data_b, bins=[bins_a, bins_b], density=True) assert h.shape == (nbins_a, nbins_b) hist, _, _ = np.histogram2d( @@ -138,7 +159,9 @@ def test_histogram_results_3d_density(): nbins_c = 9 bins_c = np.linspace(-4, 4, nbins_c + 1) - h = histogram(data_a, data_b, data_c, bins=[bins_a, bins_b, bins_c], density=True) + h, _ = histogram( + data_a, data_b, data_c, bins=[bins_a, bins_b, bins_c], density=True + ) assert h.shape == (nbins_a, nbins_b, nbins_c) @@ -173,18 +196,18 @@ def test_histogram_shape(use_dask, block_size): bins = np.linspace(-4, 4, 27) # no axis - c = histogram(b, bins=bins, block_size=block_size) + c, _ = histogram(b, bins=bins, block_size=block_size) assert c.shape == (len(bins) - 1,) # same thing for axis in [(0, 1, 2, 3), (0, 1, 3, 2), (3, 2, 1, 0), (3, 2, 0, 1)]: - c = histogram(b, bins=bins, axis=axis) + c, _ = histogram(b, bins=bins, axis=axis) assert c.shape == (len(bins) - 1,) if use_dask: assert isinstance(c, dsa.Array) # scalar axis (check positive and negative) for axis in list(range(4)) + list(range(-1, -5, -1)): - c = histogram(b, bins=bins, axis=axis, block_size=block_size) + c, _ = histogram(b, bins=bins, axis=axis, block_size=block_size) shape = list(b.shape) del shape[axis] expected_shape = tuple(shape) + (len(bins) - 1,) @@ -195,10 +218,70 @@ def test_histogram_shape(use_dask, block_size): # two axes for i, j in combinations(range(4), 2): axis = (i, j) - c = histogram(b, bins=bins, axis=axis, block_size=block_size) + c, _ = histogram(b, bins=bins, axis=axis, block_size=block_size) shape = list(b.shape) partial_shape = [shape[k] for k in range(b.ndim) if k not in axis] expected_shape = tuple(partial_shape) + (len(bins) - 1,) assert c.shape == expected_shape if use_dask: assert isinstance(c, dsa.Array) + + +def test_histogram_dask(): + """ Test that fails with dask arrays and inappropriate bins""" + shape = 10, 15, 12, 20 + b = empty_dask_array(shape, chunks=(1,) + shape[1:]) + histogram(b, bins=bins_arr) # Should work when bins is all numpy arrays + with pytest.raises(TypeError): # Should fail otherwise + histogram(b, bins=bins_int) + histogram(b, bins=bins_str) + histogram(b, b, bins=[bins_arr, bins_int]) + + [email protected]( + "in_out", + [ + (bins_int, 1, [bins_int]), # ( bins_in, n_args, bins_out ) + (bins_str, 1, [bins_str]), + (bins_arr, 1, [bins_arr]), + ([bins_int], 1, [bins_int]), + (bins_int, 2, 2 * [bins_int]), + (bins_str, 2, 2 * [bins_str]), + (bins_arr, 2, 2 * 
[bins_arr]), + ([bins_int, bins_str, bins_arr], 3, [bins_int, bins_str, bins_arr]), + ([bins_arr], 2, None), + (None, 1, None), + ([bins_arr, bins_arr], 1, None), + ], +) +def test_ensure_correctly_formatted_bins(in_out): + """ Test the helper function _ensure_correctly_formatted_bins""" + bins_in, n, bins_expected = in_out + if bins_expected is not None: + bins = _ensure_correctly_formatted_bins(bins_in, n) + assert bins == bins_expected + else: + with pytest.raises((ValueError, TypeError)): + _ensure_correctly_formatted_bins(bins_in, n) + + [email protected]( + "in_out", + [ + (range_, 1, [range_]), # ( range_in, n_args, range_out ) + (range_, 2, [range_, range_]), + ([range_, range_], 2, [range_, range_]), + ([(range_[0],)], 1, None), + ([range_], 2, None), + ([range_, range_], 1, None), + ], +) +def test_ensure_correctly_formatted_range(in_out): + """ Test the helper function _ensure_correctly_formatted_range""" + range_in, n, range_expected = in_out + if range_expected is not None: + range_ = _ensure_correctly_formatted_range(range_in, n) + assert range_ == range_expected + else: + with pytest.raises(ValueError): + _ensure_correctly_formatted_range(range_in, n)
Bins argument of type int doesn't work Currently the documentation suggests that one can pass an int `xhist(bins=...)` similarly to that of numpy, but it doesn't work and looks like it isn't tested for in the pytest suite. ```python import numpy as np import xarray as xr # Demo data A = np.arange(100) da = xr.DataArray(A, dims='time').rename('test') ``` Case 1: `xhist(da, bins=10)` ```python-traceback --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-48-1c6580238521> in <module> ----> 1 xhist(da, bins=10) ~/miniconda3/envs/analysis/lib/python3.6/site-packages/xhistogram/xarray.py in histogram(bins, dim, weights, density, block_size, bin_dim_suffix, bin_edge_suffix, *args) 72 # some sanity checks 73 # TODO: replace this with a more robust function ---> 74 assert len(bins)==N_args 75 for bin in bins: 76 assert isinstance(bin, np.ndarray), 'all bins must be numpy arrays' TypeError: object of type 'int' has no len() ``` Case 2: `xhist(da, bins=[10])` ```python-traceback --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-49-85ba491a8442> in <module> ----> 1 xhist(da, bins=[10]) ~/miniconda3/envs/analysis/lib/python3.6/site-packages/xhistogram/xarray.py in histogram(bins, dim, weights, density, block_size, bin_dim_suffix, bin_edge_suffix, *args) 74 assert len(bins)==N_args 75 for bin in bins: ---> 76 assert isinstance(bin, np.ndarray), 'all bins must be numpy arrays' 77 78 for a in args: AssertionError: all bins must be numpy arrays ``` Case 3: `xhist(da, bins=[np.array(10)])` ```python-traceback --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-50-394f2e7f4bcc> in <module> ----> 1 xhist(da, bins=[np.array(10)]) ~/miniconda3/envs/analysis/lib/python3.6/site-packages/xhistogram/xarray.py in histogram(bins, dim, weights, density, block_size, bin_dim_suffix, bin_edge_suffix, *args) 127 128 h_data = _histogram(*args_data, weights=weights_data, bins=bins, axis=axis, --> 129 block_size=block_size) 130 131 # create output dims ~/miniconda3/envs/analysis/lib/python3.6/site-packages/xhistogram/core.py in histogram(bins, axis, weights, density, block_size, *args) 245 h = _histogram_2d_vectorized(*all_args_reshaped, bins=bins, 246 weights=weights_reshaped, --> 247 density=density, block_size=block_size) 248 249 if h.shape[0] == 1: ~/miniconda3/envs/analysis/lib/python3.6/site-packages/xhistogram/core.py in _histogram_2d_vectorized(bins, weights, density, right, block_size, *args) 95 for a, b in zip(args, bins): 96 assert a.ndim == 2 ---> 97 assert b.ndim == 1 98 assert a.shape == a0.shape 99 if weights is not None: AssertionError: ```
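The patch above resolves this by converting int and str bin specifications to explicit edges with `np.histogram_bin_edges` before the core histogram runs (dask inputs still require explicit edge arrays, since computing edges would trigger evaluation). A brief sketch of that conversion:

```python
# Sketch: int, str and array bin specifications all reduce to an array of
# explicit edges via np.histogram_bin_edges.
import numpy as np

data = np.random.randn(100)

edges_from_int = np.histogram_bin_edges(data, bins=10)      # 11 edges -> 10 bins
edges_from_str = np.histogram_bin_edges(data, bins="auto")  # data-dependent edges
edges_explicit = np.linspace(-4, 4, 10)                     # passed through as-is

for edges in (edges_from_int, edges_from_str, edges_explicit):
    counts, _ = np.histogram(data, bins=edges)
    print(len(edges) - 1, counts.sum())
```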
0.0
c62cf9fdfae95fe52638448dddaee797c3fc283f
[ "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[None-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-None-None-False]", 
"xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[range_1-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[auto]", "xhistogram/test/test_core.py::test_histogram_right_edge[None]", "xhistogram/test/test_core.py::test_histogram_right_edge[1]", "xhistogram/test/test_core.py::test_histogram_right_edge[2]", "xhistogram/test/test_core.py::test_histogram_results_2d", "xhistogram/test/test_core.py::test_histogram_results_2d_density", "xhistogram/test/test_core.py::test_histogram_shape[False-None]", "xhistogram/test/test_core.py::test_histogram_shape[False-5]", "xhistogram/test/test_core.py::test_histogram_shape[False-auto]", 
"xhistogram/test/test_core.py::test_histogram_shape[True-None]", "xhistogram/test/test_core.py::test_histogram_shape[True-5]", "xhistogram/test/test_core.py::test_histogram_shape[True-auto]", "xhistogram/test/test_core.py::test_histogram_dask", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out0]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out1]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out2]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out3]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out4]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out5]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out6]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out7]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out8]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out9]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out10]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out0]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out1]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out2]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out3]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out4]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out5]" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-04-12 04:35:02+00:00
mit
6,282
xgcm__xhistogram-52
diff --git a/doc/contributing.rst b/doc/contributing.rst index 5dac01f..aa787a4 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -140,8 +140,12 @@ Preparing Pull Requests Release History --------------- + v0.2.1 (not yet released) ~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Fixed bug with density calculation when NaNs are present :issue:`51`. + By `Dougie Squire <https://github.com/dougiesquire>`_. - Implemented various options for users for providing bins to xhistogram that mimic the numpy histogram API. This included adding a range argument to the xhistogram API :issue:`13`. diff --git a/xhistogram/core.py b/xhistogram/core.py index c13dda9..48f31c9 100644 --- a/xhistogram/core.py +++ b/xhistogram/core.py @@ -436,7 +436,13 @@ def histogram( # Slower, but N-dimensional logic bin_areas = np.prod(np.ix_(*bin_widths)) - h = bin_counts / bin_areas / bin_counts.sum() + # Sum over the last n_inputs axes, which correspond to the bins. All other axes + # are "bystander" axes. Sums must be done independently for each bystander axes + # so that nans are dealt with correctly (#51) + bin_axes = tuple(_range(-n_inputs, 0)) + bin_count_sums = bin_counts.sum(axis=bin_axes) + bin_count_sums_shape = bin_count_sums.shape + len(bin_axes) * (1,) + h = bin_counts / bin_areas / reshape(bin_count_sums, bin_count_sums_shape) else: h = bin_counts diff --git a/xhistogram/xarray.py b/xhistogram/xarray.py index a587abc..922fc26 100644 --- a/xhistogram/xarray.py +++ b/xhistogram/xarray.py @@ -197,11 +197,6 @@ def histogram( da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords, name=output_name) - if density: - # correct for overcounting the bins which weren't histogrammed along - n_bins_bystander_dims = da_out.isel(**{bd: 0 for bd in new_dims}).size - da_out = da_out * n_bins_bystander_dims - return da_out # we need weights to be passed through apply_func's alignment algorithm,
xgcm/xhistogram
8a6765ac25c749961a32b209e54d47ed483651fc
diff --git a/xhistogram/test/test_core.py b/xhistogram/test/test_core.py index f6ebcc3..3a692c6 100644 --- a/xhistogram/test/test_core.py +++ b/xhistogram/test/test_core.py @@ -24,12 +24,17 @@ range_ = (0, 1) @pytest.mark.parametrize("axis", [1, None]) @pytest.mark.parametrize("bins", [10, np.linspace(-4, 4, 10), "auto"]) @pytest.mark.parametrize("range_", [None, (-4, 4)]) -def test_histogram_results_1d(block_size, density, axis, bins, range_): [email protected]("add_nans", [False, True]) +def test_histogram_results_1d(block_size, density, axis, bins, range_, add_nans): nrows, ncols = 5, 20 # Setting the random seed here prevents np.testing.assert_allclose # from failing beow. We should investigate this further. np.random.seed(2) data = np.random.randn(nrows, ncols) + if add_nans: + N_nans = 20 + data.ravel()[np.random.choice(data.size, N_nans, replace=False)] = np.nan + bins = np.linspace(-4, 4, 10) h, bin_edges = histogram( data, bins=bins, range=range_, axis=axis, block_size=block_size, density=density @@ -53,12 +58,11 @@ def test_histogram_results_1d(block_size, density, axis, bins, range_): ) else: expected = np.histogram(data, bins=bins, range=range_, density=density)[0] - norm = nrows if (density and axis) else 1 - np.testing.assert_allclose(h, expected / norm) + np.testing.assert_allclose(h, expected) if density: - widths = np.diff(bin_edges) - integral = np.sum(h * widths) + widths = np.diff(bins) + integral = np.sum(h * widths, axis) np.testing.assert_allclose(integral, 1.0) @@ -150,10 +154,15 @@ def test_histogram_results_2d_broadcasting(dask): np.testing.assert_array_equal(hist, h) -def test_histogram_results_2d_density(): [email protected]("add_nans", [False, True]) +def test_histogram_results_2d_density(add_nans): nrows, ncols = 5, 20 data_a = np.random.randn(nrows, ncols) data_b = np.random.randn(nrows, ncols) + if add_nans: + N_nans = 20 + data_a.ravel()[np.random.choice(data_a.size, N_nans, replace=False)] = np.nan + data_b.ravel()[np.random.choice(data_b.size, N_nans, replace=False)] = np.nan nbins_a = 9 bins_a = np.linspace(-4, 4, nbins_a + 1) nbins_b = 10 @@ -175,11 +184,17 @@ def test_histogram_results_2d_density(): np.testing.assert_allclose(integral, 1.0) -def test_histogram_results_3d_density(): [email protected]("add_nans", [False, True]) +def test_histogram_results_3d_density(add_nans): nrows, ncols = 5, 20 data_a = np.random.randn(nrows, ncols) data_b = np.random.randn(nrows, ncols) data_c = np.random.randn(nrows, ncols) + if add_nans: + N_nans = 20 + data_a.ravel()[np.random.choice(data_a.size, N_nans, replace=False)] = np.nan + data_b.ravel()[np.random.choice(data_b.size, N_nans, replace=False)] = np.nan + data_c.ravel()[np.random.choice(data_c.size, N_nans, replace=False)] = np.nan nbins_a = 9 bins_a = np.linspace(-4, 4, nbins_a + 1) nbins_b = 10
Bug with density calculation when NaNs are present There is a bug in the way histograms are normalised to densities that manifests when there are NaNs in the input data: ```python import numpy as np import xarray as xr import matplotlib.pyplot as plt from xhistogram.xarray import histogram data = np.random.normal(size=(10,2)) # Add some nans N_nans = 6 data.ravel()[np.random.choice(data.size, N_nans, replace=False)] = np.nan bins = np.linspace(-5,5,5) bin_centers = 0.5 * (bins[:-1] + bins[1:]) # np.histogram ----- h, _ = np.histogram(data[:,0], bins, density=True) plt.plot(bin_centers, h, label='numpy histogram') # xhistogram ----- da = xr.DataArray( data, dims=['s', 'x'], coords=[range(data.shape[0]), range(data.shape[1])]).rename('test') h2 = histogram(da, bins=[bins], dim=['s'], density=True) plt.plot(bin_centers, h2[0,:], linestyle='--', label='xhistogram') plt.legend() plt.xlabel('bins') plt.ylabel('pdf') ``` <img width="399" alt="Screen Shot 2021-05-05 at 8 31 17 pm" src="https://user-images.githubusercontent.com/42455466/117128520-df907d00-ade0-11eb-9452-641c2b779633.png"> This bug comes about when there are dimensions that are not being histogram'd ("bystander" dimensions). Currently we sum over all axes to estimate the area/volume of our histogram and then account for bystander dimensions [as a secondary step](https://github.com/xgcm/xhistogram/blob/master/xhistogram/xarray.py#L180). However, this can produce incorrect results when NaNs are present because there may be a different number of NaNs along each bystander dimension.
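The patch above fixes this by summing only over the bin axes, so each bystander slice is normalised by its own count of valid samples. A minimal NumPy sketch of that idea, with made-up counts and hypothetical variable names (not the library's internals):

```python
import numpy as np

# Hypothetical per-slice bin counts: shape (n_bystander, n_bins). The two
# bystander slices could have held different numbers of non-NaN samples.
bin_counts = np.array([[2.0, 3.0, 1.0],
                       [4.0, 0.0, 2.0]])
bin_widths = np.array([0.5, 0.5, 0.5])

# Incorrect: a single global sum mixes the bystander slices together.
wrong = bin_counts / bin_widths / bin_counts.sum()

# Correct: sum only over the bin axis and broadcast back, so every
# bystander slice integrates to 1 on its own.
per_slice_total = bin_counts.sum(axis=-1, keepdims=True)
density = bin_counts / bin_widths / per_slice_total

# Each row of `density` now integrates to 1 over the bins.
assert np.allclose((density * bin_widths).sum(axis=-1), 1.0)
```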
0.0
8a6765ac25c749961a32b209e54d47ed483651fc
[ "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-2-True]" ]
[ "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-2-False]", 
"xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-1-False]", 
"xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[auto]", "xhistogram/test/test_core.py::test_histogram_right_edge[None]", "xhistogram/test/test_core.py::test_histogram_right_edge[1]", "xhistogram/test/test_core.py::test_histogram_right_edge[2]", "xhistogram/test/test_core.py::test_histogram_results_2d", 
"xhistogram/test/test_core.py::test_histogram_results_2d_broadcasting[False]", "xhistogram/test/test_core.py::test_histogram_results_2d_broadcasting[True]", "xhistogram/test/test_core.py::test_histogram_results_2d_density[False]", "xhistogram/test/test_core.py::test_histogram_results_2d_density[True]", "xhistogram/test/test_core.py::test_histogram_shape[False-None]", "xhistogram/test/test_core.py::test_histogram_shape[False-5]", "xhistogram/test/test_core.py::test_histogram_shape[False-auto]", "xhistogram/test/test_core.py::test_histogram_shape[True-None]", "xhistogram/test/test_core.py::test_histogram_shape[True-5]", "xhistogram/test/test_core.py::test_histogram_shape[True-auto]", "xhistogram/test/test_core.py::test_histogram_dask", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out0]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out1]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out2]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out3]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out4]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out5]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out6]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out7]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out8]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out9]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out10]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out0]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out1]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out2]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out3]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out4]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out5]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-05-05 10:39:31+00:00
mit
6,283
xgcm__xhistogram-77
diff --git a/doc/contributing.rst b/doc/contributing.rst index 9ccbb9e..fd4ba77 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -141,17 +141,17 @@ Release History --------------- v0.3.2 (not released) -~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~ -- Fix bug producing TypeError when weights is provided with +- Fix bug producing TypeError when `weights` is provided with `keep_coords=True` :issue:`78`. By `Dougie Squire <https://github.com/dougiesquire>`_. -- Raise TypeError when weights is a dask array and bin edges are +- Raise TypeError when `weights` is a dask array and bin edges are not explicitly provided :issue:`12`. By `Dougie Squire <https://github.com/dougiesquire>`_. v0.3.1 -~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~ - Add DOI badge and CITATION.cff. By `Julius Busecke <https://github.com/jbusecke>`_. diff --git a/setup.py b/setup.py index 4481b45..532192a 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ CLASSIFIERS = [ "Topic :: Scientific/Engineering", ] -INSTALL_REQUIRES = ["xarray>=0.12.0", "dask[array]", "numpy>=1.17"] +INSTALL_REQUIRES = ["xarray>=0.12.0", "dask[array]>=2.3.0", "numpy>=1.17"] PYTHON_REQUIRES = ">=3.7" DESCRIPTION = "Fast, flexible, label-aware histograms for numpy and xarray" diff --git a/xhistogram/core.py b/xhistogram/core.py index 04cef56..181325f 100644 --- a/xhistogram/core.py +++ b/xhistogram/core.py @@ -278,9 +278,9 @@ def histogram( When bin edges are specified, all but the last (righthand-most) bin include the left edge and exclude the right edge. The last bin includes both edges. - A TypeError will be raised if args contains dask arrays and bins are not - specified explicitly as an array or list of arrays. This is because other - bin specifications trigger computation. + A TypeError will be raised if args or weights contains dask arrays and bins + are not specified explicitly as an array or list of arrays. This is because + other bin specifications trigger computation. range : (float, float) or a list of (float, float), optional If a list, there should be one entry for each item in ``args``. The range specifications are as follows: @@ -336,7 +336,7 @@ def histogram( ndim = a0.ndim n_inputs = len(args) - is_dask_array = any([dask.is_dask_collection(a) for a in args]) + is_dask_array = any([dask.is_dask_collection(a) for a in list(args) + [weights]]) if axis is not None: axis = np.atleast_1d(axis) diff --git a/xhistogram/xarray.py b/xhistogram/xarray.py index f1b7976..975aa9b 100644 --- a/xhistogram/xarray.py +++ b/xhistogram/xarray.py @@ -43,9 +43,9 @@ def histogram( When bin edges are specified, all but the last (righthand-most) bin include the left edge and exclude the right edge. The last bin includes both edges. - A TypeError will be raised if args contains dask arrays and bins are not - specified explicitly as an array or list of arrays. This is because other - bin specifications trigger computation. + A TypeError will be raised if args or weights contains dask arrays and bins + are not specified explicitly as an array or list of arrays. This is because + other bin specifications trigger computation. range : (float, float) or a list of (float, float), optional If a list, there should be one entry for each item in ``args``. The range specifications are as follows:
xgcm/xhistogram
44a78384e9c6c7d14caed51d013311ec5753b3f1
diff --git a/xhistogram/test/test_core.py b/xhistogram/test/test_core.py index db0bf99..0868dff 100644 --- a/xhistogram/test/test_core.py +++ b/xhistogram/test/test_core.py @@ -9,10 +9,12 @@ from ..core import ( _ensure_correctly_formatted_bins, _ensure_correctly_formatted_range, ) -from .fixtures import empty_dask_array +from .fixtures import empty_dask_array, example_dataarray import pytest +import contextlib + bins_int = 10 bins_str = "auto" @@ -271,15 +273,44 @@ def test_histogram_shape(use_dask, block_size): assert isinstance(c, dsa.Array) -def test_histogram_dask(): - """Test that fails with dask arrays and inappropriate bins""" [email protected]("arg_type", ["dask", "numpy"]) [email protected]("weights_type", ["dask", "numpy", None]) [email protected]("bins_type", ["int", "str", "numpy"]) +def test_histogram_dask(arg_type, weights_type, bins_type): + """Test that a TypeError is raised with dask arrays and inappropriate bins""" shape = 10, 15, 12, 20 - b = empty_dask_array(shape, chunks=(1,) + shape[1:]) - histogram(b, bins=bins_arr) # Should work when bins is all numpy arrays - with pytest.raises(TypeError): # Should fail otherwise - histogram(b, bins=bins_int) - histogram(b, bins=bins_str) - histogram(b, b, bins=[bins_arr, bins_int]) + + if arg_type == "dask": + arg = empty_dask_array(shape) + else: + arg = example_dataarray(shape) + + if weights_type == "dask": + weights = empty_dask_array(shape) + elif weights_type == "numpy": + weights = example_dataarray(shape) + else: + weights = None + + if bins_type == "int": + bins = bins_int + elif bins_type == "str": + bins = bins_str + else: + bins = bins_arr + + # TypeError should be returned when + # 1. args or weights is a dask array and bins is not a numpy array, or + # 2. bins is a string and weights is a numpy array + cond_1 = ((arg_type == "dask") | (weights_type == "dask")) & (bins_type != "numpy") + cond_2 = (weights_type == "numpy") & (bins_type == "str") + should_TypeError = cond_1 | cond_2 + + with contextlib.ExitStack() as stack: + if should_TypeError: + stack.enter_context(pytest.raises(TypeError)) + histogram(arg, bins=bins, weights=weights) + histogram(arg, arg, bins=[bins, bins], weights=weights) @pytest.mark.parametrize(
numpy / dask version compatibility bug There is a bug in xhistogram with numpy version >= 1.17 and dask version < 2.3: ```python import xarray as xr import numpy as np from xhistogram.xarray import histogram nt, nx = 100, 30 da = xr.DataArray(np.random.randn(nt, nx), dims=['time', 'x'], name='foo').chunk({'time': 1}) bins = np.linspace(-4, 4, 20) h = histogram(da, bins=[bins], dim=['x']) ``` This should be lazy. However, as reported by @stb2145 in https://github.com/pangeo-data/pangeo/issues/690, certain numpy / dask combinations produce the warning ``` /srv/conda/envs/notebook/lib/python3.7/site-packages/dask/array/core.py:1263: FutureWarning: The `numpy.moveaxis` function is not implemented by Dask array. You may want to use the da.map_blocks function or something similar to silence this warning. Your code may stop working in a future release. FutureWarning, ``` and evaluates eagerly. In https://github.com/pangeo-data/pangeo/issues/690 we found a workaround involving setting the environment variable `NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0`. However, in the meantime, the root issue was fixed in dask (https://github.com/dask/dask/issues/2559). We can avoid this bug by requiring dask >= 2.3.0 as a dependency for xhistogram. I guess that's the easiest way to go.
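The patch above implements exactly that pin (``dask[array]>=2.3.0`` in ``setup.py``). A rough way to check whether an environment is affected is to verify that the result is still backed by a dask array; this is a sketch for illustration, not part of the library:

```python
import dask
import numpy as np
import xarray as xr
from xhistogram.xarray import histogram

nt, nx = 100, 30
da = xr.DataArray(np.random.randn(nt, nx),
                  dims=['time', 'x'], name='foo').chunk({'time': 1})
bins = np.linspace(-4, 4, 20)

h = histogram(da, bins=[bins], dim=['x'])

# With dask >= 2.3.0 this should print True (the result stays lazy); with
# the buggy numpy >= 1.17 / dask < 2.3 combination the FutureWarning above
# is emitted and the histogram is evaluated eagerly instead.
print(dask.is_dask_collection(h.data))
```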
0.0
44a78384e9c6c7d14caed51d013311ec5753b3f1
[ "xhistogram/test/test_core.py::test_histogram_dask[int-dask-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[str-dask-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[numpy-dask-numpy]" ]
[ "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-None-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-1-True]", 
"xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[False-range_1-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-None-False]", 
"xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-None-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-2-False]", 
"xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-10-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-bins1-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-1-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-None-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-None-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-1-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-1-True]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-2-False]", "xhistogram/test/test_core.py::test_histogram_results_1d[True-range_1-auto-None-2-True]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[None]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[1]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[2]", "xhistogram/test/test_core.py::test_histogram_results_1d_weighted_broadcasting[auto]", "xhistogram/test/test_core.py::test_histogram_right_edge[None]", "xhistogram/test/test_core.py::test_histogram_right_edge[1]", "xhistogram/test/test_core.py::test_histogram_right_edge[2]", "xhistogram/test/test_core.py::test_histogram_results_2d", "xhistogram/test/test_core.py::test_histogram_results_2d_broadcasting[False]", "xhistogram/test/test_core.py::test_histogram_results_2d_broadcasting[True]", "xhistogram/test/test_core.py::test_histogram_results_2d_density[False]", "xhistogram/test/test_core.py::test_histogram_results_2d_density[True]", "xhistogram/test/test_core.py::test_histogram_shape[False-None]", "xhistogram/test/test_core.py::test_histogram_shape[False-5]", 
"xhistogram/test/test_core.py::test_histogram_shape[False-auto]", "xhistogram/test/test_core.py::test_histogram_shape[True-None]", "xhistogram/test/test_core.py::test_histogram_shape[True-5]", "xhistogram/test/test_core.py::test_histogram_shape[True-auto]", "xhistogram/test/test_core.py::test_histogram_dask[int-dask-dask]", "xhistogram/test/test_core.py::test_histogram_dask[int-numpy-dask]", "xhistogram/test/test_core.py::test_histogram_dask[int-numpy-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[int-None-dask]", "xhistogram/test/test_core.py::test_histogram_dask[int-None-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[str-dask-dask]", "xhistogram/test/test_core.py::test_histogram_dask[str-numpy-dask]", "xhistogram/test/test_core.py::test_histogram_dask[str-numpy-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[str-None-dask]", "xhistogram/test/test_core.py::test_histogram_dask[str-None-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[numpy-dask-dask]", "xhistogram/test/test_core.py::test_histogram_dask[numpy-numpy-dask]", "xhistogram/test/test_core.py::test_histogram_dask[numpy-numpy-numpy]", "xhistogram/test/test_core.py::test_histogram_dask[numpy-None-dask]", "xhistogram/test/test_core.py::test_histogram_dask[numpy-None-numpy]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out0]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out1]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out2]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out3]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out4]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out5]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out6]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out7]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out8]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out9]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_bins[in_out10]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out0]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out1]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out2]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out3]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out4]", "xhistogram/test/test_core.py::test_ensure_correctly_formatted_range[in_out5]", "xhistogram/test/test_core.py::test_histogram_results_datetime[False-None]", "xhistogram/test/test_core.py::test_histogram_results_datetime[False-1]", "xhistogram/test/test_core.py::test_histogram_results_datetime[False-2]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-21 06:59:51+00:00
mit
6,284
xgcm__xhistogram-8
diff --git a/xhistogram/xarray.py b/xhistogram/xarray.py index 230a44c..c4d41d8 100644 --- a/xhistogram/xarray.py +++ b/xhistogram/xarray.py @@ -4,6 +4,7 @@ Xarray API for xhistogram. import xarray as xr import numpy as np +from collections import OrderedDict from .core import histogram as _histogram @@ -95,11 +96,11 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, # roll our own broadcasting # now manually expand the arrays - all_dims = set([d for a in args for d in a.dims]) - all_dims_ordered = list(all_dims) + all_dims = [d for a in args for d in a.dims] + all_dims_ordered = list(OrderedDict.fromkeys(all_dims)) args_expanded = [] for a in args: - expand_keys = all_dims - set(a.dims) + expand_keys = [d for d in all_dims_ordered if d not in a.dims] a_expanded = a.expand_dims({k: 1 for k in expand_keys}) args_expanded.append(a_expanded) @@ -118,7 +119,7 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, weights_data = None if dim is not None: - dims_to_keep = [d for d in a_dims if d not in dim] + dims_to_keep = [d for d in all_dims_ordered if d not in dim] axis = [args_transposed[0].get_axis_num(d) for d in dim] else: dims_to_keep = [] @@ -129,11 +130,19 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, # create output dims new_dims = [a.name + bin_dim_suffix for a in args[:N_args]] - bin_centers = [0.5*(bin[:-1] + bin[1:]) for bin in bins] - coords = {name: ((name,), bin_center, a.attrs) - for name, bin_center, a in zip(new_dims, bin_centers, args)} output_dims = dims_to_keep + new_dims + # create new coords + bin_centers = [0.5*(bin[:-1] + bin[1:]) for bin in bins] + new_coords = {name: ((name,), bin_center, a.attrs) + for name, bin_center, a in zip(new_dims, bin_centers, args)} + + old_coords = {name: a0[name] + for name in dims_to_keep if name in a0.coords} + all_coords = {} + all_coords.update(old_coords) + all_coords.update(new_coords) + # CF conventions tell us how to specify cell boundaries # http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#cell-boundaries # However, they require introduction of an additional dimension. @@ -143,7 +152,8 @@ def histogram(*args, bins=None, dim=None, weights=None, density=False, for name, bin_edge, a in zip(edge_dims, bins, args)} output_name = '_'.join(['histogram'] + [a.name for a in args[:N_args]]) - da_out = xr.DataArray(h_data, dims=output_dims, coords=coords, + + da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords, name=output_name) return da_out
xgcm/xhistogram
a636393e1d10d2f8609967869c6ce028d3d9ba41
diff --git a/xhistogram/test/test_xarray.py b/xhistogram/test/test_xarray.py index 9b46dd4..0018f0d 100644 --- a/xhistogram/test/test_xarray.py +++ b/xhistogram/test/test_xarray.py @@ -94,3 +94,31 @@ def test_weights(ones, ndims): for d in combinations(dims, nc+1): h = histogram(ones, weights=weights, bins=[bins], dim=d) _check_result(h, d) + + +# test for issue #5 +def test_dims_and_coords(): + time_axis = np.arange(4) + depth_axis = np.arange(10) + X_axis = np.arange(30) + Y_axis = np.arange(30) + + dat1 = np.random.randint(low=0, high=100, + size=(len(time_axis), len(depth_axis), + len(X_axis), len(Y_axis))) + array1 = xr.DataArray(dat1, coords=[time_axis,depth_axis,X_axis,Y_axis], + dims=['time', 'depth', 'X', 'Y'], name='one') + + dat2 = np.random.randint(low=0, high=50, + size=(len(time_axis), len(depth_axis), + len(X_axis), len(Y_axis))) + array2 = xr.DataArray(dat2, coords=[time_axis,depth_axis,X_axis,Y_axis], + dims=['time','depth','X','Y'], name='two') + + bins1 = np.linspace(0, 100, 50) + bins2 = np.linspace(0,50,25) + + result = histogram(array1,array2,dim = ['X','Y'] , bins=[bins1,bins2]) + assert result.dims == ('time', 'depth', 'one_bin', 'two_bin') + assert result.time.identical(array1.time) + assert result.depth.identical(array2.depth)
axes not used for making hist got shifted Say we have two xarray.DataArrays with dims (t:4, z:10, x:100, y:100). If we make a histogram based on dimensions x and y, the result we get is (t:10, z:4, x_bin, y_bin). The t and z get exchanged. ```python import numpy as np import xarray as xr from xhistogram.xarray import histogram # create data for testing time_axis = range(4) depth_axis = range(10) X_axis = range(30) Y_axis = range(30) dat1 = np.random.randint(low = 0, high = 100,size=(len(time_axis),len(depth_axis),len(X_axis),len(Y_axis))) array1 = xr.DataArray(dat1, coords = [time_axis,depth_axis,X_axis,Y_axis], dims = ['time','depth','X','Y']) dat2 = np.random.randint(low = 0, high = 50,size=(len(time_axis),len(depth_axis),len(X_axis),len(Y_axis))) array2 = xr.DataArray(dat2, coords = [time_axis,depth_axis,X_axis,Y_axis], dims = ['time','depth','X','Y']) # create bins and rename arrays bins1 = np.linspace(0, 100, 50) bins2 = np.linspace(0,50,25) array1 = array1.rename('one') array2 = array2.rename('two') result= histogram(array1,array2,dim = ['X','Y'] , bins=[bins1,bins2]) ``` The dimensions of the result are (time: 10, depth: 4, one_bin: 49, two_bin: 24) instead of (time: 4, depth: 10, one_bin: 49, two_bin: 24). Is this a bug in the code, or am I just misusing the function?
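The patch above traces this to building the combined dimension list with `set`, which discards insertion order, and switches to `OrderedDict.fromkeys` for order-preserving de-duplication. A small sketch of the difference, using the dims from the example:

```python
from collections import OrderedDict

dims_a = ('time', 'depth', 'X', 'Y')
dims_b = ('time', 'depth', 'X', 'Y')
all_dims = [d for dims in (dims_a, dims_b) for d in dims]

# set() throws away insertion order, so the bystander dims can come back
# in an arbitrary order and end up attached to the wrong axis lengths.
unordered = list(set(all_dims))

# OrderedDict.fromkeys de-duplicates while keeping first-seen order,
# which is what the fix relies on.
ordered = list(OrderedDict.fromkeys(all_dims))
print(ordered)  # ['time', 'depth', 'X', 'Y']
```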
0.0
a636393e1d10d2f8609967869c6ce028d3d9ba41
[ "xhistogram/test/test_xarray.py::test_dims_and_coords" ]
[ "xhistogram/test/test_xarray.py::test_histogram_ones[1D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[2D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[2D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones[3D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[3D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones[3D-3]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-1]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-2]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-3]", "xhistogram/test/test_xarray.py::test_histogram_ones[4D-4]", "xhistogram/test/test_xarray.py::test_weights[1D-1]", "xhistogram/test/test_xarray.py::test_weights[2D-1]", "xhistogram/test/test_xarray.py::test_weights[2D-2]", "xhistogram/test/test_xarray.py::test_weights[3D-1]", "xhistogram/test/test_xarray.py::test_weights[3D-2]", "xhistogram/test/test_xarray.py::test_weights[3D-3]", "xhistogram/test/test_xarray.py::test_weights[4D-1]", "xhistogram/test/test_xarray.py::test_weights[4D-2]", "xhistogram/test/test_xarray.py::test_weights[4D-3]", "xhistogram/test/test_xarray.py::test_weights[4D-4]" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-08-02 14:17:03+00:00
mit
6,285
xlab-si__xopera-opera-116
diff --git a/docs/index.rst b/docs/index.rst index db233d6..f7328c8 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,6 +12,8 @@ The following documentation explains the usage of xOpera TOSCA orchestrator. Wan installation examples documentation + saas + .. Indices and tables ================== diff --git a/docs/saas.rst b/docs/saas.rst new file mode 100644 index 0000000..1080af4 --- /dev/null +++ b/docs/saas.rst @@ -0,0 +1,74 @@ +.. _SaaS: + +*********** +xOpera SaaS +*********** + +The Software as a Service edition of xOpera is available at https://xopera-radon.xlab.si/ui/_. + +It is a multi-user multi-platform multi-deployment multifunctional service offering all capabilities of the +console-based ``opera``, providing all of its functionalities. + +Please read the warnings below, as you accept some inherent risks when using xOpera-SaaS + +Using the browser version is straightforward. + +Using the xOpera SaaS API through ``curl``:: + + csar_base64="$(base64 --wrap 0 test.csar)" + api="https://xopera-radon.xlab.si/api" + secret_base64="$(echo 'hello!' | base64 -)" + + your_username=YOUR_USERNAME + your_password=YOUR_PASSWORD + + # login process (would be automated by browser) + alias cookiecurl="curl -sSL --cookie-jar cookiejar.txt --cookie cookiejar.txt" + response="$(cookiecurl $api/credential)" + redirect_url="$(echo $response | xmllint --html --xpath "string(//form[@id='kc-form-login']/@action)" - 2>/dev/null)" + cookiecurl "$redirect_url" -d "username=$your_username" -d "password=$your_password" -d credentialId="" + + # normal usage + cookiecurl "$api/credential" + cookiecurl "$api/credential" -XPOST -d "{\"name\": \"credential1\", \"path\": \"/tmp/credential.txt\", \"contents\": \"$secret_base64\"}" + cookiecurl "$api/credential" + cookiecurl "$api/credential/1" + cookiecurl "$api/workspace" + cookiecurl "$api/workspace" -XPOST -d '{"name": "workspace1"}' + cookiecurl "$api/workspace/1/credential/1" -XPUT + cookiecurl "$api/workspace/1/credential" + cookiecurl "$api/credential/1" + cookiecurl "$api/workspace/1" + cookiecurl "$api/workspace/1/project" -XPOST -d "{\"name\": \"myproject\", \"csar\": \"$csar_base64\"}" + cookiecurl "$api/workspace/1/project" + cookiecurl "$api/workspace/1" + cookiecurl "$api/workspace/1/project/1/creationStatus" + cookiecurl "$api/workspace/1/project/1/debugPackage" + + # interaction with the project (identical to xopera-api), instructions copied from there + project_url="$api/workspace/1/project/1" + cookiecurl "$project_url/status" + cookiecurl "$project_url/validate" -XPOST -H "Content-Type: application/json" -d @inputs-request.json + cookiecurl "$project_url/deploy" -XPOST -H "Content-Type: application/json" -d @inputs-request.json + cookiecurl "$project_url/status" | jq + cookiecurl "$project_url/outputs" + cookiecurl "$project_url/undeploy" -XPOST + +For further interaction with each project, see +`the xopera-api specification <https://github.com/xlab-si/xopera-api/blob/master/openapi-spec.yml>`_ + + +==================================================== +Warnings about your credentials and general security +==================================================== + +Your credentials - not for xOpera-SaaS, but those you add for services you access in CSARs etc - are stored in +plaintext on the server xOpera-SaaS is running on. +All assigned workspaces have access to them, as they have control of the filesystem, therefore all users with access +to the workspace also have access to them. +You need to use caution with the credentials you submit. 
+ +If you request xOpera-SaaS server administrators to help you or access your project, they will also be in a position +to access the credentials. +Whenever possible, use temporary credentials with limited access to the smallest required set of capabilities +to improve you security. diff --git a/examples/attribute_mapping/service.yaml b/examples/attribute_mapping/service.yaml index cfd8b1d..61b7370 100644 --- a/examples/attribute_mapping/service.yaml +++ b/examples/attribute_mapping/service.yaml @@ -20,7 +20,7 @@ node_types: operations: create: inputs: - id: { default: { get_property: [ SELF, enrolment_number ] } } + id: { default: { get_property: [ SELF, enrolment_number ] }, type: integer } outputs: student_id: [ SELF, student_id ] implementation: playbooks/create-student.yaml @@ -51,8 +51,10 @@ relationship_types: inputs: student_id: default: { get_attribute: [ TARGET, student_id ] } + type: string student_ids: default: { get_attribute: [ SOURCE, student_ids ] } + type: list outputs: new_list: [ SOURCE, student_ids ] implementation: playbooks/teacher-teaches-student--preconfigure-source.yaml diff --git a/examples/nginx_openstack/library/nginx/server/types.yaml b/examples/nginx_openstack/library/nginx/server/types.yaml index 845e467..45a73e6 100644 --- a/examples/nginx_openstack/library/nginx/server/types.yaml +++ b/examples/nginx_openstack/library/nginx/server/types.yaml @@ -22,7 +22,6 @@ node_types: implementation: primary: playbooks/uninstall.yaml - relationship_types: my.relationships.NginxSiteHosting: derived_from: tosca.relationships.HostedOn @@ -33,5 +32,6 @@ relationship_types: inputs: marker: default: { get_attribute: [ TARGET, host, id ] } + type: string implementation: primary: playbooks/reload.yaml diff --git a/examples/nginx_openstack/library/openstack/vm/types.yaml b/examples/nginx_openstack/library/openstack/vm/types.yaml index df58465..31411e7 100644 --- a/examples/nginx_openstack/library/openstack/vm/types.yaml +++ b/examples/nginx_openstack/library/openstack/vm/types.yaml @@ -34,15 +34,16 @@ node_types: operations: create: inputs: - vm_name: { default: { get_property: [ SELF, name ] } } - image: { default: { get_property: [ SELF, image ] } } - flavor: { default: { get_property: [ SELF, flavor ] } } - network: { default: { get_property: [ SELF, network ] } } - key_name: { default: { get_property: [ SELF, key_name ] } } + vm_name: { default: { get_property: [ SELF, name ] }, type: string } + image: { default: { get_property: [ SELF, image ] }, type: string } + flavor: { default: { get_property: [ SELF, flavor ] }, type: string } + network: { default: { get_property: [ SELF, network ] }, type: string } + key_name: { default: { get_property: [ SELF, key_name ] }, type: string } security_groups: - default: { get_property: [ SELF, security_groups ] } + default: { get_property: [ SELF, security_groups ] } + type: string implementation: playbooks/create.yaml delete: inputs: - id: { default: { get_attribute: [ SELF, id ] } } + id: { default: { get_attribute: [ SELF, id ] }, type: string } implementation: playbooks/delete.yaml diff --git a/examples/policy_triggers/service.yaml b/examples/policy_triggers/service.yaml index a8addc9..a131c56 100644 --- a/examples/policy_triggers/service.yaml +++ b/examples/policy_triggers/service.yaml @@ -34,7 +34,7 @@ interface_types: operations: scale_down: inputs: - adjustment: { default: { get_property: [ SELF, name ] } } + adjustment: { default: 1, type: integer } description: Operation for scaling down. 
implementation: playbooks/scale_down.yaml @@ -43,7 +43,7 @@ interface_types: operations: scale_up: inputs: - adjustment: { default: { get_property: [ SELF, name ] } } + adjustment: { default: 1, type: integer } description: Operation for scaling up. implementation: playbooks/scale_up.yaml @@ -83,8 +83,8 @@ policy_types: condition: - not: - and: - - available_instances: [ { greater: 42 } ] - - available_space: [ { greater: 1000 } ] + - available_instances: [ { greater_than: 42 } ] + - available_space: [ { greater_than: 1000 } ] action: - call_operation: operation: radon.interfaces.scaling.ScaleDown.scale_down @@ -116,8 +116,8 @@ policy_types: condition: - not: - and: - - available_instances: [ { greater: 42 } ] - - available_space: [ { greater: 1000 } ] + - available_instances: [ { greater_than: 42 } ] + - available_space: [ { greater_than: 1000 } ] action: - call_operation: operation: radon.interfaces.scaling.ScaleUp.scale_up @@ -160,52 +160,46 @@ topology_template: key_name: my_key requirements: - host: workstation - capabilities: - host_capability: - properties: - num_cpus: 1 - disk_size: 10 GB - mem_size: 4096 MB policies: - scale_down: - type: radon.policies.scaling.ScaleDown - properties: - cpu_upper_bound: 90 - adjustment: 1 + - scale_down: + type: radon.policies.scaling.ScaleDown + properties: + cpu_upper_bound: 90 + adjustment: 1 - scale_up: - type: radon.policies.scaling.ScaleUp - properties: - cpu_upper_bound: 90 - adjustment: 1 + - scale_up: + type: radon.policies.scaling.ScaleUp + properties: + cpu_upper_bound: 90 + adjustment: 1 - autoscale: - type: radon.policies.scaling.AutoScale - properties: - min_size: 3 - max_size: 7 - targets: [ openstack_vm ] - triggers: - radon.triggers.scaling: - description: A trigger for autoscaling - event: auto_scale_trigger - schedule: - start_time: 2020-04-08T21:59:43.10-06:00 - end_time: 2022-04-08T21:59:43.10-06:00 - target_filter: - node: openstack_vm - requirement: workstation - capability: host_capability - condition: - constraint: - - not: - - and: - - available_instances: [ { greater: 42 } ] - - available_space: [ { greater: 1000 } ] - period: 60 sec - evaluations: 2 - method: average - action: - - call_operation: radon.interfaces.scaling.AutoScale.retrieve_info - - call_operation: radon.interfaces.scaling.AutoScale.autoscale + - autoscale: + type: radon.policies.scaling.AutoScale + properties: + min_size: 3 + max_size: 7 + targets: [ openstack_vm ] + triggers: + radon.triggers.scaling: + description: A trigger for autoscaling + event: auto_scale_trigger + schedule: + start_time: 2020-04-08T21:59:43.10-06:00 + end_time: 2022-04-08T21:59:43.10-06:00 + target_filter: + node: openstack_vm + requirement: workstation + capability: host_capability + condition: + constraint: + - not: + - and: + - available_instances: [ { greater_than: 42 } ] + - available_space: [ { greater_than: 1000 } ] + period: 60 sec + evaluations: 2 + method: average + action: + - call_operation: radon.interfaces.scaling.AutoScale.retrieve_info + - call_operation: radon.interfaces.scaling.AutoScale.autoscale diff --git a/src/opera/parser/tosca/v_1_3/topology_template.py b/src/opera/parser/tosca/v_1_3/topology_template.py index ce17793..a973590 100644 --- a/src/opera/parser/tosca/v_1_3/topology_template.py +++ b/src/opera/parser/tosca/v_1_3/topology_template.py @@ -3,6 +3,7 @@ from opera.template.topology import Topology from ..entity import Entity from ..map import Map +from ..list import List from ..string import String from .group_definition import GroupDefinition @@ 
-19,7 +20,7 @@ class TopologyTemplate(Entity): node_templates=Map(NodeTemplate), relationship_templates=Map(RelationshipTemplate), groups=Map(GroupDefinition), - policies=Map(PolicyDefinition), + policies=List(Map(PolicyDefinition)), outputs=Map(ParameterDefinition), # TODO(@tadeboro): substitution_mappings and workflows )
xlab-si/xopera-opera
0eabdc0c7305979c897772fb5c935a5755fb903d
diff --git a/tests/integration/misc-tosca-types/service-template.yaml b/tests/integration/misc-tosca-types/service-template.yaml index 610c071..27818b0 100644 --- a/tests/integration/misc-tosca-types/service-template.yaml +++ b/tests/integration/misc-tosca-types/service-template.yaml @@ -105,11 +105,11 @@ topology_template: members: [ my-workstation1, my-workstation2 ] policies: - test: - type: daily_test_policies.test - properties: - test_id: *test - targets: [ hello, setter, workstation_group ] + - test: + type: daily_test_policies.test + properties: + test_id: *test + targets: [ hello, setter, workstation_group ] outputs: output_prop: diff --git a/tests/unit/opera/parser/tosca/v_1_3/test_topology_template.py b/tests/unit/opera/parser/tosca/v_1_3/test_topology_template.py index 6ffaa6c..9423610 100644 --- a/tests/unit/opera/parser/tosca/v_1_3/test_topology_template.py +++ b/tests/unit/opera/parser/tosca/v_1_3/test_topology_template.py @@ -20,8 +20,8 @@ class TestParse: my_group: type: group.type policies: - my_policy: - type: policy.type + - my_policy: + type: policy.type outputs: my_output: type: string
Policies in service templates should be serialized as a list ## Description This issue describes the inconsistency spotted in opera's TOSCA YAML v1.3 parser. [Section 3.9 of TOSCA Simple Profile in YAML v1.3](https://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.3/os/TOSCA-Simple-Profile-YAML-v1.3-os.html#_Toc26969450) states that the `policies` keyname within the `topology_template` should be represented as an optional list of TOSCA policy definitions that apply to the topology template. So, the syntax for TOSCA policies here should look like this: ```yaml topology_template: node_templates: # left off for brevity policies: - policy1: type: tosca.policies.placement # left off for brevity - policy2: type: tosca.policies.placement # left off for brevity - policy3: type: tosca.policies.placement # left off for brevity ``` When we try to use that syntax and deploy it with opera, we get an error (`Expected map.`), saying that opera expected a YAML dictionary/map for the `policies` keyname. And by looking into opera's parser we soon realize that policies are not serialized as a list, but rather as a map, which contradicts the TOSCA standard and needs to be fixed. ## Steps To fix the issue in the TOSCA parser, we should modify the content in `src/opera/parser/tosca/v_1_3/topology_template.py` and also fix any tests or examples that use the wrong syntax. ## Current behaviour Right now opera treats policies in `topology_template` as YAML maps. ## Expected results Opera's TOSCA parser should serialize policies as YAML lists.
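For illustration only, a minimal check of the parsed shape described above using plain PyYAML (not opera's own parser, which is what the patch modifies): with the list syntax, `policies` comes back as a list of single-entry maps.

```python
# Minimal sketch: parse the TOSCA 1.3 list syntax with PyYAML to see the
# resulting shape of `policies` (illustrative; opera uses its own parser).
import yaml

doc = yaml.safe_load("""
topology_template:
  policies:
    - my_policy:
        type: policy.type
""")
policies = doc["topology_template"]["policies"]
print(type(policies).__name__)  # list
print(policies[0])              # {'my_policy': {'type': 'policy.type'}}
```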
0.0
0eabdc0c7305979c897772fb5c935a5755fb903d
[ "tests/unit/opera/parser/tosca/v_1_3/test_topology_template.py::TestParse::test_full" ]
[ "tests/unit/opera/parser/tosca/v_1_3/test_topology_template.py::TestParse::test_minimal" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-10-19 11:34:43+00:00
apache-2.0
6,286
xlab-si__xopera-opera-207
diff --git a/.gitignore b/.gitignore index b67d06d..226bf78 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,89 @@ +# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] +*$py.class +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ eggs/ .eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ *.egg-info/ +.installed.cfg *.egg -dist/ +MANIFEST -venv/ -.venv/ +# PyInstaller +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ .pytest_cache/ +cover/ + +# PyBuilder +.pybuilder/ +target/ + +# pyenv +.python-version + +# pipenv +Pipfile.lock + +# Environments +.env +.venv +.venv* +env/ +venv/ +venv* +ENV/ +env.bak/ +venv.bak/ + +# mypy .mypy_cache/ +.dmypy.json +dmypy.json + +# packages +*.7z +*.dmg +*.gz +*.iso +*.jar +*.rar +*.tar +*.tar.gz +*.zip +*.log.* +*.csar + +# default opera storage folder +.opera diff --git a/examples/policy_triggers/service.yaml b/examples/policy_triggers/service.yaml index 858e329..77c027f 100644 --- a/examples/policy_triggers/service.yaml +++ b/examples/policy_triggers/service.yaml @@ -33,6 +33,10 @@ node_types: type: radon.interfaces.scaling.ScaleDown autoscaling: type: radon.interfaces.scaling.AutoScale + requirements: + - host: + capability: tosca.capabilities.Compute + relationship: tosca.relationships.HostedOn interface_types: radon.interfaces.scaling.ScaleDown: diff --git a/src/opera/parser/tosca/v_1_3/node_template.py b/src/opera/parser/tosca/v_1_3/node_template.py index f2d7040..823844c 100644 --- a/src/opera/parser/tosca/v_1_3/node_template.py +++ b/src/opera/parser/tosca/v_1_3/node_template.py @@ -86,4 +86,7 @@ class NodeTemplate(CollectorMixin, Entity): ) ) + if undeclared_requirements: + self.abort("Undeclared requirements: {}.".format(", ".join(undeclared_requirements)), self.loc) + return requirements
xlab-si/xopera-opera
442ff83960f03ba4507a078394a6447f22edf648
diff --git a/tests/integration/concurrency/service.yaml b/tests/integration/concurrency/service.yaml index 6934ddd..7e13804 100644 --- a/tests/integration/concurrency/service.yaml +++ b/tests/integration/concurrency/service.yaml @@ -134,7 +134,7 @@ topology_template: time: "1" requirements: - host: my-workstation - - dependency1: hello-1 - - dependency2: hello-2 - - dependency7: hello-7 - - dependency13: hello-13 + - dependency: hello-1 + - dependency: hello-2 + - dependency: hello-7 + - dependency: hello-13 diff --git a/tests/integration/misc_tosca_types/modules/node_types/test/test.yaml b/tests/integration/misc_tosca_types/modules/node_types/test/test.yaml index cc1c473..3ad3b9a 100644 --- a/tests/integration/misc_tosca_types/modules/node_types/test/test.yaml +++ b/tests/integration/misc_tosca_types/modules/node_types/test/test.yaml @@ -40,10 +40,7 @@ node_types: test_capability: type: daily_test.capabilities.test requirements: - - host1: + - host: capability: tosca.capabilities.Compute relationship: daily_test.relationships.test - - host2: - capability: tosca.capabilities.Compute - relationship: daily_test.relationships.interfaces ... diff --git a/tests/unit/opera/parser/test_tosca.py b/tests/unit/opera/parser/test_tosca.py index a81f803..b472187 100644 --- a/tests/unit/opera/parser/test_tosca.py +++ b/tests/unit/opera/parser/test_tosca.py @@ -316,3 +316,29 @@ class TestExecute: ast = tosca.load(tmp_path, name) with pytest.raises(ParseError, match="Missing a required property: property3"): ast.get_template({}) + + def test_undeclared_requirements(self, tmp_path, yaml_text): + name = pathlib.PurePath("template.yaml") + (tmp_path / name).write_text(yaml_text( + # language=yaml + """ + tosca_definitions_version: tosca_simple_yaml_1_3 + topology_template: + node_templates: + node_1: + type: tosca.nodes.SoftwareComponent + node_2: + type: tosca.nodes.SoftwareComponent + requirements: + - dependency: node_1 + node_3: + type: tosca.nodes.SoftwareComponent + requirements: + - dependency_not_defined1: node_1 + """ + )) + storage = Storage(tmp_path / pathlib.Path(".opera")) + storage.write("template.yaml", "root_file") + ast = tosca.load(tmp_path, name) + with pytest.raises(ParseError, match="Undeclared requirements: dependency_not_defined1"): + ast.get_template({})
Repair the concurrency deployment integration test ## Description With this issue, we want to repair the concurrency integration test showing the right interdependency of nodes done in the original test and changed in a [subsequent commit](https://github.com/xlab-si/xopera-opera/commit/a803058071405f02c5be16eedc9ec172175625d1#diff-26bd1afdf3f59f80a604bac88b4519fd737e2fb182b745882653ec3b300224cbR137). In the test the node dependency for node `hello-14` should be used as defined in [TOSCA 1.3 5.9.1.3 Definition](https://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.3/csprd01/TOSCA-Simple-Profile-YAML-v1.3-csprd01.html#_Toc9262333) `tosca.nodes.Root` where the `dependency` requirement has an UNBOUNDED max. limit of occurrences. ## Steps Running the test in the current version of the test with this code for node `hello-14` executes without returning an error and produces the output not intended by the initial version of the test: ``` YAML hello-14: type: hello_type properties: time: "1" requirements: - host: my-workstation - dependency1: hello-1 - dependency2: hello-2 - dependency7: hello-7 - dependency13: hello-13 ``` ## Current behavior Currently, opera executes the test without taking into account the undefined `dependency1` requirement in which will be addressed in a separate issue. When defining dependencies for node `hello-14` as in the initially intended test: ``` YAML hello-14: type: hello_type properties: time: "1" requirements: - host: my-workstation - dependency: hello-1 - dependency: hello-2 - dependency: hello-7 - dependency: hello-13 ``` opera produces the correct outputs. ## Expected results The execution of the corrected test should produce this output: ``` [Worker_0] Deploying my-workstation_0 [Worker_0] Deployment of my-workstation_0 complete [Worker_0] Deploying hello-1_0 [Worker_0] Executing create on hello-1_0 [Worker_2] Deploying hello-2_0 [Worker_3] Deploying hello-3_0 [Worker_4] Deploying hello-4_0 [Worker_5] Deploying hello-8_0 [Worker_6] Deploying hello-9_0 [Worker_7] Deploying hello-10_0 [Worker_4] Executing create on hello-4_0 [Worker_2] Executing create on hello-2_0 [Worker_5] Executing create on hello-8_0 [Worker_3] Executing create on hello-3_0 [Worker_6] Executing create on hello-9_0 [Worker_7] Executing create on hello-10_0 [Worker_4] Executing start on hello-4_0 [Worker_3] Executing start on hello-3_0 [Worker_6] Executing start on hello-9_0 [Worker_7] Executing start on hello-10_0 [Worker_3] Deployment of hello-3_0 complete [Worker_1] Deploying hello-12_0 [Worker_1] Executing create on hello-12_0 [Worker_4] Deployment of hello-4_0 complete [Worker_3] Deploying hello-13_0 [Worker_3] Executing create on hello-13_0 [Worker_6] Deployment of hello-9_0 complete [Worker_5] Executing start on hello-8_0 [Worker_2] Executing start on hello-2_0 [Worker_0] Executing start on hello-1_0 [Worker_1] Executing start on hello-12_0 [Worker_7] Deployment of hello-10_0 complete [Worker_3] Executing start on hello-13_0 [Worker_1] Deployment of hello-12_0 complete [Worker_3] Deployment of hello-13_0 complete [Worker_2] Deployment of hello-2_0 complete [Worker_0] Deployment of hello-1_0 complete [Worker_8] Deploying hello-5_0 [Worker_4] Deploying hello-11_0 [Worker_5] Deployment of hello-8_0 complete [Worker_4] Executing create on hello-11_0 [Worker_8] Executing create on hello-5_0 [Worker_4] Executing start on hello-11_0 [Worker_8] Executing start on hello-5_0 [Worker_8] Deployment of hello-5_0 complete [Worker_4] Deployment of hello-11_0 complete [Worker_4] Deploying 
hello-6_0 [Worker_4] Executing create on hello-6_0 [Worker_4] Executing start on hello-6_0 [Worker_4] Deployment of hello-6_0 complete [Worker_6] Deploying hello-7_0 [Worker_6] Executing create on hello-7_0 [Worker_6] Executing start on hello-7_0 [Worker_6] Deployment of hello-7_0 complete [Worker_7] Deploying hello-14_0 [Worker_7] Executing create on hello-14_0 [Worker_7] Executing start on hello-14_0 [Worker_7] Deployment of hello-14_0 complete ```
0.0
442ff83960f03ba4507a078394a6447f22edf648
[ "tests/unit/opera/parser/test_tosca.py::TestExecute::test_undeclared_requirements" ]
[ "tests/unit/opera/parser/test_tosca.py::TestLoad::test_load_minimal_document", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_empty_document_is_invalid", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ0]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ1]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ2]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ3]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ4]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ5]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ6]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ7]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ0]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ1]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ2]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ3]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ4]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ5]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ6]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ7]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_loads_template_part", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_load_from_csar_subfolder", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_duplicate_import", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_imports_from_multiple_levels", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_merge_topology_template", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_merge_duplicate_node_templates_invalid", "tests/unit/opera/parser/test_tosca.py::TestExecute::test_undefined_required_properties1", "tests/unit/opera/parser/test_tosca.py::TestExecute::test_undefined_required_properties2", "tests/unit/opera/parser/test_tosca.py::TestExecute::test_undefined_required_properties3" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-05-12 14:12:15+00:00
apache-2.0
6,287
xlab-si__xopera-opera-52
diff --git a/src/opera/parser/tosca/v_1_3/topology_template.py b/src/opera/parser/tosca/v_1_3/topology_template.py index 1af5075..ce17793 100644 --- a/src/opera/parser/tosca/v_1_3/topology_template.py +++ b/src/opera/parser/tosca/v_1_3/topology_template.py @@ -64,3 +64,20 @@ class TopologyTemplate(Entity): ), ) for name, definition in self.get("outputs", {}).items() } + + def merge(self, other): + for key in ( + "inputs", + "node_templates", + "data_types", + "relationship_templates", + "groups", + "policies", + "outputs" + ): + if key not in other.data: + continue + if key in self.data: + self.data[key].merge(other.data[key]) + else: + self.data[key] = other.data[key]
xlab-si/xopera-opera
ffe72a4dce9ac24f33304582577fed7b56ae34cd
diff --git a/tests/unit/opera/parser/test_tosca.py b/tests/unit/opera/parser/test_tosca.py index 88741eb..15607c7 100644 --- a/tests/unit/opera/parser/test_tosca.py +++ b/tests/unit/opera/parser/test_tosca.py @@ -140,3 +140,68 @@ class TestLoad: )) tosca.load(tmp_path, name) + + def test_merge_topology_template(self, tmp_path, yaml_text): + name = pathlib.PurePath("template.yaml") + (tmp_path / name).write_text(yaml_text( + """ + tosca_definitions_version: tosca_simple_yaml_1_3 + imports: + - merge.yaml + topology_template: + inputs: + some-input: + type: string + node_templates: + my_node: + type: tosca.nodes.SoftwareComponent + """ + )) + (tmp_path / "merge.yaml").write_text(yaml_text( + """ + tosca_definitions_version: tosca_simple_yaml_1_3 + topology_template: + inputs: + other-input: + type: string + node_templates: + other_node: + type: tosca.nodes.SoftwareComponent + """ + )) + tosca.load(tmp_path, name) + + def test_merge_duplicate_node_templates_invalid(self, tmp_path, yaml_text): + name = pathlib.PurePath("template.yaml") + (tmp_path / name).write_text(yaml_text( + """ + tosca_definitions_version: tosca_simple_yaml_1_3 + imports: + - merge1.yaml + - merge2.yaml + topology_template: + node_templates: + my_node: + type: tosca.nodes.SoftwareComponent + """ + )) + (tmp_path / "merge1.yaml").write_text(yaml_text( + """ + tosca_definitions_version: tosca_simple_yaml_1_3 + topology_template: + node_templates: + other_node: + type: tosca.nodes.SoftwareComponent + """ + )) + (tmp_path / "merge2.yaml").write_text(yaml_text( + """ + tosca_definitions_version: tosca_simple_yaml_1_3 + topology_template: + node_templates: + other_node: + type: tosca.nodes.SoftwareComponent + """ + )) + with pytest.raises(ParseError): + tosca.load(tmp_path, name) \ No newline at end of file
node_templates section defined in imported service templates ## Description In some cases, it would be useful to import a service template that already contains some node_templates defined. For example, this happens when a service template describing existing infrastructure that cannot be modified externally (e.g. HPC nodes) is automatically generated by some service and later imported into other templates. ## Prerequisites - `opera` installed - A valid TOSCA Service Template file `service.yaml` that contains an import definition of another valid TOSCA Service Template file with **topology_template** and **node_templates** sections defined. ## Steps - User runs deploy `opera deploy service.yaml` ## Current behaviour - xOpera returns an error `Duplicate keys 'node_templates' found in service.yml` ## Expected behavior - Service template is deployed without error.
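A rough sketch of the section-by-section merge that the patch above introduces, with plain dicts standing in for the parser's Map objects (illustrative only):

```python
# Merge an imported topology_template into the main one, key by key;
# plain dicts stand in for the parser's Map values in this sketch.
def merge_topology(base, other, keys=("inputs", "node_templates", "outputs")):
    for key in keys:
        if key not in other:
            continue
        if key in base:
            base[key].update(other[key])  # the real parser also rejects duplicates
        else:
            base[key] = other[key]

main = {"node_templates": {"my_node": {"type": "tosca.nodes.SoftwareComponent"}}}
imported = {"node_templates": {"other_node": {"type": "tosca.nodes.SoftwareComponent"}}}
merge_topology(main, imported)
print(sorted(main["node_templates"]))  # ['my_node', 'other_node']
```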
0.0
ffe72a4dce9ac24f33304582577fed7b56ae34cd
[ "tests/unit/opera/parser/test_tosca.py::TestLoad::test_merge_topology_template" ]
[ "tests/unit/opera/parser/test_tosca.py::TestLoad::test_load_minimal_document", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_empty_document_is_invalid", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ0]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ1]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ2]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ3]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ4]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ5]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ6]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_stdlib_is_present[typ7]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ0]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ1]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ2]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ3]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ4]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ5]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ6]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_custom_type_is_present[typ7]", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_loads_template_part", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_load_from_csar_subfolder", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_duplicate_import", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_imports_from_multiple_levels", "tests/unit/opera/parser/test_tosca.py::TestLoad::test_merge_duplicate_node_templates_invalid" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-06-03 12:47:35+00:00
apache-2.0
6,288
xlab-si__xopera-opera-76
diff --git a/src/opera/parser/yaml/constructor.py b/src/opera/parser/yaml/constructor.py index 464b4fa..154526a 100644 --- a/src/opera/parser/yaml/constructor.py +++ b/src/opera/parser/yaml/constructor.py @@ -1,4 +1,5 @@ from yaml.constructor import BaseConstructor, ConstructorError +from collections import Counter from opera.parser.utils.location import Location @@ -56,6 +57,14 @@ class Constructor(BaseConstructor): data = Node({}, self._pos(node)) yield data data.value.update(self.construct_mapping(node)) + counts = Counter(n.bare for n in data.value) + duplicates = [k for k, v in counts.items() if v > 1] + if duplicates: + raise ConstructorError( + None, None, + "Duplicate map names: {}".format(', '.join(duplicates)), + node.start_mark, + ) def construct_undefined(self, node): raise ConstructorError(
xlab-si/xopera-opera
5437501466c3a18ef4a5c53cb6ce35ae9f726fe7
diff --git a/tests/unit/opera/parser/yaml/test_constructor.py b/tests/unit/opera/parser/yaml/test_constructor.py index cec2c01..ee88892 100644 --- a/tests/unit/opera/parser/yaml/test_constructor.py +++ b/tests/unit/opera/parser/yaml/test_constructor.py @@ -3,6 +3,7 @@ import math import pytest from yaml.error import Mark from yaml.nodes import MappingNode, ScalarNode, SequenceNode +from yaml.constructor import ConstructorError from opera.parser.yaml.constructor import Constructor @@ -138,3 +139,19 @@ class TestNull: assert res.loc.line == 9 assert res.loc.column == 9 assert res.loc.stream_name == "map" + + def test_construct_map_duplicate(self): + mark = Mark(None, None, 8, 8, None, None) + children = [ + ( + ScalarNode("tag:yaml.org,2002:str", "node1", start_mark=mark), + ScalarNode("tag:yaml.org,2002:str", "node1", start_mark=mark), + ), + ( + ScalarNode("tag:yaml.org,2002:str", "node1", start_mark=mark), + ScalarNode("tag:yaml.org,2002:str", "node1", start_mark=mark), + ) + ] + node = MappingNode("tag:yaml.org,2002:map", children, start_mark=mark) + with pytest.raises(ConstructorError): + res, = Constructor("map").construct_yaml_map(node)
Duplicate node_template names ## Description If a service template has 2 node templates with the same name defined, one of them is omitted, and template execution proceeds without error. According to section **3.1.3.1 Additional Requirements** > Duplicate Template names within a Service Template’s Topology Template SHALL be considered an error. ## Prerequisites - `opera` installed - `service.yaml` TOSCA Service Template containing 2 node templates with the same name. ``` tosca_definitions_version: tosca_simple_yaml_1_3 topology_template: node_templates: node_1: type: tosca.nodes.SoftwareComponent node_1: type: tosca.nodes.SoftwareComponent ``` ## Steps - User runs deploy `opera deploy service.yaml` ## Current behavior - xOpera proceeds with deployment. ## Expected behavior - xOpera returns a parser error.
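The fix in the patch above detects this while constructing the YAML mapping by counting key occurrences; a standalone sketch of that check (not the project's actual constructor):

```python
# Count mapping keys with collections.Counter and flag any that repeat.
from collections import Counter

def duplicate_keys(keys):
    counts = Counter(keys)
    return [k for k, v in counts.items() if v > 1]

keys_in_mapping = ["node_1", "node_1"]  # two node templates sharing one name
dupes = duplicate_keys(keys_in_mapping)
if dupes:
    print("Duplicate map names: {}".format(", ".join(dupes)))  # opera raises a parse error here
```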
0.0
5437501466c3a18ef4a5c53cb6ce35ae9f726fe7
[ "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_map_duplicate" ]
[ "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_null[NULL]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_null[Null]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_null[null]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_null[~]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_null[]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_bool_true[True-True]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_bool_true[true-True]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_bool_true[TRUE-True]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_bool_true[False-False]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_bool_true[false-False]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_bool_true[FALSE-False]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[1-1]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0-0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[100-100]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[987654-987654]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[-100--100]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[+100-100]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[00005-5]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[054-54]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0o1-1]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0o0-0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0o100-64]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0o765-501]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0o0000015-13]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0x1-1]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0x0-0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0x100-256]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0x90abc-592572]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_int[0xAaBbFdE-179027934]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+.inf-inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-.inf--inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[.inf-inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+.Inf-inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-.Inf--inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[.Inf-inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+.INF-inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-.INF--inf]", 
"tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[.INF-inf]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+.987-0.987]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-.765--0.765]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[.0987-0.0987]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+.6e-3-0.0006]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-.5E+2--50.0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[.4E32-4e+31]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+1.3-1.3]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-2.4--2.4]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[3.5-3.5]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+1.3E-3-0.0013]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-2.42e+5--242000.0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[3.5e7-35000000.0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[+13E-3-0.013]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[-2e+5--200000.0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_no_nan[3E7-30000000.0]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_nan_only[.nan]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_nan_only[.NaN]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_float_nan_only[.NAN]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[abc]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[1.2.3]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[NaN]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[INF]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[.NAn]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[\\n]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[\\t]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[1", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[https://url]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[:bare]", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_str[multi\\n", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_seq", "tests/unit/opera/parser/yaml/test_constructor.py::TestNull::test_construct_map" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2020-07-22 09:51:34+00:00
apache-2.0
6,289
xlwings__jsondiff-64
diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index aafc4f9..41155fb 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -19,7 +19,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Dependencies run: | - pip install -r dev-requirements.txt + pip install .[test] - name: Run Tests run: | python -m pytest diff --git a/dev-requirements.txt b/dev-requirements.txt deleted file mode 100644 index 9a54149..0000000 --- a/dev-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -hypothesis -pytest diff --git a/jsondiff/__init__.py b/jsondiff/__init__.py index e2920c4..a3fdcc2 100644 --- a/jsondiff/__init__.py +++ b/jsondiff/__init__.py @@ -84,7 +84,7 @@ class CompactJsonDiffSyntax(object): def emit_list_diff(self, a, b, s, inserted, changed, deleted): if s == 0.0: return {replace: b} if isinstance(b, dict) else b - elif s == 1.0: + elif s == 1.0 and not (inserted or changed or deleted): return {} else: d = changed @@ -97,7 +97,7 @@ class CompactJsonDiffSyntax(object): def emit_dict_diff(self, a, b, s, added, changed, removed): if s == 0.0: return {replace: b} if isinstance(b, dict) else b - elif s == 1.0: + elif s == 1.0 and not (added or changed or removed): return {} else: changed.update(added) @@ -171,9 +171,9 @@ class ExplicitJsonDiffSyntax(object): return d def emit_list_diff(self, a, b, s, inserted, changed, deleted): - if s == 0.0: + if s == 0.0 and not (inserted or changed or deleted): return b - elif s == 1.0: + elif s == 1.0 and not (inserted or changed or deleted): return {} else: d = changed @@ -184,9 +184,9 @@ class ExplicitJsonDiffSyntax(object): return d def emit_dict_diff(self, a, b, s, added, changed, removed): - if s == 0.0: + if s == 0.0 and not (added or changed or removed): return b - elif s == 1.0: + elif s == 1.0 and not (added or changed or removed): return {} else: d = {} @@ -218,9 +218,9 @@ class SymmetricJsonDiffSyntax(object): return d def emit_list_diff(self, a, b, s, inserted, changed, deleted): - if s == 0.0: + if s == 0.0 and not (inserted or changed or deleted): return [a, b] - elif s == 1.0: + elif s == 1.0 and not (inserted or changed or deleted): return {} else: d = changed @@ -231,9 +231,9 @@ class SymmetricJsonDiffSyntax(object): return d def emit_dict_diff(self, a, b, s, added, changed, removed): - if s == 0.0: + if s == 0.0 and not (added or changed or removed): return [a, b] - elif s == 1.0: + elif s == 1.0 and not (added or changed or removed): return {} else: d = changed diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..3f46156 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,39 @@ +[build-system] +requires = ["setuptools>=43.0.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "jsondiff" +description = "Diff JSON and JSON-like structures in Python" +dynamic = ["version"] +readme = "README.rst" +license= {file = "LICENSE" } +requires-python = ">=3.8" +authors = [ + { name = "Zoomer Analytics LLC", email = "[email protected]"} +] +keywords = ['json', 'diff', 'diffing', 'difference', 'patch', 'delta', 'dict', 'LCS'] +classifiers = [ + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3', +] + +[project.optional-dependencies] +test = [ + "hypothesis", + "pytest" +] + +[project.urls] +"Homepage" = "https://github.com/xlwings/jsondiff" +"Bug Tracker" = "https://github.com/xlwings/jsondiff/issues" + +[project.scripts] +jdiff = "jsondiff.cli:main" + +[tool.setuptools.packages.find] +include = ["jsondiff*"] 
+exclude = ["tests*"] + +[tool.setuptools.dynamic] +version = {attr = "jsondiff.__version__"} diff --git a/setup.py b/setup.py index 08a30dc..bf508c8 100644 --- a/setup.py +++ b/setup.py @@ -1,26 +1,4 @@ -import os -import re -from setuptools import setup, find_packages +# Maintained for legacy compatibility +from setuptools import setup -with open(os.path.join(os.path.dirname(__file__), 'jsondiff', '__init__.py')) as f: - version = re.compile(r".*__version__ = '(.*?)'", re.S).match(f.read()).group(1) - -setup( - name='jsondiff', - packages=find_packages(exclude=['tests']), - version=version, - description='Diff JSON and JSON-like structures in Python', - author='Zoomer Analytics LLC', - author_email='[email protected]', - url='https://github.com/ZoomerAnalytics/jsondiff', - keywords=['json', 'diff', 'diffing', 'difference', 'patch', 'delta', 'dict', 'LCS'], - classifiers=[ - 'License :: OSI Approved :: MIT License', - 'Programming Language :: Python :: 3', - ], - entry_points={ - 'console_scripts': [ - 'jdiff=jsondiff.cli:main' - ] - } -) +setup()
xlwings/jsondiff
aa55a7f2fbce6d24f44e863ffc4db132d03b62ab
diff --git a/tests/test_jsondiff.py b/tests/test_jsondiff.py index 8328ee9..2cbbc66 100644 --- a/tests/test_jsondiff.py +++ b/tests/test_jsondiff.py @@ -1,5 +1,6 @@ import sys import unittest +import pytest from jsondiff import diff, replace, add, discard, insert, delete, update, JsonDiffer @@ -134,3 +135,30 @@ class JsonDiffTests(unittest.TestCase): self.fail('cannot diff long arrays') finally: sys.setrecursionlimit(r) + + [email protected]( + ("a", "b", "syntax", "expected"), + [ + pytest.param([], [{"a": True}], "explicit", {insert: [(0, {"a": True})]}, + id="issue59_"), + pytest.param([{"a": True}], [], "explicit", {delete: [0]}, + id="issue59_"), + pytest.param([], [{"a": True}], "compact", [{"a": True}], + id="issue59_"), + pytest.param([{"a": True}], [], "compact", [], + id="issue59_"), + pytest.param([], [{"a": True}], "symmetric", {insert: [(0, {"a": True})]}, + id="issue59_"), + pytest.param([{"a": True}], [], "symmetric", {delete: [(0, {"a": True})]}, + id="issue59_"), + pytest.param({1: 2}, {5: 3}, "symmetric", {delete: {1: 2}, insert: {5: 3}}, + id="issue36_"), + pytest.param({1: 2}, {5: 3}, "compact", {replace: {5: 3}}, + id="issue36_"), + ], +) +class TestSpecificIssue: + def test_issue(self, a, b, syntax, expected): + actual = diff(a, b, syntax=syntax) + assert actual == expected
No difference label if one of the JSONs is empty Hi, I did some tests with the library and I have a scenario where one of the compared JSONs is empty: { }. I am using syntax='explicit' and the diff returns exactly the JSON that is not empty. My problem is that I would like it to return something like: ```js { insert: ... } ``` The "insert" tag is quite important during my parsing.
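A short example of the behaviour the accompanying test patch pins down, assuming a jsondiff build that contains the fix:

```python
# Expected values are taken from the parametrized test cases in the test patch.
from jsondiff import diff, insert, delete

assert diff([], [{"a": True}], syntax="explicit") == {insert: [(0, {"a": True})]}
assert diff([{"a": True}], [], syntax="explicit") == {delete: [0]}
assert diff([], [{"a": True}], syntax="compact") == [{"a": True}]
```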
0.0
aa55a7f2fbce6d24f44e863ffc4db132d03b62ab
[ "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue59_5]", "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue59_1]", "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue59_4]", "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue59_0]", "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue36_0]" ]
[ "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue36_1]", "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue59_2]", "tests/test_jsondiff.py::TestSpecificIssue::test_issue[issue59_3]", "tests/test_jsondiff.py::JsonDiffTests::test_long_arrays", "tests/test_jsondiff.py::JsonDiffTests::test_explicit_syntax", "tests/test_jsondiff.py::JsonDiffTests::test_compact_syntax", "tests/test_jsondiff.py::JsonDiffTests::test_dump", "tests/test_jsondiff.py::JsonDiffTests::test_symmetric_syntax", "tests/test_jsondiff.py::JsonDiffTests::test_a", "tests/test_jsondiff.py::JsonDiffTests::test_marshal" ]
{ "failed_lite_validators": [ "has_added_files", "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-06-04 02:09:09+00:00
mit
6,290
xonsh__xonsh-3772
diff --git a/news/simple-variables.rst b/news/simple-variables.rst index 3b1d6478..0592b11f 100644 --- a/news/simple-variables.rst +++ b/news/simple-variables.rst @@ -1,7 +1,7 @@ **Added:** * Xonsh now supports bash-style variable assignments preceding - subprocess commands (e.g. ``$FOO = "bar" bash -c r"echo $FOO"``). + subprocess commands (e.g. ``$FOO="bar" bash -c r"echo $FOO"``). **Changed:** diff --git a/xonsh/parsers/base.py b/xonsh/parsers/base.py index 2b6ce18e..63d7a82d 100644 --- a/xonsh/parsers/base.py +++ b/xonsh/parsers/base.py @@ -3325,11 +3325,7 @@ class BaseParser(object): p[0] = ast.Str(s=p1.value, lineno=p1.lineno, col_offset=p1.lexpos) def p_envvar_assign_left(self, p): - """envvar_assign_left : dollar_name_tok EQUALS - | dollar_name_tok WS EQUALS - | dollar_name_tok EQUALS WS - | dollar_name_tok WS EQUALS WS - """ + """envvar_assign_left : dollar_name_tok EQUALS""" p[0] = p[1] def p_envvar_assign(self, p):
xonsh/xonsh
f23e9195a0ac174e0db953b8e6604863858f8e88
diff --git a/tests/test_parser.py b/tests/test_parser.py index 53f5a4cd..67de9208 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -2240,6 +2240,10 @@ def test_bang_ls_envvar_listval(): check_xonsh_ast({"WAKKA": [".", "."]}, "!(ls $WAKKA)", False) +def test_bang_envvar_args(): + check_xonsh_ast({"LS": "ls"}, "!($LS .)", False) + + def test_question(): check_xonsh_ast({}, "range?") @@ -2502,7 +2506,7 @@ def test_ls_quotes_3_space(): def test_leading_envvar_assignment(): - check_xonsh_ast({}, "![$FOO= 'foo' $BAR =2 echo r'$BAR']", False) + check_xonsh_ast({}, "![$FOO='foo' $BAR=2 echo r'$BAR']", False) def test_echo_comma():
conda init error Hi! Xonsh is super cool! I'm happy every time I'm writing xonshrc! Today I tried to install master and got an error with conda init: ```bash pip install -U git+https://github.com/xonsh/xonsh xonfig +------------------+----------------------+ | xonsh | 0.9.21.dev31 | | Git SHA | d42b4140 | ``` ``` cat ~/.xonshrc # >>> conda initialize >>> # !! Contents within this block are managed by 'conda init' !! import sys as _sys from types import ModuleType as _ModuleType _mod = _ModuleType("xontrib.conda", "Autogenerated from $(/opt/miniconda/bin/conda shell.xonsh hook)") __xonsh__.execer.exec($("/opt/miniconda/bin/conda" "shell.xonsh" "hook"), glbs=_mod.__dict__, filename="$(/opt/miniconda/bin/conda shell.xonsh hook)") _sys.modules["xontrib.conda"] = _mod del _sys, _mod, _ModuleType # <<< conda initialize <<< ``` ``` xonsh Traceback (most recent call last): File "/opt/miniconda/lib/python3.8/site-packages/xonsh/proc.py", line 1737, in wait r = self.f(self.args, stdin, stdout, stderr, spec, spec.stack) File "/opt/miniconda/lib/python3.8/site-packages/xonsh/proc.py", line 1232, in proxy_two return f(args, stdin) File "/opt/miniconda/lib/python3.8/site-packages/xonsh/aliases.py", line 583, in source_alias print_color( File "/opt/miniconda/lib/python3.8/site-packages/xonsh/tools.py", line 1768, in print_color builtins.__xonsh__.shell.shell.print_color(string, **kwargs) AttributeError: 'NoneType' object has no attribute 'shell' xonsh> ``` How I can fix this? Thanks! ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
0.0
f23e9195a0ac174e0db953b8e6604863858f8e88
[ "tests/test_parser.py::test_bang_envvar_args" ]
[ "tests/test_parser.py::test_int_literal", "tests/test_parser.py::test_int_literal_underscore", "tests/test_parser.py::test_float_literal", "tests/test_parser.py::test_float_literal_underscore", "tests/test_parser.py::test_imag_literal", "tests/test_parser.py::test_float_imag_literal", "tests/test_parser.py::test_complex", "tests/test_parser.py::test_str_literal", "tests/test_parser.py::test_bytes_literal", "tests/test_parser.py::test_raw_literal", "tests/test_parser.py::test_f_literal", "tests/test_parser.py::test_fstring_adaptor[f\"$HOME\"-$HOME]", "tests/test_parser.py::test_fstring_adaptor[f\"{0}", "tests/test_parser.py::test_fstring_adaptor[f\"{'$HOME'}\"-$HOME]", "tests/test_parser.py::test_raw_bytes_literal", "tests/test_parser.py::test_unary_plus", "tests/test_parser.py::test_unary_minus", "tests/test_parser.py::test_unary_invert", "tests/test_parser.py::test_binop_plus", "tests/test_parser.py::test_binop_minus", "tests/test_parser.py::test_binop_times", "tests/test_parser.py::test_binop_matmult", "tests/test_parser.py::test_binop_div", "tests/test_parser.py::test_binop_mod", "tests/test_parser.py::test_binop_floordiv", "tests/test_parser.py::test_binop_pow", "tests/test_parser.py::test_plus_pow", "tests/test_parser.py::test_plus_plus", "tests/test_parser.py::test_plus_minus", "tests/test_parser.py::test_minus_plus", "tests/test_parser.py::test_minus_minus", "tests/test_parser.py::test_minus_plus_minus", "tests/test_parser.py::test_times_plus", "tests/test_parser.py::test_plus_times", "tests/test_parser.py::test_times_times", "tests/test_parser.py::test_times_div", "tests/test_parser.py::test_times_div_mod", "tests/test_parser.py::test_times_div_mod_floor", "tests/test_parser.py::test_str_str", "tests/test_parser.py::test_str_str_str", "tests/test_parser.py::test_str_plus_str", "tests/test_parser.py::test_str_times_int", "tests/test_parser.py::test_int_times_str", "tests/test_parser.py::test_group_plus_times", "tests/test_parser.py::test_plus_group_times", "tests/test_parser.py::test_group", "tests/test_parser.py::test_lt", "tests/test_parser.py::test_gt", "tests/test_parser.py::test_eq", "tests/test_parser.py::test_le", "tests/test_parser.py::test_ge", "tests/test_parser.py::test_ne", "tests/test_parser.py::test_in", "tests/test_parser.py::test_is", "tests/test_parser.py::test_not_in", "tests/test_parser.py::test_is_not", "tests/test_parser.py::test_lt_lt", "tests/test_parser.py::test_lt_lt_lt", "tests/test_parser.py::test_not", "tests/test_parser.py::test_or", "tests/test_parser.py::test_or_or", "tests/test_parser.py::test_and", "tests/test_parser.py::test_and_and", "tests/test_parser.py::test_and_or", "tests/test_parser.py::test_or_and", "tests/test_parser.py::test_group_and_and", "tests/test_parser.py::test_group_and_or", "tests/test_parser.py::test_if_else_expr", "tests/test_parser.py::test_if_else_expr_expr", "tests/test_parser.py::test_str_idx", "tests/test_parser.py::test_str_slice", "tests/test_parser.py::test_str_step", "tests/test_parser.py::test_str_slice_all", "tests/test_parser.py::test_str_slice_upper", "tests/test_parser.py::test_str_slice_lower", "tests/test_parser.py::test_str_slice_other", "tests/test_parser.py::test_str_slice_lower_other", "tests/test_parser.py::test_str_slice_upper_other", "tests/test_parser.py::test_list_empty", "tests/test_parser.py::test_list_one", "tests/test_parser.py::test_list_one_comma", "tests/test_parser.py::test_list_two", "tests/test_parser.py::test_list_three", "tests/test_parser.py::test_list_three_comma", 
"tests/test_parser.py::test_list_one_nested", "tests/test_parser.py::test_list_list_four_nested", "tests/test_parser.py::test_list_tuple_three_nested", "tests/test_parser.py::test_list_set_tuple_three_nested", "tests/test_parser.py::test_list_tuple_one_nested", "tests/test_parser.py::test_tuple_tuple_one_nested", "tests/test_parser.py::test_dict_list_one_nested", "tests/test_parser.py::test_dict_list_one_nested_comma", "tests/test_parser.py::test_dict_tuple_one_nested", "tests/test_parser.py::test_dict_tuple_one_nested_comma", "tests/test_parser.py::test_dict_list_two_nested", "tests/test_parser.py::test_set_tuple_one_nested", "tests/test_parser.py::test_set_tuple_two_nested", "tests/test_parser.py::test_tuple_empty", "tests/test_parser.py::test_tuple_one_bare", "tests/test_parser.py::test_tuple_two_bare", "tests/test_parser.py::test_tuple_three_bare", "tests/test_parser.py::test_tuple_three_bare_comma", "tests/test_parser.py::test_tuple_one_comma", "tests/test_parser.py::test_tuple_two", "tests/test_parser.py::test_tuple_three", "tests/test_parser.py::test_tuple_three_comma", "tests/test_parser.py::test_bare_tuple_of_tuples", "tests/test_parser.py::test_set_one", "tests/test_parser.py::test_set_one_comma", "tests/test_parser.py::test_set_two", "tests/test_parser.py::test_set_two_comma", "tests/test_parser.py::test_set_three", "tests/test_parser.py::test_dict_empty", "tests/test_parser.py::test_dict_one", "tests/test_parser.py::test_dict_one_comma", "tests/test_parser.py::test_dict_two", "tests/test_parser.py::test_dict_two_comma", "tests/test_parser.py::test_dict_three", "tests/test_parser.py::test_dict_from_dict_two_xy", "tests/test_parser.py::test_dict_from_dict_two_x_first", "tests/test_parser.py::test_dict_from_dict_two_x_second", "tests/test_parser.py::test_unpack_range_tuple", "tests/test_parser.py::test_unpack_range_tuple_4", "tests/test_parser.py::test_unpack_range_tuple_parens", "tests/test_parser.py::test_unpack_range_tuple_parens_4", "tests/test_parser.py::test_unpack_range_list", "tests/test_parser.py::test_unpack_range_list_4", "tests/test_parser.py::test_unpack_range_set", "tests/test_parser.py::test_unpack_range_set_4", "tests/test_parser.py::test_true", "tests/test_parser.py::test_false", "tests/test_parser.py::test_none", "tests/test_parser.py::test_elipssis", "tests/test_parser.py::test_not_implemented_name", "tests/test_parser.py::test_genexpr", "tests/test_parser.py::test_genexpr_if", "tests/test_parser.py::test_genexpr_if_and", "tests/test_parser.py::test_dbl_genexpr", "tests/test_parser.py::test_genexpr_if_genexpr", "tests/test_parser.py::test_genexpr_if_genexpr_if", "tests/test_parser.py::test_listcomp", "tests/test_parser.py::test_listcomp_if", "tests/test_parser.py::test_listcomp_if_and", "tests/test_parser.py::test_listcomp_multi_if", "tests/test_parser.py::test_dbl_listcomp", "tests/test_parser.py::test_listcomp_if_listcomp", "tests/test_parser.py::test_listcomp_if_listcomp_if", "tests/test_parser.py::test_setcomp", "tests/test_parser.py::test_setcomp_if", "tests/test_parser.py::test_setcomp_if_and", "tests/test_parser.py::test_dbl_setcomp", "tests/test_parser.py::test_setcomp_if_setcomp", "tests/test_parser.py::test_setcomp_if_setcomp_if", "tests/test_parser.py::test_dictcomp", "tests/test_parser.py::test_dictcomp_unpack_parens", "tests/test_parser.py::test_dictcomp_unpack_no_parens", "tests/test_parser.py::test_dictcomp_if", "tests/test_parser.py::test_dictcomp_if_and", "tests/test_parser.py::test_dbl_dictcomp", 
"tests/test_parser.py::test_dictcomp_if_dictcomp", "tests/test_parser.py::test_dictcomp_if_dictcomp_if", "tests/test_parser.py::test_lambda", "tests/test_parser.py::test_lambda_x", "tests/test_parser.py::test_lambda_kwx", "tests/test_parser.py::test_lambda_x_y", "tests/test_parser.py::test_lambda_x_y_z", "tests/test_parser.py::test_lambda_x_kwy", "tests/test_parser.py::test_lambda_kwx_kwy", "tests/test_parser.py::test_lambda_kwx_kwy_kwz", "tests/test_parser.py::test_lambda_x_comma", "tests/test_parser.py::test_lambda_x_y_comma", "tests/test_parser.py::test_lambda_x_y_z_comma", "tests/test_parser.py::test_lambda_x_kwy_comma", "tests/test_parser.py::test_lambda_kwx_kwy_comma", "tests/test_parser.py::test_lambda_kwx_kwy_kwz_comma", "tests/test_parser.py::test_lambda_args", "tests/test_parser.py::test_lambda_args_x", "tests/test_parser.py::test_lambda_args_x_y", "tests/test_parser.py::test_lambda_args_x_kwy", "tests/test_parser.py::test_lambda_args_kwx_y", "tests/test_parser.py::test_lambda_args_kwx_kwy", "tests/test_parser.py::test_lambda_x_args", "tests/test_parser.py::test_lambda_x_args_y", "tests/test_parser.py::test_lambda_x_args_y_z", "tests/test_parser.py::test_lambda_kwargs", "tests/test_parser.py::test_lambda_x_kwargs", "tests/test_parser.py::test_lambda_x_y_kwargs", "tests/test_parser.py::test_lambda_x_kwy_kwargs", "tests/test_parser.py::test_lambda_args_kwargs", "tests/test_parser.py::test_lambda_x_args_kwargs", "tests/test_parser.py::test_lambda_x_y_args_kwargs", "tests/test_parser.py::test_lambda_kwx_args_kwargs", "tests/test_parser.py::test_lambda_x_kwy_args_kwargs", "tests/test_parser.py::test_lambda_x_args_y_kwargs", "tests/test_parser.py::test_lambda_x_args_kwy_kwargs", "tests/test_parser.py::test_lambda_args_y_kwargs", "tests/test_parser.py::test_lambda_star_x", "tests/test_parser.py::test_lambda_star_x_y", "tests/test_parser.py::test_lambda_star_x_kwargs", "tests/test_parser.py::test_lambda_star_kwx_kwargs", "tests/test_parser.py::test_lambda_x_star_y", "tests/test_parser.py::test_lambda_x_y_star_z", "tests/test_parser.py::test_lambda_x_kwy_star_y", "tests/test_parser.py::test_lambda_x_kwy_star_kwy", "tests/test_parser.py::test_lambda_x_star_y_kwargs", "tests/test_parser.py::test_lambda_x_divide_y_star_z_kwargs", "tests/test_parser.py::test_call_range", "tests/test_parser.py::test_call_range_comma", "tests/test_parser.py::test_call_range_x_y", "tests/test_parser.py::test_call_range_x_y_comma", "tests/test_parser.py::test_call_range_x_y_z", "tests/test_parser.py::test_call_dict_kwx", "tests/test_parser.py::test_call_dict_kwx_comma", "tests/test_parser.py::test_call_dict_kwx_kwy", "tests/test_parser.py::test_call_tuple_gen", "tests/test_parser.py::test_call_tuple_genifs", "tests/test_parser.py::test_call_range_star", "tests/test_parser.py::test_call_range_x_star", "tests/test_parser.py::test_call_int", "tests/test_parser.py::test_call_int_base_dict", "tests/test_parser.py::test_call_dict_kwargs", "tests/test_parser.py::test_call_list_many_star_args", "tests/test_parser.py::test_call_list_many_starstar_args", "tests/test_parser.py::test_call_list_many_star_and_starstar_args", "tests/test_parser.py::test_call_alot", "tests/test_parser.py::test_call_alot_next", "tests/test_parser.py::test_call_alot_next_next", "tests/test_parser.py::test_getattr", "tests/test_parser.py::test_getattr_getattr", "tests/test_parser.py::test_dict_tuple_key", "tests/test_parser.py::test_pipe_op", "tests/test_parser.py::test_pipe_op_two", "tests/test_parser.py::test_pipe_op_three", 
"tests/test_parser.py::test_xor_op", "tests/test_parser.py::test_xor_op_two", "tests/test_parser.py::test_xor_op_three", "tests/test_parser.py::test_xor_pipe", "tests/test_parser.py::test_amp_op", "tests/test_parser.py::test_amp_op_two", "tests/test_parser.py::test_amp_op_three", "tests/test_parser.py::test_lshift_op", "tests/test_parser.py::test_lshift_op_two", "tests/test_parser.py::test_lshift_op_three", "tests/test_parser.py::test_rshift_op", "tests/test_parser.py::test_rshift_op_two", "tests/test_parser.py::test_rshift_op_three", "tests/test_parser.py::test_named_expr", "tests/test_parser.py::test_named_expr_list", "tests/test_parser.py::test_equals", "tests/test_parser.py::test_equals_semi", "tests/test_parser.py::test_x_y_equals_semi", "tests/test_parser.py::test_equals_two", "tests/test_parser.py::test_equals_two_semi", "tests/test_parser.py::test_equals_three", "tests/test_parser.py::test_equals_three_semi", "tests/test_parser.py::test_plus_eq", "tests/test_parser.py::test_sub_eq", "tests/test_parser.py::test_times_eq", "tests/test_parser.py::test_matmult_eq", "tests/test_parser.py::test_div_eq", "tests/test_parser.py::test_floordiv_eq", "tests/test_parser.py::test_pow_eq", "tests/test_parser.py::test_mod_eq", "tests/test_parser.py::test_xor_eq", "tests/test_parser.py::test_ampersand_eq", "tests/test_parser.py::test_bitor_eq", "tests/test_parser.py::test_lshift_eq", "tests/test_parser.py::test_rshift_eq", "tests/test_parser.py::test_bare_unpack", "tests/test_parser.py::test_lhand_group_unpack", "tests/test_parser.py::test_rhand_group_unpack", "tests/test_parser.py::test_grouped_unpack", "tests/test_parser.py::test_double_grouped_unpack", "tests/test_parser.py::test_double_ungrouped_unpack", "tests/test_parser.py::test_stary_eq", "tests/test_parser.py::test_stary_x", "tests/test_parser.py::test_tuple_x_stary", "tests/test_parser.py::test_list_x_stary", "tests/test_parser.py::test_bare_x_stary", "tests/test_parser.py::test_bare_x_stary_z", "tests/test_parser.py::test_equals_list", "tests/test_parser.py::test_equals_dict", "tests/test_parser.py::test_equals_attr", "tests/test_parser.py::test_equals_annotation", "tests/test_parser.py::test_dict_keys", "tests/test_parser.py::test_assert_msg", "tests/test_parser.py::test_assert", "tests/test_parser.py::test_pass", "tests/test_parser.py::test_del", "tests/test_parser.py::test_del_comma", "tests/test_parser.py::test_del_two", "tests/test_parser.py::test_del_two_comma", "tests/test_parser.py::test_del_with_parens", "tests/test_parser.py::test_raise", "tests/test_parser.py::test_raise_x", "tests/test_parser.py::test_raise_x_from", "tests/test_parser.py::test_import_x", "tests/test_parser.py::test_import_xy", "tests/test_parser.py::test_import_xyz", "tests/test_parser.py::test_from_x_import_y", "tests/test_parser.py::test_from_dot_import_y", "tests/test_parser.py::test_from_dotx_import_y", "tests/test_parser.py::test_from_dotdotx_import_y", "tests/test_parser.py::test_from_dotdotdotx_import_y", "tests/test_parser.py::test_from_dotdotdotdotx_import_y", "tests/test_parser.py::test_from_import_x_y", "tests/test_parser.py::test_from_import_x_y_z", "tests/test_parser.py::test_from_dot_import_x_y", "tests/test_parser.py::test_from_dot_import_x_y_z", "tests/test_parser.py::test_from_dot_import_group_x_y", "tests/test_parser.py::test_import_x_as_y", "tests/test_parser.py::test_import_xy_as_z", "tests/test_parser.py::test_import_x_y_as_z", "tests/test_parser.py::test_import_x_as_y_z", "tests/test_parser.py::test_import_x_as_y_z_as_a", 
"tests/test_parser.py::test_from_dot_import_x_as_y", "tests/test_parser.py::test_from_x_import_star", "tests/test_parser.py::test_from_x_import_group_x_y_z", "tests/test_parser.py::test_from_x_import_group_x_y_z_comma", "tests/test_parser.py::test_from_x_import_y_as_z", "tests/test_parser.py::test_from_x_import_y_as_z_a_as_b", "tests/test_parser.py::test_from_dotx_import_y_as_z_a_as_b_c_as_d", "tests/test_parser.py::test_continue", "tests/test_parser.py::test_break", "tests/test_parser.py::test_global", "tests/test_parser.py::test_global_xy", "tests/test_parser.py::test_nonlocal_x", "tests/test_parser.py::test_nonlocal_xy", "tests/test_parser.py::test_yield", "tests/test_parser.py::test_yield_x", "tests/test_parser.py::test_yield_x_comma", "tests/test_parser.py::test_yield_x_y", "tests/test_parser.py::test_return_x_starexpr", "tests/test_parser.py::test_yield_from_x", "tests/test_parser.py::test_return", "tests/test_parser.py::test_return_x", "tests/test_parser.py::test_return_x_comma", "tests/test_parser.py::test_return_x_y", "tests/test_parser.py::test_if_true", "tests/test_parser.py::test_if_true_twolines", "tests/test_parser.py::test_if_true_twolines_deindent", "tests/test_parser.py::test_if_true_else", "tests/test_parser.py::test_if_true_x", "tests/test_parser.py::test_if_switch", "tests/test_parser.py::test_if_switch_elif1_else", "tests/test_parser.py::test_if_switch_elif2_else", "tests/test_parser.py::test_if_nested", "tests/test_parser.py::test_while", "tests/test_parser.py::test_while_else", "tests/test_parser.py::test_for", "tests/test_parser.py::test_for_zip", "tests/test_parser.py::test_for_idx", "tests/test_parser.py::test_for_zip_idx", "tests/test_parser.py::test_for_attr", "tests/test_parser.py::test_for_zip_attr", "tests/test_parser.py::test_for_else", "tests/test_parser.py::test_async_for", "tests/test_parser.py::test_with", "tests/test_parser.py::test_with_as", "tests/test_parser.py::test_with_xy", "tests/test_parser.py::test_with_x_as_y_z", "tests/test_parser.py::test_with_x_as_y_a_as_b", "tests/test_parser.py::test_with_in_func", "tests/test_parser.py::test_async_with", "tests/test_parser.py::test_try", "tests/test_parser.py::test_try_except_t", "tests/test_parser.py::test_try_except_t_as_e", "tests/test_parser.py::test_try_except_t_u", "tests/test_parser.py::test_try_except_t_u_as_e", "tests/test_parser.py::test_try_except_t_except_u", "tests/test_parser.py::test_try_except_else", "tests/test_parser.py::test_try_except_finally", "tests/test_parser.py::test_try_except_else_finally", "tests/test_parser.py::test_try_finally", "tests/test_parser.py::test_func", "tests/test_parser.py::test_func_ret", "tests/test_parser.py::test_func_ret_42", "tests/test_parser.py::test_func_ret_42_65", "tests/test_parser.py::test_func_rarrow", "tests/test_parser.py::test_func_x", "tests/test_parser.py::test_func_kwx", "tests/test_parser.py::test_func_x_y", "tests/test_parser.py::test_func_x_y_z", "tests/test_parser.py::test_func_x_kwy", "tests/test_parser.py::test_func_kwx_kwy", "tests/test_parser.py::test_func_kwx_kwy_kwz", "tests/test_parser.py::test_func_x_comma", "tests/test_parser.py::test_func_x_y_comma", "tests/test_parser.py::test_func_x_y_z_comma", "tests/test_parser.py::test_func_x_kwy_comma", "tests/test_parser.py::test_func_kwx_kwy_comma", "tests/test_parser.py::test_func_kwx_kwy_kwz_comma", "tests/test_parser.py::test_func_args", "tests/test_parser.py::test_func_args_x", "tests/test_parser.py::test_func_args_x_y", "tests/test_parser.py::test_func_args_x_kwy", 
"tests/test_parser.py::test_func_args_kwx_y", "tests/test_parser.py::test_func_args_kwx_kwy", "tests/test_parser.py::test_func_x_args", "tests/test_parser.py::test_func_x_args_y", "tests/test_parser.py::test_func_x_args_y_z", "tests/test_parser.py::test_func_kwargs", "tests/test_parser.py::test_func_x_kwargs", "tests/test_parser.py::test_func_x_y_kwargs", "tests/test_parser.py::test_func_x_kwy_kwargs", "tests/test_parser.py::test_func_args_kwargs", "tests/test_parser.py::test_func_x_args_kwargs", "tests/test_parser.py::test_func_x_y_args_kwargs", "tests/test_parser.py::test_func_kwx_args_kwargs", "tests/test_parser.py::test_func_x_kwy_args_kwargs", "tests/test_parser.py::test_func_x_args_y_kwargs", "tests/test_parser.py::test_func_x_args_kwy_kwargs", "tests/test_parser.py::test_func_args_y_kwargs", "tests/test_parser.py::test_func_star_x", "tests/test_parser.py::test_func_star_x_y", "tests/test_parser.py::test_func_star_x_kwargs", "tests/test_parser.py::test_func_star_kwx_kwargs", "tests/test_parser.py::test_func_x_star_y", "tests/test_parser.py::test_func_x_y_star_z", "tests/test_parser.py::test_func_x_kwy_star_y", "tests/test_parser.py::test_func_x_kwy_star_kwy", "tests/test_parser.py::test_func_x_star_y_kwargs", "tests/test_parser.py::test_func_x_divide", "tests/test_parser.py::test_func_x_divide_y_star_z_kwargs", "tests/test_parser.py::test_func_tx", "tests/test_parser.py::test_func_txy", "tests/test_parser.py::test_class", "tests/test_parser.py::test_class_obj", "tests/test_parser.py::test_class_int_flt", "tests/test_parser.py::test_class_obj_kw", "tests/test_parser.py::test_decorator", "tests/test_parser.py::test_decorator_2", "tests/test_parser.py::test_decorator_call", "tests/test_parser.py::test_decorator_call_args", "tests/test_parser.py::test_decorator_dot_call_args", "tests/test_parser.py::test_decorator_dot_dot_call_args", "tests/test_parser.py::test_broken_prompt_func", "tests/test_parser.py::test_class_with_methods", "tests/test_parser.py::test_nested_functions", "tests/test_parser.py::test_function_blank_line", "tests/test_parser.py::test_async_func", "tests/test_parser.py::test_async_decorator", "tests/test_parser.py::test_async_await", "tests/test_parser.py::test_named_expr_args", "tests/test_parser.py::test_named_expr_if", "tests/test_parser.py::test_named_expr_elif", "tests/test_parser.py::test_named_expr_while", "tests/test_parser.py::test_path_literal", "tests/test_parser.py::test_path_fstring_literal", "tests/test_parser.py::test_dollar_name", "tests/test_parser.py::test_dollar_py", "tests/test_parser.py::test_dollar_py_test", "tests/test_parser.py::test_dollar_py_recursive_name", "tests/test_parser.py::test_dollar_py_test_recursive_name", "tests/test_parser.py::test_dollar_py_test_recursive_test", "tests/test_parser.py::test_dollar_name_set", "tests/test_parser.py::test_dollar_py_set", "tests/test_parser.py::test_dollar_sub", "tests/test_parser.py::test_dollar_sub_space", "tests/test_parser.py::test_ls_dot", "tests/test_parser.py::test_lambda_in_atparens", "tests/test_parser.py::test_generator_in_atparens", "tests/test_parser.py::test_bare_tuple_in_atparens", "tests/test_parser.py::test_nested_madness", "tests/test_parser.py::test_atparens_intoken", "tests/test_parser.py::test_ls_dot_nesting", "tests/test_parser.py::test_ls_dot_nesting_var", "tests/test_parser.py::test_ls_dot_str", "tests/test_parser.py::test_ls_nest_ls", "tests/test_parser.py::test_ls_nest_ls_dashl", "tests/test_parser.py::test_ls_envvar_strval", "tests/test_parser.py::test_ls_envvar_listval", 
"tests/test_parser.py::test_bang_sub", "tests/test_parser.py::test_bang_sub_space", "tests/test_parser.py::test_bang_ls_dot", "tests/test_parser.py::test_bang_ls_dot_nesting", "tests/test_parser.py::test_bang_ls_dot_nesting_var", "tests/test_parser.py::test_bang_ls_dot_str", "tests/test_parser.py::test_bang_ls_nest_ls", "tests/test_parser.py::test_bang_ls_nest_ls_dashl", "tests/test_parser.py::test_bang_ls_envvar_strval", "tests/test_parser.py::test_bang_ls_envvar_listval", "tests/test_parser.py::test_question", "tests/test_parser.py::test_dobquestion", "tests/test_parser.py::test_question_chain", "tests/test_parser.py::test_ls_regex", "tests/test_parser.py::test_backtick", "tests/test_parser.py::test_ls_regex_octothorpe", "tests/test_parser.py::test_ls_explicitregex", "tests/test_parser.py::test_rbacktick", "tests/test_parser.py::test_ls_explicitregex_octothorpe", "tests/test_parser.py::test_ls_glob", "tests/test_parser.py::test_gbacktick", "tests/test_parser.py::test_pbacktrick", "tests/test_parser.py::test_pgbacktick", "tests/test_parser.py::test_prbacktick", "tests/test_parser.py::test_ls_glob_octothorpe", "tests/test_parser.py::test_ls_customsearch", "tests/test_parser.py::test_custombacktick", "tests/test_parser.py::test_ls_customsearch_octothorpe", "tests/test_parser.py::test_injection", "tests/test_parser.py::test_rhs_nested_injection", "tests/test_parser.py::test_merged_injection", "tests/test_parser.py::test_backtick_octothorpe", "tests/test_parser.py::test_uncaptured_sub", "tests/test_parser.py::test_hiddenobj_sub", "tests/test_parser.py::test_slash_envarv_echo", "tests/test_parser.py::test_echo_double_eq", "tests/test_parser.py::test_bang_two_cmds_one_pipe", "tests/test_parser.py::test_bang_three_cmds_two_pipes", "tests/test_parser.py::test_bang_one_cmd_write", "tests/test_parser.py::test_bang_one_cmd_append", "tests/test_parser.py::test_bang_two_cmds_write", "tests/test_parser.py::test_bang_two_cmds_append", "tests/test_parser.py::test_bang_cmd_background", "tests/test_parser.py::test_bang_cmd_background_nospace", "tests/test_parser.py::test_bang_git_quotes_no_space", "tests/test_parser.py::test_bang_git_quotes_space", "tests/test_parser.py::test_bang_git_two_quotes_space", "tests/test_parser.py::test_bang_git_two_quotes_space_space", "tests/test_parser.py::test_bang_ls_quotes_3_space", "tests/test_parser.py::test_two_cmds_one_pipe", "tests/test_parser.py::test_three_cmds_two_pipes", "tests/test_parser.py::test_two_cmds_one_and_brackets", "tests/test_parser.py::test_three_cmds_two_ands", "tests/test_parser.py::test_two_cmds_one_doubleamps", "tests/test_parser.py::test_three_cmds_two_doubleamps", "tests/test_parser.py::test_two_cmds_one_or", "tests/test_parser.py::test_three_cmds_two_ors", "tests/test_parser.py::test_two_cmds_one_doublepipe", "tests/test_parser.py::test_three_cmds_two_doublepipe", "tests/test_parser.py::test_one_cmd_write", "tests/test_parser.py::test_one_cmd_append", "tests/test_parser.py::test_two_cmds_write", "tests/test_parser.py::test_two_cmds_append", "tests/test_parser.py::test_cmd_background", "tests/test_parser.py::test_cmd_background_nospace", "tests/test_parser.py::test_git_quotes_no_space", "tests/test_parser.py::test_git_quotes_space", "tests/test_parser.py::test_git_two_quotes_space", "tests/test_parser.py::test_git_two_quotes_space_space", "tests/test_parser.py::test_ls_quotes_3_space", "tests/test_parser.py::test_echo_comma", "tests/test_parser.py::test_echo_internal_comma", "tests/test_parser.py::test_comment_only", 
"tests/test_parser.py::test_echo_slash_question", "tests/test_parser.py::test_bad_quotes", "tests/test_parser.py::test_redirect", "tests/test_parser.py::test_use_subshell[![(cat)]]", "tests/test_parser.py::test_use_subshell[![(cat;)]]", "tests/test_parser.py::test_use_subshell[![(cd", "tests/test_parser.py::test_use_subshell[![(echo", "tests/test_parser.py::test_use_subshell[![(if", "tests/test_parser.py::test_redirect_abspath[$[cat", "tests/test_parser.py::test_redirect_abspath[$[(cat)", "tests/test_parser.py::test_redirect_abspath[$[<", "tests/test_parser.py::test_redirect_abspath[![<", "tests/test_parser.py::test_redirect_output[]", "tests/test_parser.py::test_redirect_output[o]", "tests/test_parser.py::test_redirect_output[out]", "tests/test_parser.py::test_redirect_output[1]", "tests/test_parser.py::test_redirect_error[e]", "tests/test_parser.py::test_redirect_error[err]", "tests/test_parser.py::test_redirect_error[2]", "tests/test_parser.py::test_redirect_all[a]", "tests/test_parser.py::test_redirect_all[all]", "tests/test_parser.py::test_redirect_all[&]", "tests/test_parser.py::test_redirect_error_to_output[-e>o]", "tests/test_parser.py::test_redirect_error_to_output[-e>out]", "tests/test_parser.py::test_redirect_error_to_output[-err>o]", "tests/test_parser.py::test_redirect_error_to_output[-2>1]", "tests/test_parser.py::test_redirect_error_to_output[-e>1]", "tests/test_parser.py::test_redirect_error_to_output[-err>1]", "tests/test_parser.py::test_redirect_error_to_output[-2>out]", "tests/test_parser.py::test_redirect_error_to_output[-2>o]", "tests/test_parser.py::test_redirect_error_to_output[-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>o]", "tests/test_parser.py::test_redirect_error_to_output[o-e>out]", "tests/test_parser.py::test_redirect_error_to_output[o-err>o]", "tests/test_parser.py::test_redirect_error_to_output[o-2>1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>1]", "tests/test_parser.py::test_redirect_error_to_output[o-err>1]", "tests/test_parser.py::test_redirect_error_to_output[o-2>out]", "tests/test_parser.py::test_redirect_error_to_output[o-2>o]", "tests/test_parser.py::test_redirect_error_to_output[o-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>o]", "tests/test_parser.py::test_redirect_error_to_output[out-e>out]", "tests/test_parser.py::test_redirect_error_to_output[out-err>o]", "tests/test_parser.py::test_redirect_error_to_output[out-2>1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>1]", "tests/test_parser.py::test_redirect_error_to_output[out-err>1]", "tests/test_parser.py::test_redirect_error_to_output[out-2>out]", "tests/test_parser.py::test_redirect_error_to_output[out-2>o]", "tests/test_parser.py::test_redirect_error_to_output[out-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>o]", "tests/test_parser.py::test_redirect_error_to_output[1-e>out]", "tests/test_parser.py::test_redirect_error_to_output[1-err>o]", "tests/test_parser.py::test_redirect_error_to_output[1-2>1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>1]", "tests/test_parser.py::test_redirect_error_to_output[1-err>1]", 
"tests/test_parser.py::test_redirect_error_to_output[1-2>out]", "tests/test_parser.py::test_redirect_error_to_output[1-2>o]", "tests/test_parser.py::test_redirect_error_to_output[1-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-2>&1]", "tests/test_parser.py::test_redirect_output_to_error[e-o>e]", "tests/test_parser.py::test_redirect_output_to_error[e-o>err]", "tests/test_parser.py::test_redirect_output_to_error[e-out>e]", "tests/test_parser.py::test_redirect_output_to_error[e-1>2]", "tests/test_parser.py::test_redirect_output_to_error[e-o>2]", "tests/test_parser.py::test_redirect_output_to_error[e-out>2]", "tests/test_parser.py::test_redirect_output_to_error[e-1>err]", "tests/test_parser.py::test_redirect_output_to_error[e-1>e]", "tests/test_parser.py::test_redirect_output_to_error[e-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[e-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[e-1>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>e]", "tests/test_parser.py::test_redirect_output_to_error[err-o>err]", "tests/test_parser.py::test_redirect_output_to_error[err-out>e]", "tests/test_parser.py::test_redirect_output_to_error[err-1>2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>2]", "tests/test_parser.py::test_redirect_output_to_error[err-out>2]", "tests/test_parser.py::test_redirect_output_to_error[err-1>err]", "tests/test_parser.py::test_redirect_output_to_error[err-1>e]", "tests/test_parser.py::test_redirect_output_to_error[err-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-1>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>e]", "tests/test_parser.py::test_redirect_output_to_error[2-o>err]", "tests/test_parser.py::test_redirect_output_to_error[2-out>e]", "tests/test_parser.py::test_redirect_output_to_error[2-1>2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>2]", "tests/test_parser.py::test_redirect_output_to_error[2-out>2]", "tests/test_parser.py::test_redirect_output_to_error[2-1>err]", "tests/test_parser.py::test_redirect_output_to_error[2-1>e]", "tests/test_parser.py::test_redirect_output_to_error[2-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-1>&2]", "tests/test_parser.py::test_macro_call_empty", "tests/test_parser.py::test_macro_call_one_arg[x]", "tests/test_parser.py::test_macro_call_one_arg[True]", "tests/test_parser.py::test_macro_call_one_arg[None]", "tests/test_parser.py::test_macro_call_one_arg[import", "tests/test_parser.py::test_macro_call_one_arg[x=10]", "tests/test_parser.py::test_macro_call_one_arg[\"oh", "tests/test_parser.py::test_macro_call_one_arg[...]", "tests/test_parser.py::test_macro_call_one_arg[", "tests/test_parser.py::test_macro_call_one_arg[if", "tests/test_parser.py::test_macro_call_one_arg[{x:", "tests/test_parser.py::test_macro_call_one_arg[{1,", "tests/test_parser.py::test_macro_call_one_arg[(x,y)]", "tests/test_parser.py::test_macro_call_one_arg[(x,", "tests/test_parser.py::test_macro_call_one_arg[((x,", "tests/test_parser.py::test_macro_call_one_arg[g()]", "tests/test_parser.py::test_macro_call_one_arg[range(10)]", "tests/test_parser.py::test_macro_call_one_arg[range(1,", "tests/test_parser.py::test_macro_call_one_arg[()]", "tests/test_parser.py::test_macro_call_one_arg[{}]", "tests/test_parser.py::test_macro_call_one_arg[[]]", 
"tests/test_parser.py::test_macro_call_one_arg[[1,", "tests/test_parser.py::test_macro_call_one_arg[@(x)]", "tests/test_parser.py::test_macro_call_one_arg[!(ls", "tests/test_parser.py::test_macro_call_one_arg[![ls", "tests/test_parser.py::test_macro_call_one_arg[$(ls", "tests/test_parser.py::test_macro_call_one_arg[${x", "tests/test_parser.py::test_macro_call_one_arg[$[ls", "tests/test_parser.py::test_macro_call_one_arg[@$(which", "tests/test_parser.py::test_macro_call_two_args[x-True]", "tests/test_parser.py::test_macro_call_two_args[x-import", "tests/test_parser.py::test_macro_call_two_args[x-\"oh", "tests/test_parser.py::test_macro_call_two_args[x-", "tests/test_parser.py::test_macro_call_two_args[x-{x:", "tests/test_parser.py::test_macro_call_two_args[x-{1,", "tests/test_parser.py::test_macro_call_two_args[x-(x,", "tests/test_parser.py::test_macro_call_two_args[x-g()]", "tests/test_parser.py::test_macro_call_two_args[x-range(1,", "tests/test_parser.py::test_macro_call_two_args[x-{}]", "tests/test_parser.py::test_macro_call_two_args[x-[1,", "tests/test_parser.py::test_macro_call_two_args[x-!(ls", "tests/test_parser.py::test_macro_call_two_args[x-$(ls", "tests/test_parser.py::test_macro_call_two_args[x-$[ls", "tests/test_parser.py::test_macro_call_two_args[None-True]", "tests/test_parser.py::test_macro_call_two_args[None-import", "tests/test_parser.py::test_macro_call_two_args[None-\"oh", "tests/test_parser.py::test_macro_call_two_args[None-", "tests/test_parser.py::test_macro_call_two_args[None-{x:", "tests/test_parser.py::test_macro_call_two_args[None-{1,", "tests/test_parser.py::test_macro_call_two_args[None-(x,", "tests/test_parser.py::test_macro_call_two_args[None-g()]", "tests/test_parser.py::test_macro_call_two_args[None-range(1,", "tests/test_parser.py::test_macro_call_two_args[None-{}]", "tests/test_parser.py::test_macro_call_two_args[None-[1,", "tests/test_parser.py::test_macro_call_two_args[None-!(ls", "tests/test_parser.py::test_macro_call_two_args[None-$(ls", "tests/test_parser.py::test_macro_call_two_args[None-$[ls", "tests/test_parser.py::test_macro_call_two_args[x=10-True]", "tests/test_parser.py::test_macro_call_two_args[x=10-import", "tests/test_parser.py::test_macro_call_two_args[x=10-\"oh", "tests/test_parser.py::test_macro_call_two_args[x=10-", "tests/test_parser.py::test_macro_call_two_args[x=10-{x:", "tests/test_parser.py::test_macro_call_two_args[x=10-{1,", "tests/test_parser.py::test_macro_call_two_args[x=10-(x,", "tests/test_parser.py::test_macro_call_two_args[x=10-g()]", "tests/test_parser.py::test_macro_call_two_args[x=10-range(1,", "tests/test_parser.py::test_macro_call_two_args[x=10-{}]", "tests/test_parser.py::test_macro_call_two_args[x=10-[1,", "tests/test_parser.py::test_macro_call_two_args[x=10-!(ls", "tests/test_parser.py::test_macro_call_two_args[x=10-$(ls", "tests/test_parser.py::test_macro_call_two_args[x=10-$[ls", "tests/test_parser.py::test_macro_call_two_args[...-True]", "tests/test_parser.py::test_macro_call_two_args[...-import", "tests/test_parser.py::test_macro_call_two_args[...-\"oh", "tests/test_parser.py::test_macro_call_two_args[...-", "tests/test_parser.py::test_macro_call_two_args[...-{x:", "tests/test_parser.py::test_macro_call_two_args[...-{1,", "tests/test_parser.py::test_macro_call_two_args[...-(x,", "tests/test_parser.py::test_macro_call_two_args[...-g()]", "tests/test_parser.py::test_macro_call_two_args[...-range(1,", "tests/test_parser.py::test_macro_call_two_args[...-{}]", "tests/test_parser.py::test_macro_call_two_args[...-[1,", 
"tests/test_parser.py::test_macro_call_two_args[...-!(ls", "tests/test_parser.py::test_macro_call_two_args[...-$(ls", "tests/test_parser.py::test_macro_call_two_args[...-$[ls", "tests/test_parser.py::test_macro_call_two_args[if", "tests/test_parser.py::test_macro_call_two_args[{x:", "tests/test_parser.py::test_macro_call_two_args[(x,y)-True]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-import", "tests/test_parser.py::test_macro_call_two_args[(x,y)-\"oh", "tests/test_parser.py::test_macro_call_two_args[(x,y)-", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{x:", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-(x,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-g()]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-range(1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{}]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-[1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-!(ls", "tests/test_parser.py::test_macro_call_two_args[(x,y)-$(ls", "tests/test_parser.py::test_macro_call_two_args[(x,y)-$[ls", "tests/test_parser.py::test_macro_call_two_args[((x,", "tests/test_parser.py::test_macro_call_two_args[range(10)-True]", "tests/test_parser.py::test_macro_call_two_args[range(10)-import", "tests/test_parser.py::test_macro_call_two_args[range(10)-\"oh", "tests/test_parser.py::test_macro_call_two_args[range(10)-", "tests/test_parser.py::test_macro_call_two_args[range(10)-{x:", "tests/test_parser.py::test_macro_call_two_args[range(10)-{1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-(x,", "tests/test_parser.py::test_macro_call_two_args[range(10)-g()]", "tests/test_parser.py::test_macro_call_two_args[range(10)-range(1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-{}]", "tests/test_parser.py::test_macro_call_two_args[range(10)-[1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-!(ls", "tests/test_parser.py::test_macro_call_two_args[range(10)-$(ls", "tests/test_parser.py::test_macro_call_two_args[range(10)-$[ls", "tests/test_parser.py::test_macro_call_two_args[()-True]", "tests/test_parser.py::test_macro_call_two_args[()-import", "tests/test_parser.py::test_macro_call_two_args[()-\"oh", "tests/test_parser.py::test_macro_call_two_args[()-", "tests/test_parser.py::test_macro_call_two_args[()-{x:", "tests/test_parser.py::test_macro_call_two_args[()-{1,", "tests/test_parser.py::test_macro_call_two_args[()-(x,", "tests/test_parser.py::test_macro_call_two_args[()-g()]", "tests/test_parser.py::test_macro_call_two_args[()-range(1,", "tests/test_parser.py::test_macro_call_two_args[()-{}]", "tests/test_parser.py::test_macro_call_two_args[()-[1,", "tests/test_parser.py::test_macro_call_two_args[()-!(ls", "tests/test_parser.py::test_macro_call_two_args[()-$(ls", "tests/test_parser.py::test_macro_call_two_args[()-$[ls", "tests/test_parser.py::test_macro_call_two_args[[]-True]", "tests/test_parser.py::test_macro_call_two_args[[]-import", "tests/test_parser.py::test_macro_call_two_args[[]-\"oh", "tests/test_parser.py::test_macro_call_two_args[[]-", "tests/test_parser.py::test_macro_call_two_args[[]-{x:", "tests/test_parser.py::test_macro_call_two_args[[]-{1,", "tests/test_parser.py::test_macro_call_two_args[[]-(x,", "tests/test_parser.py::test_macro_call_two_args[[]-g()]", "tests/test_parser.py::test_macro_call_two_args[[]-range(1,", "tests/test_parser.py::test_macro_call_two_args[[]-{}]", "tests/test_parser.py::test_macro_call_two_args[[]-[1,", 
"tests/test_parser.py::test_macro_call_two_args[[]-!(ls", "tests/test_parser.py::test_macro_call_two_args[[]-$(ls", "tests/test_parser.py::test_macro_call_two_args[[]-$[ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-True]", "tests/test_parser.py::test_macro_call_two_args[@(x)-import", "tests/test_parser.py::test_macro_call_two_args[@(x)-\"oh", "tests/test_parser.py::test_macro_call_two_args[@(x)-", "tests/test_parser.py::test_macro_call_two_args[@(x)-{x:", "tests/test_parser.py::test_macro_call_two_args[@(x)-{1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-(x,", "tests/test_parser.py::test_macro_call_two_args[@(x)-g()]", "tests/test_parser.py::test_macro_call_two_args[@(x)-range(1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-{}]", "tests/test_parser.py::test_macro_call_two_args[@(x)-[1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-!(ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-$(ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-$[ls", "tests/test_parser.py::test_macro_call_two_args[![ls", "tests/test_parser.py::test_macro_call_two_args[${x", "tests/test_parser.py::test_macro_call_two_args[@$(which", "tests/test_parser.py::test_macro_call_three_args[x-True-None]", "tests/test_parser.py::test_macro_call_three_args[x-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-True-if", "tests/test_parser.py::test_macro_call_three_args[x-True-{1,", "tests/test_parser.py::test_macro_call_three_args[x-True-((x,", "tests/test_parser.py::test_macro_call_three_args[x-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-True-[]]", "tests/test_parser.py::test_macro_call_three_args[x-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-True-${x", "tests/test_parser.py::test_macro_call_three_args[x-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[x-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-x=10-if", "tests/test_parser.py::test_macro_call_three_args[x-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[x-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[x-", "tests/test_parser.py::test_macro_call_three_args[x-{x:", "tests/test_parser.py::test_macro_call_three_args[x-(x,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[x-{}-None]", "tests/test_parser.py::test_macro_call_three_args[x-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-{}-if", "tests/test_parser.py::test_macro_call_three_args[x-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[x-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[x-{}-range(1,", 
"tests/test_parser.py::test_macro_call_three_args[x-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[x-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-{}-${x", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[x-$(ls", "tests/test_parser.py::test_macro_call_three_args[x-@$(which", "tests/test_parser.py::test_macro_call_three_args[import", "tests/test_parser.py::test_macro_call_three_args[...-True-None]", "tests/test_parser.py::test_macro_call_three_args[...-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-True-if", "tests/test_parser.py::test_macro_call_three_args[...-True-{1,", "tests/test_parser.py::test_macro_call_three_args[...-True-((x,", "tests/test_parser.py::test_macro_call_three_args[...-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-True-[]]", "tests/test_parser.py::test_macro_call_three_args[...-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-True-${x", "tests/test_parser.py::test_macro_call_three_args[...-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[...-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-x=10-if", "tests/test_parser.py::test_macro_call_three_args[...-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[...-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[...-", "tests/test_parser.py::test_macro_call_three_args[...-{x:", "tests/test_parser.py::test_macro_call_three_args[...-(x,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[...-{}-None]", "tests/test_parser.py::test_macro_call_three_args[...-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-{}-if", "tests/test_parser.py::test_macro_call_three_args[...-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[...-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[...-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[...-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-{}-${x", 
"tests/test_parser.py::test_macro_call_three_args[...-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[...-$(ls", "tests/test_parser.py::test_macro_call_three_args[...-@$(which", "tests/test_parser.py::test_macro_call_three_args[{x:", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{x:", "tests/test_parser.py::test_macro_call_three_args[(x,y)-(x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-None]", 
"tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-$(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@$(which", "tests/test_parser.py::test_macro_call_three_args[g()-True-None]", "tests/test_parser.py::test_macro_call_three_args[g()-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-True-if", "tests/test_parser.py::test_macro_call_three_args[g()-True-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-True-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-True-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-True-${x", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-if", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[g()-", "tests/test_parser.py::test_macro_call_three_args[g()-{x:", "tests/test_parser.py::test_macro_call_three_args[g()-(x,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[g()-{}-None]", "tests/test_parser.py::test_macro_call_three_args[g()-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-{}-if", "tests/test_parser.py::test_macro_call_three_args[g()-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-{}-${x", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-{1,", 
"tests/test_parser.py::test_macro_call_three_args[g()-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[g()-$(ls", "tests/test_parser.py::test_macro_call_three_args[g()-@$(which", "tests/test_parser.py::test_macro_call_three_args[()-True-None]", "tests/test_parser.py::test_macro_call_three_args[()-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-True-if", "tests/test_parser.py::test_macro_call_three_args[()-True-{1,", "tests/test_parser.py::test_macro_call_three_args[()-True-((x,", "tests/test_parser.py::test_macro_call_three_args[()-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-True-[]]", "tests/test_parser.py::test_macro_call_three_args[()-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-True-${x", "tests/test_parser.py::test_macro_call_three_args[()-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[()-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-x=10-if", "tests/test_parser.py::test_macro_call_three_args[()-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[()-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[()-", "tests/test_parser.py::test_macro_call_three_args[()-{x:", "tests/test_parser.py::test_macro_call_three_args[()-(x,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[()-{}-None]", "tests/test_parser.py::test_macro_call_three_args[()-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-{}-if", "tests/test_parser.py::test_macro_call_three_args[()-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[()-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[()-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[()-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-{}-${x", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-!(ls", 
"tests/test_parser.py::test_macro_call_three_args[()-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[()-$(ls", "tests/test_parser.py::test_macro_call_three_args[()-@$(which", "tests/test_parser.py::test_macro_call_three_args[[1,", "tests/test_parser.py::test_macro_call_three_args[![ls", "tests/test_parser.py::test_macro_call_three_args[$[ls", "tests/test_parser.py::test_macro_call_one_trailing[x]", "tests/test_parser.py::test_macro_call_one_trailing[True]", "tests/test_parser.py::test_macro_call_one_trailing[None]", "tests/test_parser.py::test_macro_call_one_trailing[import", "tests/test_parser.py::test_macro_call_one_trailing[x=10]", "tests/test_parser.py::test_macro_call_one_trailing[\"oh", "tests/test_parser.py::test_macro_call_one_trailing[...]", "tests/test_parser.py::test_macro_call_one_trailing[", "tests/test_parser.py::test_macro_call_one_trailing[if", "tests/test_parser.py::test_macro_call_one_trailing[{x:", "tests/test_parser.py::test_macro_call_one_trailing[{1,", "tests/test_parser.py::test_macro_call_one_trailing[(x,y)]", "tests/test_parser.py::test_macro_call_one_trailing[(x,", "tests/test_parser.py::test_macro_call_one_trailing[((x,", "tests/test_parser.py::test_macro_call_one_trailing[g()]", "tests/test_parser.py::test_macro_call_one_trailing[range(10)]", "tests/test_parser.py::test_macro_call_one_trailing[range(1,", "tests/test_parser.py::test_macro_call_one_trailing[()]", "tests/test_parser.py::test_macro_call_one_trailing[{}]", "tests/test_parser.py::test_macro_call_one_trailing[[]]", "tests/test_parser.py::test_macro_call_one_trailing[[1,", "tests/test_parser.py::test_macro_call_one_trailing[@(x)]", "tests/test_parser.py::test_macro_call_one_trailing[!(ls", "tests/test_parser.py::test_macro_call_one_trailing[![ls", "tests/test_parser.py::test_macro_call_one_trailing[$(ls", "tests/test_parser.py::test_macro_call_one_trailing[${x", "tests/test_parser.py::test_macro_call_one_trailing[$[ls", "tests/test_parser.py::test_macro_call_one_trailing[@$(which", "tests/test_parser.py::test_macro_call_one_trailing_space[x]", "tests/test_parser.py::test_macro_call_one_trailing_space[True]", "tests/test_parser.py::test_macro_call_one_trailing_space[None]", "tests/test_parser.py::test_macro_call_one_trailing_space[import", "tests/test_parser.py::test_macro_call_one_trailing_space[x=10]", "tests/test_parser.py::test_macro_call_one_trailing_space[\"oh", "tests/test_parser.py::test_macro_call_one_trailing_space[...]", "tests/test_parser.py::test_macro_call_one_trailing_space[", "tests/test_parser.py::test_macro_call_one_trailing_space[if", "tests/test_parser.py::test_macro_call_one_trailing_space[{x:", "tests/test_parser.py::test_macro_call_one_trailing_space[{1,", "tests/test_parser.py::test_macro_call_one_trailing_space[(x,y)]", "tests/test_parser.py::test_macro_call_one_trailing_space[(x,", "tests/test_parser.py::test_macro_call_one_trailing_space[((x,", "tests/test_parser.py::test_macro_call_one_trailing_space[g()]", "tests/test_parser.py::test_macro_call_one_trailing_space[range(10)]", "tests/test_parser.py::test_macro_call_one_trailing_space[range(1,", "tests/test_parser.py::test_macro_call_one_trailing_space[()]", "tests/test_parser.py::test_macro_call_one_trailing_space[{}]", "tests/test_parser.py::test_macro_call_one_trailing_space[[]]", "tests/test_parser.py::test_macro_call_one_trailing_space[[1,", "tests/test_parser.py::test_macro_call_one_trailing_space[@(x)]", "tests/test_parser.py::test_macro_call_one_trailing_space[!(ls", 
"tests/test_parser.py::test_macro_call_one_trailing_space[![ls", "tests/test_parser.py::test_macro_call_one_trailing_space[$(ls", "tests/test_parser.py::test_macro_call_one_trailing_space[${x", "tests/test_parser.py::test_macro_call_one_trailing_space[$[ls", "tests/test_parser.py::test_macro_call_one_trailing_space[@$(which", "tests/test_parser.py::test_empty_subprocbang[echo!-!(-)]", "tests/test_parser.py::test_empty_subprocbang[echo!-$(-)]", "tests/test_parser.py::test_empty_subprocbang[echo!-![-]]", "tests/test_parser.py::test_empty_subprocbang[echo!-$[-]]", "tests/test_parser.py::test_empty_subprocbang[echo", "tests/test_parser.py::test_single_subprocbang[echo!x-!(-)]", "tests/test_parser.py::test_single_subprocbang[echo!x-$(-)]", "tests/test_parser.py::test_single_subprocbang[echo!x-![-]]", "tests/test_parser.py::test_single_subprocbang[echo!x-$[-]]", "tests/test_parser.py::test_single_subprocbang[echo", "tests/test_parser.py::test_arg_single_subprocbang[echo", "tests/test_parser.py::test_arg_single_subprocbang_nested[echo", "tests/test_parser.py::test_many_subprocbang[echo!x", "tests/test_parser.py::test_many_subprocbang[echo", "tests/test_parser.py::test_many_subprocbang[timeit!", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-$[-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-$[-]]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-$[-]]", "tests/test_parser.py::test_withbang_single_suite[pass\\n]", "tests/test_parser.py::test_withbang_single_suite[x", "tests/test_parser.py::test_withbang_single_suite[export", "tests/test_parser.py::test_withbang_single_suite[with", "tests/test_parser.py::test_withbang_as_single_suite[pass\\n]", "tests/test_parser.py::test_withbang_as_single_suite[x", "tests/test_parser.py::test_withbang_as_single_suite[export", "tests/test_parser.py::test_withbang_as_single_suite[with", "tests/test_parser.py::test_withbang_single_suite_trailing[pass\\n]", "tests/test_parser.py::test_withbang_single_suite_trailing[x", "tests/test_parser.py::test_withbang_single_suite_trailing[export", "tests/test_parser.py::test_withbang_single_suite_trailing[with", "tests/test_parser.py::test_withbang_single_simple[pass]", "tests/test_parser.py::test_withbang_single_simple[x", "tests/test_parser.py::test_withbang_single_simple[export", "tests/test_parser.py::test_withbang_single_simple[[1,\\n", "tests/test_parser.py::test_withbang_single_simple_opt[pass]", "tests/test_parser.py::test_withbang_single_simple_opt[x", "tests/test_parser.py::test_withbang_single_simple_opt[export", "tests/test_parser.py::test_withbang_single_simple_opt[[1,\\n", "tests/test_parser.py::test_withbang_as_many_suite[pass\\n]", "tests/test_parser.py::test_withbang_as_many_suite[x", "tests/test_parser.py::test_withbang_as_many_suite[export", "tests/test_parser.py::test_withbang_as_many_suite[with", "tests/test_parser.py::test_subproc_raw_str_literal", 
"tests/test_parser.py::test_syntax_error_del_literal", "tests/test_parser.py::test_syntax_error_del_constant", "tests/test_parser.py::test_syntax_error_del_emptytuple", "tests/test_parser.py::test_syntax_error_del_call", "tests/test_parser.py::test_syntax_error_del_lambda", "tests/test_parser.py::test_syntax_error_del_ifexp", "tests/test_parser.py::test_syntax_error_del_comps[[i", "tests/test_parser.py::test_syntax_error_del_comps[{i", "tests/test_parser.py::test_syntax_error_del_comps[(i", "tests/test_parser.py::test_syntax_error_del_comps[{k:v", "tests/test_parser.py::test_syntax_error_del_ops[x", "tests/test_parser.py::test_syntax_error_del_ops[-x]", "tests/test_parser.py::test_syntax_error_del_cmp[x", "tests/test_parser.py::test_syntax_error_lonely_del", "tests/test_parser.py::test_syntax_error_assign_literal", "tests/test_parser.py::test_syntax_error_assign_constant", "tests/test_parser.py::test_syntax_error_assign_emptytuple", "tests/test_parser.py::test_syntax_error_assign_call", "tests/test_parser.py::test_syntax_error_assign_lambda", "tests/test_parser.py::test_syntax_error_assign_ifexp", "tests/test_parser.py::test_syntax_error_assign_comps[[i", "tests/test_parser.py::test_syntax_error_assign_comps[{i", "tests/test_parser.py::test_syntax_error_assign_comps[(i", "tests/test_parser.py::test_syntax_error_assign_comps[{k:v", "tests/test_parser.py::test_syntax_error_assign_ops[x", "tests/test_parser.py::test_syntax_error_assign_ops[-x]", "tests/test_parser.py::test_syntax_error_assign_cmp[x", "tests/test_parser.py::test_syntax_error_augassign_literal", "tests/test_parser.py::test_syntax_error_augassign_constant", "tests/test_parser.py::test_syntax_error_augassign_emptytuple", "tests/test_parser.py::test_syntax_error_augassign_call", "tests/test_parser.py::test_syntax_error_augassign_lambda", "tests/test_parser.py::test_syntax_error_augassign_ifexp", "tests/test_parser.py::test_syntax_error_augassign_comps[[i", "tests/test_parser.py::test_syntax_error_augassign_comps[{i", "tests/test_parser.py::test_syntax_error_augassign_comps[(i", "tests/test_parser.py::test_syntax_error_augassign_comps[{k:v", "tests/test_parser.py::test_syntax_error_augassign_ops[x", "tests/test_parser.py::test_syntax_error_augassign_ops[-x]", "tests/test_parser.py::test_syntax_error_augassign_cmp[x", "tests/test_parser.py::test_syntax_error_bar_kwonlyargs", "tests/test_parser.py::test_syntax_error_bar_posonlyargs", "tests/test_parser.py::test_syntax_error_bar_posonlyargs_no_comma", "tests/test_parser.py::test_syntax_error_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_posonly_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_lambda_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_lambda_posonly_nondefault_follows_default" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-09-16 12:58:56+00:00
bsd-2-clause
6,291
xonsh__xonsh-4218
diff --git a/news/fix_aliases_infinite_loop.rst b/news/fix_aliases_infinite_loop.rst new file mode 100644 index 00000000..60a5ab32 --- /dev/null +++ b/news/fix_aliases_infinite_loop.rst @@ -0,0 +1,23 @@ +**Added:** + +* Ability to call the tool by the name from callable alias with the same name without the infinite loop error. + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/xonsh/procs/proxies.py b/xonsh/procs/proxies.py index 4e9b100b..d9a5ff74 100644 --- a/xonsh/procs/proxies.py +++ b/xonsh/procs/proxies.py @@ -500,10 +500,16 @@ class ProcProxyThread(threading.Thread): sp_stderr = sys.stderr # run the function itself try: + alias_stack = builtins.__xonsh__.env.get("__ALIAS_STACK", "") + if self.env.get("__ALIAS_NAME"): + alias_stack += ":" + self.env["__ALIAS_NAME"] + with STDOUT_DISPATCHER.register(sp_stdout), STDERR_DISPATCHER.register( sp_stderr ), xt.redirect_stdout(STDOUT_DISPATCHER), xt.redirect_stderr( STDERR_DISPATCHER + ), builtins.__xonsh__.env.swap( + __ALIAS_STACK=alias_stack ): r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec, spec.stack) except SystemExit as e: diff --git a/xonsh/procs/specs.py b/xonsh/procs/specs.py index 845c4e52..384edf71 100644 --- a/xonsh/procs/specs.py +++ b/xonsh/procs/specs.py @@ -355,6 +355,8 @@ class SubprocSpec: # pure attrs self.args = list(cmd) self.alias = None + self.alias_name = None + self.alias_stack = builtins.__xonsh__.env.get("__ALIAS_STACK", "").split(":") self.binary_loc = None self.is_proxy = False self.background = False @@ -442,6 +444,7 @@ class SubprocSpec: kwargs = {n: getattr(self, n) for n in self.kwnames} self.prep_env(kwargs) if callable(self.alias): + kwargs["env"]["__ALIAS_NAME"] = self.alias_name p = self.cls(self.alias, self.cmd, **kwargs) else: self.prep_preexec_fn(kwargs, pipeline_group=pipeline_group) @@ -589,17 +592,29 @@ class SubprocSpec: def resolve_alias(self): """Sets alias in command, if applicable.""" cmd0 = self.cmd[0] + + if cmd0 in self.alias_stack: + # Disabling the alias resolving to prevent infinite loop in call stack + # and futher using binary_loc to resolve the alias name. + self.alias = None + return + if callable(cmd0): alias = cmd0 else: alias = builtins.aliases.get(cmd0, None) + if alias is not None: + self.alias_name = cmd0 self.alias = alias def resolve_binary_loc(self): """Sets the binary location""" alias = self.alias if alias is None: - binary_loc = xenv.locate_binary(self.cmd[0]) + cmd0 = self.cmd[0] + binary_loc = xenv.locate_binary(cmd0) + if binary_loc == cmd0 and cmd0 in self.alias_stack: + raise Exception(f'Recursive calls to "{cmd0}" alias.') elif callable(alias): binary_loc = None else:
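For orientation, a minimal standalone sketch of the guard this patch introduces (it is not the actual xonsh code): each running callable alias appends its name to a colon-separated `__ALIAS_STACK` entry, and alias resolution skips any command whose name is already on that stack, so the nested call falls through to the real binary. The helper signature below is an assumption made for illustration; only `__ALIAS_STACK` and the skip logic come from the diff.

```python
# Simplified sketch of the recursion guard added by the patch above (illustrative only).
def resolve_alias(cmd0, aliases, alias_stack):
    """Return the alias for cmd0, or None if that alias is already running."""
    if cmd0 in alias_stack:
        # The alias is active further up the call stack: skip alias resolution
        # so cmd0 is looked up as a real binary instead of recursing.
        return None
    return aliases.get(cmd0)

aliases = {"ls": lambda args: print("custom ls")}
assert resolve_alias("ls", aliases, alias_stack=[]) is not None   # first call uses the alias
assert resolve_alias("ls", aliases, alias_stack=["ls"]) is None   # nested call bypasses it
```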
xonsh/xonsh
4dc08232e615a75a524fbf96f17402a7a5b353a5
diff --git a/tests/test_integrations.py b/tests/test_integrations.py index 9ade1fa8..fb125d28 100644 --- a/tests/test_integrations.py +++ b/tests/test_integrations.py @@ -472,15 +472,57 @@ a ), ] +UNIX_TESTS = [ + # testing alias stack: lambda function + ( + """ +def _echo(): + echo hello + +aliases['echo'] = _echo +echo +""", + "hello\n", + 0, + ), + # testing alias stack: ExecAlias + ( + """ +aliases['echo'] = "echo @('hello')" +echo +""", + "hello\n", + 0, + ), + # testing alias stack: callable alias (ExecAlias) + no binary location + infinite loop + ( + """ +aliases['first'] = "second @(1)" +aliases['second'] = "first @(1)" +first +""", + lambda out: 'Recursive calls to "first" alias.' in out, + 0, + ), +] @skip_if_no_xonsh @pytest.mark.parametrize("case", ALL_PLATFORMS) def test_script(case): script, exp_out, exp_rtn = case out, err, rtn = run_xonsh(script) - assert exp_out == out + if callable(exp_out): + assert exp_out(out) + else: + assert exp_out == out assert exp_rtn == rtn +@skip_if_no_xonsh +@skip_if_on_windows [email protected]("case", UNIX_TESTS) +def test_unix_tests(case): + test_script(case) + ALL_PLATFORMS_STDERR = [ # test redirecting a function alias
Infinite alias call
I'm trying to override the `ls` command to display dotfiles in my $DOTFILES directory. This code goes into an endless loop because the `_ls` function calls the `ls` command, which resolves back to the `_ls` alias.

```python
def _ls():
    if $(pwd).rstrip(os.linesep) == $DOTFILES:
        ls -Ga
    else:
        ls -G

aliases['ls'] = _ls
```

## xonfig

<details>

```
+------------------+---------------------+
| xonsh            | 0.8.10              |
| Git SHA          | 2cb42bdb            |
| Commit Date      | Feb 6 16:49:16 2019 |
| Python           | 3.6.4               |
| PLY              | 3.11                |
| have readline    | True                |
| prompt toolkit   | 2.0.7               |
| shell type       | prompt_toolkit2     |
| pygments         | 2.3.1               |
| on posix         | True                |
| on linux         | False               |
| on darwin        | True                |
| on windows       | False               |
| on cygwin        | False               |
| on msys2         | False               |
| is superuser     | False               |
| default encoding | utf-8               |
| xonsh encoding   | utf-8               |
| encoding errors  | surrogateescape     |
+------------------+---------------------+
```

</details>

## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error.
To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.
On Linux and OSX, an easy way to do this is to run `env XONSH_DEBUG=1 xonsh` -->
When I enter the `ls` command, the terminal freezes.
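With the alias-stack guard from the patch above applied, the nested call inside a callable alias resolves to the real binary instead of re-entering the alias; this is the pattern the new integration tests in the test patch exercise. A short xonsh session illustrating the now-working behaviour (expected output taken from that test case):

```python
# xonsh: a callable alias may now invoke the command of the same name.
def _echo():
    echo hello        # resolves to the echo binary, not back to this alias

aliases['echo'] = _echo
echo                  # prints: hello
```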
0.0
4dc08232e615a75a524fbf96f17402a7a5b353a5
[ "tests/test_integrations.py::test_script[case21]", "tests/test_integrations.py::test_unix_tests[case0]" ]
[ "tests/test_integrations.py::test_script[case0]", "tests/test_integrations.py::test_script[case1]", "tests/test_integrations.py::test_script[case2]", "tests/test_integrations.py::test_script[case3]", "tests/test_integrations.py::test_script[case4]", "tests/test_integrations.py::test_script[case5]", "tests/test_integrations.py::test_script[case6]", "tests/test_integrations.py::test_script[case7]", "tests/test_integrations.py::test_script[case8]", "tests/test_integrations.py::test_script[case9]", "tests/test_integrations.py::test_script[case10]", "tests/test_integrations.py::test_script[case11]", "tests/test_integrations.py::test_script[case12]", "tests/test_integrations.py::test_script[case13]", "tests/test_integrations.py::test_script[case14]", "tests/test_integrations.py::test_script[case15]", "tests/test_integrations.py::test_script[case16]", "tests/test_integrations.py::test_script[case17]", "tests/test_integrations.py::test_script[case18]", "tests/test_integrations.py::test_script[case19]", "tests/test_integrations.py::test_script[case20]", "tests/test_integrations.py::test_unix_tests[case1]", "tests/test_integrations.py::test_unix_tests[case2]", "tests/test_integrations.py::test_script_stderr[case0]", "tests/test_integrations.py::test_single_command_no_windows[pwd-None-<lambda>]", "tests/test_integrations.py::test_single_command_no_windows[echo", "tests/test_integrations.py::test_single_command_no_windows[ls", "tests/test_integrations.py::test_single_command_no_windows[$FOO='foo'", "tests/test_integrations.py::test_eof_syntax_error", "tests/test_integrations.py::test_open_quote_syntax_error", "tests/test_integrations.py::test_printfile", "tests/test_integrations.py::test_printname", "tests/test_integrations.py::test_sourcefile", "tests/test_integrations.py::test_subshells[\\nwith", "tests/test_integrations.py::test_redirect_out_to_file[pwd-<lambda>]", "tests/test_integrations.py::test_pipe_between_subprocs[cat", "tests/test_integrations.py::test_negative_exit_codes_fail", "tests/test_integrations.py::test_ampersand_argument[echo", "tests/test_integrations.py::test_single_command_return_code[sys.exit(0)-0]", "tests/test_integrations.py::test_single_command_return_code[sys.exit(100)-100]", "tests/test_integrations.py::test_single_command_return_code[sh", "tests/test_integrations.py::test_argv0" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-04-03 15:43:05+00:00
bsd-2-clause
6,292
xonsh__xonsh-4221
diff --git a/news/subproc_captured_print_stderr.rst b/news/subproc_captured_print_stderr.rst new file mode 100644 index 00000000..0227acf6 --- /dev/null +++ b/news/subproc_captured_print_stderr.rst @@ -0,0 +1,23 @@ +**Added:** + +* Added XONSH_SUBPROC_CAPTURED_PRINT_STDERR environment variable to hide unwanted printing the stderr when using captured object. + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/xonsh/environ.py b/xonsh/environ.py index 3e499128..f1fa33b3 100644 --- a/xonsh/environ.py +++ b/xonsh/environ.py @@ -857,6 +857,10 @@ class GeneralSetting(Xettings): "should cause an end to execution. This is less useful at a terminal. " "The error that is raised is a ``subprocess.CalledProcessError``.", ) + XONSH_SUBPROC_CAPTURED_PRINT_STDERR = Var.with_default( + True, + "If ``True`` the stderr from captured subproc will be printed automatically.", + ) TERM = Var.no_default( "str", "TERM is sometimes set by the terminal emulator. This is used (when " diff --git a/xonsh/procs/pipelines.py b/xonsh/procs/pipelines.py index cfa5ad5d..351252d2 100644 --- a/xonsh/procs/pipelines.py +++ b/xonsh/procs/pipelines.py @@ -396,12 +396,16 @@ class CommandPipeline: if self.stderr_postfix: b += self.stderr_postfix stderr_has_buffer = hasattr(sys.stderr, "buffer") - # write bytes to std stream - if stderr_has_buffer: - sys.stderr.buffer.write(b) - else: - sys.stderr.write(b.decode(encoding=enc, errors=err)) - sys.stderr.flush() + show_stderr = self.captured != "object" or env.get( + "XONSH_SUBPROC_CAPTURED_PRINT_STDERR", True + ) + if show_stderr: + # write bytes to std stream + if stderr_has_buffer: + sys.stderr.buffer.write(b) + else: + sys.stderr.write(b.decode(encoding=enc, errors=err)) + sys.stderr.flush() # save the raw bytes self._raw_error = b # do some munging of the line before we save it to the attr
xonsh/xonsh
16884fc605d185c0ae0a84e36cf762595aafc2e1
diff --git a/tests/test_integrations.py b/tests/test_integrations.py index de96da1c..e9b05e07 100644 --- a/tests/test_integrations.py +++ b/tests/test_integrations.py @@ -174,6 +174,22 @@ print(x.returncode) "hallo on err\n1\n", 0, ), + # test captured streaming alias without stderr + ( + """ +def _test_stream(args, stdin, stdout, stderr): + print('hallo on err', file=stderr) + print('hallo on out', file=stdout) + return 1 + +aliases['test-stream'] = _test_stream +with __xonsh__.env.swap(XONSH_SUBPROC_CAPTURED_PRINT_STDERR=False): + x = !(test-stream) + print(x.returncode) +""", + "1\n", + 0, + ), # test piping aliases ( """
Captured object shows unwanted stderr

Hi! A captured command object shows unwanted stderr, i.e. the stderr is printed before the object repr:

```python
cd /tmp
echo @("""
import sys
print('out', file=sys.stdout)
print('err', file=sys.stderr)
""") > ttt.py

r=!(python ttt.py)
# No stdout, no stderr - good

!(python ttt.py)
# stderr before object repr - bad
# err
# CommandPipeline(
#   stdin=<_io.BytesIO object at 0x7f4c48b3c900>,
#   stdout=<_io.BytesIO object at 0x7f4c48b3cdb0>,
#   stderr=<_io.BytesIO object at 0x7f4c48b3c680>,
#   pid=6314,
#   returncode=0,
#   args=['python', 'ttt.py'],
#   alias=None,
#   stdin_redirect=['<stdin>', 'r'],
#   stdout_redirect=[10, 'wb'],
#   stderr_redirect=[12, 'w'],
#   timestamps=[1608146243.7313583, 1608146243.761544],
#   executed_cmd=['python', 'ttt.py'],
#   input='',
#   output='out\n',
#   errors='err\n'
# )
# Prepared by xontrib-hist-format
```
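With the patch above applied, the echoing can be switched off per block. A short usage sketch based on the added environment variable and the new integration test; the behaviour described in the comments is the intended one, not verified output:

```xonsh
with __xonsh__.env.swap(XONSH_SUBPROC_CAPTURED_PRINT_STDERR=False):
    r = !(python ttt.py)   # nothing is echoed to the terminal
print(r.output)   # value is 'out\n'
print(r.errors)   # value is 'err\n' -- stderr is still captured, just not printed
```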
0.0
16884fc605d185c0ae0a84e36cf762595aafc2e1
[ "tests/test_integrations.py::test_script[case6]" ]
[ "tests/test_integrations.py::test_script[case0]", "tests/test_integrations.py::test_script[case1]", "tests/test_integrations.py::test_script[case2]", "tests/test_integrations.py::test_script[case3]", "tests/test_integrations.py::test_script[case4]", "tests/test_integrations.py::test_script[case7]", "tests/test_integrations.py::test_script[case8]", "tests/test_integrations.py::test_script[case9]", "tests/test_integrations.py::test_script[case10]", "tests/test_integrations.py::test_script[case11]", "tests/test_integrations.py::test_script[case12]", "tests/test_integrations.py::test_script[case13]", "tests/test_integrations.py::test_script[case14]", "tests/test_integrations.py::test_script[case15]", "tests/test_integrations.py::test_script[case16]", "tests/test_integrations.py::test_script[case17]", "tests/test_integrations.py::test_script[case18]", "tests/test_integrations.py::test_script[case19]", "tests/test_integrations.py::test_script[case20]", "tests/test_integrations.py::test_script[case21]", "tests/test_integrations.py::test_script_stderr[case0]", "tests/test_integrations.py::test_single_command_no_windows[pwd-None-<lambda>]", "tests/test_integrations.py::test_single_command_no_windows[echo", "tests/test_integrations.py::test_single_command_no_windows[ls", "tests/test_integrations.py::test_single_command_no_windows[$FOO='foo'", "tests/test_integrations.py::test_eof_syntax_error", "tests/test_integrations.py::test_open_quote_syntax_error", "tests/test_integrations.py::test_printfile", "tests/test_integrations.py::test_printname", "tests/test_integrations.py::test_sourcefile", "tests/test_integrations.py::test_subshells[\\nwith", "tests/test_integrations.py::test_redirect_out_to_file[pwd-<lambda>]", "tests/test_integrations.py::test_pipe_between_subprocs[cat", "tests/test_integrations.py::test_negative_exit_codes_fail", "tests/test_integrations.py::test_ampersand_argument[echo", "tests/test_integrations.py::test_single_command_return_code[sys.exit(0)-0]", "tests/test_integrations.py::test_single_command_return_code[sys.exit(100)-100]", "tests/test_integrations.py::test_single_command_return_code[sh", "tests/test_integrations.py::test_argv0" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-04-04 10:20:17+00:00
bsd-2-clause
6,293
xonsh__xonsh-4400
diff --git a/news/fix-jedi-path-completion.rst b/news/fix-jedi-path-completion.rst new file mode 100644 index 00000000..8757b89d --- /dev/null +++ b/news/fix-jedi-path-completion.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* ``Jedi`` completer doesn't complete paths with ``~``. + +**Security:** + +* <news item> diff --git a/xontrib/jedi.py b/xontrib/jedi.py index 1d860b82..49d99138 100644 --- a/xontrib/jedi.py +++ b/xontrib/jedi.py @@ -65,8 +65,8 @@ def complete_jedi(context: CompletionContext): # if we're completing a possible command and the prefix contains a valid path, don't complete. if context.command: - path_parts = os.path.split(context.command.prefix) - if len(path_parts) > 1 and os.path.isdir(os.path.join(*path_parts[:-1])): + path_dir = os.path.dirname(context.command.prefix) + if path_dir and os.path.isdir(os.path.expanduser(path_dir)): return None filter_func = get_filter_function()
xonsh/xonsh
65913462438ffe869efabd2ec5f7137ef85efaef
diff --git a/tests/xontribs/test_jedi.py b/tests/xontribs/test_jedi.py index 0681e7fb..166ef200 100644 --- a/tests/xontribs/test_jedi.py +++ b/tests/xontribs/test_jedi.py @@ -253,6 +253,7 @@ def test_special_tokens(jedi_xontrib): @skip_if_on_windows def test_no_command_path_completion(jedi_xontrib, completion_context_parse): assert jedi_xontrib.complete_jedi(completion_context_parse("./", 2)) is None + assert jedi_xontrib.complete_jedi(completion_context_parse("~/", 2)) is None assert jedi_xontrib.complete_jedi(completion_context_parse("./e", 3)) is None assert jedi_xontrib.complete_jedi(completion_context_parse("/usr/bin/", 9)) is None assert (
bare path completion

## xonfig

<details>

```
+------------------+----------------------+
| xonsh            | 0.9.27               |
| Git SHA          | bc3b4962             |
| Commit Date      | Jun 11 12:27:29 2021 |
| Python           | 3.9.5                |
| PLY              | 3.11                 |
| have readline    | True                 |
| prompt toolkit   | 3.0.19               |
| shell type       | prompt_toolkit       |
| history backend  | sqlite               |
| pygments         | 2.9.0                |
| on posix         | True                 |
| on linux         | True                 |
| distro           | manjaro              |
| on wsl           | False                |
| on darwin        | False                |
| on windows       | False                |
| on cygwin        | False                |
| on msys2         | False                |
| is superuser     | False                |
| default encoding | utf-8                |
| xonsh encoding   | utf-8                |
| encoding errors  | surrogateescape      |
| on jupyter       | False                |
| jupyter kernel   | None                 |
| xontrib 1        | abbrevs              |
| xontrib 2        | argcomplete          |
| xontrib 3        | avox_poetry          |
| xontrib 4        | back2dir             |
| xontrib 5        | broot                |
| xontrib 6        | cmd_done             |
| xontrib 7        | commands             |
| xontrib 8        | fzf-widgets          |
| xontrib 9        | hist_navigator       |
| xontrib 10       | jedi                 |
| xontrib 11       | powerline3           |
| xontrib 12       | prompt_ret_code      |
| xontrib 13       | vox                  |
| xontrib 14       | voxapi               |
+------------------+----------------------+
```

</details>

## Expected Behavior

Upon typing just a path, xonsh should `cd` into it; auto-completion for this is not working.

![image](https://user-images.githubusercontent.com/6702219/124352483-f35c3200-dc1d-11eb-98d0-b779d2775ca7.png)

## Current Behavior

![image](https://user-images.githubusercontent.com/6702219/124352420-b42de100-dc1d-11eb-807e-973fe98c8496.png)
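The root cause is visible in the small change above in `xontrib/jedi.py`: the directory part of the prefix was never expanded, so a prefix starting with `~/` was not recognised as a path and the jedi completer kept firing instead of stepping aside for the path completer. A plain-Python illustration of the check:

```python
import os

prefix = "~/"                        # what has been typed so far
path_dir = os.path.dirname(prefix)   # '~'
print(os.path.isdir(path_dir))                      # False -> jedi used to keep completing
print(os.path.isdir(os.path.expanduser(path_dir)))  # True  -> jedi now returns None
```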
0.0
65913462438ffe869efabd2ec5f7137ef85efaef
[ "tests/xontribs/test_jedi.py::test_no_command_path_completion" ]
[ "tests/xontribs/test_jedi.py::test_completer_added", "tests/xontribs/test_jedi.py::test_jedi_api[new-context0]", "tests/xontribs/test_jedi.py::test_jedi_api[old-context0]", "tests/xontribs/test_jedi.py::test_multiline", "tests/xontribs/test_jedi.py::test_rich_completions[completion0-xx]", "tests/xontribs/test_jedi.py::test_rich_completions[completion1-xx]", "tests/xontribs/test_jedi.py::test_rich_completions[completion2-from_bytes]", "tests/xontribs/test_jedi.py::test_rich_completions[completion3-imag]", "tests/xontribs/test_jedi.py::test_rich_completions[completion4-bytes=]", "tests/xontribs/test_jedi.py::test_rich_completions[completion5-bytes=]", "tests/xontribs/test_jedi.py::test_rich_completions[completion6-collections]", "tests/xontribs/test_jedi.py::test_rich_completions[completion7-NameError]", "tests/xontribs/test_jedi.py::test_rich_completions[completion8-\"name\"]", "tests/xontribs/test_jedi.py::test_rich_completions[completion9-passwd\"]", "tests/xontribs/test_jedi.py::test_rich_completions[completion10-class]", "tests/xontribs/test_jedi.py::test_special_tokens" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2021-07-28 21:13:37+00:00
bsd-2-clause
6,294
xonsh__xonsh-4401
diff --git a/docs/keyboard_shortcuts.rst b/docs/keyboard_shortcuts.rst index 5131a273..45ef7401 100644 --- a/docs/keyboard_shortcuts.rst +++ b/docs/keyboard_shortcuts.rst @@ -31,4 +31,6 @@ Xonsh comes pre-baked with a few keyboard shortcuts. The following is only avail - Cut highlighted section * - Ctrl-V *[Beta]* - Paste clipboard contents + * - Ctrl-Right + - Complete a single auto-suggestion word diff --git a/news/auto-suggest-word-alias.rst b/news/auto-suggest-word-alias.rst new file mode 100644 index 00000000..045f4ea0 --- /dev/null +++ b/news/auto-suggest-word-alias.rst @@ -0,0 +1,23 @@ +**Added:** + +* Add ``CTRL-Right`` key binding to complete a single auto-suggestion word. + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/news/fix-jedi-path-completion.rst b/news/fix-jedi-path-completion.rst new file mode 100644 index 00000000..8757b89d --- /dev/null +++ b/news/fix-jedi-path-completion.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* ``Jedi`` completer doesn't complete paths with ``~``. + +**Security:** + +* <news item> diff --git a/xonsh/ptk_shell/key_bindings.py b/xonsh/ptk_shell/key_bindings.py index 941c9d46..84598a44 100644 --- a/xonsh/ptk_shell/key_bindings.py +++ b/xonsh/ptk_shell/key_bindings.py @@ -208,9 +208,14 @@ def wrap_selection(buffer, left, right=None): buffer.selection_state = selection_state -def load_xonsh_bindings() -> KeyBindingsBase: +def load_xonsh_bindings(ptk_bindings: KeyBindingsBase) -> KeyBindingsBase: """ Load custom key bindings. + + Parameters + ---------- + ptk_bindings : + The default prompt toolkit bindings. We need these to add aliases to them. """ key_bindings = KeyBindings() handle = key_bindings.add @@ -389,4 +394,12 @@ def load_xonsh_bindings() -> KeyBindingsBase: buff.cut_selection() get_by_name("yank").call(event) + def create_alias(new_keys, original_keys): + bindings = ptk_bindings.get_bindings_for_keys(tuple(original_keys)) + for original_binding in bindings: + handle(*new_keys, filter=original_binding.filter)(original_binding.handler) + + # Complete a single auto-suggestion word + create_alias([Keys.ControlRight], ["escape", "f"]) + return key_bindings diff --git a/xonsh/ptk_shell/shell.py b/xonsh/ptk_shell/shell.py index f61ea789..459c0bab 100644 --- a/xonsh/ptk_shell/shell.py +++ b/xonsh/ptk_shell/shell.py @@ -207,7 +207,8 @@ class PromptToolkitShell(BaseShell): self.prompt_formatter = PTKPromptFormatter(self.prompter) self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx, self) - self.key_bindings = load_xonsh_bindings() + ptk_bindings = self.prompter.app.key_bindings + self.key_bindings = load_xonsh_bindings(ptk_bindings) self._overrides_deprecation_warning_shown = False # Store original `_history_matches` in case we need to restore it diff --git a/xontrib/jedi.py b/xontrib/jedi.py index 1d860b82..49d99138 100644 --- a/xontrib/jedi.py +++ b/xontrib/jedi.py @@ -65,8 +65,8 @@ def complete_jedi(context: CompletionContext): # if we're completing a possible command and the prefix contains a valid path, don't complete. 
if context.command: - path_parts = os.path.split(context.command.prefix) - if len(path_parts) > 1 and os.path.isdir(os.path.join(*path_parts[:-1])): + path_dir = os.path.dirname(context.command.prefix) + if path_dir and os.path.isdir(os.path.expanduser(path_dir)): return None filter_func = get_filter_function()
xonsh/xonsh
65913462438ffe869efabd2ec5f7137ef85efaef
diff --git a/tests/xontribs/test_jedi.py b/tests/xontribs/test_jedi.py index 0681e7fb..166ef200 100644 --- a/tests/xontribs/test_jedi.py +++ b/tests/xontribs/test_jedi.py @@ -253,6 +253,7 @@ def test_special_tokens(jedi_xontrib): @skip_if_on_windows def test_no_command_path_completion(jedi_xontrib, completion_context_parse): assert jedi_xontrib.complete_jedi(completion_context_parse("./", 2)) is None + assert jedi_xontrib.complete_jedi(completion_context_parse("~/", 2)) is None assert jedi_xontrib.complete_jedi(completion_context_parse("./e", 3)) is None assert jedi_xontrib.complete_jedi(completion_context_parse("/usr/bin/", 9)) is None assert (
fish-like partial suggestion completion

Xonsh already supports fish-like suggestion completion, which is great: on a greyed-out suggestion, pressing the right arrow accepts the full suggestion. One thing I miss, though, is fish's partial suggestion completion. In fish, if I remember correctly, when a suggestion is shown, pressing "alt + right_arrow" accepts the suggestion only up to the next separator, which is handy for reusing just part of a suggestion. Is partial suggestion completion with alt + right_arrow already available, or could it be added? :)
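The patch above implements this by aliasing `Ctrl-Right` to whatever the default prompt_toolkit bindings do for `escape f`, which accepts one word of the auto-suggestion when a suggestion is shown. For older xonsh versions, roughly the same trick could be wired up from `~/.xonshrc`; the following is an untested sketch that assumes the prompt_toolkit shell and mirrors the names used in the patch:

```xonsh
from prompt_toolkit.keys import Keys

@events.on_ptk_create
def _alias_ctrl_right(prompter, bindings, **kw):
    # Reuse the handlers already bound to "escape f" in the default bindings.
    ptk_bindings = prompter.app.key_bindings
    for b in ptk_bindings.get_bindings_for_keys(("escape", "f")):
        bindings.add(Keys.ControlRight, filter=b.filter)(b.handler)
```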
0.0
65913462438ffe869efabd2ec5f7137ef85efaef
[ "tests/xontribs/test_jedi.py::test_no_command_path_completion" ]
[ "tests/xontribs/test_jedi.py::test_completer_added", "tests/xontribs/test_jedi.py::test_jedi_api[new-context0]", "tests/xontribs/test_jedi.py::test_jedi_api[old-context0]", "tests/xontribs/test_jedi.py::test_multiline", "tests/xontribs/test_jedi.py::test_rich_completions[completion0-xx]", "tests/xontribs/test_jedi.py::test_rich_completions[completion1-xx]", "tests/xontribs/test_jedi.py::test_rich_completions[completion2-from_bytes]", "tests/xontribs/test_jedi.py::test_rich_completions[completion3-imag]", "tests/xontribs/test_jedi.py::test_rich_completions[completion4-bytes=]", "tests/xontribs/test_jedi.py::test_rich_completions[completion5-bytes=]", "tests/xontribs/test_jedi.py::test_rich_completions[completion6-collections]", "tests/xontribs/test_jedi.py::test_rich_completions[completion7-NameError]", "tests/xontribs/test_jedi.py::test_rich_completions[completion8-\"name\"]", "tests/xontribs/test_jedi.py::test_rich_completions[completion9-passwd\"]", "tests/xontribs/test_jedi.py::test_rich_completions[completion10-class]", "tests/xontribs/test_jedi.py::test_special_tokens" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-07-28 22:30:26+00:00
bsd-2-clause
6,295
xonsh__xonsh-4673
diff --git a/news/vox-add-prompt-arg.rst b/news/vox-add-prompt-arg.rst new file mode 100644 index 00000000..e811eb6b --- /dev/null +++ b/news/vox-add-prompt-arg.rst @@ -0,0 +1,23 @@ +**Added:** + +* vox new/create accepts a new ``--prompt`` argument, which is passed through to ``python -m venv`` + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/news/vox-respect-prompt.rst b/news/vox-respect-prompt.rst new file mode 100644 index 00000000..8837b7f9 --- /dev/null +++ b/news/vox-respect-prompt.rst @@ -0,0 +1,23 @@ +**Added:** + +* ``prompt.env.env_name`` is now aware of the "prompt" key in ``pyvenv.cfg`` - search order from first to last is: ``$VIRTUAL_ENV_PROMPT``, ``pyvenv.cfg``, ``$VIRTUAL_ENV``, $``CONDA_DEFAULT_ENV`` + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/xonsh/prompt/env.py b/xonsh/prompt/env.py index fa6a9142..2edfd17c 100644 --- a/xonsh/prompt/env.py +++ b/xonsh/prompt/env.py @@ -1,42 +1,70 @@ """Prompt formatter for virtualenv and others""" - -import os +import functools +import re +from pathlib import Path +from typing import Optional from xonsh.built_ins import XSH -def find_env_name(): - """Finds the current environment name from $VIRTUAL_ENV or - $CONDA_DEFAULT_ENV if that is set. +def find_env_name() -> Optional[str]: + """Find current environment name from available sources. + + If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in + ``<venv>/pyvenv.cfg`` or from the folder name of the environment. + + Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``. + """ + virtual_env = XSH.env.get("VIRTUAL_ENV") + if virtual_env: + name = _determine_env_name(virtual_env) + if name: + return name + conda_default_env = XSH.env.get("CONDA_DEFAULT_ENV") + if conda_default_env: + return conda_default_env + + +def env_name() -> str: + """Build env_name based on different sources. Respect order of precedence. + + Name from VIRTUAL_ENV_PROMPT will be used as-is. + Names from other sources are surrounded with ``{env_prefix}`` and + ``{env_postfix}`` fields. """ - env_path = XSH.env.get("VIRTUAL_ENV", "") - if env_path: - env_name = os.path.basename(env_path) - else: - env_name = XSH.env.get("CONDA_DEFAULT_ENV", "") - return env_name + if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT"): + return "" + virtual_env_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") + if virtual_env_prompt: + return virtual_env_prompt + found_envname = find_env_name() + return _surround_env_name(found_envname) if found_envname else "" + [email protected]_cache(maxsize=5) +def _determine_env_name(virtual_env: str) -> str: + """Use prompt setting from pyvenv.cfg or basename of virtual_env. -def env_name(): - """Returns the current env_name if it non-empty, surrounded by the - ``{env_prefix}`` and ``{env_postfix}`` fields. + Tries to be resilient to subtle changes in whitespace and quoting in the + configuration file format as it adheres to no clear standard. 
""" - env_name = find_env_name() - if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT") or not env_name: - # env name prompt printing disabled, or no environment; just return - return - - venv_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") - if venv_prompt is not None: - return venv_prompt - else: - pf = XSH.shell.prompt_formatter - pre = pf._get_field_value("env_prefix") - post = pf._get_field_value("env_postfix") - return pre + env_name + post - - -def vte_new_tab_cwd(): + venv_path = Path(virtual_env) + pyvenv_cfg = venv_path / "pyvenv.cfg" + if pyvenv_cfg.is_file(): + match = re.search(r"prompt\s*=\s*(.*)", pyvenv_cfg.read_text()) + if match: + return match.group(1).strip().lstrip("'\"").rstrip("'\"") + return venv_path.name + + +def _surround_env_name(name: str) -> str: + pf = XSH.shell.prompt_formatter + pre = pf._get_field_value("env_prefix") + post = pf._get_field_value("env_postfix") + return f"{pre}{name}{post}" + + +def vte_new_tab_cwd() -> None: """This prints an escape sequence that tells VTE terminals the hostname and pwd. This should not be needed in most cases, but sometimes is for certain Linux terminals that do not read the PWD from the environment diff --git a/xontrib/vox.py b/xontrib/vox.py index 3d5dac12..2a994f59 100644 --- a/xontrib/vox.py +++ b/xontrib/vox.py @@ -87,6 +87,7 @@ class VoxHandler(xcli.ArgParserAlias): packages: xcli.Annotated[tp.Sequence[str], xcli.Arg(nargs="*")] = (), requirements: xcli.Annotated[tp.Sequence[str], xcli.Arg(action="append")] = (), link_project_dir=False, + prompt: "str|None" = None, ): """Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``. @@ -114,6 +115,8 @@ class VoxHandler(xcli.ArgParserAlias): The argument value is passed to ``pip -r`` to be installed. link_project_dir: -l, --link, --link-project Associate the current directory with the new environment. + prompt: --prompt + Provides an alternative prompt prefix for this environment. """ print("Creating environment...") @@ -128,6 +131,7 @@ class VoxHandler(xcli.ArgParserAlias): symlinks=symlinks, with_pip=(not without_pip), interpreter=interpreter, + prompt=prompt, ) if link_project_dir: self.project_set(name) diff --git a/xontrib/voxapi.py b/xontrib/voxapi.py index b74dd1b7..c844c91e 100644 --- a/xontrib/voxapi.py +++ b/xontrib/voxapi.py @@ -139,6 +139,7 @@ class Vox(collections.abc.Mapping): system_site_packages=False, symlinks=False, with_pip=True, + prompt=None, ): """Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``. @@ -157,8 +158,9 @@ class Vox(collections.abc.Mapping): environment. with_pip : bool If True, ensure pip is installed in the virtual environment. (Default is True) + prompt: str + Provides an alternative prompt prefix for this environment. """ - if interpreter is None: interpreter = _get_vox_default_interpreter() print(f"Using Interpreter: {interpreter}") @@ -176,7 +178,14 @@ class Vox(collections.abc.Mapping): ) ) - self._create(env_path, interpreter, system_site_packages, symlinks, with_pip) + self._create( + env_path, + interpreter, + system_site_packages, + symlinks, + with_pip, + prompt=prompt, + ) events.vox_on_create.fire(name=name) def upgrade(self, name, symlinks=False, with_pip=True, interpreter=None): @@ -219,6 +228,9 @@ class Vox(collections.abc.Mapping): "symlinks": symlinks, "with_pip": with_pip, } + prompt = cfgops.get("prompt") + if prompt: + flags["prompt"] = prompt.lstrip("'\"").rstrip("'\"") # END things we shouldn't be doing. # Ok, do what we came here to do. 
@@ -233,6 +245,7 @@ class Vox(collections.abc.Mapping): symlinks=False, with_pip=True, upgrade=False, + prompt=None, ): version_output = sp.check_output( [interpreter, "--version"], stderr=sp.STDOUT, text=True @@ -255,8 +268,10 @@ class Vox(collections.abc.Mapping): with_pip, upgrade, ] - cmd = [arg for arg in cmd if arg] # remove empty args + if prompt and module == "venv": + cmd.extend(["--prompt", prompt]) + cmd = [arg for arg in cmd if arg] # remove empty args logging.debug(cmd) return_code = sp.call(cmd)
xonsh/xonsh
5268dd80031fe321b4d1811c2c818ff3236aba5a
diff --git a/tests/prompt/test_base.py b/tests/prompt/test_base.py index 40976a8c..a9994aeb 100644 --- a/tests/prompt/test_base.py +++ b/tests/prompt/test_base.py @@ -1,7 +1,9 @@ +import functools from unittest.mock import Mock import pytest +from xonsh.prompt import env as prompt_env from xonsh.prompt.base import PROMPT_FIELDS, PromptFormatter @@ -118,10 +120,10 @@ def test_format_prompt_with_various_prepost(formatter, xession, live_fields, pre xession.env["VIRTUAL_ENV"] = "env" - live_fields.update({"env_prefix": pre, "env_postfix": post}) - + lf_copy = dict(live_fields) # live_fields fixture is not idempotent! + lf_copy.update({"env_prefix": pre, "env_postfix": post}) exp = pre + "env" + post - assert formatter("{env_name}", fields=live_fields) == exp + assert formatter("{env_name}", fields=lf_copy) == exp def test_noenv_with_disable_set(formatter, xession, live_fields): @@ -132,6 +134,98 @@ def test_noenv_with_disable_set(formatter, xession, live_fields): assert formatter("{env_name}", fields=live_fields) == exp +class TestPromptFromVenvCfg: + WANTED = "wanted" + CONFIGS = [ + f"prompt = '{WANTED}'", + f'prompt = "{WANTED}"', + f'\t prompt = "{WANTED}" ', + f"prompt \t= {WANTED} ", + "nothing = here", + ] + CONFIGS.extend([f"other = fluff\n{t}\nmore = fluff" for t in CONFIGS]) + + @pytest.mark.parametrize("text", CONFIGS) + def test_determine_env_name_from_cfg(self, monkeypatch, tmp_path, text): + monkeypatch.setattr(prompt_env, "_surround_env_name", lambda x: x) + (tmp_path / "pyvenv.cfg").write_text(text) + wanted = self.WANTED if self.WANTED in text else tmp_path.name + assert prompt_env._determine_env_name(tmp_path) == wanted + + +class TestEnvNamePrompt: + def test_no_prompt(self, formatter, live_fields): + assert formatter("{env_name}", fields=live_fields) == "" + + def test_search_order(self, monkeypatch, tmp_path, formatter, xession, live_fields): + xession.shell.prompt_formatter = formatter + + first = "first" + second = "second" + third = "third" + fourth = "fourth" + + pyvenv_cfg = tmp_path / third / "pyvenv.cfg" + pyvenv_cfg.parent.mkdir() + pyvenv_cfg.write_text(f"prompt={second}") + + fmt = functools.partial(formatter, "{env_name}", fields=live_fields) + xession.env.update( + dict( + VIRTUAL_ENV_PROMPT=first, + VIRTUAL_ENV=str(pyvenv_cfg.parent), + CONDA_DEFAULT_ENV=fourth, + ) + ) + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 0 + assert fmt() == first + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 1 + assert fmt() == "" + + del xession.env["VIRTUAL_ENV_PROMPT"] + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 0 + assert fmt() == f"({second}) " + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 1 + assert fmt() == "" + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 0 + pyvenv_cfg.unlink() + # In the interest of speed the calls are cached, but if the user + # fiddles with environments this will bite them. I will not do anythin + prompt_env._determine_env_name.cache_clear() + assert fmt() == f"({third}) " + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 1 + assert fmt() == "" + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 0 + del xession.env["VIRTUAL_ENV"] + assert fmt() == f"({fourth}) " + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 1 + assert fmt() == "" + + xession.env["VIRTUAL_ENV_DISABLE_PROMPT"] = 0 + del xession.env["CONDA_DEFAULT_ENV"] + assert fmt() == "" + + @pytest.mark.xfail(reason="caching introduces stale values") + def test_env_name_updates_on_filesystem_change(self, tmp_path): + """Due to cache, user might get stale value. 
+ + if user fiddles with env folder or the config, they might get a stale + value from the cache. + """ + cfg = tmp_path / "pyvenv.cfg" + cfg.write_text("prompt=fromfile") + assert prompt_env._determine_env_name(cfg.parent) == "fromfile" + cfg.unlink() + assert prompt_env._determine_env_name(cfg.parent) == cfg.parent.name + + @pytest.mark.parametrize("disable", [0, 1]) def test_custom_env_overrides_default(formatter, xession, live_fields, disable): xession.shell.prompt_formatter = formatter diff --git a/tests/test_vox.py b/tests/test_vox.py index 741e170c..7379d2d4 100644 --- a/tests/test_vox.py +++ b/tests/test_vox.py @@ -368,6 +368,7 @@ _VOX_RM_OPTS = {"-f", "--force"}.union(_HELP_OPTS) "--requirements", "-t", "--temp", + "--prompt", } ), ),
make prompt.env.env_name venv --prompt aware (set in pyvenv.cfg)

## xonfig

<details>

```
> xonfig
<xonsh-code>:1:0 - xonfig
<xonsh-code>:1:0 + ![xonfig]
+------------------+----------------------+
| xonsh            | 0.11.0               |
| Git SHA          | adfa60ea             |
| Commit Date      | Feb 11 14:53:00 2022 |
| Python           | 3.9.7                |
| PLY              | 3.11                 |
| have readline    | True                 |
| prompt toolkit   | None                 |
| shell type       | readline             |
| history backend  | json                 |
| pygments         | 2.11.2               |
| on posix         | True                 |
| on linux         | True                 |
| distro           | unknown              |
| on wsl           | False                |
| on darwin        | False                |
| on windows       | False                |
| on cygwin        | False                |
| on msys2         | False                |
| is superuser     | False                |
| default encoding | utf-8                |
| xonsh encoding   | utf-8                |
| encoding errors  | surrogateescape      |
| on jupyter       | False                |
| jupyter kernel   | None                 |
| xontrib 1        | coreutils            |
| xontrib 2        | vox                  |
| xontrib 3        | voxapi               |
| RC file 1        | /home/ob/.xonshrc    |
+------------------+----------------------+
```

</details>

## Expected Behavior

When activating a venv via `vox activate`, the name should be set to the value of the `prompt` key in `pyvenv.cfg` if present (both file and key) - see https://docs.python.org/3/library/venv.html.

## Current Behavior

The prompt is always set to the name of the venv directory, independent of the prompt setting.

## Steps to Reproduce

```shell
$ python -m venv --prompt "MY SPECIAL PROMPT" .venv
$ cat .venv/pyvenv.cfg | grep prompt
prompt = 'MY SPECIAL PROMPT'
vox activate ./.venv
```

The new prompt looks like this: `[17:58:10] (.venv) ob@ob1 ~/oss/xonsh dropthedot|✓` but it should look like this: `[17:58:10] (MY SPECIAL PROMPT) ob@ob1 ~/oss/xonsh dropthedot|✓`

## Also add `--prompt` to `vox new`?

If this is done, it might also be a good idea to add `--prompt` to `vox new` so the prompt can be set at creation time (also in the interest of symmetry). However, the common pattern for many xonsh users seems to be to keep virtualenvs in `~/.virtualenv` with the venv folder named after the project, which already yields a meaningful prompt, so this does not seem to be an urgently wanted feature for most.

I want to prepare a PR for the "passive" part of the functionality, but could also have a stab at adding `--prompt` to `vox new` at a later date. My main itch is to respect `prompt` from already existing venvs first.
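For reference, the patch above recovers the display name roughly as in the following standalone sketch. The regex is the one used in the patch; the helper name and the fallback to the folder name are illustrative only:

```python
import re
from pathlib import Path

def env_display_name(virtual_env: str) -> str:
    """Return the prompt from pyvenv.cfg, falling back to the folder name."""
    cfg = Path(virtual_env) / "pyvenv.cfg"
    if cfg.is_file():
        match = re.search(r"prompt\s*=\s*(.*)", cfg.read_text())
        if match:
            return match.group(1).strip().strip("'\"")
    return Path(virtual_env).name

print(env_display_name(".venv"))  # -> MY SPECIAL PROMPT for the venv created above
```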
0.0
5268dd80031fe321b4d1811c2c818ff3236aba5a
[ "tests/test_vox.py::test_vox_completer[vox" ]
[ "tests/prompt/test_base.py::test_format_prompt[my", "tests/prompt/test_base.py::test_format_prompt[{f}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{a_number:{0:^3}}cats-", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{current_job:{}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{none:{}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{none:{}}--fields0]", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{{{a_string:{{{}}}}}}-{{cats}}-fields0]", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{{{none:{{{}}}}}}-{}-fields0]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{user]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{{user]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{{user}]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{user}{hostname]", "tests/prompt/test_base.py::test_format_prompt_with_invalid_func", "tests/prompt/test_base.py::test_format_prompt_with_func_that_raises", "tests/prompt/test_base.py::test_format_prompt_with_no_env", "tests/prompt/test_base.py::test_format_prompt_with_various_envs[env]", "tests/prompt/test_base.py::test_format_prompt_with_various_envs[foo]", "tests/prompt/test_base.py::test_format_prompt_with_various_envs[bar]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-(]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-[[]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-(]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-[[]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-(]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-[[]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[", "tests/prompt/test_base.py::test_noenv_with_disable_set", "tests/prompt/test_base.py::TestEnvNamePrompt::test_no_prompt", "tests/prompt/test_base.py::test_custom_env_overrides_default[0]", "tests/prompt/test_base.py::test_custom_env_overrides_default[1]", "tests/prompt/test_base.py::test_promptformatter_cache", "tests/prompt/test_base.py::test_promptformatter_clears_cache", "tests/test_vox.py::test_vox_completer[vox-positionals0-opts0]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-02-19 14:43:54+00:00
bsd-2-clause
6,296
xonsh__xonsh-4715
diff --git a/news/fix-globbing-path-containing-regex.rst b/news/fix-globbing-path-containing-regex.rst new file mode 100644 index 00000000..30e06e20 --- /dev/null +++ b/news/fix-globbing-path-containing-regex.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* Fixed regex globbing for file paths that contain special regex characters (e.g. "test*1/model") + +**Security:** + +* <news item> diff --git a/xonsh/built_ins.py b/xonsh/built_ins.py index 737347ce..cd83ef8b 100644 --- a/xonsh/built_ins.py +++ b/xonsh/built_ins.py @@ -20,7 +20,7 @@ from ast import AST from xonsh.inspectors import Inspector from xonsh.lazyasd import lazyobject -from xonsh.platform import ON_POSIX, ON_WINDOWS +from xonsh.platform import ON_POSIX from xonsh.tools import ( XonshCalledProcessError, XonshError, @@ -92,12 +92,7 @@ def reglob(path, parts=None, i=None): base = "" elif len(parts) > 1: i += 1 - regex = os.path.join(base, parts[i]) - if ON_WINDOWS: - # currently unable to access regex backslash sequences - # on Windows due to paths using \. - regex = regex.replace("\\", "\\\\") - regex = re.compile(regex) + regex = re.compile(parts[i]) files = os.listdir(subdir) files.sort() paths = [] @@ -105,12 +100,12 @@ def reglob(path, parts=None, i=None): if i1 == len(parts): for f in files: p = os.path.join(base, f) - if regex.fullmatch(p) is not None: + if regex.fullmatch(f) is not None: paths.append(p) else: for f in files: p = os.path.join(base, f) - if regex.fullmatch(p) is None or not os.path.isdir(p): + if regex.fullmatch(f) is None or not os.path.isdir(p): continue paths += reglob(p, parts=parts, i=i1) return paths diff --git a/xonsh/dirstack.py b/xonsh/dirstack.py index b73a1ac8..91ffb925 100644 --- a/xonsh/dirstack.py +++ b/xonsh/dirstack.py @@ -20,6 +20,7 @@ _unc_tempDrives: tp.Dict[str, str] = {} """ drive: sharePath for temp drive letters we create for UNC mapping""" [email protected]_type_check def _unc_check_enabled() -> bool: r"""Check whether CMD.EXE is enforcing no-UNC-as-working-directory check.
xonsh/xonsh
f0d77b28e86292e3404c883541d15ff51207bfa3
diff --git a/requirements/tests.txt b/requirements/tests.txt index fc49411f..fea8a4d2 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -18,9 +18,7 @@ pre-commit pyte>=0.8.0 # types related -# mypy==0.931 -git+git://github.com/python/mypy.git@9b3147701f054bf8ef42bd96e33153b05976a5e1 -# TODO: replace above with mypy==0.940 once its released +mypy==0.940 types-ujson # ensure tests run with the amalgamated (==production) xonsh diff --git a/tests/test_builtins.py b/tests/test_builtins.py index bf0c5b6b..ab5a2d1f 100644 --- a/tests/test_builtins.py +++ b/tests/test_builtins.py @@ -1,8 +1,10 @@ """Tests the xonsh builtins.""" import os import re +import shutil import types from ast import AST, Expression, Interactive, Module +from pathlib import Path import pytest @@ -85,6 +87,41 @@ def test_repath_HOME_PATH_var_brace(home_env): assert exp == obs[0] +# helper +def check_repath(path, pattern): + base_testdir = Path("re_testdir") + testdir = base_testdir / path + testdir.mkdir(parents=True) + try: + obs = regexsearch(str(base_testdir / pattern)) + assert [str(testdir)] == obs + finally: + shutil.rmtree(base_testdir) + + +@skip_if_on_windows [email protected]( + "path, pattern", + [ + ("test*1/model", ".*/model"), + ("hello/test*1/model", "hello/.*/model"), + ], +) +def test_repath_containing_asterisk(path, pattern): + check_repath(path, pattern) + + [email protected]( + "path, pattern", + [ + ("test+a/model", ".*/model"), + ("hello/test+1/model", "hello/.*/model"), + ], +) +def test_repath_containing_plus_sign(path, pattern): + check_repath(path, pattern) + + def test_helper_int(home_env): helper(int, "int")
Regex globbing broken for paths that include active regex characters

Paths that include active regex characters such as `test+a/model` are not matched by regex globbing:

```xonsh
mkdir -p test/model
mkdir -p test+a/model
mkdir -p test*1/model
for d in `.*/model`:
    print(d)
```

Gives the following output:

```output
test/model
```
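The failure is a plain `re` effect, independent of xonsh: the old implementation joined the already-matched parent directory into the pattern, so a literal `+` or `*` in a directory name became a quantifier. A two-line illustration (the fix above instead matches each pattern component against the bare entry name):

```python
import re

print(re.fullmatch(r"test+a/model", "test+a/model"))  # None -> old approach drops the path
print(re.fullmatch(r"model", "model") is not None)    # True -> per-component match keeps it
```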
0.0
f0d77b28e86292e3404c883541d15ff51207bfa3
[ "tests/test_builtins.py::test_repath_containing_asterisk[test*1/model-.*/model]", "tests/test_builtins.py::test_repath_containing_asterisk[hello/test*1/model-hello/.*/model]", "tests/test_builtins.py::test_repath_containing_plus_sign[test+a/model-.*/model]", "tests/test_builtins.py::test_repath_containing_plus_sign[hello/test+1/model-hello/.*/model]" ]
[ "tests/test_builtins.py::test_repath_backslash", "tests/test_builtins.py::test_repath_HOME_PATH_itself", "tests/test_builtins.py::test_repath_HOME_PATH_contents", "tests/test_builtins.py::test_repath_HOME_PATH_var", "tests/test_builtins.py::test_repath_HOME_PATH_var_brace", "tests/test_builtins.py::test_helper_int", "tests/test_builtins.py::test_helper_helper", "tests/test_builtins.py::test_helper_env", "tests/test_builtins.py::test_superhelper_int", "tests/test_builtins.py::test_superhelper_helper", "tests/test_builtins.py::test_superhelper_env", "tests/test_builtins.py::test_ensure_list_of_strs[exp0-yo]", "tests/test_builtins.py::test_ensure_list_of_strs[exp1-inp1]", "tests/test_builtins.py::test_ensure_list_of_strs[exp2-42]", "tests/test_builtins.py::test_ensure_list_of_strs[exp3-inp3]", "tests/test_builtins.py::test_list_of_strs_or_callables[exp0-yo]", "tests/test_builtins.py::test_list_of_strs_or_callables[exp1-inp1]", "tests/test_builtins.py::test_list_of_strs_or_callables[exp2-42]", "tests/test_builtins.py::test_list_of_strs_or_callables[exp3-inp3]", "tests/test_builtins.py::test_list_of_strs_or_callables[exp4-<lambda>]", "tests/test_builtins.py::test_list_of_strs_or_callables[exp5-inp5]", "tests/test_builtins.py::test_list_of_list_of_strs_outer_product[inp0-exp0]", "tests/test_builtins.py::test_list_of_list_of_strs_outer_product[inp1-exp1]", "tests/test_builtins.py::test_list_of_list_of_strs_outer_product[inp2-exp2]", "tests/test_builtins.py::test_expand_path[~]", "tests/test_builtins.py::test_expand_path[~/]", "tests/test_builtins.py::test_expand_path[x=~/place]", "tests/test_builtins.py::test_expand_path[x=one:~/place]", "tests/test_builtins.py::test_expand_path[x=one:~/place:~/yo]", "tests/test_builtins.py::test_expand_path[x=~/one:~/place:~/yo]", "tests/test_builtins.py::test_convert_macro_arg_str[str0]", "tests/test_builtins.py::test_convert_macro_arg_str[s]", "tests/test_builtins.py::test_convert_macro_arg_str[S]", "tests/test_builtins.py::test_convert_macro_arg_str[str1]", "tests/test_builtins.py::test_convert_macro_arg_str[string]", "tests/test_builtins.py::test_convert_macro_arg_ast[AST]", "tests/test_builtins.py::test_convert_macro_arg_ast[a]", "tests/test_builtins.py::test_convert_macro_arg_ast[Ast]", "tests/test_builtins.py::test_convert_macro_arg_code[code0]", "tests/test_builtins.py::test_convert_macro_arg_code[compile0]", "tests/test_builtins.py::test_convert_macro_arg_code[c]", "tests/test_builtins.py::test_convert_macro_arg_code[code1]", "tests/test_builtins.py::test_convert_macro_arg_code[compile1]", "tests/test_builtins.py::test_convert_macro_arg_eval[eval0]", "tests/test_builtins.py::test_convert_macro_arg_eval[v]", "tests/test_builtins.py::test_convert_macro_arg_eval[eval1]", "tests/test_builtins.py::test_convert_macro_arg_exec[exec0]", "tests/test_builtins.py::test_convert_macro_arg_exec[x]", "tests/test_builtins.py::test_convert_macro_arg_exec[exec1]", "tests/test_builtins.py::test_convert_macro_arg_type[type0]", "tests/test_builtins.py::test_convert_macro_arg_type[t]", "tests/test_builtins.py::test_convert_macro_arg_type[type1]", "tests/test_builtins.py::test_in_macro_call", "tests/test_builtins.py::test_call_macro_str[x]", "tests/test_builtins.py::test_call_macro_str[42]", "tests/test_builtins.py::test_call_macro_str[x", "tests/test_builtins.py::test_call_macro_ast[x]", "tests/test_builtins.py::test_call_macro_ast[42]", "tests/test_builtins.py::test_call_macro_ast[x", "tests/test_builtins.py::test_call_macro_code[x]", 
"tests/test_builtins.py::test_call_macro_code[42]", "tests/test_builtins.py::test_call_macro_code[x", "tests/test_builtins.py::test_call_macro_eval[x]", "tests/test_builtins.py::test_call_macro_eval[42]", "tests/test_builtins.py::test_call_macro_eval[x", "tests/test_builtins.py::test_call_macro_exec[if", "tests/test_builtins.py::test_call_macro_raw_arg[x]", "tests/test_builtins.py::test_call_macro_raw_arg[42]", "tests/test_builtins.py::test_call_macro_raw_arg[x", "tests/test_builtins.py::test_call_macro_raw_kwarg[x]", "tests/test_builtins.py::test_call_macro_raw_kwarg[42]", "tests/test_builtins.py::test_call_macro_raw_kwarg[x", "tests/test_builtins.py::test_call_macro_raw_kwargs[x]", "tests/test_builtins.py::test_call_macro_raw_kwargs[42]", "tests/test_builtins.py::test_call_macro_raw_kwargs[x", "tests/test_builtins.py::test_call_macro_ast_eval_expr", "tests/test_builtins.py::test_call_macro_ast_single_expr", "tests/test_builtins.py::test_call_macro_ast_exec_expr", "tests/test_builtins.py::test_call_macro_ast_eval_statement", "tests/test_builtins.py::test_call_macro_ast_single_statement", "tests/test_builtins.py::test_call_macro_ast_exec_statement", "tests/test_builtins.py::test_enter_macro" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-03-14 18:35:12+00:00
bsd-2-clause
6,297
xonsh__xonsh-4835
diff --git a/docs/tutorial.rst b/docs/tutorial.rst index 0cc3de3b..41945aef 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -1054,6 +1054,23 @@ mode or subprocess mode) by using the ``g````: 5 +Formatted Glob Literals +----------------------- + +Using the ``f`` modifier with either regex or normal globbing makes +the glob pattern behave like a formatted string literal. This can be used to +substitute variables and other expressions into the glob pattern: + +.. code-block:: xonshcon + + >>> touch a aa aaa aba abba aab aabb abcba + >>> mypattern = 'ab' + >>> print(f`{mypattern[0]}+`) + ['a', 'aa', 'aaa'] + >>> print(gf`{mypattern}*`) + ['aba', 'abba', 'abcba'] + + Custom Path Searches -------------------- diff --git a/news/feat-f-glob-strings.rst b/news/feat-f-glob-strings.rst new file mode 100644 index 00000000..7c1d02a3 --- /dev/null +++ b/news/feat-f-glob-strings.rst @@ -0,0 +1,23 @@ +**Added:** + +* Support for f-glob strings (e.g. ``fg`{prefix}*```) + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/xonsh/parsers/base.py b/xonsh/parsers/base.py index ab20d244..7208ddf6 100644 --- a/xonsh/parsers/base.py +++ b/xonsh/parsers/base.py @@ -124,31 +124,6 @@ def xonsh_superhelp(x, lineno=None, col=None): return xonsh_call("__xonsh__.superhelp", [x], lineno=lineno, col=col) -def xonsh_pathsearch(pattern, pymode=False, lineno=None, col=None): - """Creates the AST node for calling the __xonsh__.pathsearch() function. - The pymode argument indicate if it is called from subproc or python mode""" - pymode = ast.NameConstant(value=pymode, lineno=lineno, col_offset=col) - searchfunc, pattern = RE_SEARCHPATH.match(pattern).groups() - pattern = ast.Str(s=pattern, lineno=lineno, col_offset=col) - pathobj = False - if searchfunc.startswith("@"): - func = searchfunc[1:] - elif "g" in searchfunc: - func = "__xonsh__.globsearch" - pathobj = "p" in searchfunc - else: - func = "__xonsh__.regexsearch" - pathobj = "p" in searchfunc - func = load_attribute_chain(func, lineno=lineno, col=col) - pathobj = ast.NameConstant(value=pathobj, lineno=lineno, col_offset=col) - return xonsh_call( - "__xonsh__.pathsearch", - args=[func, pattern, pymode, pathobj], - lineno=lineno, - col=col, - ) - - def load_ctx(x): """Recursively sets ctx to ast.Load()""" if not hasattr(x, "ctx"): @@ -658,6 +633,44 @@ class BaseParser: def _parse_error(self, msg, loc): raise_parse_error(msg, loc, self._source, self.lines) + def xonsh_pathsearch(self, pattern, pymode=False, lineno=None, col=None): + """Creates the AST node for calling the __xonsh__.pathsearch() function. 
+ The pymode argument indicate if it is called from subproc or python mode""" + pymode = ast.NameConstant(value=pymode, lineno=lineno, col_offset=col) + searchfunc, pattern = RE_SEARCHPATH.match(pattern).groups() + if not searchfunc.startswith("@") and "f" in searchfunc: + pattern_as_str = f"f'''{pattern}'''" + try: + pattern = pyparse(pattern_as_str).body[0].value + except SyntaxError: + pattern = None + if pattern is None: + try: + pattern = FStringAdaptor( + pattern_as_str, "f", filename=self.lexer.fname + ).run() + except SyntaxError as e: + self._set_error(str(e), self.currloc(lineno=lineno, column=col)) + else: + pattern = ast.Str(s=pattern, lineno=lineno, col_offset=col) + pathobj = False + if searchfunc.startswith("@"): + func = searchfunc[1:] + elif "g" in searchfunc: + func = "__xonsh__.globsearch" + pathobj = "p" in searchfunc + else: + func = "__xonsh__.regexsearch" + pathobj = "p" in searchfunc + func = load_attribute_chain(func, lineno=lineno, col=col) + pathobj = ast.NameConstant(value=pathobj, lineno=lineno, col_offset=col) + return xonsh_call( + "__xonsh__.pathsearch", + args=[func, pattern, pymode, pathobj], + lineno=lineno, + col=col, + ) + # # Precedence of operators # @@ -2413,7 +2426,9 @@ class BaseParser: def p_atom_pathsearch(self, p): """atom : SEARCHPATH""" - p[0] = xonsh_pathsearch(p[1], pymode=True, lineno=self.lineno, col=self.col) + p[0] = self.xonsh_pathsearch( + p[1], pymode=True, lineno=self.lineno, col=self.col + ) # introduce seemingly superfluous symbol 'atom_dname' to enable reuse it in other places def p_atom_dname_indirection(self, p): @@ -3352,7 +3367,7 @@ class BaseParser: def p_subproc_atom_re(self, p): """subproc_atom : SEARCHPATH""" - p0 = xonsh_pathsearch(p[1], pymode=False, lineno=self.lineno, col=self.col) + p0 = self.xonsh_pathsearch(p[1], pymode=False, lineno=self.lineno, col=self.col) p0._cliarg_action = "extend" p[0] = p0 diff --git a/xonsh/tokenize.py b/xonsh/tokenize.py index 609bbb5b..045b47e7 100644 --- a/xonsh/tokenize.py +++ b/xonsh/tokenize.py @@ -305,7 +305,7 @@ String = group( ) # Xonsh-specific Syntax -SearchPath = r"((?:[rgp]+|@\w*)?)`([^\n`\\]*(?:\\.[^\n`\\]*)*)`" +SearchPath = r"((?:[rgpf]+|@\w*)?)`([^\n`\\]*(?:\\.[^\n`\\]*)*)`" # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get
xonsh/xonsh
0ddc05e82e3c91130f61173618925619e44cda7e
diff --git a/tests/test_parser.py b/tests/test_parser.py index 400b6c96..05ce324f 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -2376,8 +2376,11 @@ def test_ls_regex(check_xonsh_ast): check_xonsh_ast({}, "$(ls `[Ff]+i*LE` -l)", False) -def test_backtick(check_xonsh_ast): - check_xonsh_ast({}, "print(`.*`)", False) [email protected]("p", ["", "p"]) [email protected]("f", ["", "f"]) [email protected]("glob_type", ["", "r", "g"]) +def test_backtick(p, f, glob_type, check_xonsh_ast): + check_xonsh_ast({}, f"print({p}{f}{glob_type}`.*`)", False) def test_ls_regex_octothorpe(check_xonsh_ast): @@ -2388,10 +2391,6 @@ def test_ls_explicitregex(check_xonsh_ast): check_xonsh_ast({}, "$(ls r`[Ff]+i*LE` -l)", False) -def test_rbacktick(check_xonsh_ast): - check_xonsh_ast({}, "print(r`.*`)", False) - - def test_ls_explicitregex_octothorpe(check_xonsh_ast): check_xonsh_ast({}, "$(ls r`#[Ff]+i*LE` -l)", False) @@ -2400,22 +2399,6 @@ def test_ls_glob(check_xonsh_ast): check_xonsh_ast({}, "$(ls g`[Ff]+i*LE` -l)", False) -def test_gbacktick(check_xonsh_ast): - check_xonsh_ast({}, "print(g`.*`)", False) - - -def test_pbacktrick(check_xonsh_ast): - check_xonsh_ast({}, "print(p`.*`)", False) - - -def test_pgbacktick(check_xonsh_ast): - check_xonsh_ast({}, "print(pg`.*`)", False) - - -def test_prbacktick(check_xonsh_ast): - check_xonsh_ast({}, "print(pr`.*`)", False) - - def test_ls_glob_octothorpe(check_xonsh_ast): check_xonsh_ast({}, "$(ls g`#[Ff]+i*LE` -l)", False)
Feat: add f-glob strings In Xonsh we have augmented string literals with `p`-strings (that return a Path object) and glob strings g`...` (that return a list of results). It would seem that we want glob strings to behave similarly to normal strings, and so there is a case to be made for supporting `f`-strings in this context. An example: ```xonsh >>> ls file-a file-b >>> prefix = "file" >>> echo fg`{prefix}*` file-a file-b ``` ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
0.0
0ddc05e82e3c91130f61173618925619e44cda7e
[ "tests/test_parser.py::test_backtick[-f-]", "tests/test_parser.py::test_backtick[-f-p]", "tests/test_parser.py::test_backtick[r-f-]", "tests/test_parser.py::test_backtick[r-f-p]", "tests/test_parser.py::test_backtick[g-f-]", "tests/test_parser.py::test_backtick[g-f-p]" ]
[ "tests/test_parser.py::test_int_literal", "tests/test_parser.py::test_int_literal_underscore", "tests/test_parser.py::test_float_literal", "tests/test_parser.py::test_float_literal_underscore", "tests/test_parser.py::test_imag_literal", "tests/test_parser.py::test_float_imag_literal", "tests/test_parser.py::test_complex", "tests/test_parser.py::test_str_literal", "tests/test_parser.py::test_bytes_literal", "tests/test_parser.py::test_raw_literal", "tests/test_parser.py::test_f_literal", "tests/test_parser.py::test_f_env_var", "tests/test_parser.py::test_fstring_adaptor[f\"$HOME\"-$HOME]", "tests/test_parser.py::test_fstring_adaptor[f\"{0}", "tests/test_parser.py::test_fstring_adaptor[f\"{$HOME}\"-/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f\"{", "tests/test_parser.py::test_fstring_adaptor[f\"{'$HOME'}\"-$HOME]", "tests/test_parser.py::test_fstring_adaptor[f\"$HOME", "tests/test_parser.py::test_fstring_adaptor[f\"{${'HOME'}}\"-/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f'{${$FOO+$BAR}}'-/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f\"${$FOO}{$BAR}={f'{$HOME}'}\"-$HOME=/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f\"\"\"foo\\n{f\"_{$HOME}_\"}\\nbar\"\"\"-foo\\n_/foo/bar_\\nbar]", "tests/test_parser.py::test_fstring_adaptor[f\"\"\"foo\\n{f\"_{${'HOME'}}_\"}\\nbar\"\"\"-foo\\n_/foo/bar_\\nbar]", "tests/test_parser.py::test_fstring_adaptor[f\"\"\"foo\\n{f\"_{${", "tests/test_parser.py::test_fstring_adaptor[f'{$HOME=}'-$HOME='/foo/bar']", "tests/test_parser.py::test_raw_bytes_literal", "tests/test_parser.py::test_unary_plus", "tests/test_parser.py::test_unary_minus", "tests/test_parser.py::test_unary_invert", "tests/test_parser.py::test_binop_plus", "tests/test_parser.py::test_binop_minus", "tests/test_parser.py::test_binop_times", "tests/test_parser.py::test_binop_matmult", "tests/test_parser.py::test_binop_div", "tests/test_parser.py::test_binop_mod", "tests/test_parser.py::test_binop_floordiv", "tests/test_parser.py::test_binop_pow", "tests/test_parser.py::test_plus_pow", "tests/test_parser.py::test_plus_plus", "tests/test_parser.py::test_plus_minus", "tests/test_parser.py::test_minus_plus", "tests/test_parser.py::test_minus_minus", "tests/test_parser.py::test_minus_plus_minus", "tests/test_parser.py::test_times_plus", "tests/test_parser.py::test_plus_times", "tests/test_parser.py::test_times_times", "tests/test_parser.py::test_times_div", "tests/test_parser.py::test_times_div_mod", "tests/test_parser.py::test_times_div_mod_floor", "tests/test_parser.py::test_str_str", "tests/test_parser.py::test_str_str_str", "tests/test_parser.py::test_str_plus_str", "tests/test_parser.py::test_str_times_int", "tests/test_parser.py::test_int_times_str", "tests/test_parser.py::test_group_plus_times", "tests/test_parser.py::test_plus_group_times", "tests/test_parser.py::test_group", "tests/test_parser.py::test_lt", "tests/test_parser.py::test_gt", "tests/test_parser.py::test_eq", "tests/test_parser.py::test_le", "tests/test_parser.py::test_ge", "tests/test_parser.py::test_ne", "tests/test_parser.py::test_in", "tests/test_parser.py::test_is", "tests/test_parser.py::test_not_in", "tests/test_parser.py::test_is_not", "tests/test_parser.py::test_lt_lt", "tests/test_parser.py::test_lt_lt_lt", "tests/test_parser.py::test_not", "tests/test_parser.py::test_or", "tests/test_parser.py::test_or_or", "tests/test_parser.py::test_and", "tests/test_parser.py::test_and_and", "tests/test_parser.py::test_and_or", "tests/test_parser.py::test_or_and", 
"tests/test_parser.py::test_group_and_and", "tests/test_parser.py::test_group_and_or", "tests/test_parser.py::test_if_else_expr", "tests/test_parser.py::test_if_else_expr_expr", "tests/test_parser.py::test_subscription_syntaxes", "tests/test_parser.py::test_subscription_special_syntaxes", "tests/test_parser.py::test_str_idx", "tests/test_parser.py::test_str_slice", "tests/test_parser.py::test_str_step", "tests/test_parser.py::test_str_slice_all", "tests/test_parser.py::test_str_slice_upper", "tests/test_parser.py::test_str_slice_lower", "tests/test_parser.py::test_str_slice_other", "tests/test_parser.py::test_str_slice_lower_other", "tests/test_parser.py::test_str_slice_upper_other", "tests/test_parser.py::test_str_2slice", "tests/test_parser.py::test_str_2step", "tests/test_parser.py::test_str_2slice_all", "tests/test_parser.py::test_str_2slice_upper", "tests/test_parser.py::test_str_2slice_lower", "tests/test_parser.py::test_str_2slice_lowerupper", "tests/test_parser.py::test_str_2slice_other", "tests/test_parser.py::test_str_2slice_lower_other", "tests/test_parser.py::test_str_2slice_upper_other", "tests/test_parser.py::test_str_3slice", "tests/test_parser.py::test_str_3step", "tests/test_parser.py::test_str_3slice_all", "tests/test_parser.py::test_str_3slice_upper", "tests/test_parser.py::test_str_3slice_lower", "tests/test_parser.py::test_str_3slice_lowerlowerupper", "tests/test_parser.py::test_str_3slice_lowerupperlower", "tests/test_parser.py::test_str_3slice_lowerupperupper", "tests/test_parser.py::test_str_3slice_upperlowerlower", "tests/test_parser.py::test_str_3slice_upperlowerupper", "tests/test_parser.py::test_str_3slice_upperupperlower", "tests/test_parser.py::test_str_3slice_other", "tests/test_parser.py::test_str_3slice_lower_other", "tests/test_parser.py::test_str_3slice_upper_other", "tests/test_parser.py::test_str_slice_true", "tests/test_parser.py::test_str_true_slice", "tests/test_parser.py::test_list_empty", "tests/test_parser.py::test_list_one", "tests/test_parser.py::test_list_one_comma", "tests/test_parser.py::test_list_two", "tests/test_parser.py::test_list_three", "tests/test_parser.py::test_list_three_comma", "tests/test_parser.py::test_list_one_nested", "tests/test_parser.py::test_list_list_four_nested", "tests/test_parser.py::test_list_tuple_three_nested", "tests/test_parser.py::test_list_set_tuple_three_nested", "tests/test_parser.py::test_list_tuple_one_nested", "tests/test_parser.py::test_tuple_tuple_one_nested", "tests/test_parser.py::test_dict_list_one_nested", "tests/test_parser.py::test_dict_list_one_nested_comma", "tests/test_parser.py::test_dict_tuple_one_nested", "tests/test_parser.py::test_dict_tuple_one_nested_comma", "tests/test_parser.py::test_dict_list_two_nested", "tests/test_parser.py::test_set_tuple_one_nested", "tests/test_parser.py::test_set_tuple_two_nested", "tests/test_parser.py::test_tuple_empty", "tests/test_parser.py::test_tuple_one_bare", "tests/test_parser.py::test_tuple_two_bare", "tests/test_parser.py::test_tuple_three_bare", "tests/test_parser.py::test_tuple_three_bare_comma", "tests/test_parser.py::test_tuple_one_comma", "tests/test_parser.py::test_tuple_two", "tests/test_parser.py::test_tuple_three", "tests/test_parser.py::test_tuple_three_comma", "tests/test_parser.py::test_bare_tuple_of_tuples", "tests/test_parser.py::test_set_one", "tests/test_parser.py::test_set_one_comma", "tests/test_parser.py::test_set_two", "tests/test_parser.py::test_set_two_comma", "tests/test_parser.py::test_set_three", 
"tests/test_parser.py::test_dict_empty", "tests/test_parser.py::test_dict_one", "tests/test_parser.py::test_dict_one_comma", "tests/test_parser.py::test_dict_two", "tests/test_parser.py::test_dict_two_comma", "tests/test_parser.py::test_dict_three", "tests/test_parser.py::test_dict_from_dict_one", "tests/test_parser.py::test_dict_from_dict_one_comma", "tests/test_parser.py::test_dict_from_dict_two_xy", "tests/test_parser.py::test_dict_from_dict_two_x_first", "tests/test_parser.py::test_dict_from_dict_two_x_second", "tests/test_parser.py::test_dict_from_dict_two_x_none", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-True-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-True-False]", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-False-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-False-False]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-True-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-True-False]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-False-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-False-False]", "tests/test_parser.py::test_unpack_range_tuple", "tests/test_parser.py::test_unpack_range_tuple_4", "tests/test_parser.py::test_unpack_range_tuple_parens", "tests/test_parser.py::test_unpack_range_tuple_parens_4", "tests/test_parser.py::test_unpack_range_list", "tests/test_parser.py::test_unpack_range_list_4", "tests/test_parser.py::test_unpack_range_set", "tests/test_parser.py::test_unpack_range_set_4", "tests/test_parser.py::test_true", "tests/test_parser.py::test_false", "tests/test_parser.py::test_none", "tests/test_parser.py::test_elipssis", "tests/test_parser.py::test_not_implemented_name", "tests/test_parser.py::test_genexpr", "tests/test_parser.py::test_genexpr_if", "tests/test_parser.py::test_genexpr_if_and", "tests/test_parser.py::test_dbl_genexpr", "tests/test_parser.py::test_genexpr_if_genexpr", "tests/test_parser.py::test_genexpr_if_genexpr_if", "tests/test_parser.py::test_listcomp", "tests/test_parser.py::test_listcomp_if", "tests/test_parser.py::test_listcomp_if_and", "tests/test_parser.py::test_listcomp_multi_if", "tests/test_parser.py::test_dbl_listcomp", "tests/test_parser.py::test_listcomp_if_listcomp", "tests/test_parser.py::test_listcomp_if_listcomp_if", "tests/test_parser.py::test_setcomp", "tests/test_parser.py::test_setcomp_if", "tests/test_parser.py::test_setcomp_if_and", "tests/test_parser.py::test_dbl_setcomp", "tests/test_parser.py::test_setcomp_if_setcomp", "tests/test_parser.py::test_setcomp_if_setcomp_if", "tests/test_parser.py::test_dictcomp", "tests/test_parser.py::test_dictcomp_unpack_parens", "tests/test_parser.py::test_dictcomp_unpack_no_parens", "tests/test_parser.py::test_dictcomp_if", "tests/test_parser.py::test_dictcomp_if_and", "tests/test_parser.py::test_dbl_dictcomp", "tests/test_parser.py::test_dictcomp_if_dictcomp", "tests/test_parser.py::test_dictcomp_if_dictcomp_if", "tests/test_parser.py::test_lambda", "tests/test_parser.py::test_lambda_x", "tests/test_parser.py::test_lambda_kwx", "tests/test_parser.py::test_lambda_x_y", "tests/test_parser.py::test_lambda_x_y_z", "tests/test_parser.py::test_lambda_x_kwy", "tests/test_parser.py::test_lambda_kwx_kwy", "tests/test_parser.py::test_lambda_kwx_kwy_kwz", "tests/test_parser.py::test_lambda_x_comma", "tests/test_parser.py::test_lambda_x_y_comma", "tests/test_parser.py::test_lambda_x_y_z_comma", "tests/test_parser.py::test_lambda_x_kwy_comma", 
"tests/test_parser.py::test_lambda_kwx_kwy_comma", "tests/test_parser.py::test_lambda_kwx_kwy_kwz_comma", "tests/test_parser.py::test_lambda_args", "tests/test_parser.py::test_lambda_args_x", "tests/test_parser.py::test_lambda_args_x_y", "tests/test_parser.py::test_lambda_args_x_kwy", "tests/test_parser.py::test_lambda_args_kwx_y", "tests/test_parser.py::test_lambda_args_kwx_kwy", "tests/test_parser.py::test_lambda_x_args", "tests/test_parser.py::test_lambda_x_args_y", "tests/test_parser.py::test_lambda_x_args_y_z", "tests/test_parser.py::test_lambda_kwargs", "tests/test_parser.py::test_lambda_x_kwargs", "tests/test_parser.py::test_lambda_x_y_kwargs", "tests/test_parser.py::test_lambda_x_kwy_kwargs", "tests/test_parser.py::test_lambda_args_kwargs", "tests/test_parser.py::test_lambda_x_args_kwargs", "tests/test_parser.py::test_lambda_x_y_args_kwargs", "tests/test_parser.py::test_lambda_kwx_args_kwargs", "tests/test_parser.py::test_lambda_x_kwy_args_kwargs", "tests/test_parser.py::test_lambda_x_args_y_kwargs", "tests/test_parser.py::test_lambda_x_args_kwy_kwargs", "tests/test_parser.py::test_lambda_args_y_kwargs", "tests/test_parser.py::test_lambda_star_x", "tests/test_parser.py::test_lambda_star_x_y", "tests/test_parser.py::test_lambda_star_x_kwargs", "tests/test_parser.py::test_lambda_star_kwx_kwargs", "tests/test_parser.py::test_lambda_x_star_y", "tests/test_parser.py::test_lambda_x_y_star_z", "tests/test_parser.py::test_lambda_x_kwy_star_y", "tests/test_parser.py::test_lambda_x_kwy_star_kwy", "tests/test_parser.py::test_lambda_x_star_y_kwargs", "tests/test_parser.py::test_lambda_x_divide_y_star_z_kwargs", "tests/test_parser.py::test_call_range", "tests/test_parser.py::test_call_range_comma", "tests/test_parser.py::test_call_range_x_y", "tests/test_parser.py::test_call_range_x_y_comma", "tests/test_parser.py::test_call_range_x_y_z", "tests/test_parser.py::test_call_dict_kwx", "tests/test_parser.py::test_call_dict_kwx_comma", "tests/test_parser.py::test_call_dict_kwx_kwy", "tests/test_parser.py::test_call_tuple_gen", "tests/test_parser.py::test_call_tuple_genifs", "tests/test_parser.py::test_call_range_star", "tests/test_parser.py::test_call_range_x_star", "tests/test_parser.py::test_call_int", "tests/test_parser.py::test_call_int_base_dict", "tests/test_parser.py::test_call_dict_kwargs", "tests/test_parser.py::test_call_list_many_star_args", "tests/test_parser.py::test_call_list_many_starstar_args", "tests/test_parser.py::test_call_list_many_star_and_starstar_args", "tests/test_parser.py::test_call_alot", "tests/test_parser.py::test_call_alot_next", "tests/test_parser.py::test_call_alot_next_next", "tests/test_parser.py::test_getattr", "tests/test_parser.py::test_getattr_getattr", "tests/test_parser.py::test_dict_tuple_key", "tests/test_parser.py::test_dict_tuple_key_get", "tests/test_parser.py::test_dict_tuple_key_get_3", "tests/test_parser.py::test_pipe_op", "tests/test_parser.py::test_pipe_op_two", "tests/test_parser.py::test_pipe_op_three", "tests/test_parser.py::test_xor_op", "tests/test_parser.py::test_xor_op_two", "tests/test_parser.py::test_xor_op_three", "tests/test_parser.py::test_xor_pipe", "tests/test_parser.py::test_amp_op", "tests/test_parser.py::test_amp_op_two", "tests/test_parser.py::test_amp_op_three", "tests/test_parser.py::test_lshift_op", "tests/test_parser.py::test_lshift_op_two", "tests/test_parser.py::test_lshift_op_three", "tests/test_parser.py::test_rshift_op", "tests/test_parser.py::test_rshift_op_two", "tests/test_parser.py::test_rshift_op_three", 
"tests/test_parser.py::test_named_expr", "tests/test_parser.py::test_named_expr_list", "tests/test_parser.py::test_equals", "tests/test_parser.py::test_equals_semi", "tests/test_parser.py::test_x_y_equals_semi", "tests/test_parser.py::test_equals_two", "tests/test_parser.py::test_equals_two_semi", "tests/test_parser.py::test_equals_three", "tests/test_parser.py::test_equals_three_semi", "tests/test_parser.py::test_plus_eq", "tests/test_parser.py::test_sub_eq", "tests/test_parser.py::test_times_eq", "tests/test_parser.py::test_matmult_eq", "tests/test_parser.py::test_div_eq", "tests/test_parser.py::test_floordiv_eq", "tests/test_parser.py::test_pow_eq", "tests/test_parser.py::test_mod_eq", "tests/test_parser.py::test_xor_eq", "tests/test_parser.py::test_ampersand_eq", "tests/test_parser.py::test_bitor_eq", "tests/test_parser.py::test_lshift_eq", "tests/test_parser.py::test_rshift_eq", "tests/test_parser.py::test_bare_unpack", "tests/test_parser.py::test_lhand_group_unpack", "tests/test_parser.py::test_rhand_group_unpack", "tests/test_parser.py::test_grouped_unpack", "tests/test_parser.py::test_double_grouped_unpack", "tests/test_parser.py::test_double_ungrouped_unpack", "tests/test_parser.py::test_stary_eq", "tests/test_parser.py::test_stary_x", "tests/test_parser.py::test_tuple_x_stary", "tests/test_parser.py::test_list_x_stary", "tests/test_parser.py::test_bare_x_stary", "tests/test_parser.py::test_bare_x_stary_z", "tests/test_parser.py::test_equals_list", "tests/test_parser.py::test_equals_dict", "tests/test_parser.py::test_equals_attr", "tests/test_parser.py::test_equals_annotation", "tests/test_parser.py::test_equals_annotation_empty", "tests/test_parser.py::test_dict_keys", "tests/test_parser.py::test_assert_msg", "tests/test_parser.py::test_assert", "tests/test_parser.py::test_pass", "tests/test_parser.py::test_del", "tests/test_parser.py::test_del_comma", "tests/test_parser.py::test_del_two", "tests/test_parser.py::test_del_two_comma", "tests/test_parser.py::test_del_with_parens", "tests/test_parser.py::test_raise", "tests/test_parser.py::test_raise_x", "tests/test_parser.py::test_raise_x_from", "tests/test_parser.py::test_import_x", "tests/test_parser.py::test_import_xy", "tests/test_parser.py::test_import_xyz", "tests/test_parser.py::test_from_x_import_y", "tests/test_parser.py::test_from_dot_import_y", "tests/test_parser.py::test_from_dotx_import_y", "tests/test_parser.py::test_from_dotdotx_import_y", "tests/test_parser.py::test_from_dotdotdotx_import_y", "tests/test_parser.py::test_from_dotdotdotdotx_import_y", "tests/test_parser.py::test_from_import_x_y", "tests/test_parser.py::test_from_import_x_y_z", "tests/test_parser.py::test_from_dot_import_x_y", "tests/test_parser.py::test_from_dot_import_x_y_z", "tests/test_parser.py::test_from_dot_import_group_x_y", "tests/test_parser.py::test_import_x_as_y", "tests/test_parser.py::test_import_xy_as_z", "tests/test_parser.py::test_import_x_y_as_z", "tests/test_parser.py::test_import_x_as_y_z", "tests/test_parser.py::test_import_x_as_y_z_as_a", "tests/test_parser.py::test_from_dot_import_x_as_y", "tests/test_parser.py::test_from_x_import_star", "tests/test_parser.py::test_from_x_import_group_x_y_z", "tests/test_parser.py::test_from_x_import_group_x_y_z_comma", "tests/test_parser.py::test_from_x_import_y_as_z", "tests/test_parser.py::test_from_x_import_y_as_z_a_as_b", "tests/test_parser.py::test_from_dotx_import_y_as_z_a_as_b_c_as_d", "tests/test_parser.py::test_continue", "tests/test_parser.py::test_break", 
"tests/test_parser.py::test_global", "tests/test_parser.py::test_global_xy", "tests/test_parser.py::test_nonlocal_x", "tests/test_parser.py::test_nonlocal_xy", "tests/test_parser.py::test_yield", "tests/test_parser.py::test_yield_x", "tests/test_parser.py::test_yield_x_comma", "tests/test_parser.py::test_yield_x_y", "tests/test_parser.py::test_yield_x_starexpr", "tests/test_parser.py::test_yield_from_x", "tests/test_parser.py::test_return", "tests/test_parser.py::test_return_x", "tests/test_parser.py::test_return_x_comma", "tests/test_parser.py::test_return_x_y", "tests/test_parser.py::test_return_x_starexpr", "tests/test_parser.py::test_if_true", "tests/test_parser.py::test_if_true_twolines", "tests/test_parser.py::test_if_true_twolines_deindent", "tests/test_parser.py::test_if_true_else", "tests/test_parser.py::test_if_true_x", "tests/test_parser.py::test_if_switch", "tests/test_parser.py::test_if_switch_elif1_else", "tests/test_parser.py::test_if_switch_elif2_else", "tests/test_parser.py::test_if_nested", "tests/test_parser.py::test_while", "tests/test_parser.py::test_while_else", "tests/test_parser.py::test_for", "tests/test_parser.py::test_for_zip", "tests/test_parser.py::test_for_idx", "tests/test_parser.py::test_for_zip_idx", "tests/test_parser.py::test_for_attr", "tests/test_parser.py::test_for_zip_attr", "tests/test_parser.py::test_for_else", "tests/test_parser.py::test_async_for", "tests/test_parser.py::test_with", "tests/test_parser.py::test_with_as", "tests/test_parser.py::test_with_xy", "tests/test_parser.py::test_with_x_as_y_z", "tests/test_parser.py::test_with_x_as_y_a_as_b", "tests/test_parser.py::test_with_in_func", "tests/test_parser.py::test_async_with", "tests/test_parser.py::test_try", "tests/test_parser.py::test_try_except_t", "tests/test_parser.py::test_try_except_t_as_e", "tests/test_parser.py::test_try_except_t_u", "tests/test_parser.py::test_try_except_t_u_as_e", "tests/test_parser.py::test_try_except_t_except_u", "tests/test_parser.py::test_try_except_else", "tests/test_parser.py::test_try_except_finally", "tests/test_parser.py::test_try_except_else_finally", "tests/test_parser.py::test_try_finally", "tests/test_parser.py::test_func", "tests/test_parser.py::test_func_ret", "tests/test_parser.py::test_func_ret_42", "tests/test_parser.py::test_func_ret_42_65", "tests/test_parser.py::test_func_rarrow", "tests/test_parser.py::test_func_x", "tests/test_parser.py::test_func_kwx", "tests/test_parser.py::test_func_x_y", "tests/test_parser.py::test_func_x_y_z", "tests/test_parser.py::test_func_x_kwy", "tests/test_parser.py::test_func_kwx_kwy", "tests/test_parser.py::test_func_kwx_kwy_kwz", "tests/test_parser.py::test_func_x_comma", "tests/test_parser.py::test_func_x_y_comma", "tests/test_parser.py::test_func_x_y_z_comma", "tests/test_parser.py::test_func_x_kwy_comma", "tests/test_parser.py::test_func_kwx_kwy_comma", "tests/test_parser.py::test_func_kwx_kwy_kwz_comma", "tests/test_parser.py::test_func_args", "tests/test_parser.py::test_func_args_x", "tests/test_parser.py::test_func_args_x_y", "tests/test_parser.py::test_func_args_x_kwy", "tests/test_parser.py::test_func_args_kwx_y", "tests/test_parser.py::test_func_args_kwx_kwy", "tests/test_parser.py::test_func_x_args", "tests/test_parser.py::test_func_x_args_y", "tests/test_parser.py::test_func_x_args_y_z", "tests/test_parser.py::test_func_kwargs", "tests/test_parser.py::test_func_x_kwargs", "tests/test_parser.py::test_func_x_y_kwargs", "tests/test_parser.py::test_func_x_kwy_kwargs", 
"tests/test_parser.py::test_func_args_kwargs", "tests/test_parser.py::test_func_x_args_kwargs", "tests/test_parser.py::test_func_x_y_args_kwargs", "tests/test_parser.py::test_func_kwx_args_kwargs", "tests/test_parser.py::test_func_x_kwy_args_kwargs", "tests/test_parser.py::test_func_x_args_y_kwargs", "tests/test_parser.py::test_func_x_args_kwy_kwargs", "tests/test_parser.py::test_func_args_y_kwargs", "tests/test_parser.py::test_func_star_x", "tests/test_parser.py::test_func_star_x_y", "tests/test_parser.py::test_func_star_x_kwargs", "tests/test_parser.py::test_func_star_kwx_kwargs", "tests/test_parser.py::test_func_x_star_y", "tests/test_parser.py::test_func_x_y_star_z", "tests/test_parser.py::test_func_x_kwy_star_y", "tests/test_parser.py::test_func_x_kwy_star_kwy", "tests/test_parser.py::test_func_x_star_y_kwargs", "tests/test_parser.py::test_func_x_divide", "tests/test_parser.py::test_func_x_divide_y_star_z_kwargs", "tests/test_parser.py::test_func_tx", "tests/test_parser.py::test_func_txy", "tests/test_parser.py::test_class", "tests/test_parser.py::test_class_obj", "tests/test_parser.py::test_class_int_flt", "tests/test_parser.py::test_class_obj_kw", "tests/test_parser.py::test_decorator", "tests/test_parser.py::test_decorator_2", "tests/test_parser.py::test_decorator_call", "tests/test_parser.py::test_decorator_call_args", "tests/test_parser.py::test_decorator_dot_call_args", "tests/test_parser.py::test_decorator_dot_dot_call_args", "tests/test_parser.py::test_broken_prompt_func", "tests/test_parser.py::test_class_with_methods", "tests/test_parser.py::test_nested_functions", "tests/test_parser.py::test_function_blank_line", "tests/test_parser.py::test_async_func", "tests/test_parser.py::test_async_decorator", "tests/test_parser.py::test_async_await", "tests/test_parser.py::test_named_expr_args", "tests/test_parser.py::test_named_expr_if", "tests/test_parser.py::test_named_expr_elif", "tests/test_parser.py::test_named_expr_while", "tests/test_parser.py::test_path_literal", "tests/test_parser.py::test_path_fstring_literal", "tests/test_parser.py::test_dollar_name", "tests/test_parser.py::test_dollar_py", "tests/test_parser.py::test_dollar_py_test", "tests/test_parser.py::test_dollar_py_recursive_name", "tests/test_parser.py::test_dollar_py_test_recursive_name", "tests/test_parser.py::test_dollar_py_test_recursive_test", "tests/test_parser.py::test_dollar_name_set", "tests/test_parser.py::test_dollar_py_set", "tests/test_parser.py::test_dollar_sub", "tests/test_parser.py::test_dollar_sub_space[$(ls", "tests/test_parser.py::test_dollar_sub_space[$(", "tests/test_parser.py::test_ls_dot", "tests/test_parser.py::test_lambda_in_atparens", "tests/test_parser.py::test_generator_in_atparens", "tests/test_parser.py::test_bare_tuple_in_atparens", "tests/test_parser.py::test_nested_madness", "tests/test_parser.py::test_atparens_intoken", "tests/test_parser.py::test_ls_dot_nesting", "tests/test_parser.py::test_ls_dot_nesting_var", "tests/test_parser.py::test_ls_dot_str", "tests/test_parser.py::test_ls_nest_ls", "tests/test_parser.py::test_ls_nest_ls_dashl", "tests/test_parser.py::test_ls_envvar_strval", "tests/test_parser.py::test_ls_envvar_listval", "tests/test_parser.py::test_bang_sub", "tests/test_parser.py::test_bang_sub_space[!(ls", "tests/test_parser.py::test_bang_sub_space[!(", "tests/test_parser.py::test_bang_ls_dot", "tests/test_parser.py::test_bang_ls_dot_nesting", "tests/test_parser.py::test_bang_ls_dot_nesting_var", "tests/test_parser.py::test_bang_ls_dot_str", 
"tests/test_parser.py::test_bang_ls_nest_ls", "tests/test_parser.py::test_bang_ls_nest_ls_dashl", "tests/test_parser.py::test_bang_ls_envvar_strval", "tests/test_parser.py::test_bang_ls_envvar_listval", "tests/test_parser.py::test_bang_envvar_args", "tests/test_parser.py::test_question", "tests/test_parser.py::test_dobquestion", "tests/test_parser.py::test_question_chain", "tests/test_parser.py::test_ls_regex", "tests/test_parser.py::test_backtick[--]", "tests/test_parser.py::test_backtick[--p]", "tests/test_parser.py::test_backtick[r--]", "tests/test_parser.py::test_backtick[r--p]", "tests/test_parser.py::test_backtick[g--]", "tests/test_parser.py::test_backtick[g--p]", "tests/test_parser.py::test_ls_regex_octothorpe", "tests/test_parser.py::test_ls_explicitregex", "tests/test_parser.py::test_ls_explicitregex_octothorpe", "tests/test_parser.py::test_ls_glob", "tests/test_parser.py::test_ls_glob_octothorpe", "tests/test_parser.py::test_ls_customsearch", "tests/test_parser.py::test_custombacktick", "tests/test_parser.py::test_ls_customsearch_octothorpe", "tests/test_parser.py::test_injection", "tests/test_parser.py::test_rhs_nested_injection", "tests/test_parser.py::test_merged_injection", "tests/test_parser.py::test_backtick_octothorpe", "tests/test_parser.py::test_uncaptured_sub", "tests/test_parser.py::test_hiddenobj_sub", "tests/test_parser.py::test_slash_envarv_echo", "tests/test_parser.py::test_echo_double_eq", "tests/test_parser.py::test_bang_two_cmds_one_pipe", "tests/test_parser.py::test_bang_three_cmds_two_pipes", "tests/test_parser.py::test_bang_one_cmd_write", "tests/test_parser.py::test_bang_one_cmd_append", "tests/test_parser.py::test_bang_two_cmds_write", "tests/test_parser.py::test_bang_two_cmds_append", "tests/test_parser.py::test_bang_cmd_background", "tests/test_parser.py::test_bang_cmd_background_nospace", "tests/test_parser.py::test_bang_git_quotes_no_space", "tests/test_parser.py::test_bang_git_quotes_space", "tests/test_parser.py::test_bang_git_two_quotes_space", "tests/test_parser.py::test_bang_git_two_quotes_space_space", "tests/test_parser.py::test_bang_ls_quotes_3_space", "tests/test_parser.py::test_two_cmds_one_pipe", "tests/test_parser.py::test_three_cmds_two_pipes", "tests/test_parser.py::test_two_cmds_one_and_brackets", "tests/test_parser.py::test_three_cmds_two_ands", "tests/test_parser.py::test_two_cmds_one_doubleamps", "tests/test_parser.py::test_three_cmds_two_doubleamps", "tests/test_parser.py::test_two_cmds_one_or", "tests/test_parser.py::test_three_cmds_two_ors", "tests/test_parser.py::test_two_cmds_one_doublepipe", "tests/test_parser.py::test_three_cmds_two_doublepipe", "tests/test_parser.py::test_one_cmd_write", "tests/test_parser.py::test_one_cmd_append", "tests/test_parser.py::test_two_cmds_write", "tests/test_parser.py::test_two_cmds_append", "tests/test_parser.py::test_cmd_background", "tests/test_parser.py::test_cmd_background_nospace", "tests/test_parser.py::test_git_quotes_no_space", "tests/test_parser.py::test_git_quotes_space", "tests/test_parser.py::test_git_two_quotes_space", "tests/test_parser.py::test_git_two_quotes_space_space", "tests/test_parser.py::test_ls_quotes_3_space", "tests/test_parser.py::test_leading_envvar_assignment", "tests/test_parser.py::test_echo_comma", "tests/test_parser.py::test_echo_internal_comma", "tests/test_parser.py::test_comment_only", "tests/test_parser.py::test_echo_slash_question", "tests/test_parser.py::test_bad_quotes", "tests/test_parser.py::test_redirect", 
"tests/test_parser.py::test_use_subshell[![(cat)]]", "tests/test_parser.py::test_use_subshell[![(cat;)]]", "tests/test_parser.py::test_use_subshell[![(cd", "tests/test_parser.py::test_use_subshell[![(echo", "tests/test_parser.py::test_use_subshell[![(if", "tests/test_parser.py::test_redirect_abspath[$[cat", "tests/test_parser.py::test_redirect_abspath[$[(cat)", "tests/test_parser.py::test_redirect_abspath[$[<", "tests/test_parser.py::test_redirect_abspath[![<", "tests/test_parser.py::test_redirect_output[]", "tests/test_parser.py::test_redirect_output[o]", "tests/test_parser.py::test_redirect_output[out]", "tests/test_parser.py::test_redirect_output[1]", "tests/test_parser.py::test_redirect_error[e]", "tests/test_parser.py::test_redirect_error[err]", "tests/test_parser.py::test_redirect_error[2]", "tests/test_parser.py::test_redirect_all[a]", "tests/test_parser.py::test_redirect_all[all]", "tests/test_parser.py::test_redirect_all[&]", "tests/test_parser.py::test_redirect_error_to_output[-e>o]", "tests/test_parser.py::test_redirect_error_to_output[-e>out]", "tests/test_parser.py::test_redirect_error_to_output[-err>o]", "tests/test_parser.py::test_redirect_error_to_output[-2>1]", "tests/test_parser.py::test_redirect_error_to_output[-e>1]", "tests/test_parser.py::test_redirect_error_to_output[-err>1]", "tests/test_parser.py::test_redirect_error_to_output[-2>out]", "tests/test_parser.py::test_redirect_error_to_output[-2>o]", "tests/test_parser.py::test_redirect_error_to_output[-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>o]", "tests/test_parser.py::test_redirect_error_to_output[o-e>out]", "tests/test_parser.py::test_redirect_error_to_output[o-err>o]", "tests/test_parser.py::test_redirect_error_to_output[o-2>1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>1]", "tests/test_parser.py::test_redirect_error_to_output[o-err>1]", "tests/test_parser.py::test_redirect_error_to_output[o-2>out]", "tests/test_parser.py::test_redirect_error_to_output[o-2>o]", "tests/test_parser.py::test_redirect_error_to_output[o-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>o]", "tests/test_parser.py::test_redirect_error_to_output[out-e>out]", "tests/test_parser.py::test_redirect_error_to_output[out-err>o]", "tests/test_parser.py::test_redirect_error_to_output[out-2>1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>1]", "tests/test_parser.py::test_redirect_error_to_output[out-err>1]", "tests/test_parser.py::test_redirect_error_to_output[out-2>out]", "tests/test_parser.py::test_redirect_error_to_output[out-2>o]", "tests/test_parser.py::test_redirect_error_to_output[out-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>o]", "tests/test_parser.py::test_redirect_error_to_output[1-e>out]", "tests/test_parser.py::test_redirect_error_to_output[1-err>o]", "tests/test_parser.py::test_redirect_error_to_output[1-2>1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>1]", "tests/test_parser.py::test_redirect_error_to_output[1-err>1]", "tests/test_parser.py::test_redirect_error_to_output[1-2>out]", "tests/test_parser.py::test_redirect_error_to_output[1-2>o]", 
"tests/test_parser.py::test_redirect_error_to_output[1-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-2>&1]", "tests/test_parser.py::test_redirect_output_to_error[e-o>e]", "tests/test_parser.py::test_redirect_output_to_error[e-o>err]", "tests/test_parser.py::test_redirect_output_to_error[e-out>e]", "tests/test_parser.py::test_redirect_output_to_error[e-1>2]", "tests/test_parser.py::test_redirect_output_to_error[e-o>2]", "tests/test_parser.py::test_redirect_output_to_error[e-out>2]", "tests/test_parser.py::test_redirect_output_to_error[e-1>err]", "tests/test_parser.py::test_redirect_output_to_error[e-1>e]", "tests/test_parser.py::test_redirect_output_to_error[e-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[e-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[e-1>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>e]", "tests/test_parser.py::test_redirect_output_to_error[err-o>err]", "tests/test_parser.py::test_redirect_output_to_error[err-out>e]", "tests/test_parser.py::test_redirect_output_to_error[err-1>2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>2]", "tests/test_parser.py::test_redirect_output_to_error[err-out>2]", "tests/test_parser.py::test_redirect_output_to_error[err-1>err]", "tests/test_parser.py::test_redirect_output_to_error[err-1>e]", "tests/test_parser.py::test_redirect_output_to_error[err-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-1>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>e]", "tests/test_parser.py::test_redirect_output_to_error[2-o>err]", "tests/test_parser.py::test_redirect_output_to_error[2-out>e]", "tests/test_parser.py::test_redirect_output_to_error[2-1>2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>2]", "tests/test_parser.py::test_redirect_output_to_error[2-out>2]", "tests/test_parser.py::test_redirect_output_to_error[2-1>err]", "tests/test_parser.py::test_redirect_output_to_error[2-1>e]", "tests/test_parser.py::test_redirect_output_to_error[2-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-1>&2]", "tests/test_parser.py::test_macro_call_empty", "tests/test_parser.py::test_macro_call_one_arg[x]", "tests/test_parser.py::test_macro_call_one_arg[True]", "tests/test_parser.py::test_macro_call_one_arg[None]", "tests/test_parser.py::test_macro_call_one_arg[import", "tests/test_parser.py::test_macro_call_one_arg[x=10]", "tests/test_parser.py::test_macro_call_one_arg[\"oh", "tests/test_parser.py::test_macro_call_one_arg[...]", "tests/test_parser.py::test_macro_call_one_arg[", "tests/test_parser.py::test_macro_call_one_arg[if", "tests/test_parser.py::test_macro_call_one_arg[{x:", "tests/test_parser.py::test_macro_call_one_arg[{1,", "tests/test_parser.py::test_macro_call_one_arg[(x,y)]", "tests/test_parser.py::test_macro_call_one_arg[(x,", "tests/test_parser.py::test_macro_call_one_arg[((x,", "tests/test_parser.py::test_macro_call_one_arg[g()]", "tests/test_parser.py::test_macro_call_one_arg[range(10)]", "tests/test_parser.py::test_macro_call_one_arg[range(1,", "tests/test_parser.py::test_macro_call_one_arg[()]", "tests/test_parser.py::test_macro_call_one_arg[{}]", "tests/test_parser.py::test_macro_call_one_arg[[]]", "tests/test_parser.py::test_macro_call_one_arg[[1,", "tests/test_parser.py::test_macro_call_one_arg[@(x)]", 
"tests/test_parser.py::test_macro_call_one_arg[!(ls", "tests/test_parser.py::test_macro_call_one_arg[![ls", "tests/test_parser.py::test_macro_call_one_arg[$(ls", "tests/test_parser.py::test_macro_call_one_arg[${x", "tests/test_parser.py::test_macro_call_one_arg[$[ls", "tests/test_parser.py::test_macro_call_one_arg[@$(which", "tests/test_parser.py::test_macro_call_two_args[x-True]", "tests/test_parser.py::test_macro_call_two_args[x-import", "tests/test_parser.py::test_macro_call_two_args[x-\"oh", "tests/test_parser.py::test_macro_call_two_args[x-", "tests/test_parser.py::test_macro_call_two_args[x-{x:", "tests/test_parser.py::test_macro_call_two_args[x-{1,", "tests/test_parser.py::test_macro_call_two_args[x-(x,", "tests/test_parser.py::test_macro_call_two_args[x-g()]", "tests/test_parser.py::test_macro_call_two_args[x-range(1,", "tests/test_parser.py::test_macro_call_two_args[x-{}]", "tests/test_parser.py::test_macro_call_two_args[x-[1,", "tests/test_parser.py::test_macro_call_two_args[x-!(ls", "tests/test_parser.py::test_macro_call_two_args[x-$(ls", "tests/test_parser.py::test_macro_call_two_args[x-$[ls", "tests/test_parser.py::test_macro_call_two_args[None-True]", "tests/test_parser.py::test_macro_call_two_args[None-import", "tests/test_parser.py::test_macro_call_two_args[None-\"oh", "tests/test_parser.py::test_macro_call_two_args[None-", "tests/test_parser.py::test_macro_call_two_args[None-{x:", "tests/test_parser.py::test_macro_call_two_args[None-{1,", "tests/test_parser.py::test_macro_call_two_args[None-(x,", "tests/test_parser.py::test_macro_call_two_args[None-g()]", "tests/test_parser.py::test_macro_call_two_args[None-range(1,", "tests/test_parser.py::test_macro_call_two_args[None-{}]", "tests/test_parser.py::test_macro_call_two_args[None-[1,", "tests/test_parser.py::test_macro_call_two_args[None-!(ls", "tests/test_parser.py::test_macro_call_two_args[None-$(ls", "tests/test_parser.py::test_macro_call_two_args[None-$[ls", "tests/test_parser.py::test_macro_call_two_args[x=10-True]", "tests/test_parser.py::test_macro_call_two_args[x=10-import", "tests/test_parser.py::test_macro_call_two_args[x=10-\"oh", "tests/test_parser.py::test_macro_call_two_args[x=10-", "tests/test_parser.py::test_macro_call_two_args[x=10-{x:", "tests/test_parser.py::test_macro_call_two_args[x=10-{1,", "tests/test_parser.py::test_macro_call_two_args[x=10-(x,", "tests/test_parser.py::test_macro_call_two_args[x=10-g()]", "tests/test_parser.py::test_macro_call_two_args[x=10-range(1,", "tests/test_parser.py::test_macro_call_two_args[x=10-{}]", "tests/test_parser.py::test_macro_call_two_args[x=10-[1,", "tests/test_parser.py::test_macro_call_two_args[x=10-!(ls", "tests/test_parser.py::test_macro_call_two_args[x=10-$(ls", "tests/test_parser.py::test_macro_call_two_args[x=10-$[ls", "tests/test_parser.py::test_macro_call_two_args[...-True]", "tests/test_parser.py::test_macro_call_two_args[...-import", "tests/test_parser.py::test_macro_call_two_args[...-\"oh", "tests/test_parser.py::test_macro_call_two_args[...-", "tests/test_parser.py::test_macro_call_two_args[...-{x:", "tests/test_parser.py::test_macro_call_two_args[...-{1,", "tests/test_parser.py::test_macro_call_two_args[...-(x,", "tests/test_parser.py::test_macro_call_two_args[...-g()]", "tests/test_parser.py::test_macro_call_two_args[...-range(1,", "tests/test_parser.py::test_macro_call_two_args[...-{}]", "tests/test_parser.py::test_macro_call_two_args[...-[1,", "tests/test_parser.py::test_macro_call_two_args[...-!(ls", 
"tests/test_parser.py::test_macro_call_two_args[...-$(ls", "tests/test_parser.py::test_macro_call_two_args[...-$[ls", "tests/test_parser.py::test_macro_call_two_args[if", "tests/test_parser.py::test_macro_call_two_args[{x:", "tests/test_parser.py::test_macro_call_two_args[(x,y)-True]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-import", "tests/test_parser.py::test_macro_call_two_args[(x,y)-\"oh", "tests/test_parser.py::test_macro_call_two_args[(x,y)-", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{x:", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-(x,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-g()]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-range(1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{}]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-[1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-!(ls", "tests/test_parser.py::test_macro_call_two_args[(x,y)-$(ls", "tests/test_parser.py::test_macro_call_two_args[(x,y)-$[ls", "tests/test_parser.py::test_macro_call_two_args[((x,", "tests/test_parser.py::test_macro_call_two_args[range(10)-True]", "tests/test_parser.py::test_macro_call_two_args[range(10)-import", "tests/test_parser.py::test_macro_call_two_args[range(10)-\"oh", "tests/test_parser.py::test_macro_call_two_args[range(10)-", "tests/test_parser.py::test_macro_call_two_args[range(10)-{x:", "tests/test_parser.py::test_macro_call_two_args[range(10)-{1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-(x,", "tests/test_parser.py::test_macro_call_two_args[range(10)-g()]", "tests/test_parser.py::test_macro_call_two_args[range(10)-range(1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-{}]", "tests/test_parser.py::test_macro_call_two_args[range(10)-[1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-!(ls", "tests/test_parser.py::test_macro_call_two_args[range(10)-$(ls", "tests/test_parser.py::test_macro_call_two_args[range(10)-$[ls", "tests/test_parser.py::test_macro_call_two_args[()-True]", "tests/test_parser.py::test_macro_call_two_args[()-import", "tests/test_parser.py::test_macro_call_two_args[()-\"oh", "tests/test_parser.py::test_macro_call_two_args[()-", "tests/test_parser.py::test_macro_call_two_args[()-{x:", "tests/test_parser.py::test_macro_call_two_args[()-{1,", "tests/test_parser.py::test_macro_call_two_args[()-(x,", "tests/test_parser.py::test_macro_call_two_args[()-g()]", "tests/test_parser.py::test_macro_call_two_args[()-range(1,", "tests/test_parser.py::test_macro_call_two_args[()-{}]", "tests/test_parser.py::test_macro_call_two_args[()-[1,", "tests/test_parser.py::test_macro_call_two_args[()-!(ls", "tests/test_parser.py::test_macro_call_two_args[()-$(ls", "tests/test_parser.py::test_macro_call_two_args[()-$[ls", "tests/test_parser.py::test_macro_call_two_args[[]-True]", "tests/test_parser.py::test_macro_call_two_args[[]-import", "tests/test_parser.py::test_macro_call_two_args[[]-\"oh", "tests/test_parser.py::test_macro_call_two_args[[]-", "tests/test_parser.py::test_macro_call_two_args[[]-{x:", "tests/test_parser.py::test_macro_call_two_args[[]-{1,", "tests/test_parser.py::test_macro_call_two_args[[]-(x,", "tests/test_parser.py::test_macro_call_two_args[[]-g()]", "tests/test_parser.py::test_macro_call_two_args[[]-range(1,", "tests/test_parser.py::test_macro_call_two_args[[]-{}]", "tests/test_parser.py::test_macro_call_two_args[[]-[1,", "tests/test_parser.py::test_macro_call_two_args[[]-!(ls", 
"tests/test_parser.py::test_macro_call_two_args[[]-$(ls", "tests/test_parser.py::test_macro_call_two_args[[]-$[ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-True]", "tests/test_parser.py::test_macro_call_two_args[@(x)-import", "tests/test_parser.py::test_macro_call_two_args[@(x)-\"oh", "tests/test_parser.py::test_macro_call_two_args[@(x)-", "tests/test_parser.py::test_macro_call_two_args[@(x)-{x:", "tests/test_parser.py::test_macro_call_two_args[@(x)-{1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-(x,", "tests/test_parser.py::test_macro_call_two_args[@(x)-g()]", "tests/test_parser.py::test_macro_call_two_args[@(x)-range(1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-{}]", "tests/test_parser.py::test_macro_call_two_args[@(x)-[1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-!(ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-$(ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-$[ls", "tests/test_parser.py::test_macro_call_two_args[![ls", "tests/test_parser.py::test_macro_call_two_args[${x", "tests/test_parser.py::test_macro_call_two_args[@$(which", "tests/test_parser.py::test_macro_call_three_args[x-True-None]", "tests/test_parser.py::test_macro_call_three_args[x-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-True-if", "tests/test_parser.py::test_macro_call_three_args[x-True-{1,", "tests/test_parser.py::test_macro_call_three_args[x-True-((x,", "tests/test_parser.py::test_macro_call_three_args[x-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-True-[]]", "tests/test_parser.py::test_macro_call_three_args[x-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-True-${x", "tests/test_parser.py::test_macro_call_three_args[x-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[x-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-x=10-if", "tests/test_parser.py::test_macro_call_three_args[x-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[x-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[x-", "tests/test_parser.py::test_macro_call_three_args[x-{x:", "tests/test_parser.py::test_macro_call_three_args[x-(x,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[x-{}-None]", "tests/test_parser.py::test_macro_call_three_args[x-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-{}-if", "tests/test_parser.py::test_macro_call_three_args[x-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[x-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[x-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-{}-[]]", 
"tests/test_parser.py::test_macro_call_three_args[x-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-{}-${x", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[x-$(ls", "tests/test_parser.py::test_macro_call_three_args[x-@$(which", "tests/test_parser.py::test_macro_call_three_args[import", "tests/test_parser.py::test_macro_call_three_args[...-True-None]", "tests/test_parser.py::test_macro_call_three_args[...-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-True-if", "tests/test_parser.py::test_macro_call_three_args[...-True-{1,", "tests/test_parser.py::test_macro_call_three_args[...-True-((x,", "tests/test_parser.py::test_macro_call_three_args[...-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-True-[]]", "tests/test_parser.py::test_macro_call_three_args[...-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-True-${x", "tests/test_parser.py::test_macro_call_three_args[...-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[...-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-x=10-if", "tests/test_parser.py::test_macro_call_three_args[...-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[...-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[...-", "tests/test_parser.py::test_macro_call_three_args[...-{x:", "tests/test_parser.py::test_macro_call_three_args[...-(x,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[...-{}-None]", "tests/test_parser.py::test_macro_call_three_args[...-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-{}-if", "tests/test_parser.py::test_macro_call_three_args[...-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[...-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[...-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[...-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-{}-${x", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-None]", 
"tests/test_parser.py::test_macro_call_three_args[...-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[...-$(ls", "tests/test_parser.py::test_macro_call_three_args[...-@$(which", "tests/test_parser.py::test_macro_call_three_args[{x:", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{x:", "tests/test_parser.py::test_macro_call_three_args[(x,y)-(x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-\"oh", 
"tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-$(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@$(which", "tests/test_parser.py::test_macro_call_three_args[g()-True-None]", "tests/test_parser.py::test_macro_call_three_args[g()-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-True-if", "tests/test_parser.py::test_macro_call_three_args[g()-True-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-True-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-True-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-True-${x", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-if", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[g()-", "tests/test_parser.py::test_macro_call_three_args[g()-{x:", "tests/test_parser.py::test_macro_call_three_args[g()-(x,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[g()-{}-None]", "tests/test_parser.py::test_macro_call_three_args[g()-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-{}-if", "tests/test_parser.py::test_macro_call_three_args[g()-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-{}-${x", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-((x,", 
"tests/test_parser.py::test_macro_call_three_args[g()-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[g()-$(ls", "tests/test_parser.py::test_macro_call_three_args[g()-@$(which", "tests/test_parser.py::test_macro_call_three_args[()-True-None]", "tests/test_parser.py::test_macro_call_three_args[()-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-True-if", "tests/test_parser.py::test_macro_call_three_args[()-True-{1,", "tests/test_parser.py::test_macro_call_three_args[()-True-((x,", "tests/test_parser.py::test_macro_call_three_args[()-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-True-[]]", "tests/test_parser.py::test_macro_call_three_args[()-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-True-${x", "tests/test_parser.py::test_macro_call_three_args[()-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[()-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-x=10-if", "tests/test_parser.py::test_macro_call_three_args[()-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[()-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[()-", "tests/test_parser.py::test_macro_call_three_args[()-{x:", "tests/test_parser.py::test_macro_call_three_args[()-(x,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[()-{}-None]", "tests/test_parser.py::test_macro_call_three_args[()-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-{}-if", "tests/test_parser.py::test_macro_call_three_args[()-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[()-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[()-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[()-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-{}-${x", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-${x", 
"tests/test_parser.py::test_macro_call_three_args[()-$(ls", "tests/test_parser.py::test_macro_call_three_args[()-@$(which", "tests/test_parser.py::test_macro_call_three_args[[1,", "tests/test_parser.py::test_macro_call_three_args[![ls", "tests/test_parser.py::test_macro_call_three_args[$[ls", "tests/test_parser.py::test_macro_call_one_trailing[x]", "tests/test_parser.py::test_macro_call_one_trailing[True]", "tests/test_parser.py::test_macro_call_one_trailing[None]", "tests/test_parser.py::test_macro_call_one_trailing[import", "tests/test_parser.py::test_macro_call_one_trailing[x=10]", "tests/test_parser.py::test_macro_call_one_trailing[\"oh", "tests/test_parser.py::test_macro_call_one_trailing[...]", "tests/test_parser.py::test_macro_call_one_trailing[", "tests/test_parser.py::test_macro_call_one_trailing[if", "tests/test_parser.py::test_macro_call_one_trailing[{x:", "tests/test_parser.py::test_macro_call_one_trailing[{1,", "tests/test_parser.py::test_macro_call_one_trailing[(x,y)]", "tests/test_parser.py::test_macro_call_one_trailing[(x,", "tests/test_parser.py::test_macro_call_one_trailing[((x,", "tests/test_parser.py::test_macro_call_one_trailing[g()]", "tests/test_parser.py::test_macro_call_one_trailing[range(10)]", "tests/test_parser.py::test_macro_call_one_trailing[range(1,", "tests/test_parser.py::test_macro_call_one_trailing[()]", "tests/test_parser.py::test_macro_call_one_trailing[{}]", "tests/test_parser.py::test_macro_call_one_trailing[[]]", "tests/test_parser.py::test_macro_call_one_trailing[[1,", "tests/test_parser.py::test_macro_call_one_trailing[@(x)]", "tests/test_parser.py::test_macro_call_one_trailing[!(ls", "tests/test_parser.py::test_macro_call_one_trailing[![ls", "tests/test_parser.py::test_macro_call_one_trailing[$(ls", "tests/test_parser.py::test_macro_call_one_trailing[${x", "tests/test_parser.py::test_macro_call_one_trailing[$[ls", "tests/test_parser.py::test_macro_call_one_trailing[@$(which", "tests/test_parser.py::test_macro_call_one_trailing_space[x]", "tests/test_parser.py::test_macro_call_one_trailing_space[True]", "tests/test_parser.py::test_macro_call_one_trailing_space[None]", "tests/test_parser.py::test_macro_call_one_trailing_space[import", "tests/test_parser.py::test_macro_call_one_trailing_space[x=10]", "tests/test_parser.py::test_macro_call_one_trailing_space[\"oh", "tests/test_parser.py::test_macro_call_one_trailing_space[...]", "tests/test_parser.py::test_macro_call_one_trailing_space[", "tests/test_parser.py::test_macro_call_one_trailing_space[if", "tests/test_parser.py::test_macro_call_one_trailing_space[{x:", "tests/test_parser.py::test_macro_call_one_trailing_space[{1,", "tests/test_parser.py::test_macro_call_one_trailing_space[(x,y)]", "tests/test_parser.py::test_macro_call_one_trailing_space[(x,", "tests/test_parser.py::test_macro_call_one_trailing_space[((x,", "tests/test_parser.py::test_macro_call_one_trailing_space[g()]", "tests/test_parser.py::test_macro_call_one_trailing_space[range(10)]", "tests/test_parser.py::test_macro_call_one_trailing_space[range(1,", "tests/test_parser.py::test_macro_call_one_trailing_space[()]", "tests/test_parser.py::test_macro_call_one_trailing_space[{}]", "tests/test_parser.py::test_macro_call_one_trailing_space[[]]", "tests/test_parser.py::test_macro_call_one_trailing_space[[1,", "tests/test_parser.py::test_macro_call_one_trailing_space[@(x)]", "tests/test_parser.py::test_macro_call_one_trailing_space[!(ls", "tests/test_parser.py::test_macro_call_one_trailing_space[![ls", 
"tests/test_parser.py::test_macro_call_one_trailing_space[$(ls", "tests/test_parser.py::test_macro_call_one_trailing_space[${x", "tests/test_parser.py::test_macro_call_one_trailing_space[$[ls", "tests/test_parser.py::test_macro_call_one_trailing_space[@$(which", "tests/test_parser.py::test_empty_subprocbang[echo!-!(-)]", "tests/test_parser.py::test_empty_subprocbang[echo!-$(-)]", "tests/test_parser.py::test_empty_subprocbang[echo!-![-]]", "tests/test_parser.py::test_empty_subprocbang[echo!-$[-]]", "tests/test_parser.py::test_empty_subprocbang[echo", "tests/test_parser.py::test_single_subprocbang[echo!x-!(-)]", "tests/test_parser.py::test_single_subprocbang[echo!x-$(-)]", "tests/test_parser.py::test_single_subprocbang[echo!x-![-]]", "tests/test_parser.py::test_single_subprocbang[echo!x-$[-]]", "tests/test_parser.py::test_single_subprocbang[echo", "tests/test_parser.py::test_arg_single_subprocbang[echo", "tests/test_parser.py::test_arg_single_subprocbang_nested[echo", "tests/test_parser.py::test_many_subprocbang[echo!x", "tests/test_parser.py::test_many_subprocbang[echo", "tests/test_parser.py::test_many_subprocbang[timeit!", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-$[-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-$[-]]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-$[-]]", "tests/test_parser.py::test_withbang_single_suite[pass\\n]", "tests/test_parser.py::test_withbang_single_suite[x", "tests/test_parser.py::test_withbang_single_suite[export", "tests/test_parser.py::test_withbang_single_suite[with", "tests/test_parser.py::test_withbang_as_single_suite[pass\\n]", "tests/test_parser.py::test_withbang_as_single_suite[x", "tests/test_parser.py::test_withbang_as_single_suite[export", "tests/test_parser.py::test_withbang_as_single_suite[with", "tests/test_parser.py::test_withbang_single_suite_trailing[pass\\n]", "tests/test_parser.py::test_withbang_single_suite_trailing[x", "tests/test_parser.py::test_withbang_single_suite_trailing[export", "tests/test_parser.py::test_withbang_single_suite_trailing[with", "tests/test_parser.py::test_withbang_single_simple[pass]", "tests/test_parser.py::test_withbang_single_simple[x", "tests/test_parser.py::test_withbang_single_simple[export", "tests/test_parser.py::test_withbang_single_simple[[1,\\n", "tests/test_parser.py::test_withbang_single_simple_opt[pass]", "tests/test_parser.py::test_withbang_single_simple_opt[x", "tests/test_parser.py::test_withbang_single_simple_opt[export", "tests/test_parser.py::test_withbang_single_simple_opt[[1,\\n", "tests/test_parser.py::test_withbang_as_many_suite[pass\\n]", "tests/test_parser.py::test_withbang_as_many_suite[x", "tests/test_parser.py::test_withbang_as_many_suite[export", "tests/test_parser.py::test_withbang_as_many_suite[with", "tests/test_parser.py::test_subproc_raw_str_literal", "tests/test_parser.py::test_syntax_error_del_literal", "tests/test_parser.py::test_syntax_error_del_constant", 
"tests/test_parser.py::test_syntax_error_del_emptytuple", "tests/test_parser.py::test_syntax_error_del_call", "tests/test_parser.py::test_syntax_error_del_lambda", "tests/test_parser.py::test_syntax_error_del_ifexp", "tests/test_parser.py::test_syntax_error_del_comps[[i", "tests/test_parser.py::test_syntax_error_del_comps[{i", "tests/test_parser.py::test_syntax_error_del_comps[(i", "tests/test_parser.py::test_syntax_error_del_comps[{k:v", "tests/test_parser.py::test_syntax_error_del_ops[x", "tests/test_parser.py::test_syntax_error_del_ops[-x]", "tests/test_parser.py::test_syntax_error_del_cmp[x", "tests/test_parser.py::test_syntax_error_lonely_del", "tests/test_parser.py::test_syntax_error_assign_literal", "tests/test_parser.py::test_syntax_error_assign_constant", "tests/test_parser.py::test_syntax_error_assign_emptytuple", "tests/test_parser.py::test_syntax_error_assign_call", "tests/test_parser.py::test_syntax_error_assign_lambda", "tests/test_parser.py::test_syntax_error_assign_ifexp", "tests/test_parser.py::test_syntax_error_assign_comps[[i", "tests/test_parser.py::test_syntax_error_assign_comps[{i", "tests/test_parser.py::test_syntax_error_assign_comps[(i", "tests/test_parser.py::test_syntax_error_assign_comps[{k:v", "tests/test_parser.py::test_syntax_error_assign_ops[x", "tests/test_parser.py::test_syntax_error_assign_ops[-x]", "tests/test_parser.py::test_syntax_error_assign_cmp[x", "tests/test_parser.py::test_syntax_error_augassign_literal", "tests/test_parser.py::test_syntax_error_augassign_constant", "tests/test_parser.py::test_syntax_error_augassign_emptytuple", "tests/test_parser.py::test_syntax_error_augassign_call", "tests/test_parser.py::test_syntax_error_augassign_lambda", "tests/test_parser.py::test_syntax_error_augassign_ifexp", "tests/test_parser.py::test_syntax_error_augassign_comps[[i", "tests/test_parser.py::test_syntax_error_augassign_comps[{i", "tests/test_parser.py::test_syntax_error_augassign_comps[(i", "tests/test_parser.py::test_syntax_error_augassign_comps[{k:v", "tests/test_parser.py::test_syntax_error_augassign_ops[x", "tests/test_parser.py::test_syntax_error_augassign_ops[-x]", "tests/test_parser.py::test_syntax_error_augassign_cmp[x", "tests/test_parser.py::test_syntax_error_bar_kwonlyargs", "tests/test_parser.py::test_syntax_error_bar_posonlyargs", "tests/test_parser.py::test_syntax_error_bar_posonlyargs_no_comma", "tests/test_parser.py::test_syntax_error_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_posonly_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_lambda_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_lambda_posonly_nondefault_follows_default", "tests/test_parser.py::test_get_repo_url", "tests/test_parser.py::test_match_and_case_are_not_keywords" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-06-11 16:34:14+00:00
bsd-2-clause
6,298
xonsh__xonsh-4860
diff --git a/news/xonshlexer-fix.rst b/news/xonshlexer-fix.rst new file mode 100644 index 00000000..9207589a --- /dev/null +++ b/news/xonshlexer-fix.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* Fixed error caused by unintialized Xonsh session env when using Xonsh as a library just for its Pygments lexer plugin. + +**Security:** + +* <news item> diff --git a/xonsh/pyghooks.py b/xonsh/pyghooks.py index 8c18f285..021c0406 100644 --- a/xonsh/pyghooks.py +++ b/xonsh/pyghooks.py @@ -1644,7 +1644,7 @@ class XonshLexer(Python3Lexer): def __init__(self, *args, **kwargs): # If the lexer is loaded as a pygment plugin, we have to mock # __xonsh__.env and __xonsh__.commands_cache - if not hasattr(XSH, "env"): + if getattr(XSH, "env", None) is None: XSH.env = {} if ON_WINDOWS: pathext = os_environ.get("PATHEXT", [".EXE", ".BAT", ".CMD"])
xonsh/xonsh
52a12aaf858dc58af773da05bac6007fc525f381
diff --git a/tests/test_pyghooks.py b/tests/test_pyghooks.py index f6fc5e5a..48ec4d1b 100644 --- a/tests/test_pyghooks.py +++ b/tests/test_pyghooks.py @@ -9,8 +9,10 @@ import pytest from xonsh.environ import LsColors from xonsh.platform import ON_WINDOWS from xonsh.pyghooks import ( + XSH, Color, Token, + XonshLexer, XonshStyle, code_by_name, color_file, @@ -388,3 +390,15 @@ def test_register_custom_pygments_style(name, styles, refrules): for rule, color in refrules.items(): assert rule in style.styles assert style.styles[rule] == color + + +def test_can_use_xonsh_lexer_without_xession(xession, monkeypatch): + # When Xonsh is used as a library and simply for its lexer plugin, the + # xession's env can be unset, so test that it can yield tokens without + # that env being set. + monkeypatch.setattr(xession, "env", None) + + assert XSH.env is None + lexer = XonshLexer() + assert XSH.env is not None + list(lexer.get_tokens_unprocessed(" some text"))
Error when using Xonsh pygments lexer (pyghooks.XonshLexer) outside of a console session ## xonfig No actual `xonfig` here - using `xonsh` 0.12.6 as a library ## Expected Behavior I should be able to use the `XonshLexer` to highlight code using Pygments even if I'm not in a Xonsh console session. ## Current Behavior When I use the Xonsh lexer in [Pelican](https://blog.getpelican.com/), I get a KeyError because the `XSH.env` dictionary hasn't been initialized, and a check within `pyghooks.XonshLexer` that tries to guard against this doesn't work exactly as it should. ### Traceback (if applicable) The following happens when I try to generate my Pelican static site that uses the `XonshLexer` to highlight Xonsh code: <details> ``` | Traceback (most recent call last): | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pelican/generators.py", line 616, in generate_context | article = self.readers.read_file( | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pelican/readers.py", line 573, in read_file | content, reader_metadata = reader.read(path) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pelican/readers.py", line 337, in read | content = self._md.convert(text) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/markdown/core.py", line 267, in convert | newRoot = treeprocessor.run(root) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/markdown/extensions/codehilite.py", line 224, in run | placeholder = self.md.htmlStash.store(code.hilite()) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/markdown/extensions/codehilite.py", line 122, in hilite | return highlight(self.src, lexer, formatter) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/__init__.py", line 84, in highlight | return format(lex(code, lexer), formatter, outfile) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/__init__.py", line 63, in format | formatter.format(tokens, realoutfile) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/formatter.py", line 95, in format | return self.format_unencoded(tokensource, outfile) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/formatters/html.py", line 879, in format_unencoded | for t, piece in source: | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/formatters/html.py", line 710, in _wrap_div | for tup in inner: | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/formatters/html.py", line 728, in _wrap_pre | for tup in inner: | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/formatters/html.py", line 734, in _wrap_code | for tup in inner: | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/formatters/html.py", line 753, in _format_lines | for ttype, value in tokensource: | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/pygments/lexer.py", line 188, in streamer | for _, t, v in self.get_tokens_unprocessed(text): | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/xonsh/pyghooks.py", line 1747, in get_tokens_unprocessed | cmd_is_autocd = _command_is_autocd(cmd) | File "/Users/eddiepeters/source/website/.venv/lib/python3.9/site-packages/xonsh/pyghooks.py", line 1611, in _command_is_autocd | if not 
XSH.env.get("AUTO_CD", False): | AttributeError: 'NoneType' object has no attribute 'get' ``` Note that `XSH.env.get("AUTO_CD", False)` does not allow for the non-existence of `XSH.env`. </details> Fix: The problem is that the `XSH.env` property sometimes exists but is not yet initialized as a dictionary. I believe the following line: https://github.com/xonsh/xonsh/blob/52a12aaf858dc58af773da05bac6007fc525f381/xonsh/pyghooks.py#L1647 Just needs to be changed to: `if getattr(XSH, "env", None) is None:` ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
0.0
52a12aaf858dc58af773da05bac6007fc525f381
[ "tests/test_pyghooks.py::test_can_use_xonsh_lexer_without_xession" ]
[ "tests/test_pyghooks.py::test_color_name_to_pygments_code[RESET-noinherit]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[RED-ansired]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BACKGROUND_RED-bg:ansired]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BACKGROUND_INTENSE_RED-bg:ansibrightred]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[UNDERLINE_RED-underline", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_UNDERLINE_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[UNDERLINE_BOLD_RED-underline", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_FAINT_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_SLOWBLINK_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_FASTBLINK_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_INVERT_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_CONCEAL_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BOLD_STRIKETHROUGH_RED-bold", "tests/test_pyghooks.py::test_color_name_to_pygments_code[#000-#000]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[#000000-#000000]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BACKGROUND_#000-bg:#000]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BACKGROUND_#000000-bg:#000000]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[BG#000-bg:#000]", "tests/test_pyghooks.py::test_color_name_to_pygments_code[bg#000000-bg:#000000]", "tests/test_pyghooks.py::test_code_by_name[RESET-noinherit]", "tests/test_pyghooks.py::test_code_by_name[RED-ansired]", "tests/test_pyghooks.py::test_code_by_name[BACKGROUND_RED-bg:ansired]", "tests/test_pyghooks.py::test_code_by_name[BACKGROUND_INTENSE_RED-bg:ansibrightred]", "tests/test_pyghooks.py::test_code_by_name[BOLD_RED-bold", "tests/test_pyghooks.py::test_code_by_name[UNDERLINE_RED-underline", "tests/test_pyghooks.py::test_code_by_name[BOLD_UNDERLINE_RED-bold", "tests/test_pyghooks.py::test_code_by_name[UNDERLINE_BOLD_RED-underline", "tests/test_pyghooks.py::test_code_by_name[BOLD_FAINT_RED-bold", "tests/test_pyghooks.py::test_code_by_name[BOLD_SLOWBLINK_RED-bold", "tests/test_pyghooks.py::test_code_by_name[BOLD_FASTBLINK_RED-bold", "tests/test_pyghooks.py::test_code_by_name[BOLD_INVERT_RED-bold", "tests/test_pyghooks.py::test_code_by_name[BOLD_CONCEAL_RED-bold", "tests/test_pyghooks.py::test_code_by_name[BOLD_STRIKETHROUGH_RED-bold", "tests/test_pyghooks.py::test_code_by_name[#000-#000]", "tests/test_pyghooks.py::test_code_by_name[#000000-#000000]", "tests/test_pyghooks.py::test_code_by_name[BACKGROUND_#000-bg:#000]", "tests/test_pyghooks.py::test_code_by_name[BACKGROUND_#000000-bg:#000000]", "tests/test_pyghooks.py::test_code_by_name[BG#000-bg:#000]", "tests/test_pyghooks.py::test_code_by_name[bg#000000-bg:#000000]", "tests/test_pyghooks.py::test_color_token_by_name[in_tuple0-exp_ct0-noinherit]", "tests/test_pyghooks.py::test_color_token_by_name[in_tuple1-exp_ct1-ansigreen]", "tests/test_pyghooks.py::test_color_token_by_name[in_tuple2-exp_ct2-bold", "tests/test_pyghooks.py::test_color_token_by_name[in_tuple3-exp_ct3-bg:ansiblack", "tests/test_pyghooks.py::test_XonshStyle_init_file_color_tokens", "tests/test_pyghooks.py::test_colorize_file[fi-regular]", "tests/test_pyghooks.py::test_colorize_file[di-simple_dir]", 
"tests/test_pyghooks.py::test_colorize_file[ln-sym_link]", "tests/test_pyghooks.py::test_colorize_file[pi-pipe]", "tests/test_pyghooks.py::test_colorize_file[or-orphan]", "tests/test_pyghooks.py::test_colorize_file[su-set_uid]", "tests/test_pyghooks.py::test_colorize_file[sg-set_gid]", "tests/test_pyghooks.py::test_colorize_file[tw-sticky_ow_dir]", "tests/test_pyghooks.py::test_colorize_file[ow-other_writable_dir]", "tests/test_pyghooks.py::test_colorize_file[st-sticky_dir]", "tests/test_pyghooks.py::test_colorize_file[ex-executable]", "tests/test_pyghooks.py::test_colorize_file[*.emf-foo.emf]", "tests/test_pyghooks.py::test_colorize_file[*.zip-foo.zip]", "tests/test_pyghooks.py::test_colorize_file[*.ogg-foo.ogg]", "tests/test_pyghooks.py::test_colorize_file[mh-hard_link]", "tests/test_pyghooks.py::test_colorize_file_symlink[fi-regular]", "tests/test_pyghooks.py::test_colorize_file_symlink[di-simple_dir]", "tests/test_pyghooks.py::test_colorize_file_symlink[ln-sym_link]", "tests/test_pyghooks.py::test_colorize_file_symlink[pi-pipe]", "tests/test_pyghooks.py::test_colorize_file_symlink[or-orphan]", "tests/test_pyghooks.py::test_colorize_file_symlink[su-set_uid]", "tests/test_pyghooks.py::test_colorize_file_symlink[sg-set_gid]", "tests/test_pyghooks.py::test_colorize_file_symlink[tw-sticky_ow_dir]", "tests/test_pyghooks.py::test_colorize_file_symlink[ow-other_writable_dir]", "tests/test_pyghooks.py::test_colorize_file_symlink[st-sticky_dir]", "tests/test_pyghooks.py::test_colorize_file_symlink[ex-executable]", "tests/test_pyghooks.py::test_colorize_file_symlink[*.emf-foo.emf]", "tests/test_pyghooks.py::test_colorize_file_symlink[*.zip-foo.zip]", "tests/test_pyghooks.py::test_colorize_file_symlink[*.ogg-foo.ogg]", "tests/test_pyghooks.py::test_colorize_file_symlink[mh-hard_link]", "tests/test_pyghooks.py::test_colorize_file_ca", "tests/test_pyghooks.py::test_register_custom_pygments_style[test1-styles0-refrules0]", "tests/test_pyghooks.py::test_register_custom_pygments_style[test2-styles1-refrules1]", "tests/test_pyghooks.py::test_register_custom_pygments_style[test3-styles2-refrules2]", "tests/test_pyghooks.py::test_register_custom_pygments_style[test4-styles3-refrules3]", "tests/test_pyghooks.py::test_register_custom_pygments_style[test5-styles4-refrules4]", "tests/test_pyghooks.py::test_register_custom_pygments_style[test6-styles5-refrules5]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2022-06-30 02:33:05+00:00
bsd-2-clause
6,299
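To make the fix recorded in the `xonsh__xonsh-4860` patch above concrete: the old guard used `hasattr`, which is satisfied even when `XSH.env` exists but is still `None`, so the lexer never mocked the environment. The following is a minimal, self-contained sketch of that difference; `FakeSession` is a stand-in used only for illustration, not the real `XSH` singleton.

```python
# Editor's illustrative sketch (hypothetical FakeSession, not the real XSH object).
class FakeSession:
    env = None  # attribute exists, but is uninitialized


XSH = FakeSession()

# Old guard from pyghooks: never fires, because the attribute *does* exist.
if not hasattr(XSH, "env"):
    XSH.env = {}
print(XSH.env)  # None -> a later XSH.env.get(...) raises AttributeError

# Fixed guard from the patch: also covers the "exists but is None" case.
if getattr(XSH, "env", None) is None:
    XSH.env = {}
print(XSH.env)  # {}
```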
xonsh__xonsh-4907
diff --git a/news/fix-empty-gitstatus.rst b/news/fix-empty-gitstatus.rst new file mode 100644 index 00000000..3a920da1 --- /dev/null +++ b/news/fix-empty-gitstatus.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* ``gitstatus`` Prompt-field would be empty on paths without git setup. + +**Security:** + +* <news item> diff --git a/xonsh/prompt/gitstatus.py b/xonsh/prompt/gitstatus.py index 683d8255..e3a41ca8 100644 --- a/xonsh/prompt/gitstatus.py +++ b/xonsh/prompt/gitstatus.py @@ -319,5 +319,11 @@ class GitStatus(MultiPromptField): continue yield frag + def _collect(self, ctx): + if not ctx.pick_val(repo_path): + # no need to display any other fragments + return + yield from super()._collect(ctx) + gitstatus = GitStatus()
xonsh/xonsh
86e4f004e30529f7ef210da9f03ac3223518f85c
diff --git a/tests/prompt/test_gitstatus.py b/tests/prompt/test_gitstatus.py index 9e13628b..ce20044e 100644 --- a/tests/prompt/test_gitstatus.py +++ b/tests/prompt/test_gitstatus.py @@ -1,3 +1,5 @@ +import os + import pytest from xonsh.prompt import gitstatus @@ -14,6 +16,7 @@ def prompts(xession): fields = xession.env["PROMPT_FIELDS"] yield fields fields.clear() + fields.reset() @pytest.fixture @@ -71,3 +74,24 @@ def test_gitstatus_clean(prompts, fake_proc): assert format(prompts.pick("gitstatus")) == exp assert _format_value(prompts.pick("gitstatus"), None, None) == exp assert _format_value(prompts.pick("gitstatus"), "{}", None) == exp + + +def test_no_git(prompts, fake_process, tmp_path): + os.chdir(tmp_path) + err = b"fatal: not a git repository (or any of the parent directories): .git" + for cmd in ( + "git status --porcelain --branch", + "git rev-parse --git-dir", + "git diff --numstat", + ): + fake_process.register_subprocess( + command=cmd, + stderr=err, + returncode=128, + ) + + exp = "" + assert prompts.pick_val("gitstatus.repo_path") == "" + assert format(prompts.pick("gitstatus")) == exp + assert _format_value(prompts.pick("gitstatus"), None, None) == exp + assert _format_value(prompts.pick("gitstatus"), "{}", None) == exp
{gitstatus: {}} is no longer autohiding gitstatus `{gitstatus: {}}` in my right prompt no longer hides itself when I'm not in a git repository. ## xonfig <details> ``` $ xonfig +------------------+---------------------+ | xonsh | 0.13.0 | | Python | 3.10.5 | | PLY | 3.11 | | have readline | True | | prompt toolkit | 3.0.30 | | shell type | prompt_toolkit | | history backend | sqlite | | pygments | 2.12.0 | | on posix | True | | on linux | True | | distro | unknown | | on wsl | False | | on darwin | False | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | | xontrib | [] | | RC file 1 | /home/monk/.xonshrc | +------------------+---------------------+ ``` </details> ## Expected Behavior ``` $ xonsh --no-rc monk@lychee ~ $ $RIGHT_PROMPT='<{gitstatus: {}}>' monk@lychee ~ $ $PROMPT_FIELDS['gitstatus'].fragments = () <> monk@lychee ~ $ <> ``` ## Current Behavior ``` $ xonsh --no-rc monk@lychee ~ $ $RIGHT_PROMPT='<{gitstatus: {}}>' monk@lychee ~ $ $PROMPT_FIELDS['gitstatus'].fragments = () < |✓> monk@lychee ~ $ < > ``` ## Steps to Reproduce see above ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
0.0
86e4f004e30529f7ef210da9f03ac3223518f85c
[ "tests/prompt/test_gitstatus.py::test_no_git" ]
[ "tests/prompt/test_gitstatus.py::test_gitstatus_dirty[hidden0-{CYAN}gitstatus-opt\\u2191\\xb77\\u2193\\xb72{RESET}|{RED}\\u25cf1{RESET}{BLUE}+3{RESET}{BLUE}+49{RESET}{RED}-26{RESET}]", "tests/prompt/test_gitstatus.py::test_gitstatus_dirty[hidden1-{CYAN}gitstatus-opt\\u2191\\xb77\\u2193\\xb72{RESET}|{RED}\\u25cf1{RESET}{BLUE}+3{RESET}]", "tests/prompt/test_gitstatus.py::test_gitstatus_clean" ]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
2022-08-03 06:26:56+00:00
bsd-2-clause
6,300
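The `xonsh__xonsh-4907` patch above fixes the empty-but-visible `gitstatus` field by short-circuiting fragment collection when the repository path is empty. The sketch below illustrates that pattern with simplified stand-in classes (`MultiField`, `GitField`, and a plain dict of values); it is not the real `MultiPromptField`/`GitStatus` API.

```python
# Editor's illustrative sketch of the short-circuit pattern (hypothetical classes).
class MultiField:
    fragments = ("repo_path", "branch", "changes")

    def _collect(self, values):
        # yield only the fragments that actually have a value
        for name in self.fragments:
            if values.get(name):
                yield values[name]


class GitField(MultiField):
    def _collect(self, values):
        if not values.get("repo_path"):
            return  # outside a git repo: render as an empty string
        yield from super()._collect(values)


print("|".join(GitField()._collect({"repo_path": "", "branch": "main"})))      # ""
print("|".join(GitField()._collect({"repo_path": ".git", "branch": "main"})))  # ".git|main"
```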
xonsh__xonsh-4916
diff --git a/news/fix-term-title-update.rst b/news/fix-term-title-update.rst new file mode 100644 index 00000000..88cf5453 --- /dev/null +++ b/news/fix-term-title-update.rst @@ -0,0 +1,24 @@ +**Added:** + +* <news item> + +**Changed:** + +* The terminal's title is updated with the current command's name even if the command is a captured command or a callable alias + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* When using the sway window manager, ``swaymsg -t get_inputs`` no longer fails with the error "Unable to receive IPC response" +* The ``current_job`` variable now works as expected when used in ``$TITLE`` + +**Security:** + +* <news item> diff --git a/xonsh/procs/pipelines.py b/xonsh/procs/pipelines.py index f2c7dd5c..623b3a8f 100644 --- a/xonsh/procs/pipelines.py +++ b/xonsh/procs/pipelines.py @@ -757,17 +757,8 @@ class HiddenCommandPipeline(CommandPipeline): return "" -def pause_call_resume(p, f, *args, **kwargs): - """For a process p, this will call a function f with the remaining args and - and kwargs. If the process cannot accept signals, the function will be called. - - Parameters - ---------- - p : Popen object or similar - f : callable - args : remaining arguments - kwargs : keyword arguments - """ +def resume_process(p): + """Sends SIGCONT to a process if possible.""" can_send_signal = ( hasattr(p, "send_signal") and xp.ON_POSIX @@ -776,15 +767,9 @@ def pause_call_resume(p, f, *args, **kwargs): ) if can_send_signal: try: - p.send_signal(signal.SIGSTOP) + p.send_signal(signal.SIGCONT) except PermissionError: pass - try: - f(*args, **kwargs) - except Exception: - pass - if can_send_signal: - p.send_signal(signal.SIGCONT) class PrevProcCloser(threading.Thread): diff --git a/xonsh/procs/specs.py b/xonsh/procs/specs.py index f1963690..dcd8be5e 100644 --- a/xonsh/procs/specs.py +++ b/xonsh/procs/specs.py @@ -22,7 +22,7 @@ from xonsh.procs.pipelines import ( STDOUT_CAPTURE_KINDS, CommandPipeline, HiddenCommandPipeline, - pause_call_resume, + resume_process, ) from xonsh.procs.posix import PopenThread from xonsh.procs.proxies import ProcProxy, ProcProxyThread @@ -857,14 +857,8 @@ def cmds_to_specs(cmds, captured=False, envs=None): return specs -def _should_set_title(captured=False): - env = XSH.env - return ( - env.get("XONSH_INTERACTIVE") - and not env.get("XONSH_STORE_STDOUT") - and captured not in STDOUT_CAPTURE_KINDS - and XSH.shell is not None - ) +def _should_set_title(): + return XSH.env.get("XONSH_INTERACTIVE") and XSH.shell is not None def run_subproc(cmds, captured=False, envs=None): @@ -888,6 +882,23 @@ def run_subproc(cmds, captured=False, envs=None): print(f"TRACE SUBPROC: {cmds}, captured={captured}", file=sys.stderr) specs = cmds_to_specs(cmds, captured=captured, envs=envs) + if _should_set_title(): + # context manager updates the command information that gets + # accessed by CurrentJobField when setting the terminal's title + with XSH.env["PROMPT_FIELDS"]["current_job"].update_current_cmds(cmds): + # remove current_job from prompt level cache + XSH.env["PROMPT_FIELDS"].reset_key("current_job") + # The terminal's title needs to be set before starting the + # subprocess to avoid accidentally answering interactive questions + # from commands such as `rm -i` (see #1436) + XSH.shell.settitle() + # run the subprocess + return _run_specs(specs, cmds) + else: + return _run_specs(specs, cmds) + + +def _run_specs(specs, cmds): captured = specs[-1].captured if captured == "hiddenobject": command = HiddenCommandPipeline(specs) @@ -906,15 
+917,12 @@ def run_subproc(cmds, captured=False, envs=None): "pgrp": command.term_pgid, } ) - if _should_set_title(captured=captured): - # set title here to get currently executing command - pause_call_resume(proc, XSH.shell.settitle) - else: - # for some reason, some programs are in a stopped state when the flow - # reaches this point, hence a SIGCONT should be sent to `proc` to make - # sure that the shell doesn't hang. This `pause_call_resume` invocation - # does this - pause_call_resume(proc, int) + # For some reason, some programs are in a stopped state when the flow + # reaches this point, hence a SIGCONT should be sent to `proc` to make + # sure that the shell doesn't hang. + # See issue #2999 and the fix in PR #3000 + resume_process(proc) + # now figure out what we should return if captured == "object": return command # object can be returned even if backgrounding diff --git a/xonsh/prompt/base.py b/xonsh/prompt/base.py index c0a44a59..2c6e379c 100644 --- a/xonsh/prompt/base.py +++ b/xonsh/prompt/base.py @@ -252,7 +252,7 @@ def _format_value(val, spec, conv) -> str: and 'current_job' returns 'sleep', the result is 'sleep | ', and if 'current_job' returns None, the result is ''. """ - if val is None: + if val is None or (isinstance(val, BasePromptField) and val.value is None): return "" val = xt.FORMATTER.convert_field(val, conv) @@ -331,7 +331,7 @@ class PromptFields(tp.MutableMapping[str, "FieldType"]): _replace_home_cwd, ) from xonsh.prompt.env import env_name, vte_new_tab_cwd - from xonsh.prompt.job import _current_job + from xonsh.prompt.job import CurrentJobField from xonsh.prompt.times import _localtime from xonsh.prompt.vc import branch_bg_color, branch_color, current_branch @@ -349,7 +349,7 @@ class PromptFields(tp.MutableMapping[str, "FieldType"]): curr_branch=current_branch, branch_color=branch_color, branch_bg_color=branch_bg_color, - current_job=_current_job, + current_job=CurrentJobField(name="current_job"), env_name=env_name, env_prefix="(", env_postfix=") ", @@ -403,6 +403,10 @@ class PromptFields(tp.MutableMapping[str, "FieldType"]): """the results are cached and need to be reset between prompts""" self._cache.clear() + def reset_key(self, key): + """remove a single key from the cache (if it exists)""" + self._cache.pop(key, None) + class BasePromptField: value = "" diff --git a/xonsh/prompt/job.py b/xonsh/prompt/job.py index 97313e0a..ceeec1d9 100644 --- a/xonsh/prompt/job.py +++ b/xonsh/prompt/job.py @@ -1,14 +1,30 @@ """Prompt formatter for current jobs""" -import xonsh.jobs as xj +import contextlib +import typing as tp +from xonsh.prompt.base import PromptField -def _current_job(): - j = xj.get_next_task() - if j is not None: - if not j["bg"]: - cmd = j["cmds"][-1] + +class CurrentJobField(PromptField): + _current_cmds: tp.Optional[list] = None + + def update(self, ctx): + if self._current_cmds is not None: + cmd = self._current_cmds[-1] s = cmd[0] if s == "sudo" and len(cmd) > 1: s = cmd[1] - return s + self.value = s + else: + self.value = None + + @contextlib.contextmanager + def update_current_cmds(self, cmds): + """Context manager that updates the information used to update the job name""" + old_cmds = self._current_cmds + try: + self._current_cmds = cmds + yield + finally: + self._current_cmds = old_cmds diff --git a/xonsh/readline_shell.py b/xonsh/readline_shell.py index fce100b8..fb5fa48b 100644 --- a/xonsh/readline_shell.py +++ b/xonsh/readline_shell.py @@ -635,6 +635,8 @@ class ReadlineShell(BaseShell, cmd.Cmd): return self.mlprompt env = XSH.env # 
pylint: disable=no-member p = env.get("PROMPT") + # clear prompt level cache + env["PROMPT_FIELDS"].reset() try: p = self.prompt_formatter(p) except Exception: # pylint: disable=broad-except
xonsh/xonsh
cb75d27300cd4e4898ff4bfe82c398080c0d19de
diff --git a/tests/prompt/test_base.py b/tests/prompt/test_base.py index 1f102ed1..9b633633 100644 --- a/tests/prompt/test_base.py +++ b/tests/prompt/test_base.py @@ -4,7 +4,7 @@ from unittest.mock import Mock import pytest from xonsh.prompt import env as prompt_env -from xonsh.prompt.base import PromptFields, PromptFormatter +from xonsh.prompt.base import PromptField, PromptFields, PromptFormatter @pytest.fixture @@ -40,8 +40,10 @@ def test_format_prompt(inp, exp, fields, formatter, xession): "a_string": "cats", "a_number": 7, "empty": "", - "current_job": (lambda: "sleep"), + "a_function": (lambda: "hello"), + "current_job": PromptField(value="sleep"), "none": (lambda: None), + "none_pf": PromptField(value=None), } ], ) @@ -49,7 +51,9 @@ def test_format_prompt(inp, exp, fields, formatter, xession): "inp, exp", [ ("{a_number:{0:^3}}cats", " 7 cats"), + ("{a_function:{} | }xonsh", "hello | xonsh"), ("{current_job:{} | }xonsh", "sleep | xonsh"), + ("{none_pf:{} | }xonsh", "xonsh"), ("{none:{} | }{a_string}{empty:!}", "cats!"), ("{none:{}}", ""), ("{{{a_string:{{{}}}}}}", "{{cats}}"), diff --git a/tests/prompt/test_job.py b/tests/prompt/test_job.py new file mode 100644 index 00000000..d8c286cb --- /dev/null +++ b/tests/prompt/test_job.py @@ -0,0 +1,13 @@ +def test_current_job(xession): + prompts = xession.env["PROMPT_FIELDS"] + cmds = (["echo", "hello"], "|", ["grep", "h"]) + + prompts.reset() + assert format(prompts.pick("current_job")) == "" + + with prompts["current_job"].update_current_cmds(cmds): + prompts.reset() + assert format(prompts.pick("current_job")) == "grep" + + prompts.reset() + assert format(prompts.pick("current_job")) == "" diff --git a/tests/test_aliases.py b/tests/test_aliases.py index b1e92b0c..3662e2b9 100644 --- a/tests/test_aliases.py +++ b/tests/test_aliases.py @@ -2,6 +2,7 @@ import inspect import os +import sys import pytest @@ -195,7 +196,7 @@ def test_exec_alias_args(xession): def test_exec_alias_return_value(exp_rtn, xonsh_session, monkeypatch): monkeypatch.setitem(xonsh_session.env, "RAISE_SUBPROC_ERROR", False) stack = inspect.stack() - rtn = ExecAlias(f"python -c 'exit({exp_rtn})'")([], stack=stack) + rtn = ExecAlias(f"{sys.executable} -c 'exit({exp_rtn})'")([], stack=stack) assert rtn == exp_rtn
Current job is not updated in terminal window's title When I run `xonsh` is using its default settings, the `$TITLE` format string (responsible for setting the terminal window's title) is ``` {current_job:{} | }{user}@{hostname}: {cwd} | xonsh ``` The `current_job` variable in `$TITLE` means that when a foreground job is running, the terminal's title should be updated with the job's command. For example, suppose my terminal's title is `yaxollum@fedora: ~ | xonsh` when no jobs are running. When I launch the `cat` command, my terminal's title should be updated to `cat | yaxollum@fedora: ~ | xonsh`. However, under the current `main` version of xonsh, my terminal's title stays unchanged. `git bisect` shows that this was introduced by #4697. Both this issue and #4034 appear to be related to setting the terminal's title, so I'll try to fix both of them in a PR. ## xonfig <details> ``` +------------------+-------------------------+ | xonsh | 0.13.0 | | Git SHA | f2ca59a2 | | Commit Date | Aug 6 05:07:09 2022 | | Python | 3.9.13 | | PLY | 3.11 | | have readline | True | | prompt toolkit | 3.0.29 | | shell type | prompt_toolkit | | history backend | sqlite | | pygments | 2.7.4 | | on posix | True | | on linux | True | | distro | fedora | | on wsl | False | | on darwin | False | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | | xontrib | [] | | RC file 1 | /home/yaxollum/.xonshrc | +------------------+-------------------------+ ``` </details> ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
0.0
cb75d27300cd4e4898ff4bfe82c398080c0d19de
[ "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{none_pf:{}", "tests/prompt/test_job.py::test_current_job" ]
[ "tests/prompt/test_base.py::test_format_prompt[my", "tests/prompt/test_base.py::test_format_prompt[{f}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{a_number:{0:^3}}cats-", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{a_function:{}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{current_job:{}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{none:{}", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{none:{}}--fields0]", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{{{a_string:{{{}}}}}}-{{cats}}-fields0]", "tests/prompt/test_base.py::test_format_prompt_with_format_spec[{{{none:{{{}}}}}}-{}-fields0]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{user]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{{user]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{{user}]", "tests/prompt/test_base.py::test_format_prompt_with_broken_template_in_func[{user}{hostname]", "tests/prompt/test_base.py::test_format_prompt_with_invalid_func", "tests/prompt/test_base.py::test_format_prompt_with_func_that_raises", "tests/prompt/test_base.py::test_format_prompt_with_no_env", "tests/prompt/test_base.py::test_format_prompt_with_various_envs[env]", "tests/prompt/test_base.py::test_format_prompt_with_various_envs[foo]", "tests/prompt/test_base.py::test_format_prompt_with_various_envs[bar]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-(]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-[[]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[)-", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-(]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-[[]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[]]-", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-(]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-[[]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-]", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[-", "tests/prompt/test_base.py::test_format_prompt_with_various_prepost[", "tests/prompt/test_base.py::test_noenv_with_disable_set", "tests/prompt/test_base.py::TestPromptFromVenvCfg::test_determine_env_name_from_cfg[prompt", "tests/prompt/test_base.py::TestPromptFromVenvCfg::test_determine_env_name_from_cfg[\\t", "tests/prompt/test_base.py::TestPromptFromVenvCfg::test_determine_env_name_from_cfg[nothing", "tests/prompt/test_base.py::TestPromptFromVenvCfg::test_determine_env_name_from_cfg[other", "tests/prompt/test_base.py::TestEnvNamePrompt::test_no_prompt", "tests/prompt/test_base.py::TestEnvNamePrompt::test_search_order", "tests/prompt/test_base.py::test_custom_env_overrides_default[0]", "tests/prompt/test_base.py::test_custom_env_overrides_default[1]", "tests/prompt/test_base.py::test_promptformatter_cache", "tests/prompt/test_base.py::test_promptformatter_clears_cache", "tests/test_aliases.py::test_imports", "tests/test_aliases.py::test_eval_normal", "tests/test_aliases.py::test_eval_self_reference", "tests/test_aliases.py::test_eval_recursive", 
"tests/test_aliases.py::test_eval_recursive_callable_partial", "tests/test_aliases.py::test_recursive_callable_partial_all", "tests/test_aliases.py::test_recursive_callable_partial_handles", "tests/test_aliases.py::test_recursive_callable_partial_none", "tests/test_aliases.py::test_subprocess_logical_operators[echo", "tests/test_aliases.py::test_subprocess_io_operators[echo", "tests/test_aliases.py::test_subprocess_io_operators[cat", "tests/test_aliases.py::test_subprocess_io_operators[COMMAND1", "tests/test_aliases.py::test_dict_merging[alias0]", "tests/test_aliases.py::test_dict_merging_assignment[alias0]", "tests/test_aliases.py::test_dict_merging_assignment[alias1]", "tests/test_aliases.py::test_exec_alias_args", "tests/test_aliases.py::test_exec_alias_return_value[0]", "tests/test_aliases.py::test_exec_alias_return_value[1]", "tests/test_aliases.py::test_exec_alias_return_value[2]", "tests/test_aliases.py::test_register_decorator" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-08-09 04:54:43+00:00
bsd-2-clause
6,301
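The core of the `xonsh__xonsh-4916` patch above is a context manager that temporarily records the command about to run so the title formatter can pick it up, then restores the previous state. Below is a condensed sketch of that mechanism, using a simplified stand-in for `CurrentJobField` without the surrounding `PromptFields` caching machinery.

```python
# Editor's illustrative sketch (simplified stand-in for CurrentJobField).
import contextlib


class CurrentJobField:
    _current_cmds = None
    value = None

    def update(self):
        if self._current_cmds is not None:
            cmd = self._current_cmds[-1]   # last command in the pipeline
            name = cmd[0]
            if name == "sudo" and len(cmd) > 1:
                name = cmd[1]              # show the wrapped command, not "sudo"
            self.value = name
        else:
            self.value = None

    @contextlib.contextmanager
    def update_current_cmds(self, cmds):
        old = self._current_cmds
        try:
            self._current_cmds = cmds
            yield
        finally:
            self._current_cmds = old


job = CurrentJobField()
with job.update_current_cmds((["echo", "hi"], "|", ["grep", "h"])):
    job.update()
    print(job.value)   # "grep" -> shown in the terminal title while the pipeline runs
job.update()
print(job.value)       # None  -> title falls back to the plain prompt afterwards
```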
xonsh__xonsh-5322
diff --git a/news/fix-redirect-structure.rst b/news/fix-redirect-structure.rst new file mode 100644 index 00000000..a84244a8 --- /dev/null +++ b/news/fix-redirect-structure.rst @@ -0,0 +1,23 @@ +**Added:** + +* <news item> + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* Redirect tokens in quotes (e.g. ">", "2>", "2>1") are now correctly passed to commands as regular arguments. + +**Security:** + +* <news item> diff --git a/xonsh/lexer.py b/xonsh/lexer.py index a0f55feb..ddebd59a 100644 --- a/xonsh/lexer.py +++ b/xonsh/lexer.py @@ -23,7 +23,8 @@ from xonsh.tokenize import ( ERRORTOKEN, GREATER, INDENT, - IOREDIRECT, + IOREDIRECT1, + IOREDIRECT2, LESS, MATCH, NAME, @@ -101,7 +102,8 @@ def token_map(): } for op, typ in _op_map.items(): tm[(OP, op)] = typ - tm[IOREDIRECT] = "IOREDIRECT" + tm[IOREDIRECT1] = "IOREDIRECT1" + tm[IOREDIRECT2] = "IOREDIRECT2" tm[STRING] = "STRING" tm[DOLLARNAME] = "DOLLAR_NAME" tm[NUMBER] = "NUMBER" @@ -255,7 +257,7 @@ def handle_redirect(state, token): key = (typ, st) if (typ, st) in token_map else typ new_tok = _new_token(token_map[key], st, token.start) if state["pymode"][-1][0]: - if typ == IOREDIRECT: + if typ in (IOREDIRECT1, IOREDIRECT2): # Fix Python mode code that was incorrectly recognized as an # IOREDIRECT by the tokenizer (see issue #4994). # The tokenizer does not know when the code should be tokenized in @@ -310,7 +312,8 @@ def special_handlers(): LESS: handle_redirect, GREATER: handle_redirect, RIGHTSHIFT: handle_redirect, - IOREDIRECT: handle_redirect, + IOREDIRECT1: handle_redirect, + IOREDIRECT2: handle_redirect, (OP, "<"): handle_redirect, (OP, ">"): handle_redirect, (OP, ">>"): handle_redirect, diff --git a/xonsh/parsers/base.py b/xonsh/parsers/base.py index e5ede659..c4a6c524 100644 --- a/xonsh/parsers/base.py +++ b/xonsh/parsers/base.py @@ -3432,12 +3432,20 @@ class BaseParser: def p_subproc_atom_redirect(self, p): """ - subproc_atom : GT - | LT - | RSHIFT - | IOREDIRECT - """ - p0 = ast.const_str(s=p[1], lineno=self.lineno, col_offset=self.col) + subproc_atom : GT WS subproc_atom + | LT WS subproc_atom + | RSHIFT WS subproc_atom + | IOREDIRECT1 WS subproc_atom + | IOREDIRECT2 + """ + operator = ast.const_str(s=p[1], lineno=self.lineno, col_offset=self.col) + elts = [operator] if len(p) == 2 else [operator, p[3]] + p0 = ast.Tuple( + elts=elts, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col, + ) p0._cliarg_action = "append" p[0] = p0 @@ -3523,7 +3531,8 @@ class BaseParser: "LT", "LSHIFT", "RSHIFT", - "IOREDIRECT", + "IOREDIRECT1", + "IOREDIRECT2", "SEARCHPATH", "INDENT", "DEDENT", diff --git a/xonsh/parsers/completion_context.py b/xonsh/parsers/completion_context.py index 04d34350..0984d967 100644 --- a/xonsh/parsers/completion_context.py +++ b/xonsh/parsers/completion_context.py @@ -330,7 +330,8 @@ class CompletionContextParser: "LT", "GT", "RSHIFT", - "IOREDIRECT", + "IOREDIRECT1", + "IOREDIRECT2", } used_tokens |= io_redir_tokens artificial_tokens = {"ANY"} diff --git a/xonsh/procs/specs.py b/xonsh/procs/specs.py index 9a0c639f..7c538a1b 100644 --- a/xonsh/procs/specs.py +++ b/xonsh/procs/specs.py @@ -172,10 +172,6 @@ def _O2E_MAP(): return frozenset({f"{o}>{e}" for e in _REDIR_ERR for o in _REDIR_OUT if o != ""}) -def _is_redirect(x): - return isinstance(x, str) and _REDIR_REGEX.match(x) - - def safe_open(fname, mode, buffering=-1): """Safely attempts to open a file in for xonsh subprocs.""" # file descriptors @@ -401,7 +397,7 @@ class SubprocSpec: else: 
safe_close(value) msg = "Multiple inputs for stdin for {0!r}" - msg = msg.format(" ".join(self.args)) + msg = msg.format(self.get_command_str()) raise xt.XonshError(msg) @property @@ -417,7 +413,7 @@ class SubprocSpec: else: safe_close(value) msg = "Multiple redirections for stdout for {0!r}" - msg = msg.format(" ".join(self.args)) + msg = msg.format(self.get_command_str()) raise xt.XonshError(msg) @property @@ -433,9 +429,14 @@ class SubprocSpec: else: safe_close(value) msg = "Multiple redirections for stderr for {0!r}" - msg = msg.format(" ".join(self.args)) + msg = msg.format(self.get_command_str()) raise xt.XonshError(msg) + def get_command_str(self): + return " ".join( + " ".join(arg) if isinstance(arg, tuple) else arg for arg in self.args + ) + # # Execution methods # @@ -579,8 +580,7 @@ class SubprocSpec: spec = kls(cmd, cls=cls, **kwargs) # modifications that alter cmds must come after creating instance # perform initial redirects - spec.redirect_leading() - spec.redirect_trailing() + spec.resolve_redirects() # apply aliases spec.resolve_alias() spec.resolve_binary_loc() @@ -590,26 +590,16 @@ class SubprocSpec: spec.resolve_stack() return spec - def redirect_leading(self): - """Manage leading redirects such as with '< input.txt COMMAND'.""" - while len(self.cmd) >= 3 and self.cmd[0] == "<": - self.stdin = safe_open(self.cmd[1], "r") - self.cmd = self.cmd[2:] - - def redirect_trailing(self): - """Manages trailing redirects.""" - while True: - cmd = self.cmd - if len(cmd) >= 3 and _is_redirect(cmd[-2]): - streams = _redirect_streams(cmd[-2], cmd[-1]) - self.stdin, self.stdout, self.stderr = streams - self.cmd = cmd[:-2] - elif len(cmd) >= 2 and _is_redirect(cmd[-1]): - streams = _redirect_streams(cmd[-1]) + def resolve_redirects(self): + """Manages redirects.""" + new_cmd = [] + for c in self.cmd: + if isinstance(c, tuple): + streams = _redirect_streams(*c) self.stdin, self.stdout, self.stderr = streams - self.cmd = cmd[:-1] else: - break + new_cmd.append(c) + self.cmd = new_cmd def resolve_alias(self): """Sets alias in command, if applicable.""" @@ -667,8 +657,7 @@ class SubprocSpec: else: self.cmd = alias + self.cmd[1:] # resolve any redirects the aliases may have applied - self.redirect_leading() - self.redirect_trailing() + self.resolve_redirects() if self.binary_loc is None: return try: diff --git a/xonsh/tokenize.py b/xonsh/tokenize.py index 5127286c..a31fef8a 100644 --- a/xonsh/tokenize.py +++ b/xonsh/tokenize.py @@ -110,7 +110,8 @@ __all__ = token.__all__ + [ # type:ignore "ATDOLLAR", "ATEQUAL", "DOLLARNAME", - "IOREDIRECT", + "IOREDIRECT1", + "IOREDIRECT2", "MATCH", "CASE", ] @@ -135,8 +136,11 @@ N_TOKENS += 3 SEARCHPATH = N_TOKENS tok_name[N_TOKENS] = "SEARCHPATH" N_TOKENS += 1 -IOREDIRECT = N_TOKENS -tok_name[N_TOKENS] = "IOREDIRECT" +IOREDIRECT1 = N_TOKENS +tok_name[N_TOKENS] = "IOREDIRECT1" +N_TOKENS += 1 +IOREDIRECT2 = N_TOKENS +tok_name[N_TOKENS] = "IOREDIRECT2" N_TOKENS += 1 DOLLARNAME = N_TOKENS tok_name[N_TOKENS] = "DOLLARNAME" @@ -335,10 +339,11 @@ _redir_map = ( ) IORedirect = group(group(*_redir_map), f"{group(*_redir_names)}>>?") -_redir_check_0 = set(_redir_map) -_redir_check_1 = {f"{i}>" for i in _redir_names}.union(_redir_check_0) +_redir_check_map = frozenset(_redir_map) + +_redir_check_1 = {f"{i}>" for i in _redir_names} _redir_check_2 = {f"{i}>>" for i in _redir_names}.union(_redir_check_1) -_redir_check = frozenset(_redir_check_2) +_redir_check_single = frozenset(_redir_check_2) Operator = group( r"\*\*=?", @@ -1004,8 +1009,10 @@ def _tokenize(readline, 
encoding, tolerant=False, tokenize_ioredirects=True): continue token, initial = line[start:end], line[start] - if token in _redir_check: - yield TokenInfo(IOREDIRECT, token, spos, epos, line) + if token in _redir_check_single: + yield TokenInfo(IOREDIRECT1, token, spos, epos, line) + elif token in _redir_check_map: + yield TokenInfo(IOREDIRECT2, token, spos, epos, line) elif initial in numchars or ( # ordinary number initial == "." and token != "." and token != "..." ):
xonsh/xonsh
7461c507b210d1492cac6d2f517ba459ec86bea8
diff --git a/tests/test_integrations.py b/tests/test_integrations.py index 84cdecba..d689a291 100644 --- a/tests/test_integrations.py +++ b/tests/test_integrations.py @@ -886,6 +886,27 @@ aliases['echo'] = _echo assert out == exp +@skip_if_no_xonsh [email protected]( + "cmd, exp", + [ + ("echo '>'", ">\n"), + ("echo '2>'", "2>\n"), + ("echo '2>1'", "2>1\n"), + ], +) +def test_redirect_argument(cmd, exp): + script = f""" +#!/usr/bin/env xonsh +def _echo(args): + print(' '.join(args)) +aliases['echo'] = _echo +{cmd} +""" + out, _, _ = run_xonsh(script) + assert out == exp + + # issue 3402 @skip_if_no_xonsh @skip_if_on_windows diff --git a/tests/test_lexer.py b/tests/test_lexer.py index c707c1cc..ad209655 100644 --- a/tests/test_lexer.py +++ b/tests/test_lexer.py @@ -415,11 +415,14 @@ def test_float_literals(case): assert check_token(case, ["NUMBER", case, 0]) [email protected]( - "case", ["2>1", "err>out", "o>", "all>", "e>o", "e>", "out>", "2>&1"] -) -def test_ioredir(case): - assert check_tokens_subproc(case, [("IOREDIRECT", case, 2)], stop=-2) [email protected]("case", ["o>", "all>", "e>", "out>"]) +def test_ioredir1(case): + assert check_tokens_subproc(case, [("IOREDIRECT1", case, 2)], stop=-2) + + [email protected]("case", ["2>1", "err>out", "e>o", "2>&1"]) +def test_ioredir2(case): + assert check_tokens_subproc(case, [("IOREDIRECT2", case, 2)], stop=-2) @pytest.mark.parametrize("case", [">", ">>", "<", "e>", "> ", ">> ", "< ", "e> "])
Unable to pass a single ">" as an argument `echo spam ">" eggs` is handled and executed exactly the same as `echo spam > eggs`. I think this is because of how data is sent to `cmd_to_specs()` eg both are passed as `['echo', 'spam', '>', 'eggs']`. Also: * `echo spam @(">") eggs` ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
0.0
7461c507b210d1492cac6d2f517ba459ec86bea8
[ "tests/test_integrations.py::test_loading_correctly[True]", "tests/test_integrations.py::test_loading_correctly[False]", "tests/test_lexer.py::test_ioredir1[o>]", "tests/test_lexer.py::test_ioredir1[all>]", "tests/test_lexer.py::test_ioredir1[e>]", "tests/test_lexer.py::test_ioredir1[out>]", "tests/test_lexer.py::test_ioredir2[2>1]", "tests/test_lexer.py::test_ioredir2[err>out]", "tests/test_lexer.py::test_ioredir2[e>o]", "tests/test_lexer.py::test_ioredir2[2>&1]", "tests/test_lexer.py::test_pymode_not_ioredirect[2>1-exp0]", "tests/test_lexer.py::test_pymode_not_ioredirect[a>b-exp1]", "tests/test_lexer.py::test_pymode_not_ioredirect[3>2>1-exp2]", "tests/test_lexer.py::test_pymode_not_ioredirect[36+2>>3-exp3]" ]
[ "tests/test_integrations.py::test_script[case0]", "tests/test_integrations.py::test_script[case1]", "tests/test_integrations.py::test_script[case2]", "tests/test_integrations.py::test_script[case3]", "tests/test_integrations.py::test_script[case4]", "tests/test_integrations.py::test_script[case5]", "tests/test_integrations.py::test_script[case6]", "tests/test_integrations.py::test_script[case7]", "tests/test_integrations.py::test_script[case8]", "tests/test_integrations.py::test_script[case9]", "tests/test_integrations.py::test_script[case10]", "tests/test_integrations.py::test_script[case11]", "tests/test_integrations.py::test_script[case12]", "tests/test_integrations.py::test_script[case13]", "tests/test_integrations.py::test_script[case14]", "tests/test_integrations.py::test_script[case15]", "tests/test_integrations.py::test_script[case16]", "tests/test_integrations.py::test_script[case17]", "tests/test_integrations.py::test_script[case18]", "tests/test_integrations.py::test_script[case19]", "tests/test_integrations.py::test_script[case20]", "tests/test_integrations.py::test_script[case21]", "tests/test_integrations.py::test_script[case22]", "tests/test_integrations.py::test_script[case23]", "tests/test_integrations.py::test_script[case24]", "tests/test_integrations.py::test_script[case25]", "tests/test_integrations.py::test_script[case27]", "tests/test_integrations.py::test_script[case28]", "tests/test_integrations.py::test_script_stderr[case0]", "tests/test_integrations.py::test_single_command_no_windows[pwd-None-<lambda>]", "tests/test_integrations.py::test_single_command_no_windows[echo", "tests/test_integrations.py::test_single_command_no_windows[ls", "tests/test_integrations.py::test_single_command_no_windows[$FOO='foo'", "tests/test_integrations.py::test_eof_syntax_error", "tests/test_integrations.py::test_open_quote_syntax_error", "tests/test_integrations.py::test_atdollar_no_output", "tests/test_integrations.py::test_empty_command", "tests/test_integrations.py::test_printfile", "tests/test_integrations.py::test_printname", "tests/test_integrations.py::test_sourcefile", "tests/test_integrations.py::test_subshells[\\nwith", "tests/test_integrations.py::test_redirect_out_to_file[pwd-<lambda>]", "tests/test_integrations.py::test_pipe_between_subprocs[cat", "tests/test_integrations.py::test_negative_exit_codes_fail", "tests/test_integrations.py::test_ampersand_argument[echo", "tests/test_integrations.py::test_redirect_argument[echo", "tests/test_integrations.py::test_single_command_return_code[import", "tests/test_integrations.py::test_single_command_return_code[sh", "tests/test_integrations.py::test_argv0", "tests/test_integrations.py::test_exec_function_scope[x", "tests/test_lexer.py::test_int_literal", "tests/test_lexer.py::test_hex_literal", "tests/test_lexer.py::test_oct_o_literal", "tests/test_lexer.py::test_bin_literal", "tests/test_lexer.py::test_indent", "tests/test_lexer.py::test_post_whitespace", "tests/test_lexer.py::test_internal_whitespace", "tests/test_lexer.py::test_indent_internal_whitespace", "tests/test_lexer.py::test_assignment", "tests/test_lexer.py::test_multiline", "tests/test_lexer.py::test_atdollar_expression", "tests/test_lexer.py::test_and", "tests/test_lexer.py::test_ampersand", "tests/test_lexer.py::test_not_really_and_pre", "tests/test_lexer.py::test_not_really_and_post", "tests/test_lexer.py::test_not_really_and_pre_post", "tests/test_lexer.py::test_not_really_or_pre", "tests/test_lexer.py::test_not_really_or_post", 
"tests/test_lexer.py::test_not_really_or_pre_post", "tests/test_lexer.py::test_subproc_line_cont_space", "tests/test_lexer.py::test_subproc_line_cont_nospace", "tests/test_lexer.py::test_atdollar", "tests/test_lexer.py::test_doubleamp", "tests/test_lexer.py::test_pipe", "tests/test_lexer.py::test_doublepipe", "tests/test_lexer.py::test_single_quote_literal", "tests/test_lexer.py::test_double_quote_literal", "tests/test_lexer.py::test_triple_single_quote_literal", "tests/test_lexer.py::test_triple_double_quote_literal", "tests/test_lexer.py::test_single_raw_string_literal", "tests/test_lexer.py::test_double_raw_string_literal", "tests/test_lexer.py::test_single_f_string_literal", "tests/test_lexer.py::test_double_f_string_literal", "tests/test_lexer.py::test_single_unicode_literal", "tests/test_lexer.py::test_double_unicode_literal", "tests/test_lexer.py::test_single_bytes_literal", "tests/test_lexer.py::test_path_string_literal", "tests/test_lexer.py::test_path_fstring_literal", "tests/test_lexer.py::test_regex_globs", "tests/test_lexer.py::test_float_literals[0.0]", "tests/test_lexer.py::test_float_literals[.0]", "tests/test_lexer.py::test_float_literals[0.]", "tests/test_lexer.py::test_float_literals[1e10]", "tests/test_lexer.py::test_float_literals[1.e42]", "tests/test_lexer.py::test_float_literals[0.1e42]", "tests/test_lexer.py::test_float_literals[0.5e-42]", "tests/test_lexer.py::test_float_literals[5E10]", "tests/test_lexer.py::test_float_literals[5e+42]", "tests/test_lexer.py::test_float_literals[1_0e1_0]", "tests/test_lexer.py::test_redir_whitespace[>]", "tests/test_lexer.py::test_redir_whitespace[>>]", "tests/test_lexer.py::test_redir_whitespace[<]", "tests/test_lexer.py::test_redir_whitespace[e>]", "tests/test_lexer.py::test_redir_whitespace[>", "tests/test_lexer.py::test_redir_whitespace[>>", "tests/test_lexer.py::test_redir_whitespace[<", "tests/test_lexer.py::test_redir_whitespace[e>", "tests/test_lexer.py::test_lexer_split[-exp0]", "tests/test_lexer.py::test_lexer_split[", "tests/test_lexer.py::test_lexer_split[echo", "tests/test_lexer.py::test_lexer_split[![echo", "tests/test_lexer.py::test_lexer_split[/usr/bin/echo", "tests/test_lexer.py::test_lexer_split[$(/usr/bin/echo", "tests/test_lexer.py::test_lexer_split[C:\\\\Python\\\\python.exe", "tests/test_lexer.py::test_lexer_split[print(\"\"\"I", "tests/test_lexer.py::test_tolerant_lexer[()]", "tests/test_lexer.py::test_tolerant_lexer[(]", "tests/test_lexer.py::test_tolerant_lexer[)]", "tests/test_lexer.py::test_tolerant_lexer[))]", "tests/test_lexer.py::test_tolerant_lexer['string\\nliteral]", "tests/test_lexer.py::test_tolerant_lexer['''string\\nliteral]", "tests/test_lexer.py::test_tolerant_lexer[string\\nliteral']", "tests/test_lexer.py::test_tolerant_lexer[\"]", "tests/test_lexer.py::test_tolerant_lexer[']", "tests/test_lexer.py::test_tolerant_lexer[\"\"\"]" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-04-04 22:34:32+00:00
bsd-2-clause
6,302
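The `xonsh__xonsh-5322` patch above changes the parser so that real redirects reach the spec as `(operator, target)` tuples, which lets a quoted `">"` survive as an ordinary string argument. A minimal sketch of that separation step follows; `resolve_redirects` here is a hypothetical free function, not the real `SubprocSpec.resolve_redirects` method.

```python
# Editor's illustrative sketch (hypothetical helper, not the real SubprocSpec code).
def resolve_redirects(cmd):
    args, redirects = [], []
    for part in cmd:
        if isinstance(part, tuple):   # e.g. (">", "out.txt") produced by the parser
            redirects.append(part)
        else:                         # plain string argument, even if it looks like ">"
            args.append(part)
    return args, redirects


print(resolve_redirects(["echo", ">"]))                     # (['echo', '>'], [])
print(resolve_redirects(["echo", "hi", (">", "out.txt")]))  # (['echo', 'hi'], [('>', 'out.txt')])
```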
xonsh__xonsh-5326
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f429925..0ec50cb0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: pass_filenames: false - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: 'v0.3.4' + rev: 'v0.3.5' hooks: - id: ruff args: [., --fix, --exit-non-zero-on-fix] @@ -41,7 +41,7 @@ repos: additional_dependencies: - types-ujson - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: trailing-whitespace exclude: | diff --git a/news/brackets-in-args.rst b/news/brackets-in-args.rst new file mode 100644 index 00000000..1478e6c5 --- /dev/null +++ b/news/brackets-in-args.rst @@ -0,0 +1,23 @@ +**Added:** + +* Square brackets can now be used in command arguments without quotes (e.g. `echo a[b]`) + +**Changed:** + +* <news item> + +**Deprecated:** + +* <news item> + +**Removed:** + +* <news item> + +**Fixed:** + +* <news item> + +**Security:** + +* <news item> diff --git a/xonsh/parsers/base.py b/xonsh/parsers/base.py index c4a6c524..76c2ba4f 100644 --- a/xonsh/parsers/base.py +++ b/xonsh/parsers/base.py @@ -1,5 +1,6 @@ """Implements the base xonsh parser.""" +import itertools import os import re import textwrap @@ -3496,27 +3497,43 @@ class BaseParser: """subproc_arg : subproc_arg_part""" p[0] = p[1] + def _arg_part_combine(self, *arg_parts): + """Combines arg_parts. If all arg_parts are strings, concatenate the strings. + Otherwise, return a list of arg_parts.""" + if all(ast.is_const_str(ap) for ap in arg_parts): + return ast.const_str( + "".join(ap.value for ap in arg_parts), + lineno=arg_parts[0].lineno, + col_offset=arg_parts[0].col_offset, + ) + else: + return list( + itertools.chain.from_iterable( + ap if isinstance(ap, list) else [ap] for ap in arg_parts + ) + ) + def p_subproc_arg_many(self, p): """subproc_arg : subproc_arg subproc_arg_part""" # This glues the string together after parsing + p[0] = self._arg_part_combine(p[1], p[2]) + + def p_subproc_arg_part_brackets(self, p): + """subproc_arg_part : lbracket_tok subproc_arg rbracket_tok""" p1 = p[1] p2 = p[2] - if ast.is_const_str(p1) and ast.is_const_str(p2): - p0 = ast.const_str( - p1.value + p2.value, lineno=p1.lineno, col_offset=p1.col_offset - ) - elif isinstance(p1, list): - if isinstance(p2, list): - p1.extend(p2) - else: - p1.append(p2) - p0 = p1 - elif isinstance(p2, list): - p2.insert(0, p1) - p0 = p2 - else: - p0 = [p1, p2] - p[0] = p0 + p3 = p[3] + p1 = ast.const_str(s=p1.value, lineno=p1.lineno, col_offset=p1.lexpos) + p3 = ast.const_str(s=p3.value, lineno=p3.lineno, col_offset=p3.lexpos) + p[0] = self._arg_part_combine(p1, p2, p3) + + def p_subproc_arg_part_brackets_empty(self, p): + """subproc_arg_part : lbracket_tok rbracket_tok""" + p1 = p[1] + p2 = p[2] + p1 = ast.const_str(s=p1.value, lineno=p1.lineno, col_offset=p1.lexpos) + p2 = ast.const_str(s=p2.value, lineno=p2.lineno, col_offset=p2.lexpos) + p[0] = self._arg_part_combine(p1, p2) def _attach_subproc_arg_part_rules(self): toks = set(self.tokens) diff --git a/xonsh/procs/specs.py b/xonsh/procs/specs.py index 7c538a1b..660cfc85 100644 --- a/xonsh/procs/specs.py +++ b/xonsh/procs/specs.py @@ -250,6 +250,17 @@ def _redirect_streams(r, loc=None): return stdin, stdout, stderr +def _flatten_cmd_redirects(cmd): + """Transforms a command like ['ls', ('>', '/dev/null')] into ['ls', '>', '/dev/null'].""" + new_cmd = [] + for c in cmd: + if isinstance(c, tuple): + new_cmd.extend(c) + else: + new_cmd.append(c) + return new_cmd + + def 
default_signal_pauser(n, f): """Pauses a signal, as needed.""" signal.pause() @@ -352,7 +363,7 @@ class SubprocSpec: else: self.env = None # pure attrs - self.args = list(cmd) + self.args = _flatten_cmd_redirects(cmd) self.alias = None self.alias_name = None self.alias_stack = XSH.env.get("__ALIAS_STACK", "").split(":") @@ -433,9 +444,7 @@ class SubprocSpec: raise xt.XonshError(msg) def get_command_str(self): - return " ".join( - " ".join(arg) if isinstance(arg, tuple) else arg for arg in self.args - ) + return " ".join(arg for arg in self.args) # # Execution methods @@ -883,6 +892,9 @@ def run_subproc(cmds, captured=False, envs=None): print(f"TRACE SUBPROC: {cmds}, captured={captured}", file=sys.stderr) specs = cmds_to_specs(cmds, captured=captured, envs=envs) + cmds = [ + _flatten_cmd_redirects(cmd) if isinstance(cmd, list) else cmd for cmd in cmds + ] if _should_set_title(): # context manager updates the command information that gets # accessed by CurrentJobField when setting the terminal's title
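For reference, the redirect handling added at the end of the patch can be exercised on its own. The helper below is copied verbatim from the diff's `xonsh/procs/specs.py` hunk; the surrounding demo call is only an illustration, using the input shape given in the helper's own docstring:

```python
def _flatten_cmd_redirects(cmd):
    """Transforms a command like ['ls', ('>', '/dev/null')] into ['ls', '>', '/dev/null']."""
    new_cmd = []
    for c in cmd:
        if isinstance(c, tuple):
            # Redirect tokens arrive as tuples; splice them back into the flat list.
            new_cmd.extend(c)
        else:
            new_cmd.append(c)
    return new_cmd


# Illustrative input taken from the docstring example above.
print(_flatten_cmd_redirects(["ls", (">", "/dev/null")]))
# -> ['ls', '>', '/dev/null']
```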
xonsh/xonsh
08ac0d97590567728d1b0fb817c70eeb617766ca
diff --git a/tests/test_parser.py b/tests/test_parser.py index ae57dfa2..ee131ed8 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -2649,6 +2649,25 @@ def test_echo_slash_question(check_xonsh_ast): check_xonsh_ast({}, "![echo /?]", False) [email protected]( + "case", + [ + "[]", + "[[]]", + "[a]", + "[a][b]", + "a[b]", + "[a]b", + "a[b]c", + "a[b[c]]", + "[a]b[[]c[d,e]f[]g,h]", + "[a@([1,2])]@([3,4])", + ], +) +def test_echo_brackets(case, check_xonsh_ast): + check_xonsh_ast({}, f"![echo {case}]") + + def test_bad_quotes(check_xonsh_ast): with pytest.raises(SyntaxError): check_xonsh_ast({}, '![echo """hello]', False)
Syntax errors in subprocess commands containing [] I am trying to run a command which should be getting passed to a subprocess (in this case rake). The command itself includes square brackets (`[]`) which results in a traceback. The command: `rake so_thing[some,parameters]` The error: <details> ``` Traceback (most recent call last): File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/ptk/shell.py", line 137, in _push locs=None) File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/execer.py", line 110, in compile transform=transform) File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/execer.py", line 79, in parse tree, input = self._parse_ctx_free(input, mode=mode, filename=filename) File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/execer.py", line 179, in _parse_ctx_free raise original_error from None File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/execer.py", line 166, in _parse_ctx_free debug_level=(self.debug_level > 2)) File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/parsers/base.py", line 348, in parse tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level) File "/usr/lib/python3.6/site-packages/ply/yacc.py", line 331, in parse return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) File "/usr/lib/python3.6/site-packages/ply/yacc.py", line 1199, in parseopt_notrack tok = call_errorfunc(self.errorfunc, errtoken, self) File "/usr/lib/python3.6/site-packages/ply/yacc.py", line 193, in call_errorfunc r = errorfunc(token) File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/parsers/base.py", line 2726, in p_error column=p.lexpos)) File "/home/tmacey/.local/lib/python3.6/site-packages/xonsh/parsers/base.py", line 479, in _parse_error raise err File "<string>", line None SyntaxError: /home/tmacey/.local/lib/python3.6/site-packages/xontrib/jedi.xsh:1:21: ('code: [',) rake generate_stack[01,dev2-useast1] ^ ``` </details>
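The patch resolves this by gluing bracket tokens into the surrounding subprocess argument. Below is a minimal standalone sketch of that gluing rule, modelled on `BaseParser._arg_part_combine` but operating on plain strings rather than AST constant nodes; the function name `combine` and the demo input are illustrative only:

```python
import itertools


def combine(*parts):
    """Sketch of the gluing rule: if every part is a plain string, join them into
    one literal argument; otherwise fall back to a flat list of the parts."""
    if all(isinstance(p, str) for p in parts):
        return "".join(parts)
    return list(
        itertools.chain.from_iterable(p if isinstance(p, list) else [p] for p in parts)
    )


# 'rake so_thing[some,parameters]': the bracketed text stays part of one argument.
print(combine("so_thing", "[", "some,parameters", "]"))
# -> 'so_thing[some,parameters]'
```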
0.0
08ac0d97590567728d1b0fb817c70eeb617766ca
[ "tests/test_parser.py::test_echo_brackets[[]]", "tests/test_parser.py::test_echo_brackets[[[]]]", "tests/test_parser.py::test_echo_brackets[[a]]", "tests/test_parser.py::test_echo_brackets[[a][b]]", "tests/test_parser.py::test_echo_brackets[a[b]]", "tests/test_parser.py::test_echo_brackets[[a]b]", "tests/test_parser.py::test_echo_brackets[a[b]c]", "tests/test_parser.py::test_echo_brackets[a[b[c]]]", "tests/test_parser.py::test_echo_brackets[[a]b[[]c[d,e]f[]g,h]]", "tests/test_parser.py::test_echo_brackets[[a@([1,2])]@([3,4])]" ]
[ "tests/test_parser.py::test_int_literal", "tests/test_parser.py::test_int_literal_underscore", "tests/test_parser.py::test_float_literal", "tests/test_parser.py::test_float_literal_underscore", "tests/test_parser.py::test_imag_literal", "tests/test_parser.py::test_float_imag_literal", "tests/test_parser.py::test_complex", "tests/test_parser.py::test_str_literal", "tests/test_parser.py::test_bytes_literal", "tests/test_parser.py::test_raw_literal", "tests/test_parser.py::test_f_literal", "tests/test_parser.py::test_string_literal_concat[-]", "tests/test_parser.py::test_string_literal_concat[-f]", "tests/test_parser.py::test_string_literal_concat[-r]", "tests/test_parser.py::test_string_literal_concat[-fr]", "tests/test_parser.py::test_string_literal_concat[f-]", "tests/test_parser.py::test_string_literal_concat[f-f]", "tests/test_parser.py::test_string_literal_concat[f-r]", "tests/test_parser.py::test_string_literal_concat[f-fr]", "tests/test_parser.py::test_string_literal_concat[r-]", "tests/test_parser.py::test_string_literal_concat[r-f]", "tests/test_parser.py::test_string_literal_concat[r-r]", "tests/test_parser.py::test_string_literal_concat[r-fr]", "tests/test_parser.py::test_string_literal_concat[fr-]", "tests/test_parser.py::test_string_literal_concat[fr-f]", "tests/test_parser.py::test_string_literal_concat[fr-r]", "tests/test_parser.py::test_string_literal_concat[fr-fr]", "tests/test_parser.py::test_f_env_var", "tests/test_parser.py::test_fstring_adaptor[f\"$HOME\"-$HOME]", "tests/test_parser.py::test_fstring_adaptor[f\"{0}", "tests/test_parser.py::test_fstring_adaptor[f\"{$HOME}\"-/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f\"{", "tests/test_parser.py::test_fstring_adaptor[f\"{'$HOME'}\"-$HOME]", "tests/test_parser.py::test_fstring_adaptor[f\"$HOME", "tests/test_parser.py::test_fstring_adaptor[f\"{${'HOME'}}\"-/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f'{${$FOO+$BAR}}'-/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f\"${$FOO}{$BAR}={f'{$HOME}'}\"-$HOME=/foo/bar]", "tests/test_parser.py::test_fstring_adaptor[f\"\"\"foo\\n{f\"_{$HOME}_\"}\\nbar\"\"\"-foo\\n_/foo/bar_\\nbar]", "tests/test_parser.py::test_fstring_adaptor[f\"\"\"foo\\n{f\"_{${'HOME'}}_\"}\\nbar\"\"\"-foo\\n_/foo/bar_\\nbar]", "tests/test_parser.py::test_fstring_adaptor[f\"\"\"foo\\n{f\"_{${", "tests/test_parser.py::test_fstring_adaptor[f'{$HOME=}'-$HOME='/foo/bar']", "tests/test_parser.py::test_raw_bytes_literal", "tests/test_parser.py::test_unary_plus", "tests/test_parser.py::test_unary_minus", "tests/test_parser.py::test_unary_invert", "tests/test_parser.py::test_binop_plus", "tests/test_parser.py::test_binop_minus", "tests/test_parser.py::test_binop_times", "tests/test_parser.py::test_binop_matmult", "tests/test_parser.py::test_binop_div", "tests/test_parser.py::test_binop_mod", "tests/test_parser.py::test_binop_floordiv", "tests/test_parser.py::test_binop_pow", "tests/test_parser.py::test_plus_pow", "tests/test_parser.py::test_plus_plus", "tests/test_parser.py::test_plus_minus", "tests/test_parser.py::test_minus_plus", "tests/test_parser.py::test_minus_minus", "tests/test_parser.py::test_minus_plus_minus", "tests/test_parser.py::test_times_plus", "tests/test_parser.py::test_plus_times", "tests/test_parser.py::test_times_times", "tests/test_parser.py::test_times_div", "tests/test_parser.py::test_times_div_mod", "tests/test_parser.py::test_times_div_mod_floor", "tests/test_parser.py::test_str_str", "tests/test_parser.py::test_str_str_str", 
"tests/test_parser.py::test_str_plus_str", "tests/test_parser.py::test_str_times_int", "tests/test_parser.py::test_int_times_str", "tests/test_parser.py::test_group_plus_times", "tests/test_parser.py::test_plus_group_times", "tests/test_parser.py::test_group", "tests/test_parser.py::test_lt", "tests/test_parser.py::test_gt", "tests/test_parser.py::test_eq", "tests/test_parser.py::test_le", "tests/test_parser.py::test_ge", "tests/test_parser.py::test_ne", "tests/test_parser.py::test_in", "tests/test_parser.py::test_is", "tests/test_parser.py::test_not_in", "tests/test_parser.py::test_is_not", "tests/test_parser.py::test_lt_lt", "tests/test_parser.py::test_lt_lt_lt", "tests/test_parser.py::test_not", "tests/test_parser.py::test_or", "tests/test_parser.py::test_or_or", "tests/test_parser.py::test_and", "tests/test_parser.py::test_and_and", "tests/test_parser.py::test_and_or", "tests/test_parser.py::test_or_and", "tests/test_parser.py::test_group_and_and", "tests/test_parser.py::test_group_and_or", "tests/test_parser.py::test_if_else_expr", "tests/test_parser.py::test_if_else_expr_expr", "tests/test_parser.py::test_subscription_syntaxes", "tests/test_parser.py::test_subscription_special_syntaxes", "tests/test_parser.py::test_str_idx", "tests/test_parser.py::test_str_slice", "tests/test_parser.py::test_str_step", "tests/test_parser.py::test_str_slice_all", "tests/test_parser.py::test_str_slice_upper", "tests/test_parser.py::test_str_slice_lower", "tests/test_parser.py::test_str_slice_other", "tests/test_parser.py::test_str_slice_lower_other", "tests/test_parser.py::test_str_slice_upper_other", "tests/test_parser.py::test_str_2slice", "tests/test_parser.py::test_str_2step", "tests/test_parser.py::test_str_2slice_all", "tests/test_parser.py::test_str_2slice_upper", "tests/test_parser.py::test_str_2slice_lower", "tests/test_parser.py::test_str_2slice_lowerupper", "tests/test_parser.py::test_str_2slice_other", "tests/test_parser.py::test_str_2slice_lower_other", "tests/test_parser.py::test_str_2slice_upper_other", "tests/test_parser.py::test_str_3slice", "tests/test_parser.py::test_str_3step", "tests/test_parser.py::test_str_3slice_all", "tests/test_parser.py::test_str_3slice_upper", "tests/test_parser.py::test_str_3slice_lower", "tests/test_parser.py::test_str_3slice_lowerlowerupper", "tests/test_parser.py::test_str_3slice_lowerupperlower", "tests/test_parser.py::test_str_3slice_lowerupperupper", "tests/test_parser.py::test_str_3slice_upperlowerlower", "tests/test_parser.py::test_str_3slice_upperlowerupper", "tests/test_parser.py::test_str_3slice_upperupperlower", "tests/test_parser.py::test_str_3slice_other", "tests/test_parser.py::test_str_3slice_lower_other", "tests/test_parser.py::test_str_3slice_upper_other", "tests/test_parser.py::test_str_slice_true", "tests/test_parser.py::test_str_true_slice", "tests/test_parser.py::test_list_empty", "tests/test_parser.py::test_list_one", "tests/test_parser.py::test_list_one_comma", "tests/test_parser.py::test_list_two", "tests/test_parser.py::test_list_three", "tests/test_parser.py::test_list_three_comma", "tests/test_parser.py::test_list_one_nested", "tests/test_parser.py::test_list_list_four_nested", "tests/test_parser.py::test_list_tuple_three_nested", "tests/test_parser.py::test_list_set_tuple_three_nested", "tests/test_parser.py::test_list_tuple_one_nested", "tests/test_parser.py::test_tuple_tuple_one_nested", "tests/test_parser.py::test_dict_list_one_nested", "tests/test_parser.py::test_dict_list_one_nested_comma", 
"tests/test_parser.py::test_dict_tuple_one_nested", "tests/test_parser.py::test_dict_tuple_one_nested_comma", "tests/test_parser.py::test_dict_list_two_nested", "tests/test_parser.py::test_set_tuple_one_nested", "tests/test_parser.py::test_set_tuple_two_nested", "tests/test_parser.py::test_tuple_empty", "tests/test_parser.py::test_tuple_one_bare", "tests/test_parser.py::test_tuple_two_bare", "tests/test_parser.py::test_tuple_three_bare", "tests/test_parser.py::test_tuple_three_bare_comma", "tests/test_parser.py::test_tuple_one_comma", "tests/test_parser.py::test_tuple_two", "tests/test_parser.py::test_tuple_three", "tests/test_parser.py::test_tuple_three_comma", "tests/test_parser.py::test_bare_tuple_of_tuples", "tests/test_parser.py::test_set_one", "tests/test_parser.py::test_set_one_comma", "tests/test_parser.py::test_set_two", "tests/test_parser.py::test_set_two_comma", "tests/test_parser.py::test_set_three", "tests/test_parser.py::test_dict_empty", "tests/test_parser.py::test_dict_one", "tests/test_parser.py::test_dict_one_comma", "tests/test_parser.py::test_dict_two", "tests/test_parser.py::test_dict_two_comma", "tests/test_parser.py::test_dict_three", "tests/test_parser.py::test_dict_from_dict_one", "tests/test_parser.py::test_dict_from_dict_one_comma", "tests/test_parser.py::test_dict_from_dict_two_xy", "tests/test_parser.py::test_dict_from_dict_two_x_first", "tests/test_parser.py::test_dict_from_dict_two_x_second", "tests/test_parser.py::test_dict_from_dict_two_x_none", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-True-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-True-False]", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-False-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[True-False-False]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-True-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-True-False]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-False-True]", "tests/test_parser.py::test_dict_from_dict_three_xyz[False-False-False]", "tests/test_parser.py::test_unpack_range_tuple", "tests/test_parser.py::test_unpack_range_tuple_4", "tests/test_parser.py::test_unpack_range_tuple_parens", "tests/test_parser.py::test_unpack_range_tuple_parens_4", "tests/test_parser.py::test_unpack_range_list", "tests/test_parser.py::test_unpack_range_list_4", "tests/test_parser.py::test_unpack_range_set", "tests/test_parser.py::test_unpack_range_set_4", "tests/test_parser.py::test_true", "tests/test_parser.py::test_false", "tests/test_parser.py::test_none", "tests/test_parser.py::test_elipssis", "tests/test_parser.py::test_not_implemented_name", "tests/test_parser.py::test_genexpr", "tests/test_parser.py::test_genexpr_if", "tests/test_parser.py::test_genexpr_if_and", "tests/test_parser.py::test_dbl_genexpr", "tests/test_parser.py::test_genexpr_if_genexpr", "tests/test_parser.py::test_genexpr_if_genexpr_if", "tests/test_parser.py::test_listcomp", "tests/test_parser.py::test_listcomp_if", "tests/test_parser.py::test_listcomp_if_and", "tests/test_parser.py::test_listcomp_multi_if", "tests/test_parser.py::test_dbl_listcomp", "tests/test_parser.py::test_listcomp_if_listcomp", "tests/test_parser.py::test_listcomp_if_listcomp_if", "tests/test_parser.py::test_setcomp", "tests/test_parser.py::test_setcomp_if", "tests/test_parser.py::test_setcomp_if_and", "tests/test_parser.py::test_dbl_setcomp", "tests/test_parser.py::test_setcomp_if_setcomp", "tests/test_parser.py::test_setcomp_if_setcomp_if", 
"tests/test_parser.py::test_dictcomp", "tests/test_parser.py::test_dictcomp_unpack_parens", "tests/test_parser.py::test_dictcomp_unpack_no_parens", "tests/test_parser.py::test_dictcomp_if", "tests/test_parser.py::test_dictcomp_if_and", "tests/test_parser.py::test_dbl_dictcomp", "tests/test_parser.py::test_dictcomp_if_dictcomp", "tests/test_parser.py::test_dictcomp_if_dictcomp_if", "tests/test_parser.py::test_lambda", "tests/test_parser.py::test_lambda_x", "tests/test_parser.py::test_lambda_kwx", "tests/test_parser.py::test_lambda_x_y", "tests/test_parser.py::test_lambda_x_y_z", "tests/test_parser.py::test_lambda_x_kwy", "tests/test_parser.py::test_lambda_kwx_kwy", "tests/test_parser.py::test_lambda_kwx_kwy_kwz", "tests/test_parser.py::test_lambda_x_comma", "tests/test_parser.py::test_lambda_x_y_comma", "tests/test_parser.py::test_lambda_x_y_z_comma", "tests/test_parser.py::test_lambda_x_kwy_comma", "tests/test_parser.py::test_lambda_kwx_kwy_comma", "tests/test_parser.py::test_lambda_kwx_kwy_kwz_comma", "tests/test_parser.py::test_lambda_args", "tests/test_parser.py::test_lambda_args_x", "tests/test_parser.py::test_lambda_args_x_y", "tests/test_parser.py::test_lambda_args_x_kwy", "tests/test_parser.py::test_lambda_args_kwx_y", "tests/test_parser.py::test_lambda_args_kwx_kwy", "tests/test_parser.py::test_lambda_x_args", "tests/test_parser.py::test_lambda_x_args_y", "tests/test_parser.py::test_lambda_x_args_y_z", "tests/test_parser.py::test_lambda_kwargs", "tests/test_parser.py::test_lambda_x_kwargs", "tests/test_parser.py::test_lambda_x_y_kwargs", "tests/test_parser.py::test_lambda_x_kwy_kwargs", "tests/test_parser.py::test_lambda_args_kwargs", "tests/test_parser.py::test_lambda_x_args_kwargs", "tests/test_parser.py::test_lambda_x_y_args_kwargs", "tests/test_parser.py::test_lambda_kwx_args_kwargs", "tests/test_parser.py::test_lambda_x_kwy_args_kwargs", "tests/test_parser.py::test_lambda_x_args_y_kwargs", "tests/test_parser.py::test_lambda_x_args_kwy_kwargs", "tests/test_parser.py::test_lambda_args_y_kwargs", "tests/test_parser.py::test_lambda_star_x", "tests/test_parser.py::test_lambda_star_x_y", "tests/test_parser.py::test_lambda_star_x_kwargs", "tests/test_parser.py::test_lambda_star_kwx_kwargs", "tests/test_parser.py::test_lambda_x_star_y", "tests/test_parser.py::test_lambda_x_y_star_z", "tests/test_parser.py::test_lambda_x_kwy_star_y", "tests/test_parser.py::test_lambda_x_kwy_star_kwy", "tests/test_parser.py::test_lambda_x_star_y_kwargs", "tests/test_parser.py::test_lambda_x_divide_y_star_z_kwargs", "tests/test_parser.py::test_call_range", "tests/test_parser.py::test_call_range_comma", "tests/test_parser.py::test_call_range_x_y", "tests/test_parser.py::test_call_range_x_y_comma", "tests/test_parser.py::test_call_range_x_y_z", "tests/test_parser.py::test_call_dict_kwx", "tests/test_parser.py::test_call_dict_kwx_comma", "tests/test_parser.py::test_call_dict_kwx_kwy", "tests/test_parser.py::test_call_tuple_gen", "tests/test_parser.py::test_call_tuple_genifs", "tests/test_parser.py::test_call_range_star", "tests/test_parser.py::test_call_range_x_star", "tests/test_parser.py::test_call_int", "tests/test_parser.py::test_call_int_base_dict", "tests/test_parser.py::test_call_dict_kwargs", "tests/test_parser.py::test_call_list_many_star_args", "tests/test_parser.py::test_call_list_many_starstar_args", "tests/test_parser.py::test_call_list_many_star_and_starstar_args", "tests/test_parser.py::test_call_alot", "tests/test_parser.py::test_call_alot_next", 
"tests/test_parser.py::test_call_alot_next_next", "tests/test_parser.py::test_getattr", "tests/test_parser.py::test_getattr_getattr", "tests/test_parser.py::test_dict_tuple_key", "tests/test_parser.py::test_dict_tuple_key_get", "tests/test_parser.py::test_dict_tuple_key_get_3", "tests/test_parser.py::test_pipe_op", "tests/test_parser.py::test_pipe_op_two", "tests/test_parser.py::test_pipe_op_three", "tests/test_parser.py::test_xor_op", "tests/test_parser.py::test_xor_op_two", "tests/test_parser.py::test_xor_op_three", "tests/test_parser.py::test_xor_pipe", "tests/test_parser.py::test_amp_op", "tests/test_parser.py::test_amp_op_two", "tests/test_parser.py::test_amp_op_three", "tests/test_parser.py::test_lshift_op", "tests/test_parser.py::test_lshift_op_two", "tests/test_parser.py::test_lshift_op_three", "tests/test_parser.py::test_rshift_op", "tests/test_parser.py::test_rshift_op_two", "tests/test_parser.py::test_rshift_op_three", "tests/test_parser.py::test_named_expr", "tests/test_parser.py::test_named_expr_list", "tests/test_parser.py::test_equals", "tests/test_parser.py::test_equals_semi", "tests/test_parser.py::test_x_y_equals_semi", "tests/test_parser.py::test_equals_two", "tests/test_parser.py::test_equals_two_semi", "tests/test_parser.py::test_equals_three", "tests/test_parser.py::test_equals_three_semi", "tests/test_parser.py::test_plus_eq", "tests/test_parser.py::test_sub_eq", "tests/test_parser.py::test_times_eq", "tests/test_parser.py::test_matmult_eq", "tests/test_parser.py::test_div_eq", "tests/test_parser.py::test_floordiv_eq", "tests/test_parser.py::test_pow_eq", "tests/test_parser.py::test_mod_eq", "tests/test_parser.py::test_xor_eq", "tests/test_parser.py::test_ampersand_eq", "tests/test_parser.py::test_bitor_eq", "tests/test_parser.py::test_lshift_eq", "tests/test_parser.py::test_rshift_eq", "tests/test_parser.py::test_bare_unpack", "tests/test_parser.py::test_lhand_group_unpack", "tests/test_parser.py::test_rhand_group_unpack", "tests/test_parser.py::test_grouped_unpack", "tests/test_parser.py::test_double_grouped_unpack", "tests/test_parser.py::test_double_ungrouped_unpack", "tests/test_parser.py::test_stary_eq", "tests/test_parser.py::test_stary_x", "tests/test_parser.py::test_tuple_x_stary", "tests/test_parser.py::test_list_x_stary", "tests/test_parser.py::test_bare_x_stary", "tests/test_parser.py::test_bare_x_stary_z", "tests/test_parser.py::test_equals_list", "tests/test_parser.py::test_equals_dict", "tests/test_parser.py::test_equals_attr", "tests/test_parser.py::test_equals_annotation", "tests/test_parser.py::test_equals_annotation_empty", "tests/test_parser.py::test_dict_keys", "tests/test_parser.py::test_assert_msg", "tests/test_parser.py::test_assert", "tests/test_parser.py::test_pass", "tests/test_parser.py::test_del", "tests/test_parser.py::test_del_comma", "tests/test_parser.py::test_del_two", "tests/test_parser.py::test_del_two_comma", "tests/test_parser.py::test_del_with_parens", "tests/test_parser.py::test_raise", "tests/test_parser.py::test_raise_x", "tests/test_parser.py::test_raise_x_from", "tests/test_parser.py::test_import_x", "tests/test_parser.py::test_import_xy", "tests/test_parser.py::test_import_xyz", "tests/test_parser.py::test_from_x_import_y", "tests/test_parser.py::test_from_dot_import_y", "tests/test_parser.py::test_from_dotx_import_y", "tests/test_parser.py::test_from_dotdotx_import_y", "tests/test_parser.py::test_from_dotdotdotx_import_y", "tests/test_parser.py::test_from_dotdotdotdotx_import_y", 
"tests/test_parser.py::test_from_import_x_y", "tests/test_parser.py::test_from_import_x_y_z", "tests/test_parser.py::test_from_dot_import_x_y", "tests/test_parser.py::test_from_dot_import_x_y_z", "tests/test_parser.py::test_from_dot_import_group_x_y", "tests/test_parser.py::test_import_x_as_y", "tests/test_parser.py::test_import_xy_as_z", "tests/test_parser.py::test_import_x_y_as_z", "tests/test_parser.py::test_import_x_as_y_z", "tests/test_parser.py::test_import_x_as_y_z_as_a", "tests/test_parser.py::test_from_dot_import_x_as_y", "tests/test_parser.py::test_from_x_import_star", "tests/test_parser.py::test_from_x_import_group_x_y_z", "tests/test_parser.py::test_from_x_import_group_x_y_z_comma", "tests/test_parser.py::test_from_x_import_y_as_z", "tests/test_parser.py::test_from_x_import_y_as_z_a_as_b", "tests/test_parser.py::test_from_dotx_import_y_as_z_a_as_b_c_as_d", "tests/test_parser.py::test_continue", "tests/test_parser.py::test_break", "tests/test_parser.py::test_global", "tests/test_parser.py::test_global_xy", "tests/test_parser.py::test_nonlocal_x", "tests/test_parser.py::test_nonlocal_xy", "tests/test_parser.py::test_yield", "tests/test_parser.py::test_yield_x", "tests/test_parser.py::test_yield_x_comma", "tests/test_parser.py::test_yield_x_y", "tests/test_parser.py::test_yield_x_starexpr", "tests/test_parser.py::test_yield_from_x", "tests/test_parser.py::test_return", "tests/test_parser.py::test_return_x", "tests/test_parser.py::test_return_x_comma", "tests/test_parser.py::test_return_x_y", "tests/test_parser.py::test_return_x_starexpr", "tests/test_parser.py::test_if_true", "tests/test_parser.py::test_if_true_twolines", "tests/test_parser.py::test_if_true_twolines_deindent", "tests/test_parser.py::test_if_true_else", "tests/test_parser.py::test_if_true_x", "tests/test_parser.py::test_if_switch", "tests/test_parser.py::test_if_switch_elif1_else", "tests/test_parser.py::test_if_switch_elif2_else", "tests/test_parser.py::test_if_nested", "tests/test_parser.py::test_while", "tests/test_parser.py::test_while_else", "tests/test_parser.py::test_for", "tests/test_parser.py::test_for_zip", "tests/test_parser.py::test_for_idx", "tests/test_parser.py::test_for_zip_idx", "tests/test_parser.py::test_for_attr", "tests/test_parser.py::test_for_zip_attr", "tests/test_parser.py::test_for_else", "tests/test_parser.py::test_async_for", "tests/test_parser.py::test_with", "tests/test_parser.py::test_with_as", "tests/test_parser.py::test_with_xy", "tests/test_parser.py::test_with_x_as_y_z", "tests/test_parser.py::test_with_x_as_y_a_as_b", "tests/test_parser.py::test_with_in_func", "tests/test_parser.py::test_async_with", "tests/test_parser.py::test_try", "tests/test_parser.py::test_try_except_t", "tests/test_parser.py::test_try_except_t_as_e", "tests/test_parser.py::test_try_except_t_u", "tests/test_parser.py::test_try_except_t_u_as_e", "tests/test_parser.py::test_try_except_t_except_u", "tests/test_parser.py::test_try_except_else", "tests/test_parser.py::test_try_except_finally", "tests/test_parser.py::test_try_except_else_finally", "tests/test_parser.py::test_try_finally", "tests/test_parser.py::test_func", "tests/test_parser.py::test_func_ret", "tests/test_parser.py::test_func_ret_42", "tests/test_parser.py::test_func_ret_42_65", "tests/test_parser.py::test_func_rarrow", "tests/test_parser.py::test_func_x", "tests/test_parser.py::test_func_kwx", "tests/test_parser.py::test_func_x_y", "tests/test_parser.py::test_func_x_y_z", "tests/test_parser.py::test_func_x_kwy", 
"tests/test_parser.py::test_func_kwx_kwy", "tests/test_parser.py::test_func_kwx_kwy_kwz", "tests/test_parser.py::test_func_x_comma", "tests/test_parser.py::test_func_x_y_comma", "tests/test_parser.py::test_func_x_y_z_comma", "tests/test_parser.py::test_func_x_kwy_comma", "tests/test_parser.py::test_func_kwx_kwy_comma", "tests/test_parser.py::test_func_kwx_kwy_kwz_comma", "tests/test_parser.py::test_func_args", "tests/test_parser.py::test_func_args_x", "tests/test_parser.py::test_func_args_x_y", "tests/test_parser.py::test_func_args_x_kwy", "tests/test_parser.py::test_func_args_kwx_y", "tests/test_parser.py::test_func_args_kwx_kwy", "tests/test_parser.py::test_func_x_args", "tests/test_parser.py::test_func_x_args_y", "tests/test_parser.py::test_func_x_args_y_z", "tests/test_parser.py::test_func_kwargs", "tests/test_parser.py::test_func_x_kwargs", "tests/test_parser.py::test_func_x_y_kwargs", "tests/test_parser.py::test_func_x_kwy_kwargs", "tests/test_parser.py::test_func_args_kwargs", "tests/test_parser.py::test_func_x_args_kwargs", "tests/test_parser.py::test_func_x_y_args_kwargs", "tests/test_parser.py::test_func_kwx_args_kwargs", "tests/test_parser.py::test_func_x_kwy_args_kwargs", "tests/test_parser.py::test_func_x_args_y_kwargs", "tests/test_parser.py::test_func_x_args_kwy_kwargs", "tests/test_parser.py::test_func_args_y_kwargs", "tests/test_parser.py::test_func_star_x", "tests/test_parser.py::test_func_star_x_y", "tests/test_parser.py::test_func_star_x_kwargs", "tests/test_parser.py::test_func_star_kwx_kwargs", "tests/test_parser.py::test_func_x_star_y", "tests/test_parser.py::test_func_x_y_star_z", "tests/test_parser.py::test_func_x_kwy_star_y", "tests/test_parser.py::test_func_x_kwy_star_kwy", "tests/test_parser.py::test_func_x_star_y_kwargs", "tests/test_parser.py::test_func_x_divide", "tests/test_parser.py::test_func_x_divide_y_star_z_kwargs", "tests/test_parser.py::test_func_tx", "tests/test_parser.py::test_func_txy", "tests/test_parser.py::test_class", "tests/test_parser.py::test_class_obj", "tests/test_parser.py::test_class_int_flt", "tests/test_parser.py::test_class_obj_kw", "tests/test_parser.py::test_decorator", "tests/test_parser.py::test_decorator_2", "tests/test_parser.py::test_decorator_call", "tests/test_parser.py::test_decorator_call_args", "tests/test_parser.py::test_decorator_dot_call_args", "tests/test_parser.py::test_decorator_dot_dot_call_args", "tests/test_parser.py::test_broken_prompt_func", "tests/test_parser.py::test_class_with_methods", "tests/test_parser.py::test_nested_functions", "tests/test_parser.py::test_function_blank_line", "tests/test_parser.py::test_async_func", "tests/test_parser.py::test_async_decorator", "tests/test_parser.py::test_async_await", "tests/test_parser.py::test_named_expr_args", "tests/test_parser.py::test_named_expr_if", "tests/test_parser.py::test_named_expr_elif", "tests/test_parser.py::test_named_expr_while", "tests/test_parser.py::test_path_literal", "tests/test_parser.py::test_path_fstring_literal", "tests/test_parser.py::test_path_literal_concat[p-p]", "tests/test_parser.py::test_path_literal_concat[p-pf]", "tests/test_parser.py::test_path_literal_concat[p-pr]", "tests/test_parser.py::test_path_literal_concat[pf-p]", "tests/test_parser.py::test_path_literal_concat[pf-pf]", "tests/test_parser.py::test_path_literal_concat[pf-pr]", "tests/test_parser.py::test_path_literal_concat[pr-p]", "tests/test_parser.py::test_path_literal_concat[pr-pf]", "tests/test_parser.py::test_path_literal_concat[pr-pr]", 
"tests/test_parser.py::test_dollar_name", "tests/test_parser.py::test_dollar_py", "tests/test_parser.py::test_dollar_py_test", "tests/test_parser.py::test_dollar_py_recursive_name", "tests/test_parser.py::test_dollar_py_test_recursive_name", "tests/test_parser.py::test_dollar_py_test_recursive_test", "tests/test_parser.py::test_dollar_name_set", "tests/test_parser.py::test_dollar_py_set", "tests/test_parser.py::test_dollar_sub", "tests/test_parser.py::test_dollar_sub_space[$(ls", "tests/test_parser.py::test_dollar_sub_space[$(", "tests/test_parser.py::test_ls_dot", "tests/test_parser.py::test_lambda_in_atparens", "tests/test_parser.py::test_generator_in_atparens", "tests/test_parser.py::test_bare_tuple_in_atparens", "tests/test_parser.py::test_nested_madness", "tests/test_parser.py::test_atparens_intoken", "tests/test_parser.py::test_ls_dot_nesting", "tests/test_parser.py::test_ls_dot_nesting_var", "tests/test_parser.py::test_ls_dot_str", "tests/test_parser.py::test_ls_nest_ls", "tests/test_parser.py::test_ls_nest_ls_dashl", "tests/test_parser.py::test_ls_envvar_strval", "tests/test_parser.py::test_ls_envvar_listval", "tests/test_parser.py::test_bang_sub", "tests/test_parser.py::test_bang_sub_space[!(ls", "tests/test_parser.py::test_bang_sub_space[!(", "tests/test_parser.py::test_bang_ls_dot", "tests/test_parser.py::test_bang_ls_dot_nesting", "tests/test_parser.py::test_bang_ls_dot_nesting_var", "tests/test_parser.py::test_bang_ls_dot_str", "tests/test_parser.py::test_bang_ls_nest_ls", "tests/test_parser.py::test_bang_ls_nest_ls_dashl", "tests/test_parser.py::test_bang_ls_envvar_strval", "tests/test_parser.py::test_bang_ls_envvar_listval", "tests/test_parser.py::test_bang_envvar_args", "tests/test_parser.py::test_question", "tests/test_parser.py::test_dobquestion", "tests/test_parser.py::test_question_chain", "tests/test_parser.py::test_ls_regex", "tests/test_parser.py::test_backtick[--]", "tests/test_parser.py::test_backtick[--p]", "tests/test_parser.py::test_backtick[-f-]", "tests/test_parser.py::test_backtick[-f-p]", "tests/test_parser.py::test_backtick[r--]", "tests/test_parser.py::test_backtick[r--p]", "tests/test_parser.py::test_backtick[r-f-]", "tests/test_parser.py::test_backtick[r-f-p]", "tests/test_parser.py::test_backtick[g--]", "tests/test_parser.py::test_backtick[g--p]", "tests/test_parser.py::test_backtick[g-f-]", "tests/test_parser.py::test_backtick[g-f-p]", "tests/test_parser.py::test_ls_regex_octothorpe", "tests/test_parser.py::test_ls_explicitregex", "tests/test_parser.py::test_ls_explicitregex_octothorpe", "tests/test_parser.py::test_ls_glob", "tests/test_parser.py::test_ls_glob_octothorpe", "tests/test_parser.py::test_ls_customsearch", "tests/test_parser.py::test_custombacktick", "tests/test_parser.py::test_ls_customsearch_octothorpe", "tests/test_parser.py::test_injection", "tests/test_parser.py::test_rhs_nested_injection", "tests/test_parser.py::test_merged_injection", "tests/test_parser.py::test_backtick_octothorpe", "tests/test_parser.py::test_uncaptured_sub", "tests/test_parser.py::test_hiddenobj_sub", "tests/test_parser.py::test_slash_envarv_echo", "tests/test_parser.py::test_echo_double_eq", "tests/test_parser.py::test_bang_two_cmds_one_pipe", "tests/test_parser.py::test_bang_three_cmds_two_pipes", "tests/test_parser.py::test_bang_one_cmd_write", "tests/test_parser.py::test_bang_one_cmd_append", "tests/test_parser.py::test_bang_two_cmds_write", "tests/test_parser.py::test_bang_two_cmds_append", "tests/test_parser.py::test_bang_cmd_background", 
"tests/test_parser.py::test_bang_cmd_background_nospace", "tests/test_parser.py::test_bang_git_quotes_no_space", "tests/test_parser.py::test_bang_git_quotes_space", "tests/test_parser.py::test_bang_git_two_quotes_space", "tests/test_parser.py::test_bang_git_two_quotes_space_space", "tests/test_parser.py::test_bang_ls_quotes_3_space", "tests/test_parser.py::test_two_cmds_one_pipe", "tests/test_parser.py::test_three_cmds_two_pipes", "tests/test_parser.py::test_two_cmds_one_and_brackets", "tests/test_parser.py::test_three_cmds_two_ands", "tests/test_parser.py::test_two_cmds_one_doubleamps", "tests/test_parser.py::test_three_cmds_two_doubleamps", "tests/test_parser.py::test_two_cmds_one_or", "tests/test_parser.py::test_three_cmds_two_ors", "tests/test_parser.py::test_two_cmds_one_doublepipe", "tests/test_parser.py::test_three_cmds_two_doublepipe", "tests/test_parser.py::test_one_cmd_write", "tests/test_parser.py::test_one_cmd_append", "tests/test_parser.py::test_two_cmds_write", "tests/test_parser.py::test_two_cmds_append", "tests/test_parser.py::test_cmd_background", "tests/test_parser.py::test_cmd_background_nospace", "tests/test_parser.py::test_git_quotes_no_space", "tests/test_parser.py::test_git_quotes_space", "tests/test_parser.py::test_git_two_quotes_space", "tests/test_parser.py::test_git_two_quotes_space_space", "tests/test_parser.py::test_ls_quotes_3_space", "tests/test_parser.py::test_leading_envvar_assignment", "tests/test_parser.py::test_echo_comma", "tests/test_parser.py::test_echo_internal_comma", "tests/test_parser.py::test_comment_only", "tests/test_parser.py::test_echo_slash_question", "tests/test_parser.py::test_bad_quotes", "tests/test_parser.py::test_redirect", "tests/test_parser.py::test_use_subshell[![(cat)]]", "tests/test_parser.py::test_use_subshell[![(cat;)]]", "tests/test_parser.py::test_use_subshell[![(cd", "tests/test_parser.py::test_use_subshell[![(echo", "tests/test_parser.py::test_use_subshell[![(if", "tests/test_parser.py::test_redirect_abspath[$[cat", "tests/test_parser.py::test_redirect_abspath[$[(cat)", "tests/test_parser.py::test_redirect_abspath[$[<", "tests/test_parser.py::test_redirect_abspath[![<", "tests/test_parser.py::test_redirect_output[]", "tests/test_parser.py::test_redirect_output[o]", "tests/test_parser.py::test_redirect_output[out]", "tests/test_parser.py::test_redirect_output[1]", "tests/test_parser.py::test_redirect_error[e]", "tests/test_parser.py::test_redirect_error[err]", "tests/test_parser.py::test_redirect_error[2]", "tests/test_parser.py::test_redirect_all[a]", "tests/test_parser.py::test_redirect_all[all]", "tests/test_parser.py::test_redirect_all[&]", "tests/test_parser.py::test_redirect_error_to_output[-e>o]", "tests/test_parser.py::test_redirect_error_to_output[-e>out]", "tests/test_parser.py::test_redirect_error_to_output[-err>o]", "tests/test_parser.py::test_redirect_error_to_output[-2>1]", "tests/test_parser.py::test_redirect_error_to_output[-e>1]", "tests/test_parser.py::test_redirect_error_to_output[-err>1]", "tests/test_parser.py::test_redirect_error_to_output[-2>out]", "tests/test_parser.py::test_redirect_error_to_output[-2>o]", "tests/test_parser.py::test_redirect_error_to_output[-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>o]", "tests/test_parser.py::test_redirect_error_to_output[o-e>out]", "tests/test_parser.py::test_redirect_error_to_output[o-err>o]", 
"tests/test_parser.py::test_redirect_error_to_output[o-2>1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>1]", "tests/test_parser.py::test_redirect_error_to_output[o-err>1]", "tests/test_parser.py::test_redirect_error_to_output[o-2>out]", "tests/test_parser.py::test_redirect_error_to_output[o-2>o]", "tests/test_parser.py::test_redirect_error_to_output[o-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[o-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>o]", "tests/test_parser.py::test_redirect_error_to_output[out-e>out]", "tests/test_parser.py::test_redirect_error_to_output[out-err>o]", "tests/test_parser.py::test_redirect_error_to_output[out-2>1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>1]", "tests/test_parser.py::test_redirect_error_to_output[out-err>1]", "tests/test_parser.py::test_redirect_error_to_output[out-2>out]", "tests/test_parser.py::test_redirect_error_to_output[out-2>o]", "tests/test_parser.py::test_redirect_error_to_output[out-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[out-2>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>o]", "tests/test_parser.py::test_redirect_error_to_output[1-e>out]", "tests/test_parser.py::test_redirect_error_to_output[1-err>o]", "tests/test_parser.py::test_redirect_error_to_output[1-2>1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>1]", "tests/test_parser.py::test_redirect_error_to_output[1-err>1]", "tests/test_parser.py::test_redirect_error_to_output[1-2>out]", "tests/test_parser.py::test_redirect_error_to_output[1-2>o]", "tests/test_parser.py::test_redirect_error_to_output[1-err>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-e>&1]", "tests/test_parser.py::test_redirect_error_to_output[1-2>&1]", "tests/test_parser.py::test_redirect_output_to_error[e-o>e]", "tests/test_parser.py::test_redirect_output_to_error[e-o>err]", "tests/test_parser.py::test_redirect_output_to_error[e-out>e]", "tests/test_parser.py::test_redirect_output_to_error[e-1>2]", "tests/test_parser.py::test_redirect_output_to_error[e-o>2]", "tests/test_parser.py::test_redirect_output_to_error[e-out>2]", "tests/test_parser.py::test_redirect_output_to_error[e-1>err]", "tests/test_parser.py::test_redirect_output_to_error[e-1>e]", "tests/test_parser.py::test_redirect_output_to_error[e-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[e-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[e-1>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>e]", "tests/test_parser.py::test_redirect_output_to_error[err-o>err]", "tests/test_parser.py::test_redirect_output_to_error[err-out>e]", "tests/test_parser.py::test_redirect_output_to_error[err-1>2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>2]", "tests/test_parser.py::test_redirect_output_to_error[err-out>2]", "tests/test_parser.py::test_redirect_output_to_error[err-1>err]", "tests/test_parser.py::test_redirect_output_to_error[err-1>e]", "tests/test_parser.py::test_redirect_output_to_error[err-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[err-1>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>e]", "tests/test_parser.py::test_redirect_output_to_error[2-o>err]", "tests/test_parser.py::test_redirect_output_to_error[2-out>e]", 
"tests/test_parser.py::test_redirect_output_to_error[2-1>2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>2]", "tests/test_parser.py::test_redirect_output_to_error[2-out>2]", "tests/test_parser.py::test_redirect_output_to_error[2-1>err]", "tests/test_parser.py::test_redirect_output_to_error[2-1>e]", "tests/test_parser.py::test_redirect_output_to_error[2-out>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-o>&2]", "tests/test_parser.py::test_redirect_output_to_error[2-1>&2]", "tests/test_parser.py::test_macro_call_empty", "tests/test_parser.py::test_macro_call_one_arg[x]", "tests/test_parser.py::test_macro_call_one_arg[True]", "tests/test_parser.py::test_macro_call_one_arg[None]", "tests/test_parser.py::test_macro_call_one_arg[import", "tests/test_parser.py::test_macro_call_one_arg[x=10]", "tests/test_parser.py::test_macro_call_one_arg[\"oh", "tests/test_parser.py::test_macro_call_one_arg[...]", "tests/test_parser.py::test_macro_call_one_arg[", "tests/test_parser.py::test_macro_call_one_arg[if", "tests/test_parser.py::test_macro_call_one_arg[{x:", "tests/test_parser.py::test_macro_call_one_arg[{1,", "tests/test_parser.py::test_macro_call_one_arg[(x,y)]", "tests/test_parser.py::test_macro_call_one_arg[(x,", "tests/test_parser.py::test_macro_call_one_arg[((x,", "tests/test_parser.py::test_macro_call_one_arg[g()]", "tests/test_parser.py::test_macro_call_one_arg[range(10)]", "tests/test_parser.py::test_macro_call_one_arg[range(1,", "tests/test_parser.py::test_macro_call_one_arg[()]", "tests/test_parser.py::test_macro_call_one_arg[{}]", "tests/test_parser.py::test_macro_call_one_arg[[]]", "tests/test_parser.py::test_macro_call_one_arg[[1,", "tests/test_parser.py::test_macro_call_one_arg[@(x)]", "tests/test_parser.py::test_macro_call_one_arg[!(ls", "tests/test_parser.py::test_macro_call_one_arg[![ls", "tests/test_parser.py::test_macro_call_one_arg[$(ls", "tests/test_parser.py::test_macro_call_one_arg[${x", "tests/test_parser.py::test_macro_call_one_arg[$[ls", "tests/test_parser.py::test_macro_call_one_arg[@$(which", "tests/test_parser.py::test_macro_call_two_args[x-True]", "tests/test_parser.py::test_macro_call_two_args[x-import", "tests/test_parser.py::test_macro_call_two_args[x-\"oh", "tests/test_parser.py::test_macro_call_two_args[x-", "tests/test_parser.py::test_macro_call_two_args[x-{x:", "tests/test_parser.py::test_macro_call_two_args[x-{1,", "tests/test_parser.py::test_macro_call_two_args[x-(x,", "tests/test_parser.py::test_macro_call_two_args[x-g()]", "tests/test_parser.py::test_macro_call_two_args[x-range(1,", "tests/test_parser.py::test_macro_call_two_args[x-{}]", "tests/test_parser.py::test_macro_call_two_args[x-[1,", "tests/test_parser.py::test_macro_call_two_args[x-!(ls", "tests/test_parser.py::test_macro_call_two_args[x-$(ls", "tests/test_parser.py::test_macro_call_two_args[x-$[ls", "tests/test_parser.py::test_macro_call_two_args[None-True]", "tests/test_parser.py::test_macro_call_two_args[None-import", "tests/test_parser.py::test_macro_call_two_args[None-\"oh", "tests/test_parser.py::test_macro_call_two_args[None-", "tests/test_parser.py::test_macro_call_two_args[None-{x:", "tests/test_parser.py::test_macro_call_two_args[None-{1,", "tests/test_parser.py::test_macro_call_two_args[None-(x,", "tests/test_parser.py::test_macro_call_two_args[None-g()]", "tests/test_parser.py::test_macro_call_two_args[None-range(1,", "tests/test_parser.py::test_macro_call_two_args[None-{}]", "tests/test_parser.py::test_macro_call_two_args[None-[1,", 
"tests/test_parser.py::test_macro_call_two_args[None-!(ls", "tests/test_parser.py::test_macro_call_two_args[None-$(ls", "tests/test_parser.py::test_macro_call_two_args[None-$[ls", "tests/test_parser.py::test_macro_call_two_args[x=10-True]", "tests/test_parser.py::test_macro_call_two_args[x=10-import", "tests/test_parser.py::test_macro_call_two_args[x=10-\"oh", "tests/test_parser.py::test_macro_call_two_args[x=10-", "tests/test_parser.py::test_macro_call_two_args[x=10-{x:", "tests/test_parser.py::test_macro_call_two_args[x=10-{1,", "tests/test_parser.py::test_macro_call_two_args[x=10-(x,", "tests/test_parser.py::test_macro_call_two_args[x=10-g()]", "tests/test_parser.py::test_macro_call_two_args[x=10-range(1,", "tests/test_parser.py::test_macro_call_two_args[x=10-{}]", "tests/test_parser.py::test_macro_call_two_args[x=10-[1,", "tests/test_parser.py::test_macro_call_two_args[x=10-!(ls", "tests/test_parser.py::test_macro_call_two_args[x=10-$(ls", "tests/test_parser.py::test_macro_call_two_args[x=10-$[ls", "tests/test_parser.py::test_macro_call_two_args[...-True]", "tests/test_parser.py::test_macro_call_two_args[...-import", "tests/test_parser.py::test_macro_call_two_args[...-\"oh", "tests/test_parser.py::test_macro_call_two_args[...-", "tests/test_parser.py::test_macro_call_two_args[...-{x:", "tests/test_parser.py::test_macro_call_two_args[...-{1,", "tests/test_parser.py::test_macro_call_two_args[...-(x,", "tests/test_parser.py::test_macro_call_two_args[...-g()]", "tests/test_parser.py::test_macro_call_two_args[...-range(1,", "tests/test_parser.py::test_macro_call_two_args[...-{}]", "tests/test_parser.py::test_macro_call_two_args[...-[1,", "tests/test_parser.py::test_macro_call_two_args[...-!(ls", "tests/test_parser.py::test_macro_call_two_args[...-$(ls", "tests/test_parser.py::test_macro_call_two_args[...-$[ls", "tests/test_parser.py::test_macro_call_two_args[if", "tests/test_parser.py::test_macro_call_two_args[{x:", "tests/test_parser.py::test_macro_call_two_args[(x,y)-True]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-import", "tests/test_parser.py::test_macro_call_two_args[(x,y)-\"oh", "tests/test_parser.py::test_macro_call_two_args[(x,y)-", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{x:", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-(x,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-g()]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-range(1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-{}]", "tests/test_parser.py::test_macro_call_two_args[(x,y)-[1,", "tests/test_parser.py::test_macro_call_two_args[(x,y)-!(ls", "tests/test_parser.py::test_macro_call_two_args[(x,y)-$(ls", "tests/test_parser.py::test_macro_call_two_args[(x,y)-$[ls", "tests/test_parser.py::test_macro_call_two_args[((x,", "tests/test_parser.py::test_macro_call_two_args[range(10)-True]", "tests/test_parser.py::test_macro_call_two_args[range(10)-import", "tests/test_parser.py::test_macro_call_two_args[range(10)-\"oh", "tests/test_parser.py::test_macro_call_two_args[range(10)-", "tests/test_parser.py::test_macro_call_two_args[range(10)-{x:", "tests/test_parser.py::test_macro_call_two_args[range(10)-{1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-(x,", "tests/test_parser.py::test_macro_call_two_args[range(10)-g()]", "tests/test_parser.py::test_macro_call_two_args[range(10)-range(1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-{}]", 
"tests/test_parser.py::test_macro_call_two_args[range(10)-[1,", "tests/test_parser.py::test_macro_call_two_args[range(10)-!(ls", "tests/test_parser.py::test_macro_call_two_args[range(10)-$(ls", "tests/test_parser.py::test_macro_call_two_args[range(10)-$[ls", "tests/test_parser.py::test_macro_call_two_args[()-True]", "tests/test_parser.py::test_macro_call_two_args[()-import", "tests/test_parser.py::test_macro_call_two_args[()-\"oh", "tests/test_parser.py::test_macro_call_two_args[()-", "tests/test_parser.py::test_macro_call_two_args[()-{x:", "tests/test_parser.py::test_macro_call_two_args[()-{1,", "tests/test_parser.py::test_macro_call_two_args[()-(x,", "tests/test_parser.py::test_macro_call_two_args[()-g()]", "tests/test_parser.py::test_macro_call_two_args[()-range(1,", "tests/test_parser.py::test_macro_call_two_args[()-{}]", "tests/test_parser.py::test_macro_call_two_args[()-[1,", "tests/test_parser.py::test_macro_call_two_args[()-!(ls", "tests/test_parser.py::test_macro_call_two_args[()-$(ls", "tests/test_parser.py::test_macro_call_two_args[()-$[ls", "tests/test_parser.py::test_macro_call_two_args[[]-True]", "tests/test_parser.py::test_macro_call_two_args[[]-import", "tests/test_parser.py::test_macro_call_two_args[[]-\"oh", "tests/test_parser.py::test_macro_call_two_args[[]-", "tests/test_parser.py::test_macro_call_two_args[[]-{x:", "tests/test_parser.py::test_macro_call_two_args[[]-{1,", "tests/test_parser.py::test_macro_call_two_args[[]-(x,", "tests/test_parser.py::test_macro_call_two_args[[]-g()]", "tests/test_parser.py::test_macro_call_two_args[[]-range(1,", "tests/test_parser.py::test_macro_call_two_args[[]-{}]", "tests/test_parser.py::test_macro_call_two_args[[]-[1,", "tests/test_parser.py::test_macro_call_two_args[[]-!(ls", "tests/test_parser.py::test_macro_call_two_args[[]-$(ls", "tests/test_parser.py::test_macro_call_two_args[[]-$[ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-True]", "tests/test_parser.py::test_macro_call_two_args[@(x)-import", "tests/test_parser.py::test_macro_call_two_args[@(x)-\"oh", "tests/test_parser.py::test_macro_call_two_args[@(x)-", "tests/test_parser.py::test_macro_call_two_args[@(x)-{x:", "tests/test_parser.py::test_macro_call_two_args[@(x)-{1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-(x,", "tests/test_parser.py::test_macro_call_two_args[@(x)-g()]", "tests/test_parser.py::test_macro_call_two_args[@(x)-range(1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-{}]", "tests/test_parser.py::test_macro_call_two_args[@(x)-[1,", "tests/test_parser.py::test_macro_call_two_args[@(x)-!(ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-$(ls", "tests/test_parser.py::test_macro_call_two_args[@(x)-$[ls", "tests/test_parser.py::test_macro_call_two_args[![ls", "tests/test_parser.py::test_macro_call_two_args[${x", "tests/test_parser.py::test_macro_call_two_args[@$(which", "tests/test_parser.py::test_macro_call_three_args[x-True-None]", "tests/test_parser.py::test_macro_call_three_args[x-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-True-if", "tests/test_parser.py::test_macro_call_three_args[x-True-{1,", "tests/test_parser.py::test_macro_call_three_args[x-True-((x,", "tests/test_parser.py::test_macro_call_three_args[x-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-True-[]]", "tests/test_parser.py::test_macro_call_three_args[x-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-True-${x", "tests/test_parser.py::test_macro_call_three_args[x-x=10-None]", 
"tests/test_parser.py::test_macro_call_three_args[x-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-x=10-if", "tests/test_parser.py::test_macro_call_three_args[x-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[x-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[x-", "tests/test_parser.py::test_macro_call_three_args[x-{x:", "tests/test_parser.py::test_macro_call_three_args[x-(x,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[x-{}-None]", "tests/test_parser.py::test_macro_call_three_args[x-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-{}-if", "tests/test_parser.py::test_macro_call_three_args[x-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[x-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[x-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[x-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-{}-${x", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[x-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[x-$(ls", "tests/test_parser.py::test_macro_call_three_args[x-@$(which", "tests/test_parser.py::test_macro_call_three_args[import", "tests/test_parser.py::test_macro_call_three_args[...-True-None]", "tests/test_parser.py::test_macro_call_three_args[...-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-True-if", "tests/test_parser.py::test_macro_call_three_args[...-True-{1,", "tests/test_parser.py::test_macro_call_three_args[...-True-((x,", "tests/test_parser.py::test_macro_call_three_args[...-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-True-[]]", "tests/test_parser.py::test_macro_call_three_args[...-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-True-${x", "tests/test_parser.py::test_macro_call_three_args[...-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[...-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-x=10-if", "tests/test_parser.py::test_macro_call_three_args[...-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-((x,", 
"tests/test_parser.py::test_macro_call_three_args[...-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[...-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[...-", "tests/test_parser.py::test_macro_call_three_args[...-{x:", "tests/test_parser.py::test_macro_call_three_args[...-(x,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[...-{}-None]", "tests/test_parser.py::test_macro_call_three_args[...-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-{}-if", "tests/test_parser.py::test_macro_call_three_args[...-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[...-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[...-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[...-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-{}-${x", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[...-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[...-$(ls", "tests/test_parser.py::test_macro_call_three_args[...-@$(which", "tests/test_parser.py::test_macro_call_three_args[{x:", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-True-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-[]]", 
"tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{x:", "tests/test_parser.py::test_macro_call_three_args[(x,y)-(x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-{}-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[(x,y)-$(ls", "tests/test_parser.py::test_macro_call_three_args[(x,y)-@$(which", "tests/test_parser.py::test_macro_call_three_args[g()-True-None]", "tests/test_parser.py::test_macro_call_three_args[g()-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-True-if", "tests/test_parser.py::test_macro_call_three_args[g()-True-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-True-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-True-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-True-${x", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-if", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-x=10-${x", 
"tests/test_parser.py::test_macro_call_three_args[g()-", "tests/test_parser.py::test_macro_call_three_args[g()-{x:", "tests/test_parser.py::test_macro_call_three_args[g()-(x,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-None]", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[g()-{}-None]", "tests/test_parser.py::test_macro_call_three_args[g()-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-{}-if", "tests/test_parser.py::test_macro_call_three_args[g()-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-{}-${x", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[g()-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[g()-$(ls", "tests/test_parser.py::test_macro_call_three_args[g()-@$(which", "tests/test_parser.py::test_macro_call_three_args[()-True-None]", "tests/test_parser.py::test_macro_call_three_args[()-True-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-True-if", "tests/test_parser.py::test_macro_call_three_args[()-True-{1,", "tests/test_parser.py::test_macro_call_three_args[()-True-((x,", "tests/test_parser.py::test_macro_call_three_args[()-True-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-True-[]]", "tests/test_parser.py::test_macro_call_three_args[()-True-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-True-${x", "tests/test_parser.py::test_macro_call_three_args[()-x=10-None]", "tests/test_parser.py::test_macro_call_three_args[()-x=10-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-x=10-if", "tests/test_parser.py::test_macro_call_three_args[()-x=10-{1,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-((x,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-x=10-[]]", "tests/test_parser.py::test_macro_call_three_args[()-x=10-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-x=10-${x", "tests/test_parser.py::test_macro_call_three_args[()-", "tests/test_parser.py::test_macro_call_three_args[()-{x:", "tests/test_parser.py::test_macro_call_three_args[()-(x,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-None]", 
"tests/test_parser.py::test_macro_call_three_args[()-range(10)-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-if", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-{1,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-((x,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-[]]", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-range(10)-${x", "tests/test_parser.py::test_macro_call_three_args[()-{}-None]", "tests/test_parser.py::test_macro_call_three_args[()-{}-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-{}-if", "tests/test_parser.py::test_macro_call_three_args[()-{}-{1,", "tests/test_parser.py::test_macro_call_three_args[()-{}-((x,", "tests/test_parser.py::test_macro_call_three_args[()-{}-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-{}-[]]", "tests/test_parser.py::test_macro_call_three_args[()-{}-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-{}-${x", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-None]", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-\"oh", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-if", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-{1,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-((x,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-range(1,", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-[]]", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-!(ls", "tests/test_parser.py::test_macro_call_three_args[()-@(x)-${x", "tests/test_parser.py::test_macro_call_three_args[()-$(ls", "tests/test_parser.py::test_macro_call_three_args[()-@$(which", "tests/test_parser.py::test_macro_call_three_args[[1,", "tests/test_parser.py::test_macro_call_three_args[![ls", "tests/test_parser.py::test_macro_call_three_args[$[ls", "tests/test_parser.py::test_macro_call_one_trailing[x]", "tests/test_parser.py::test_macro_call_one_trailing[True]", "tests/test_parser.py::test_macro_call_one_trailing[None]", "tests/test_parser.py::test_macro_call_one_trailing[import", "tests/test_parser.py::test_macro_call_one_trailing[x=10]", "tests/test_parser.py::test_macro_call_one_trailing[\"oh", "tests/test_parser.py::test_macro_call_one_trailing[...]", "tests/test_parser.py::test_macro_call_one_trailing[", "tests/test_parser.py::test_macro_call_one_trailing[if", "tests/test_parser.py::test_macro_call_one_trailing[{x:", "tests/test_parser.py::test_macro_call_one_trailing[{1,", "tests/test_parser.py::test_macro_call_one_trailing[(x,y)]", "tests/test_parser.py::test_macro_call_one_trailing[(x,", "tests/test_parser.py::test_macro_call_one_trailing[((x,", "tests/test_parser.py::test_macro_call_one_trailing[g()]", "tests/test_parser.py::test_macro_call_one_trailing[range(10)]", "tests/test_parser.py::test_macro_call_one_trailing[range(1,", "tests/test_parser.py::test_macro_call_one_trailing[()]", "tests/test_parser.py::test_macro_call_one_trailing[{}]", "tests/test_parser.py::test_macro_call_one_trailing[[]]", "tests/test_parser.py::test_macro_call_one_trailing[[1,", "tests/test_parser.py::test_macro_call_one_trailing[@(x)]", "tests/test_parser.py::test_macro_call_one_trailing[!(ls", "tests/test_parser.py::test_macro_call_one_trailing[![ls", "tests/test_parser.py::test_macro_call_one_trailing[$(ls", "tests/test_parser.py::test_macro_call_one_trailing[${x", 
"tests/test_parser.py::test_macro_call_one_trailing[$[ls", "tests/test_parser.py::test_macro_call_one_trailing[@$(which", "tests/test_parser.py::test_macro_call_one_trailing_space[x]", "tests/test_parser.py::test_macro_call_one_trailing_space[True]", "tests/test_parser.py::test_macro_call_one_trailing_space[None]", "tests/test_parser.py::test_macro_call_one_trailing_space[import", "tests/test_parser.py::test_macro_call_one_trailing_space[x=10]", "tests/test_parser.py::test_macro_call_one_trailing_space[\"oh", "tests/test_parser.py::test_macro_call_one_trailing_space[...]", "tests/test_parser.py::test_macro_call_one_trailing_space[", "tests/test_parser.py::test_macro_call_one_trailing_space[if", "tests/test_parser.py::test_macro_call_one_trailing_space[{x:", "tests/test_parser.py::test_macro_call_one_trailing_space[{1,", "tests/test_parser.py::test_macro_call_one_trailing_space[(x,y)]", "tests/test_parser.py::test_macro_call_one_trailing_space[(x,", "tests/test_parser.py::test_macro_call_one_trailing_space[((x,", "tests/test_parser.py::test_macro_call_one_trailing_space[g()]", "tests/test_parser.py::test_macro_call_one_trailing_space[range(10)]", "tests/test_parser.py::test_macro_call_one_trailing_space[range(1,", "tests/test_parser.py::test_macro_call_one_trailing_space[()]", "tests/test_parser.py::test_macro_call_one_trailing_space[{}]", "tests/test_parser.py::test_macro_call_one_trailing_space[[]]", "tests/test_parser.py::test_macro_call_one_trailing_space[[1,", "tests/test_parser.py::test_macro_call_one_trailing_space[@(x)]", "tests/test_parser.py::test_macro_call_one_trailing_space[!(ls", "tests/test_parser.py::test_macro_call_one_trailing_space[![ls", "tests/test_parser.py::test_macro_call_one_trailing_space[$(ls", "tests/test_parser.py::test_macro_call_one_trailing_space[${x", "tests/test_parser.py::test_macro_call_one_trailing_space[$[ls", "tests/test_parser.py::test_macro_call_one_trailing_space[@$(which", "tests/test_parser.py::test_empty_subprocbang[echo!-!(-)]", "tests/test_parser.py::test_empty_subprocbang[echo!-$(-)]", "tests/test_parser.py::test_empty_subprocbang[echo!-![-]]", "tests/test_parser.py::test_empty_subprocbang[echo!-$[-]]", "tests/test_parser.py::test_empty_subprocbang[echo", "tests/test_parser.py::test_single_subprocbang[echo!x-!(-)]", "tests/test_parser.py::test_single_subprocbang[echo!x-$(-)]", "tests/test_parser.py::test_single_subprocbang[echo!x-![-]]", "tests/test_parser.py::test_single_subprocbang[echo!x-$[-]]", "tests/test_parser.py::test_single_subprocbang[echo", "tests/test_parser.py::test_arg_single_subprocbang[echo", "tests/test_parser.py::test_arg_single_subprocbang_nested[echo", "tests/test_parser.py::test_many_subprocbang[echo!x", "tests/test_parser.py::test_many_subprocbang[echo", "tests/test_parser.py::test_many_subprocbang[timeit!", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!!!-$[-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-![-]]", "tests/test_parser.py::test_many_subprocbang[timeit!!(ls)-$[-]]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-!(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-$(-)]", "tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-![-]]", 
"tests/test_parser.py::test_many_subprocbang[timeit!\"!)\"-$[-]]", "tests/test_parser.py::test_withbang_single_suite[pass\\n]", "tests/test_parser.py::test_withbang_single_suite[x", "tests/test_parser.py::test_withbang_single_suite[export", "tests/test_parser.py::test_withbang_single_suite[with", "tests/test_parser.py::test_withbang_as_single_suite[pass\\n]", "tests/test_parser.py::test_withbang_as_single_suite[x", "tests/test_parser.py::test_withbang_as_single_suite[export", "tests/test_parser.py::test_withbang_as_single_suite[with", "tests/test_parser.py::test_withbang_single_suite_trailing[pass\\n]", "tests/test_parser.py::test_withbang_single_suite_trailing[x", "tests/test_parser.py::test_withbang_single_suite_trailing[export", "tests/test_parser.py::test_withbang_single_suite_trailing[with", "tests/test_parser.py::test_withbang_single_simple[pass]", "tests/test_parser.py::test_withbang_single_simple[x", "tests/test_parser.py::test_withbang_single_simple[export", "tests/test_parser.py::test_withbang_single_simple[[1,\\n", "tests/test_parser.py::test_withbang_single_simple_opt[pass]", "tests/test_parser.py::test_withbang_single_simple_opt[x", "tests/test_parser.py::test_withbang_single_simple_opt[export", "tests/test_parser.py::test_withbang_single_simple_opt[[1,\\n", "tests/test_parser.py::test_withbang_as_many_suite[pass\\n]", "tests/test_parser.py::test_withbang_as_many_suite[x", "tests/test_parser.py::test_withbang_as_many_suite[export", "tests/test_parser.py::test_withbang_as_many_suite[with", "tests/test_parser.py::test_subproc_raw_str_literal", "tests/test_parser.py::test_syntax_error_del_literal", "tests/test_parser.py::test_syntax_error_del_constant", "tests/test_parser.py::test_syntax_error_del_emptytuple", "tests/test_parser.py::test_syntax_error_del_call", "tests/test_parser.py::test_syntax_error_del_lambda", "tests/test_parser.py::test_syntax_error_del_ifexp", "tests/test_parser.py::test_syntax_error_del_comps[[i", "tests/test_parser.py::test_syntax_error_del_comps[{i", "tests/test_parser.py::test_syntax_error_del_comps[(i", "tests/test_parser.py::test_syntax_error_del_comps[{k:v", "tests/test_parser.py::test_syntax_error_del_ops[x", "tests/test_parser.py::test_syntax_error_del_ops[-x]", "tests/test_parser.py::test_syntax_error_del_cmp[x", "tests/test_parser.py::test_syntax_error_lonely_del", "tests/test_parser.py::test_syntax_error_assign_literal", "tests/test_parser.py::test_syntax_error_assign_constant", "tests/test_parser.py::test_syntax_error_assign_emptytuple", "tests/test_parser.py::test_syntax_error_assign_call", "tests/test_parser.py::test_syntax_error_assign_lambda", "tests/test_parser.py::test_syntax_error_assign_ifexp", "tests/test_parser.py::test_syntax_error_assign_comps[[i", "tests/test_parser.py::test_syntax_error_assign_comps[{i", "tests/test_parser.py::test_syntax_error_assign_comps[(i", "tests/test_parser.py::test_syntax_error_assign_comps[{k:v", "tests/test_parser.py::test_syntax_error_assign_ops[x", "tests/test_parser.py::test_syntax_error_assign_ops[-x]", "tests/test_parser.py::test_syntax_error_assign_cmp[x", "tests/test_parser.py::test_syntax_error_augassign_literal", "tests/test_parser.py::test_syntax_error_augassign_constant", "tests/test_parser.py::test_syntax_error_augassign_emptytuple", "tests/test_parser.py::test_syntax_error_augassign_call", "tests/test_parser.py::test_syntax_error_augassign_lambda", "tests/test_parser.py::test_syntax_error_augassign_ifexp", "tests/test_parser.py::test_syntax_error_augassign_comps[[i", 
"tests/test_parser.py::test_syntax_error_augassign_comps[{i", "tests/test_parser.py::test_syntax_error_augassign_comps[(i", "tests/test_parser.py::test_syntax_error_augassign_comps[{k:v", "tests/test_parser.py::test_syntax_error_augassign_ops[x", "tests/test_parser.py::test_syntax_error_augassign_ops[-x]", "tests/test_parser.py::test_syntax_error_augassign_cmp[x", "tests/test_parser.py::test_syntax_error_bar_kwonlyargs", "tests/test_parser.py::test_syntax_error_bar_posonlyargs", "tests/test_parser.py::test_syntax_error_bar_posonlyargs_no_comma", "tests/test_parser.py::test_syntax_error_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_posonly_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_lambda_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_lambda_posonly_nondefault_follows_default", "tests/test_parser.py::test_syntax_error_literal_concat_different[-p]", "tests/test_parser.py::test_syntax_error_literal_concat_different[-b]", "tests/test_parser.py::test_syntax_error_literal_concat_different[p-]", "tests/test_parser.py::test_syntax_error_literal_concat_different[p-b]", "tests/test_parser.py::test_syntax_error_literal_concat_different[b-]", "tests/test_parser.py::test_syntax_error_literal_concat_different[b-p]", "tests/test_parser.py::test_get_repo_url", "tests/test_parser.py::test_match_and_case_are_not_keywords" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-04-08 19:37:32+00:00
bsd-2-clause
6,303
xuanxu__starmatrix-45
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ddd32f0..e643d2d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,12 @@ Changelog ========= + +1.6.0 (unreleased) +================== +- Added Phi function from Strolger et al (2020) + + 1.5.3 (2021-07-14) ================== - Better normalization rates and fits for Greggio DTDs diff --git a/README.rst b/README.rst index 89b1cea..14143c5 100644 --- a/README.rst +++ b/README.rst @@ -141,6 +141,11 @@ The ``dtd_sn`` param in the config file can be set to use any of the available D :greggio-WDD1: DTD from model Wide DD 1 Gyr from Greggio, L. (2005) :greggio-SDCH: DTD from model SD Chandra from Greggio, L. (2005) :greggio-SDSCH: DTD from model SD sub-Chandra from Greggio, L. (2005) +:strolger-fit1: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (10, 600, 220) +:strolger-fit2: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (110, 1000, 2) +:strolger-fit3: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (350, 1200, 20) +:strolger-fit4: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (6000, 6000, -2) +:strolger-optimized: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (-1518, 51, 50) Supernovae yields ----------------- @@ -215,3 +220,4 @@ Starmatrix is built upon a long list of previous works from different authors/pa * *Gronow, S. et al.*, 2021, A&A * *Mori, K. et al.*, 2018, ApJ, 863:176 * *Chen, X., Hu, L. & Wang, L.*, 2021, ApJ +* *Strolger et al*, 2020, ApJ, Vol 890, 2. doi: 10.3847/1538-4357/ab6a97 diff --git a/docs/configuration.rst b/docs/configuration.rst index f8d9052..a92aced 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -87,6 +87,11 @@ The ``dtd_sn`` param in the config file can be set to use any of the available D :greggio-WDD1: DTD from model Wide DD 1 Gyr from Greggio, L. (2005) :greggio-SDCH: DTD from model SD Chandra from Greggio, L. (2005) :greggio-SDSCH: DTD from model SD sub-Chandra from Greggio, L. (2005) +:strolger-fit1: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (10, 600, 220) +:strolger-fit2: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (110, 1000, 2) +:strolger-fit3: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (350, 1200, 20) +:strolger-fit4: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (6000, 6000, -2) +:strolger-optimized: Phi function from Strolger et al (2020) with (ξ, ω, 𝛼) = (-1518, 51, 50) Supernovae yields ----------------- diff --git a/docs/credits.rst b/docs/credits.rst index f9dd88c..7f885a3 100644 --- a/docs/credits.rst +++ b/docs/credits.rst @@ -36,6 +36,7 @@ Starmatrix is built upon a long list of previous works from different authors/pa * *Leung & Nomoto*, 2018, ApJ, Vol 861, Issue 2, Id 143 * *Leung & Nomoto*, 2020, ApJ, Vol 888, Issue 2, Id 80 * *Chen, X., Hu, L. & Wang, L.*, 2021, ApJ +* *Strolger et al*, 2020, ApJ, Vol 890, 2. doi: 10.3847/1538-4357/ab6a97 License ------- diff --git a/src/starmatrix/dtds.py b/src/starmatrix/dtds.py index 5bfdda5..03943c7 100644 --- a/src/starmatrix/dtds.py +++ b/src/starmatrix/dtds.py @@ -7,10 +7,14 @@ Contains some predefined DTDs from different papers/authors: * Maoz & Graur (2017) * Castrillo et al (2020) * Greggio, L. (2005) +* Strolger et al. 
(2020) """ import math +import scipy.integrate +import starmatrix.constants as constants +from functools import lru_cache def select_dtd(option): @@ -25,7 +29,12 @@ def select_dtd(option): "greggio-WDD1": dtd_wide_dd_1, "greggio-SDCH": dtd_sd_chandra, "greggio-SDSCH": dtd_sd_subchandra, - "chen": dtd_chen + "chen": dtd_chen, + "strolger-fit1": dtds_strolger["fit_1"], + "strolger-fit2": dtds_strolger["fit_2"], + "strolger-fit3": dtds_strolger["fit_3"], + "strolger-fit4": dtds_strolger["fit_4"], + "strolger-optimized": dtds_strolger["optimized"], } return dtds[option] @@ -284,3 +293,57 @@ def dtd_chen(t): rate = 2.069e-4 # [SN / Yr / M*] return rate * dtd + + +class Strolger: + def __init__(self, psi, omega, alpha): + self.psi = psi + self.omega = omega + self.alpha = alpha + + def description(self): + return ("Delay Time Distributions (DTDs) from Strolger et al, " + "The Astrophysical Journal, 2020, 890, 2. " + "DOI: 10.3847/1538-4357/ab6a97") + + def phi(self, t_gyrs): + t_myrs = t_gyrs * 1e3 + + u = t_myrs - self.psi + term_1 = (1/(self.omega * math.pi)) * math.exp((-(u**2))/(2*(self.omega**2))) + + t_low = -math.inf + t_up = self.alpha*(u/self.omega) + + if 12 < t_up: + term_2 = 2 * scipy.integrate.quad(self.term_2_f, t_low, 0)[0] + else: + term_2 = scipy.integrate.quad(self.term_2_f, t_low, t_up)[0] + + return term_1 * term_2 + + def term_2_f(self, t_prime): + return math.exp(-math.pow(t_prime, 2)/2) + + @lru_cache(maxsize=128) + def normalization_rate(self): + return self.efficiency() / self.phi_integrated() + + def efficiency(self): + # SN/M* as Hubble-time-integrated production efficiency SN/Mo + return 1.03e-3 + + def phi_integrated(self): + return scipy.integrate.quad(self.phi, 0, constants.TOTAL_TIME)[0] + + def at_time(self, t): + return self.normalization_rate() * self.phi(t) + + +dtds_strolger = { + "fit_1": Strolger(10, 600, 220).at_time, + "fit_2": Strolger(110, 1000, 2).at_time, + "fit_3": Strolger(350, 1200, 20).at_time, + "fit_4": Strolger(6000, 6000, -2).at_time, + "optimized": Strolger(-1518, 51, 50).at_time, +} diff --git a/src/starmatrix/sample_input/params.yml b/src/starmatrix/sample_input/params.yml index 35fa2c4..eb92429 100644 --- a/src/starmatrix/sample_input/params.yml +++ b/src/starmatrix/sample_input/params.yml @@ -54,6 +54,11 @@ # greggio-WDD1 = DTD from model Wide DD 1 Gyr from Greggio, L. (2005) # greggio-SDCH = DTD from model SD Chandra from Greggio, L. (2005) # greggio-SDSCH = DTD from model SD sub-Chandra from Greggio, L. 
(2005) +# strolger-fit1 = Phi from Strolger et al (2020) with (ξ, ω, 𝛼) = (10, 600, 220) +# strolger-fit2 = Phi from Strolger et al (2020) with (ξ, ω, 𝛼) = (110, 1000, 2) +# strolger-fit3 = Phi from Strolger et al (2020) with (ξ, ω, 𝛼) = (350, 1200, 20) +# strolger-fit4 = Phi from Strolger et al (2020) with (ξ, ω, 𝛼) = (6000, 6000, -2) +# strolger-optimized = Phi from Strolger et al (2020) with (ξ, ω, 𝛼) = (-1518, 51, 50) # ] # sn_yields = [ diff --git a/src/starmatrix/settings.py b/src/starmatrix/settings.py index cd22f91..8cceb63 100644 --- a/src/starmatrix/settings.py +++ b/src/starmatrix/settings.py @@ -34,7 +34,8 @@ default = { valid_values = { "imf": ["salpeter", "starburst", "chabrier", "ferrini", "kroupa", "miller_scalo", "maschberger"], "dtd_sn": ["rlp", "maoz", "castrillo", "greggio", "chen", "greggio-CDD04", "greggio-CDD1", - "greggio-WDD04", "greggio-WDD1", "greggio-SDCH", "greggio-SDSCH"], + "greggio-WDD04", "greggio-WDD1", "greggio-SDCH", "greggio-SDSCH", + "strolger-fit1", "strolger-fit2", "strolger-fit3", "strolger-fit4", "strolger-optimized"], "sn_yields": ["iwa1998", "sei2013", "ln2020",
xuanxu/starmatrix
b8f09d6799e9f08e0e9df4ce378c5578feb00999
diff --git a/src/starmatrix/tests/dtds/test_strolger.py b/src/starmatrix/tests/dtds/test_strolger.py new file mode 100644 index 0000000..b5c68ab --- /dev/null +++ b/src/starmatrix/tests/dtds/test_strolger.py @@ -0,0 +1,26 @@ +import pytest +from starmatrix.dtds import Strolger + + +def test_parameters_initialization(): + strolger = Strolger(1000, 2000, 3000) + assert strolger.psi == 1000 + assert strolger.omega == 2000 + assert strolger.alpha == 3000 + + +def test_has_description(): + assert Strolger(10, 10, 10).description() is not None + + +def test_is_normalized(): + dtd = Strolger(6000, 6000, -2) + sample_t = 6 + dtd_point = dtd.at_time(sample_t) + assert dtd_point > 0 + assert dtd_point == dtd.phi(sample_t) * dtd.normalization_rate() + + +def test_normalization_rate_uses_hubble_efficiency(): + dtd = Strolger(6000, 6000, -2) + assert dtd.normalization_rate() == 1.03e-3 / dtd.phi_integrated() diff --git a/src/starmatrix/tests/test_dtds.py b/src/starmatrix/tests/test_dtds.py index 25cf0fc..5e60cca 100644 --- a/src/starmatrix/tests/test_dtds.py +++ b/src/starmatrix/tests/test_dtds.py @@ -14,6 +14,7 @@ from starmatrix.dtds import dtd_wide_dd_1 from starmatrix.dtds import dtd_sd_chandra from starmatrix.dtds import dtd_sd_subchandra from starmatrix.dtds import dtd_chen +from starmatrix.dtds import dtds_strolger @pytest.fixture @@ -31,10 +32,13 @@ def test_dtds_presence(available_dtds): def test_select_dtd(available_dtds): dtds = [dtd_ruiz_lapuente, dtd_maoz_graur, dtd_castrillo, dtd_greggio, dtd_chen, - dtd_close_dd_04, dtd_close_dd_1, dtd_wide_dd_04, dtd_wide_dd_1, dtd_sd_chandra, dtd_sd_subchandra] + dtd_close_dd_04, dtd_close_dd_1, dtd_wide_dd_04, dtd_wide_dd_1, dtd_sd_chandra, dtd_sd_subchandra, + dtds_strolger["fit_1"], dtds_strolger["fit_2"], dtds_strolger["fit_3"], dtds_strolger["fit_4"], dtds_strolger["optimized"]] + + assert len(available_dtds) == len(dtds) for i in range(len(available_dtds)): - times = [0, 0.001, 0.04, 0.1, 0.4, 2, 9.] + list(np.random.rand(5)) + list(np.random.rand(5) * 9) + times = [0, 0.001, 0.04, 0.1, 0.4, 1, 2, 9.] + list(np.random.rand(5)) + list(np.random.rand(5) * 9) for time in times: assert select_dtd(available_dtds[i])(time) == dtds[i](time)
Add Strolger's phi

In `Strolger et al, ApJ, 2020, 890, 2` there is a Phi function used to fit Delay Time Distributions by parameter optimization.

![Strolger](https://user-images.githubusercontent.com/6528/125192595-f15f2800-e248-11eb-9c24-59140d0e7d93.png)

Task: Add Phi with different parameter options to Starmatrix: the 4 fits for binary population synthesis analyses for SD scenarios, and the optimized solution.

![fits](https://user-images.githubusercontent.com/6528/125192551-c379e380-e248-11eb-8f08-474b9f74b741.png)

![params](https://user-images.githubusercontent.com/6528/125192586-e5736600-e248-11eb-8f7b-a4a89e7c8e68.png)
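A minimal usage sketch of the requested feature, assuming the `strolger-*` DTD options and the `Strolger` class introduced in the accompanying patch above are available in `starmatrix.dtds`; the parameter tuples correspond to the paper's (ξ, ω, 𝛼):

```python
# Minimal sketch, assuming the "strolger-*" options and the Strolger class
# from the patch above are present in starmatrix.dtds.
from starmatrix.dtds import Strolger, select_dtd

# Select one of the new Phi-based DTDs by its config name.
dtd = select_dtd("strolger-fit1")   # Strolger(10, 600, 220).at_time
print(dtd(1.0))                     # normalized SN Ia rate at t = 1 Gyr

# Or evaluate a Phi fit with explicit (psi, omega, alpha) parameters.
custom = Strolger(350, 1200, 20)    # the "fit 3" parameter set
print(custom.at_time(1.0))
```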
0.0
b8f09d6799e9f08e0e9df4ce378c5578feb00999
[ "src/starmatrix/tests/dtds/test_strolger.py::test_parameters_initialization", "src/starmatrix/tests/dtds/test_strolger.py::test_has_description", "src/starmatrix/tests/dtds/test_strolger.py::test_is_normalized", "src/starmatrix/tests/dtds/test_strolger.py::test_normalization_rate_uses_hubble_efficiency", "src/starmatrix/tests/test_dtds.py::test_dtds_presence", "src/starmatrix/tests/test_dtds.py::test_select_dtd", "src/starmatrix/tests/test_dtds.py::test_no_negative_time_values", "src/starmatrix/tests/test_dtds.py::test_dtd_correction_factor" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-07-17 20:26:30+00:00
mit
6,304
y0-causal-inference__y0-12
diff --git a/src/y0/dsl.py b/src/y0/dsl.py index 148e047..6247e6a 100644 --- a/src/y0/dsl.py +++ b/src/y0/dsl.py @@ -15,8 +15,7 @@ __all__ = [ 'Variable', 'Intervention', 'CounterfactualVariable', - 'ConditionalProbability', - 'JointProbability', + 'Distribution', 'P', 'Probability', 'Sum', @@ -44,6 +43,9 @@ class _Mathable(ABC): def to_latex(self) -> str: """Output this DSL object in the LaTeX string format.""" + def _repr_latex_(self) -> str: # hack for auto-display of latex in jupyter notebook + return f'${self.to_latex()}$' + def __str__(self) -> str: return self.to_text() @@ -84,23 +86,36 @@ class Variable(_Mathable): def __matmul__(self, variables: XList[Variable]) -> CounterfactualVariable: return self.intervene(variables) - def given(self, parents: XList[Variable]) -> ConditionalProbability: + def given(self, parents: Union[XList[Variable], Distribution]) -> Distribution: """Create a distribution in which this variable is conditioned on the given variable(s). + The new distribution is a Markov Kernel. + :param parents: A variable or list of variables to include as conditions in the new conditional distribution :returns: A new conditional probability distribution + :raises TypeError: If a distribution is given as the parents that contains conditionals .. note:: This function can be accessed with the or | operator. """ - return ConditionalProbability( - child=self, - parents=_upgrade_variables(parents), - ) - - def __or__(self, parents: XList[Variable]) -> ConditionalProbability: + if not isinstance(parents, Distribution): + return Distribution( + children=[self], + parents=_upgrade_variables(parents), + ) + elif parents.is_conditioned(): + raise TypeError('can not be given a distribution that has conditionals') + else: + # The parents variable is actually a Distribution instance with no parents, + # so its children become the parents for the new Markov Kernel distribution + return Distribution( + children=[self], + parents=parents.children, # don't think about this too hard + ) + + def __or__(self, parents: XList[Variable]) -> Distribution: return self.given(parents) - def joint(self, children: XList[Variable]) -> JointProbability: + def joint(self, children: XList[Variable]) -> Distribution: """Create a joint distribution between this variable and the given variable(s). :param children: The variable(s) for use with this variable in a joint distribution @@ -108,9 +123,11 @@ class Variable(_Mathable): .. note:: This function can be accessed with the and & operator. 
""" - return JointProbability([self, *_upgrade_variables(children)]) + return Distribution( + children=[self, *_upgrade_variables(children)], + ) - def __and__(self, children: XList[Variable]) -> JointProbability: + def __and__(self, children: XList[Variable]) -> Distribution: return self.joint(children) def invert(self) -> Intervention: @@ -211,82 +228,97 @@ class CounterfactualVariable(Variable): @dataclass -class JointProbability(_Mathable): - """A joint probability distribution over several variables.""" +class Distribution(_Mathable): + """A general distribution over several child variables, conditioned by several parents.""" children: List[Variable] + parents: List[Variable] = field(default_factory=list) + + def __post_init__(self): + if isinstance(self.children, Variable): + self.children = [self.children] + if not self.children: + raise ValueError('distribution must have at least one child') + if isinstance(self.parents, Variable): + self.parents = [self.parents] def to_text(self) -> str: - """Output this joint probability distribution in the internal string format.""" - return ','.join(child.to_text() for child in self.children) + """Output this distribution in the internal string format.""" + children = ','.join(child.to_text() for child in self.children) + if self.parents: + parents = ','.join(parent.to_text() for parent in self.parents) + return f'{children}|{parents}' + else: + return children def to_latex(self) -> str: - """Output this joint probability distribution in the LaTeX string format.""" - return ','.join(child.to_latex() for child in self.children) + """Output this distribution in the LaTeX string format.""" + children = ','.join(child.to_latex() for child in self.children) + parents = ','.join(parent.to_latex() for parent in self.parents) + return f'{children}|{parents}' + + def is_conditioned(self) -> bool: + """Return if this distribution is conditioned.""" + return 0 < len(self.parents) - def joint(self, children: XList[Variable]) -> JointProbability: - """Create a joint distribution between the variables in this distribution the given variable(s). + def is_markov_kernel(self) -> bool: + """Return if this distribution a markov kernel -> one child variable and one or more conditionals.""" + return len(self.children) == 1 - :param children: The variable(s) with which this joint distribution is extended - :returns: A new joint distribution over all previous and given variables. + def joint(self, children: XList[Variable]) -> Distribution: + """Create a new distribution including the given child variables. + + :param children: The variable(s) with which this distribution's children are extended + :returns: A new distribution. .. note:: This function can be accessed with the and & operator. """ - return JointProbability([ - *self.children, - *_upgrade_variables(children), - ]) + return Distribution( + children=[*self.children, *_upgrade_variables(children)], + parents=self.parents, + ) - def __and__(self, children: XList[Variable]) -> JointProbability: + def __and__(self, children: XList[Variable]) -> Distribution: return self.joint(children) + def given(self, parents: Union[XList[Variable], Distribution]) -> Distribution: + """Create a new mixed distribution additionally conditioned on the given parent variables. 
-@dataclass -class ConditionalProbability(_Mathable): - """A conditional distribution over a single child variable and one or more parent conditional variables.""" - - child: Variable - parents: List[Variable] - - def to_text(self) -> str: - """Output this conditional probability distribution in the internal string format.""" - parents = ','.join(parent.to_text() for parent in self.parents) - return f'{self.child.to_text()}|{parents}' - - def to_latex(self) -> str: - """Output this conditional probability distribution in the LaTeX string format.""" - parents = ','.join(parent.to_latex() for parent in self.parents) - return f'{self.child.to_latex()}|{parents}' - - def given(self, parents: XList[Variable]) -> ConditionalProbability: - """Create a new conditional distribution with this distribution's children, parents, and the given parent(s). - - :param parents: A variable or list of variables to include as conditions in the new conditional distribution, - in addition to the variables already in this conditional distribution - :returns: A new conditional probability distribution + :param parents: The variable(s) with which this distribution's parents are extended + :returns: A new distribution + :raises TypeError: If a distribution is given as the parents that contains conditionals .. note:: This function can be accessed with the or | operator. """ - return ConditionalProbability( - child=self.child, - parents=[*self.parents, *_upgrade_variables(parents)], - ) - - def __or__(self, parents: XList[Variable]) -> ConditionalProbability: + if not isinstance(parents, Distribution): + return Distribution( + children=self.children, + parents=[*self.parents, *_upgrade_variables(parents)], + ) + elif parents.is_conditioned(): + raise TypeError('can not be given a distribution that has conditionals') + else: + # The parents variable is actually a Distribution instance with no parents, + # so its children get appended as parents for the new mixed distribution + return Distribution( + children=self.children, + parents=[*self.parents, *parents.children], # don't think about this too hard + ) + + def __or__(self, parents: XList[Variable]) -> Distribution: return self.given(parents) class Expression(_Mathable, ABC): """The abstract class representing all expressions.""" - def _repr_latex_(self) -> str: # hack for auto-display of latex in jupyter notebook - return f'${self.to_latex()}$' - + @abstractmethod def __mul__(self, other): - raise NotImplementedError + pass + @abstractmethod def __truediv__(self, other): - raise NotImplementedError + pass class Probability(Expression): @@ -294,32 +326,36 @@ class Probability(Expression): def __init__( self, - probability: Union[Variable, List[Variable], ConditionalProbability, JointProbability], + distribution: Union[Variable, List[Variable], Distribution], *args: Variable, ) -> None: """Create a probability expression over the given variable(s) or distribution. - :param probability: If given a :class:`ConditionalProbability` or :class:`JointProbability`, - creates a probability expression directly over the distribution. If given variable or - list of variables, conveniently creates a :class:`JointProbability` over the variable(s) - first. - :param args: If the first argument (``probability``) was given as a single variable, the - ``args`` variadic argument can be used to specify a list of additiona variables. + :param distribution: If given a :class:`Distribution`, creates a probability expression + directly over the distribution. 
If given variable or list of variables, conveniently + creates a :class:`Distribtion` with the variable(s) as children. + :param args: If the first argument (``distribution``) was given as a single variable, the + ``args`` variadic argument can be used to specify a list of additional variables. :raises ValueError: If varidic args are used incorrectly (i.e., in combination with a - list of variables, :class:`ConditionalProbability`, or :class:`JointProbability`. + list of variables or :class:`Distribution`. .. note:: This class is so commonly used, that it is aliased as :class:`P`. - Creation with a :class:`ConditionalProbability`: + Creation with a conditional distribution: >>> from y0.dsl import P, A, B >>> P(A | B) - Creation with a :class:`JointProbability`: + Creation with a joint distribution: >>> from y0.dsl import P, A, B >>> P(A & B) + Creation with a mixed joint/conditional distribution: + + >>> from y0.dsl import P, A, B, C + >>> P(A & B | C) + Creation with a single :class:`Variable`: >>> from y0.dsl import P, A @@ -335,30 +371,30 @@ class Probability(Expression): >>> from y0.dsl import P, A, B >>> P(A, B) """ - if isinstance(probability, Variable): + if isinstance(distribution, Variable): if not args: - probability = [probability] + distribution = [distribution] elif not all(isinstance(p, Variable) for p in args): raise ValueError else: - probability = [probability, *args] - if isinstance(probability, list): - probability = JointProbability(probability) - self.probability = probability + distribution = [distribution, *args] + if isinstance(distribution, list): + distribution = Distribution(children=distribution) + self.distribution = distribution def to_text(self) -> str: """Output this probability in the internal string format.""" - return f'P({self.probability.to_text()})' + return f'P({self.distribution.to_text()})' def to_latex(self) -> str: """Output this probability in the LaTeX string format.""" - return f'P({self.probability.to_latex()})' + return f'P({self.distribution.to_latex()})' def __repr__(self): - return f'P({repr(self.probability)})' + return f'P({repr(self.distribution)})' def __eq__(self, other): - return isinstance(other, Probability) and self.probability == other.probability + return isinstance(other, Probability) and self.distribution == other.distribution def __mul__(self, other: Expression) -> Expression: if isinstance(other, Product): diff --git a/src/y0/parser_utils.py b/src/y0/parser_utils.py index f460b57..3d03bf4 100644 --- a/src/y0/parser_utils.py +++ b/src/y0/parser_utils.py @@ -4,9 +4,7 @@ from pyparsing import Group, Optional, ParseResults, Suppress, Word, alphas, delimitedList -from .dsl import ( - ConditionalProbability, CounterfactualVariable, Intervention, JointProbability, Probability, Variable, -) +from .dsl import (CounterfactualVariable, Distribution, Intervention, Probability, Variable) __all__ = [ 'probability_pe', @@ -47,13 +45,9 @@ def _make_variable(_s, _l, tokens: ParseResults) -> Variable: def _make_probability(_s, _l, tokens: ParseResults) -> Probability: children, parents = tokens['children'].asList(), tokens['parents'].asList() - if not parents: - return Probability(JointProbability(children=children)) if not children: raise ValueError - if len(children) > 1: - raise ValueError - return Probability(ConditionalProbability(child=children[0], parents=parents)) + return Probability(Distribution(children=children, parents=parents)) # The suffix "pe" refers to :class:`pyparsing.ParserElement`, which is the
y0-causal-inference/y0
c0b9789f73521e22c2dab116219fda4be4a6da05
diff --git a/tests/test_dsl.py b/tests/test_dsl.py index d7413c8..c3143c2 100644 --- a/tests/test_dsl.py +++ b/tests/test_dsl.py @@ -6,8 +6,8 @@ import itertools as itt import unittest from y0.dsl import ( - A, B, C, ConditionalProbability, CounterfactualVariable, D, Fraction, Intervention, JointProbability, P, Q, S, Sum, - T, Variable, W, X, Y, Z, + A, B, C, CounterfactualVariable, D, Distribution, Fraction, Intervention, P, Q, S, Sum, T, + Variable, W, X, Y, Z, ) V = Variable('V') @@ -19,13 +19,18 @@ class TestDSL(unittest.TestCase): def assert_text(self, s: str, expression): """Assert the expression when it is converted to a string.""" self.assertIsInstance(s, str) - self.assertEqual(s, expression.to_text()) + self.assertEqual(s, expression.to_text(), msg=f'Expression: {repr(expression)}') def test_variable(self): """Test the variable DSL object.""" self.assert_text('A', Variable('A')) self.assert_text('A', A) # shorthand for testing purposes + def test_stop_the_madness(self): + """Test that a variable can not be named "P".""" + with self.assertRaises(ValueError): + _ = Variable('P') + def test_intervention(self): """Test the invervention DSL object.""" self.assert_text('W*', Intervention('W', True)) @@ -70,10 +75,10 @@ class TestDSL(unittest.TestCase): with self.subTest(a=a, b=b), self.assertRaises(ValueError): Y @ Intervention('X', star=a) @ Intervention('X', star=b) - def test_conditional(self): - """Test the ConditionalProbability DSL object.""" + def test_conditional_distribution(self): + """Test the :class:`Distribution` DSL object.""" # Normal instantiation - self.assert_text('A|B', ConditionalProbability(A, [B])) + self.assert_text('A|B', Distribution(A, [B])) # Instantiation with list-based operand to or | operator self.assert_text('A|B', Variable('A') | [B]) @@ -97,29 +102,60 @@ class TestDSL(unittest.TestCase): self.assert_text('Y_{W,X*}|B,C', Y @ W @ ~X | B | C) self.assert_text('Y_{W,X*}|B_{Q*},C', Y @ W @ ~X | B @ Intervention('Q', True) | C) - def test_conditional_probability(self): - """Test generation of conditional probabilities.""" - self.assert_text('P(A|B)', P(ConditionalProbability(A, [B]))) - self.assert_text('P(A|B)', P(A | [B])) - self.assert_text('P(A|B,C)', P(ConditionalProbability(A, [B]) | C)) - self.assert_text('P(A|B,C)', P(A | [B, C])) - self.assert_text('P(A|B,C)', P(A | B | C)) - - def test_joint(self): + def test_joint_distribution(self): """Test the JointProbability DSL object.""" - self.assert_text('A,B', JointProbability([A, B])) + self.assert_text('A,B', Distribution([A, B])) self.assert_text('A,B', A & B) - self.assert_text('A,B,C', JointProbability([A, B, C])) + self.assert_text('A,B,C', Distribution([A, B, C])) self.assert_text('A,B,C', A & B & C) - def test_joint_probability(self): - """Test generation of joint probabilities.""" - # Shortcut for list building + def test_probability(self): + """Test generation of probabilities.""" + # Make sure there are children + with self.assertRaises(ValueError): + Distribution([]) + + # Test markov kernels (AKA has only one child variable) + self.assert_text('P(A|B)', P(Distribution(A, [B]))) + self.assert_text('P(A|B)', P(A | [B])) + self.assert_text('P(A|B,C)', P(Distribution(A, [B]) | C)) + self.assert_text('P(A|B,C)', P(A | [B, C])) + self.assert_text('P(A|B,C)', P(A | B | C)) + self.assert_text('P(A|B,C)', P(A | B & C)) + + # Test simple joint distributions self.assert_text('P(A,B)', P([A, B])) self.assert_text('P(A,B)', P(A, B)) self.assert_text('P(A,B)', P(A & B)) self.assert_text('P(A,B,C)', P(A & B 
& C)) + # Test mixed with single conditional + self.assert_text('P(A,B|C)', P(Distribution([A, B], [C]))) + self.assert_text('P(A,B|C)', P(Distribution([A, B], C))) + self.assert_text('P(A,B|C)', P(Distribution([A, B]) | C)) + self.assert_text('P(A,B|C)', P(A & B | C)) + + # Test mixed with multiple conditionals + self.assert_text('P(A,B|C,D)', P(Distribution([A, B], [C, D]))) + self.assert_text('P(A,B|C,D)', P(Distribution([A, B]) | C | D)) + self.assert_text('P(A,B|C,D)', P(Distribution([A, B], [C]) | D)) + self.assert_text('P(A,B|C,D)', P(A & B | C | D)) + self.assert_text('P(A,B|C,D)', P(A & B | [C, D])) + self.assert_text('P(A,B|C,D)', P(A & B | Distribution([C, D]))) + self.assert_text('P(A,B|C,D)', P(A & B | C & D)) + + def test_conditioning_errors(self): + """Test erroring on conditionals.""" + for expression in [ + Distribution(B, C), + Distribution([B, C], D), + Distribution([B, C], [D, W]), + ]: + with self.assertRaises(TypeError): + _ = A | expression + with self.assertRaises(TypeError): + _ = X & Y | expression + def test_sum(self): """Test the Sum DSL object.""" # Sum with no variables
Unify joint and conditional probabilities
0.0
c0b9789f73521e22c2dab116219fda4be4a6da05
[ "tests/test_dsl.py::TestDSL::test_conditional_distribution", "tests/test_dsl.py::TestDSL::test_conditioning_errors", "tests/test_dsl.py::TestDSL::test_counterfactual_errors", "tests/test_dsl.py::TestDSL::test_counterfactual_variable", "tests/test_dsl.py::TestDSL::test_intervention", "tests/test_dsl.py::TestDSL::test_jeremy", "tests/test_dsl.py::TestDSL::test_joint_distribution", "tests/test_dsl.py::TestDSL::test_probability", "tests/test_dsl.py::TestDSL::test_stop_the_madness", "tests/test_dsl.py::TestDSL::test_sum", "tests/test_dsl.py::TestDSL::test_variable" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-01-13 12:01:03+00:00
mit
6,305
y0-causal-inference__y0-51
diff --git a/src/y0/mutate/canonicalize_expr.py b/src/y0/mutate/canonicalize_expr.py
index 61924c0..b5122f7 100644
--- a/src/y0/mutate/canonicalize_expr.py
+++ b/src/y0/mutate/canonicalize_expr.py
@@ -64,9 +64,6 @@ class Canonicalizer:
         if isinstance(expression, Probability):  # atomic
             return self._canonicalize_probability(expression)
         elif isinstance(expression, Sum):
-            if isinstance(expression.expression, Probability):  # also atomic
-                return expression
-
             return Sum(
                 expression=self.canonicalize(expression.expression),
                 ranges=expression.ranges,
y0-causal-inference/y0
36b3a29d99065c0adca2aecdf28de77c8fdf53fa
diff --git a/tests/test_mutate/test_canonicalize.py b/tests/test_mutate/test_canonicalize.py index 06ed32a..c76d878 100644 --- a/tests/test_mutate/test_canonicalize.py +++ b/tests/test_mutate/test_canonicalize.py @@ -108,13 +108,17 @@ class TestCanonicalize(unittest.TestCase): self.assert_canonicalize(P(A & B | C), P(c1 & c2 | C), [A, B, C]) # Two conditions, C and D for p1, p2 in itt.permutations([C, D]): - self.assert_canonicalize(P(A & B | C | D), P(c1 & c2 | (p1, p2)), [A, B, C, D]) + expected = P(A & B | C | D) + expression = P(c1 & c2 | (p1, p2)) + ordering = [A, B, C, D] + self.assert_canonicalize(expected, expression, ordering) + self.assert_canonicalize(Sum(expected), Sum(expression), ordering) for c1, c2, c3 in itt.permutations([A, B, C]): self.assert_canonicalize(P(A, B, C), P(c1, c2, c3), [A, B, C]) for p1, p2, p3 in itt.permutations([X, Y, Z]): - self.assert_canonicalize( - P(A & B & C | (X, Y, Z)), - P(c1 & c2 & c3 | (p1 & p2 & p3)), - [A, B, C, X, Y, Z], - ) + expected = P(A & B & C | (X, Y, Z)) + expression = P(c1 & c2 & c3 | (p1 & p2 & p3)) + ordering = [A, B, C, X, Y, Z] + self.assert_canonicalize(expected, expression, ordering) + self.assert_canonicalize(Sum(expected), Sum(expression), ordering)
Canonicalize does not work on Sum

```python
from y0.dsl import P, Sum, X, Y, Z, Product
from y0.mutate import canonicalize

expected = Sum[Z](P(Y,Z))
actual = Sum(P(Z, Y),[Z])

expected_vars = expected.get_variables()
ordering = list(expected_vars)

expected_canonical = canonicalize(expected, ordering)
actual_canonical = canonicalize(actual, ordering)
print(f"Expected: {expected_canonical}\nActual: {actual_canonical}")
```

```
Expected: [ sum_{Z} P(Y,Z) ]
Actual: [ sum_{Z} P(Z,Y) ]
```
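A brief, hedged check of the intended behavior once the patch above is applied (imports and constructors are taken from the reproduction snippet in this issue): `canonicalize` should now recurse into the body of a `Sum` instead of returning it untouched.

```python
# Sketch only, assuming the patch above: Sum bodies are canonicalized too.
from y0.dsl import P, Sum, Y, Z
from y0.mutate import canonicalize

expression = Sum(P(Z, Y), [Z])      # children listed out of order on purpose
ordering = [Y, Z]                   # target variable ordering
print(canonicalize(expression, ordering))  # expected: [ sum_{Z} P(Y,Z) ]
```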
0.0
36b3a29d99065c0adca2aecdf28de77c8fdf53fa
[ "tests/test_mutate/test_canonicalize.py::TestCanonicalize::test_non_markov" ]
[ "tests/test_mutate/test_canonicalize.py::TestCanonicalize::test_atomic", "tests/test_mutate/test_canonicalize.py::TestCanonicalize::test_derived_atomic", "tests/test_mutate/test_canonicalize.py::TestCanonicalize::test_mixed" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2021-06-02 00:36:15+00:00
mit
6,306
yaml2sbml-dev__yaml2sbml-32
diff --git a/yaml2sbml/__init__.py b/yaml2sbml/__init__.py index 7795a69..e923ece 100644 --- a/yaml2sbml/__init__.py +++ b/yaml2sbml/__init__.py @@ -1,3 +1,3 @@ from yaml2sbml.yaml2sbml import yaml2sbml -from yaml2sbml.yaml2PEtab import yaml2petab +from yaml2sbml.yaml2PEtab import yaml2petab, validate_petab_tables from yaml2sbml.yaml_validation import validate_yaml diff --git a/yaml2sbml/yaml2PEtab.py b/yaml2sbml/yaml2PEtab.py index c44b32b..2d53f3f 100644 --- a/yaml2sbml/yaml2PEtab.py +++ b/yaml2sbml/yaml2PEtab.py @@ -8,7 +8,7 @@ import petab import yaml -from .yaml2sbml import parse_yaml, load_yaml_file +from .yaml2sbml import _parse_yaml, _load_yaml_file from .yaml_validation import validate_yaml @@ -50,14 +50,14 @@ def yaml2petab(yaml_file: str, else: sbml_dir = os.path.join(output_dir, model_name + '.xml') - sbml_as_string = parse_yaml(yaml_file) + sbml_as_string = _parse_yaml(yaml_file) with open(sbml_dir, 'w') as f_out: f_out.write(sbml_as_string) # create petab tsv files: - yaml_dict = load_yaml_file(yaml_file) - create_petab_tables_from_yaml(yaml_dict, output_dir) + yaml_dict = _load_yaml_file(yaml_file) + _create_petab_tables_from_yaml(yaml_dict, output_dir) # create yaml file, that organizes the petab problem: if (petab_yaml_name is None) and (measurement_table_name is not None): @@ -72,10 +72,12 @@ def yaml2petab(yaml_file: str, sbml_dir, petab_yaml_name, measurement_table_name) + # validate PEtab tables: + validate_petab_tables(sbml_dir, output_dir) -def create_petab_tables_from_yaml(yaml_dict: dict, - output_dir: str): +def _create_petab_tables_from_yaml(yaml_dict: dict, + output_dir: str): """ Parses the yaml dict to a PEtab observable/parameter table. diff --git a/yaml2sbml/yaml2sbml.py b/yaml2sbml/yaml2sbml.py index a381285..70af213 100644 --- a/yaml2sbml/yaml2sbml.py +++ b/yaml2sbml/yaml2sbml.py @@ -23,14 +23,14 @@ def yaml2sbml(yaml_file: str, sbml_file: str): """ validate_yaml(yaml_file) - sbml_as_string = parse_yaml(yaml_file) + sbml_as_string = _parse_yaml(yaml_file) # write sbml file with open(sbml_file, 'w') as f_out: f_out.write(sbml_as_string) -def parse_yaml(yaml_file: str) -> str: +def _parse_yaml(yaml_file: str) -> str: """ Takes in a yaml file with the specification of ODEs, parses it, and returns the corresponding SBML string. @@ -52,7 +52,7 @@ def parse_yaml(yaml_file: str) -> str: model = document.createModel() model = _create_compartment(model) - yaml_dic = load_yaml_file(yaml_file) + yaml_dic = _load_yaml_file(yaml_file) _convert_yaml_blocks_to_sbml(model, yaml_dic) # check consistency and give warnings for errors in SBML: @@ -89,7 +89,7 @@ def _create_compartment(model: sbml.Model): return model -def load_yaml_file(yaml_file: str) -> dict: +def _load_yaml_file(yaml_file: str) -> dict: """ Loads yaml file and returns the resulting dictionary. 
@@ -123,13 +123,13 @@ def _convert_yaml_blocks_to_sbml(model: sbml.Model, yaml_dic: dict): Raises: """ - function_dict = {'time': read_time_block, - 'parameters': read_parameters_block, - 'assignments': read_assignments_block, - 'functions': read_functions_block, - 'observables': read_observables_block, - 'odes': read_odes_block, - 'conditions': read_conditions_block} + function_dict = {'time': _read_time_block, + 'parameters': _read_parameters_block, + 'assignments': _read_assignments_block, + 'functions': _read_functions_block, + 'observables': _read_observables_block, + 'odes': _read_odes_block, + 'conditions': _read_conditions_block} for block in yaml_dic: function_dict[block](model, yaml_dic[block]) @@ -137,7 +137,7 @@ def _convert_yaml_blocks_to_sbml(model: sbml.Model, yaml_dic: dict): return model -def read_time_block(model: sbml.Model, time_dic: dict): +def _read_time_block(model: sbml.Model, time_dic: dict): """ Reads and processes the time block. @@ -154,10 +154,10 @@ def read_time_block(model: sbml.Model, time_dic: dict): if time_dic['variable'] == 'time': return else: - create_time(model, time_dic['variable']) + _create_time(model, time_dic['variable']) -def create_time(model: sbml.Model, time_var: str): +def _create_time(model: sbml.Model, time_var: str): """ Creates the time variable, add assignment to 'time' @@ -180,7 +180,7 @@ def create_time(model: sbml.Model, time_var: str): time_assignment.setMath(sbml.parseL3Formula('time')) -def read_parameters_block(model: sbml.Model, parameter_list: list): +def _read_parameters_block(model: sbml.Model, parameter_list: list): """ Reads and processes the parameters block in the ODE yaml file. In particular, it reads the parameters and adds them to the given SBML model. @@ -198,12 +198,12 @@ def read_parameters_block(model: sbml.Model, parameter_list: list): """ for parameter_def in parameter_list: if 'nominalValue' in parameter_def.keys(): - create_parameter(model, parameter_def['parameterId'], parameter_def['nominalValue']) + _create_parameter(model, parameter_def['parameterId'], parameter_def['nominalValue']) else: - create_parameter(model, parameter_def['parameterId']) + _create_parameter(model, parameter_def['parameterId']) -def create_parameter(model: sbml.Model, parameter_id: str, value: str = None): +def _create_parameter(model: sbml.Model, parameter_id: str, value: str = None): """ Creates a parameter and adds it to the given SBML model. Units are set as dimensionless by default. @@ -229,7 +229,7 @@ def create_parameter(model: sbml.Model, parameter_id: str, value: str = None): k.setUnits('dimensionless') -def read_assignments_block(model: sbml.Model, assignment_list: list): +def _read_assignments_block(model: sbml.Model, assignment_list: list): """ Reads and processes the assignments block in the ODE yaml file. In particular, it reads the assignments and adds them to the given SBML file. @@ -248,10 +248,10 @@ def read_assignments_block(model: sbml.Model, assignment_list: list): """ for assignment_def in assignment_list: - create_assignment(model, assignment_def['assignmentId'], assignment_def['formula']) + _create_assignment(model, assignment_def['assignmentId'], assignment_def['formula']) -def create_assignment(model: sbml.Model, assignment_id: str, formula: str): +def _create_assignment(model: sbml.Model, assignment_id: str, formula: str): """ Creates an assignment rule, that assigns id to formula. 
@@ -275,7 +275,7 @@ def create_assignment(model: sbml.Model, assignment_id: str, formula: str): assignment_rule.setMath(sbml.parseL3Formula(formula)) -def read_functions_block(model: sbml.Model, functions_list: list): +def _read_functions_block(model: sbml.Model, functions_list: list): """ Reads and processes the functions block in the ODE yaml file. In particular, it reads the functions and adds them to the given SBML file @@ -293,11 +293,11 @@ def read_functions_block(model: sbml.Model, functions_list: list): """ for function_def in functions_list: - create_function(model, function_def['functionId'], function_def['arguments'], - function_def['formula']) + _create_function(model, function_def['functionId'], function_def['arguments'], + function_def['formula']) -def create_function(model: sbml.Model, function_id: str, arguments: str, formula: str): +def _create_function(model: sbml.Model, function_id: str, arguments: str, formula: str): """ Creates a functionDefinition and adds it to the given SBML model. @@ -318,7 +318,7 @@ def create_function(model: sbml.Model, function_id: str, arguments: str, formula f.setMath(math) -def read_odes_block(model: sbml.Model, odes_list: list): +def _read_odes_block(model: sbml.Model, odes_list: list): """ Reads and processes the odes block in the ODE yaml file. In particular, it reads the odes and adds a species for @@ -339,11 +339,11 @@ def read_odes_block(model: sbml.Model, odes_list: list): """ for ode_def in odes_list: - create_species(model, ode_def['stateId'], ode_def['initialValue']) - create_rate_rule(model, ode_def['stateId'], ode_def['rightHandSide']) + _create_species(model, ode_def['stateId'], ode_def['initialValue']) + _create_rate_rule(model, ode_def['stateId'], ode_def['rightHandSide']) -def create_species(model: sbml.Model, species_id: str, initial_amount: str): +def _create_species(model: sbml.Model, species_id: str, initial_amount: str): """ Creates a species and adds it to the given SBML model. Units are set as dimensionless by default. @@ -380,7 +380,7 @@ def create_species(model: sbml.Model, species_id: str, initial_amount: str): return s -def create_rate_rule(model: sbml.Model, species: str, formula: str): +def _create_rate_rule(model: sbml.Model, species: str, formula: str): """ Creates a SBML rateRule for a species and adds it to the given model. This is where the ODEs from the text file are encoded. @@ -402,7 +402,7 @@ def create_rate_rule(model: sbml.Model, species: str, formula: str): r.setMath(math_ast) -def read_observables_block(model: sbml.Model, observable_list: list): +def _read_observables_block(model: sbml.Model, observable_list: list): """ Reads an processes the observables block in the ODE yaml file. Since the observables are not represented in the SBML, it only gives @@ -421,7 +421,7 @@ def read_observables_block(model: sbml.Model, observable_list: list): 'only have an effect the output, when called via yaml2PEtab') -def read_conditions_block(model: sbml.Model, observable_list: list): +def _read_conditions_block(model: sbml.Model, observable_list: list): """ Reads an processes the conditions block in the ODE yaml file. Since the conditions are not represented in the SBML, it only gives
yaml2sbml-dev/yaml2sbml
af592aad955dd733d33525fcbe6416a088e6df5d
diff --git a/tests/test_yaml2sbml.py b/tests/test_yaml2sbml.py index 60c8bf0..24b0bb9 100644 --- a/tests/test_yaml2sbml.py +++ b/tests/test_yaml2sbml.py @@ -1,7 +1,7 @@ import os import unittest -from yaml2sbml.yaml2sbml import parse_yaml +from yaml2sbml.yaml2sbml import _parse_yaml class TestYaml2SBML(unittest.TestCase): @@ -20,7 +20,7 @@ class TestYaml2SBML(unittest.TestCase): ode_file = os.path.join(self.test_folder, 'ode_input2.yaml') expected_result_file = os.path.join(self.test_folder, 'true_sbml_output.xml') - sbml_contents = parse_yaml(ode_file) + sbml_contents = _parse_yaml(ode_file) with open(expected_result_file, 'r') as f_in: expected_sbml_contents = f_in.read() @@ -36,7 +36,7 @@ class TestYaml2SBML(unittest.TestCase): ode_file = os.path.join(self.test_folder, 'ode_input2.yaml') expected_result_file = os.path.join(self.test_folder, 'true_sbml_output.xml') - sbml_contents = parse_yaml(ode_file) + sbml_contents = _parse_yaml(ode_file) with open(expected_result_file, 'r') as f_in: expected_sbml_contents = f_in.read()
Clear separation between public and private API

It would be nice to have a clearer division between the public and private API of the tool. The public API should be importable via `import yaml2sbml`; the private one should only contain functions that start with an underscore.
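A short illustrative sketch of the requested convention, using the entry points and underscore-prefixed helpers introduced by the accompanying patch; the file names are placeholders:

```python
# Public API: stable entry points re-exported at package level.
import yaml2sbml

yaml2sbml.validate_yaml('model.yaml')            # input validation
yaml2sbml.yaml2sbml('model.yaml', 'model.xml')   # YAML -> SBML file

# Private API: underscore-prefixed helpers that live in submodules,
# are not re-exported, and may change without notice.
from yaml2sbml.yaml2sbml import _parse_yaml

sbml_string = _parse_yaml('model.yaml')          # returns the SBML as a string
```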
0.0
af592aad955dd733d33525fcbe6416a088e6df5d
[ "tests/test_yaml2sbml.py::TestYaml2SBML::test_yaml_import", "tests/test_yaml2sbml.py::TestYaml2SBML::test_yaml_import_observables" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-10-27 17:41:06+00:00
mit
6,307
yaml2sbml-dev__yaml2sbml-40
diff --git a/yaml2sbml/YamlModel.py b/yaml2sbml/YamlModel.py index c0ff853..7d26ce3 100644 --- a/yaml2sbml/YamlModel.py +++ b/yaml2sbml/YamlModel.py @@ -17,7 +17,7 @@ class YamlModel: """ Set up yaml model. """ - self._yaml_model = {'time': None, + self._yaml_model = {'time': {}, 'odes': [], 'parameters': [], 'assignments': [], @@ -174,7 +174,7 @@ class YamlModel: for (key, val) in self._yaml_model.items(): if val: - reduced_model_dict[key] = copy.deepcopy(val) + reduced_model_dict[key] = copy.deepcopy(val) return reduced_model_dict @@ -184,10 +184,13 @@ class YamlModel: def set_time(self, time_variable: str): - self._yaml_model['time'] = [time_variable] + self._yaml_model['time'] = {'variable': time_variable} def get_time(self): - return self._yaml_model['time'][0] + if self.is_set_time(): + return self._yaml_model['time']['variable'] + else: + return None # functions adding a value def add_parameter(self, diff --git a/yaml2sbml/yaml_schema.yaml b/yaml2sbml/yaml_schema.yaml index c4f5f11..b3640a8 100644 --- a/yaml2sbml/yaml_schema.yaml +++ b/yaml2sbml/yaml_schema.yaml @@ -6,11 +6,13 @@ description: yaml2sbml file format properties: time: + type: object items: variable: type: string description: defines a time variable, in case the right hand side of the ODE is time-dependent. - + required: + - variable parameters: type: array
yaml2sbml-dev/yaml2sbml
bfb9a46db7980e2476011a4471858ec7bc19f757
diff --git a/tests/test_YamlModel.py b/tests/test_YamlModel.py index a02407f..43842b5 100644 --- a/tests/test_YamlModel.py +++ b/tests/test_YamlModel.py @@ -254,6 +254,21 @@ class TestYamlModel(unittest.TestCase): model.delete_condition(condition_id) self.assertListEqual(model.get_condition_ids(), []) + def test_valid_model(self): + """ + Tests, whether the resulting models are valid. + """ + model = YamlModel() + + model.set_time('t') + model.add_ode(state_id='x', + right_hand_side='k_1 * x + t', + initial_value=0) + model.add_parameter(parameter_id='k_1', + nominal_value=1) + + model.validate_model() + if __name__ == '__main__': suite = unittest.TestSuite() diff --git a/tests/test_yaml2sbml/ode_input_invalid_time_1.yaml b/tests/test_yaml2sbml/ode_input_invalid_time_1.yaml new file mode 100644 index 0000000..08acc01 --- /dev/null +++ b/tests/test_yaml2sbml/ode_input_invalid_time_1.yaml @@ -0,0 +1,9 @@ +time: + - t +odes: +- initialValue: 0 + rightHandSide: k_1 * x + t + stateId: x +parameters: +- nominalValue: 1 + parameterId: k_1 diff --git a/tests/test_yaml2sbml/ode_input_invalid_time_2.yaml b/tests/test_yaml2sbml/ode_input_invalid_time_2.yaml new file mode 100644 index 0000000..dedcf75 --- /dev/null +++ b/tests/test_yaml2sbml/ode_input_invalid_time_2.yaml @@ -0,0 +1,9 @@ +time: + - variable: t +odes: +- initialValue: 0 + rightHandSide: k_1 * x + t + stateId: x +parameters: +- nominalValue: 1 + parameterId: k_1 diff --git a/tests/test_yaml_validation.py b/tests/test_yaml_validation.py index 19cce2f..0bebd4d 100644 --- a/tests/test_yaml_validation.py +++ b/tests/test_yaml_validation.py @@ -36,6 +36,18 @@ class TestYamlValidation(unittest.TestCase): with self.assertRaises(ValidationError): validate_yaml(file_in) + def test_catch_invalid_time_block_missing_variable_key(self): + # time block without kew word "variable" + file_in = os.path.join(self.test_folder, 'ode_input_invalid_time_1.yaml') + with self.assertRaises(ValidationError): + validate_yaml(file_in) + + def test_catch_invalid_time_block_as_array(self): + # time block as array instead of single object + file_in = os.path.join(self.test_folder, 'ode_input_invalid_time_2.yaml') + with self.assertRaises(ValidationError): + validate_yaml(file_in) + if __name__ == '__main__': suite = unittest.TestSuite()
Error in time variable specification with YamlModel

```python
from tempfile import NamedTemporaryFile

from yaml2sbml import YamlModel

model = YamlModel()
model.set_time('t')

with NamedTemporaryFile(suffix='.xml') as f:
    model.write_to_sbml(f.name, over_write=True)
```

produces

```
Traceback (most recent call last):
    model.write_to_sbml(f.name, over_write=True)
  File "yaml2sbml/YamlModel.py", line 116, in write_to_sbml
    sbml_as_string = _parse_yaml_dict(reduced_model_dict)
  File "yaml2sbml/yaml2sbml.py", line 73, in _parse_yaml_dict
    _convert_yaml_blocks_to_sbml(model, yaml_dict)
  File "yaml2sbml/yaml2sbml.py", line 152, in _convert_yaml_blocks_to_sbml
    function_dict[block](model, yaml_dic[block])
  File "yaml2sbml/yaml2sbml/yaml2sbml.py", line 171, in _read_time_block
    if time_dic['variable'] == 'time':
TypeError: list indices must be integers or slices, not str
```

This can be fixed by changing the [specification of the time variable](https://github.com/yaml2sbml-dev/yaml2sbml/blob/master/yaml2sbml/YamlModel.py#L187) from

```python
self._yaml_model['time'] = [time_variable]
```

to

```python
self._yaml_model['time'] = {'variable': time_variable}
```

In the context of the YAML model file, this would be equivalent to changing

```yaml
time:
  - t
```

to

```yaml
time:
  variable: t
```
0.0
bfb9a46db7980e2476011a4471858ec7bc19f757
[ "tests/test_yaml_validation.py::TestYamlValidation::test_catch_invalid_time_block_as_array", "tests/test_yaml_validation.py::TestYamlValidation::test_catch_invalid_time_block_missing_variable_key" ]
[ "tests/test_YamlModel.py::TestYamlModel::test_assignment", "tests/test_YamlModel.py::TestYamlModel::test_condition", "tests/test_YamlModel.py::TestYamlModel::test_function", "tests/test_YamlModel.py::TestYamlModel::test_load_and_write", "tests/test_YamlModel.py::TestYamlModel::test_observable", "tests/test_YamlModel.py::TestYamlModel::test_ode", "tests/test_YamlModel.py::TestYamlModel::test_parameter", "tests/test_YamlModel.py::TestYamlModel::test_time", "tests/test_YamlModel.py::TestYamlModel::test_valid_model", "tests/test_yaml_validation.py::TestYamlValidation::test_validate_yaml_empty_section", "tests/test_yaml_validation.py::TestYamlValidation::test_validate_yaml_typos", "tests/test_yaml_validation.py::TestYamlValidation::test_validate_yaml_typos_required", "tests/test_yaml_validation.py::TestYamlValidation::test_validate_yaml_valid_1", "tests/test_yaml_validation.py::TestYamlValidation::test_validate_yaml_valid_2" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-11-29 13:17:09+00:00
mit
6,308
yasufumy__pipelib-43
diff --git a/pipelib/core.py b/pipelib/core.py index 36f7b37..ec31d3e 100644 --- a/pipelib/core.py +++ b/pipelib/core.py @@ -80,17 +80,17 @@ class Dataset: yield from chain(dataset, *others) return PipelinedDataset(self, f) - def map_parallel(self, map_func, n=None, chunksize=1): + def map_parallel(self, map_func, n=None, chunksize=1, unordered=False): return PipelinedDataset( - self, parallel.MapParallel(map_func, n, chunksize)) + self, parallel.MapParallel(map_func, n, chunksize, unordered)) - def flat_map_parallel(self, map_func, n=None, chunksize=1): + def flat_map_parallel(self, map_func, n=None, chunksize=1, unordered=False): return PipelinedDataset( - self, parallel.FlatMapParallel(map_func, n, chunksize)) + self, parallel.FlatMapParallel(map_func, n, chunksize, unordered)) - def filter_parallel(self, predicate, n=None, chunksize=1): + def filter_parallel(self, predicate, n=None, chunksize=1, unordered=False): return PipelinedDataset( - self, parallel.FilterParallel(predicate, n, chunksize)) + self, parallel.FilterParallel(predicate, n, chunksize, unordered)) def all(self): return list(self) diff --git a/pipelib/parallel.py b/pipelib/parallel.py index 98dfdd8..c7433ee 100644 --- a/pipelib/parallel.py +++ b/pipelib/parallel.py @@ -4,21 +4,27 @@ import multiprocess class MapParallel: - def __init__(self, func, n=None, chunksize=1): + def __init__(self, func, n=None, chunksize=1, unordered=False): self._func = func self._n = n self._chunksize = chunksize + if not unordered: + self._map_method = 'imap' + else: + self._map_method = 'imap_unordered' def __call__(self, dataset): with multiprocess.Pool(self._n) as p: - yield from p.imap_unordered(self._func, dataset, self._chunksize) + yield from getattr(p, self._map_method)( + self._func, dataset, self._chunksize) class FlatMapParallel(MapParallel): def __call__(self, dataset): with multiprocess.Pool(self._n) as p: yield from chain.from_iterable( - p.imap_unordered(self._func, dataset, self._chunksize)) + getattr(p, self._map_method)( + self._func, dataset, self._chunksize)) class FilterParallel(MapParallel): @@ -37,4 +43,5 @@ class FilterParallel(MapParallel): with multiprocess.Pool(self._n) as p: yield from (x for x, keep in - p.imap_unordered(task, dataset, self._chunksize) if keep) + getattr(p, self._map_method)( + task, dataset, self._chunksize) if keep) diff --git a/setup.py b/setup.py index eba6daf..0854f8a 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ except ImportError: setup( name='pipelib', - version='0.2.2', + version='0.2.3', description='pipeline architecture data library', long_description=open('./README.md', encoding='utf-8').read(), long_description_content_type='text/markdown',
yasufumy/pipelib
817dc34a6be1568b6e40b51922b71f36aa5b2c31
diff --git a/tests/test_parallel.py b/tests/test_parallel.py index c65ddc8..5502153 100644 --- a/tests/test_parallel.py +++ b/tests/test_parallel.py @@ -9,24 +9,45 @@ class ParallelTestCase(TestCase): self.data = range(100) def test_map_parallel(self): - result = list(parallel.MapParallel(lambda x: x ** 2)(self.data)) - result.sort() expected = [x ** 2 for x in self.data] + # ordered + result = parallel.MapParallel(lambda x: x ** 2)(self.data) + for x, y in zip(result, expected): + self.assertEqual(x, y) + # unordered + result = list(parallel.MapParallel( + lambda x: x ** 2, unordered=True)(self.data)) + result.sort() self.assertListEqual(result, expected) def test_filter_parallel(self): def predicate(x): return x % 2 == 0 - result = list(parallel.FilterParallel(predicate)(self.data)) - result.sort() task = parallel.FilterParallel._FilterTask(predicate) - expected = [task(x) for x in self.data] - expected = [x[0] for x in expected if x[1]] + expected = [task(x)[0] for x in self.data if task(x)[1]] + + # ordered + result = parallel.FilterParallel(predicate)(self.data) + for x, y in zip(result, expected): + self.assertEqual(x, y) + + # unordered + result = list(parallel.FilterParallel( + predicate, unordered=True)(self.data)) + result.sort() self.assertListEqual(result, expected) def test_flat_map_parallel(self): - result = list(parallel.FlatMapParallel(lambda x: [x])(self.data)) - result.sort() expected = [x for x in self.data] + + # ordered + result = parallel.FlatMapParallel(lambda x: [x])(self.data) + for x, y in zip(result, expected): + self.assertEqual(x, y) + + # unordered + result = list(parallel.FlatMapParallel( + lambda x: [x], unordered=True)(self.data)) + result.sort() self.assertListEqual(result, expected)
Option for order/unorder

- [x] `Dataset.map_parallel`
- [x] `Dataset.flat_map_parallel`
- [x] `Dataset.filter_parallel`
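For reference, a brief usage sketch of the opt-in keyword, exercising the underlying operator directly; per the accompanying patch, `Dataset.map_parallel`, `flat_map_parallel`, and `filter_parallel` forward the same `unordered` argument:

```python
from pipelib import parallel

data = range(100)

# Default: results keep their input order (multiprocess Pool.imap under the hood).
ordered = list(parallel.MapParallel(lambda x: x ** 2)(data))

# unordered=True trades result order for throughput (Pool.imap_unordered).
unordered = list(parallel.MapParallel(lambda x: x ** 2, unordered=True)(data))

assert ordered == [x ** 2 for x in data]
assert sorted(unordered) == ordered
```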
0.0
817dc34a6be1568b6e40b51922b71f36aa5b2c31
[ "tests/test_parallel.py::ParallelTestCase::test_filter_parallel", "tests/test_parallel.py::ParallelTestCase::test_flat_map_parallel", "tests/test_parallel.py::ParallelTestCase::test_map_parallel" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-02-12 04:25:23+00:00
mit
6,309
yt-project__unyt-242
diff --git a/docs/conf.py b/docs/conf.py index e6e89f8..dddfa2f 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -73,7 +73,7 @@ release = unyt.__version__ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. diff --git a/unyt/array.py b/unyt/array.py index fae1e95..8e40f84 100644 --- a/unyt/array.py +++ b/unyt/array.py @@ -2155,12 +2155,14 @@ class unyt_quantity(unyt_array): def __round__(self): return type(self)(round(float(self)), self.units) - def reshape(self, shape, order="C"): + def reshape(self, *shape, order="C"): # this is necessary to support some numpy operations # natively, like numpy.meshgrid, which internally performs # reshaping, e.g., arr.reshape(1, -1), which doesn't affect the size, # but does change the object's internal representation to a >0D array # see https://github.com/yt-project/unyt/issues/224 + if len(shape) == 1: + shape = shape[0] if shape == () or shape is None: return super().reshape(shape, order=order) else:
yt-project/unyt
17e016e988fcc59326fcb53c566d56746ebce428
diff --git a/unyt/tests/test_unyt_array.py b/unyt/tests/test_unyt_array.py index 8cf00af..6132d0f 100644 --- a/unyt/tests/test_unyt_array.py +++ b/unyt/tests/test_unyt_array.py @@ -2673,3 +2673,11 @@ def test_reshape_quantity_noop(shape): b = a.reshape(shape) assert b.shape == a.shape == () assert type(b) is unyt_quantity + + +def test_reshape_quantity_via_shape_tuple(): + # this is necessary to support np.tile + a = unyt_quantity(1, "m") + b = a.reshape(-1, 1) + assert b.shape == (1, 1) + assert type(b) is unyt_array
REG: reshape regression

### Description

Testing yt against unyt's dev branch, I found 4 tests failing: https://github.com/yt-project/yt/runs/7277732032?check_suite_focus=true

I think there's only one actual regression, with `unyt_array.reshape`, so I have a hunch it's coming from #225. I'll inspect this further and see what I can do to fix the problem here before the next release.

For information: @jzuhone
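A minimal reproduction consistent with the regression test added in this record (`np.tile` passes the new shape to `reshape` as separate positional arguments rather than a tuple, which 0-d quantities did not handle on the dev branch):

```python
from unyt import unyt_array, unyt_quantity

a = unyt_quantity(1, "m")

# On the dev branch this raised, because unyt_quantity.reshape only
# accepted a single shape argument; after the fix it returns a 2-d array.
b = a.reshape(-1, 1)

assert b.shape == (1, 1)
assert type(b) is unyt_array
```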
0.0
17e016e988fcc59326fcb53c566d56746ebce428
[ "unyt/tests/test_unyt_array.py::test_reshape_quantity_via_shape_tuple" ]
[ "unyt/tests/test_unyt_array.py::test_comparisons", "unyt/tests/test_unyt_array.py::test_unyt_array_unyt_quantity_ops", "unyt/tests/test_unyt_array.py::test_selecting", "unyt/tests/test_unyt_array.py::test_iteration", "unyt/tests/test_unyt_array.py::test_unpickling_old_array", "unyt/tests/test_unyt_array.py::test_registry_association", "unyt/tests/test_unyt_array.py::test_to_value", "unyt/tests/test_unyt_array.py::test_astropy", "unyt/tests/test_unyt_array.py::test_pint", "unyt/tests/test_unyt_array.py::test_subclass", "unyt/tests/test_unyt_array.py::test_h5_io", "unyt/tests/test_unyt_array.py::test_equivalencies", "unyt/tests/test_unyt_array.py::test_ytarray_coercion", "unyt/tests/test_unyt_array.py::test_dimensionless_conversion", "unyt/tests/test_unyt_array.py::test_loadtxt_and_savetxt", "unyt/tests/test_unyt_array.py::test_trig_ufunc_degrees", "unyt/tests/test_unyt_array.py::test_initialization_different_registries", "unyt/tests/test_unyt_array.py::test_ones_and_zeros_like", "unyt/tests/test_unyt_array.py::test_coerce_iterable", "unyt/tests/test_unyt_array.py::test_bypass_validation", "unyt/tests/test_unyt_array.py::test_creation", "unyt/tests/test_unyt_array.py::test_conversion_from_int_types[8]", "unyt/tests/test_unyt_array.py::test_conversion_from_int_types[16]", "unyt/tests/test_unyt_array.py::test_conversion_from_int_types[32]", "unyt/tests/test_unyt_array.py::test_conversion_from_int_types[64]", "unyt/tests/test_unyt_array.py::test_overflow_warnings", "unyt/tests/test_unyt_array.py::test_name_attribute", "unyt/tests/test_unyt_array.py::test_mil", "unyt/tests/test_unyt_array.py::test_kip", "unyt/tests/test_unyt_array.py::test_ksi", "unyt/tests/test_unyt_array.py::test_complexvalued", "unyt/tests/test_unyt_array.py::test_string_formatting", "unyt/tests/test_unyt_array.py::test_invalid_expression_quantity_from_string[++1cm]", "unyt/tests/test_unyt_array.py::test_invalid_expression_quantity_from_string[--1cm]", "unyt/tests/test_unyt_array.py::test_invalid_expression_quantity_from_string[cm10]", "unyt/tests/test_unyt_array.py::test_invalid_expression_quantity_from_string[cm", "unyt/tests/test_unyt_array.py::test_invalid_expression_quantity_from_string[.cm]", "unyt/tests/test_unyt_array.py::test_invalid_unit_quantity_from_string[10", "unyt/tests/test_unyt_array.py::test_invalid_unit_quantity_from_string[50.", "unyt/tests/test_unyt_array.py::test_invalid_unit_quantity_from_string[.6", "unyt/tests/test_unyt_array.py::test_invalid_unit_quantity_from_string[infcm]", "unyt/tests/test_unyt_array.py::test_constant_type", "unyt/tests/test_unyt_array.py::test_composite_meshgrid", "unyt/tests/test_unyt_array.py::test_reshape_quantity_to_array[1-expected_output_shape0]", "unyt/tests/test_unyt_array.py::test_reshape_quantity_to_array[shape1-expected_output_shape1]", "unyt/tests/test_unyt_array.py::test_reshape_quantity_to_array[shape2-expected_output_shape2]", "unyt/tests/test_unyt_array.py::test_reshape_quantity_to_array[shape3-expected_output_shape3]", "unyt/tests/test_unyt_array.py::test_reshape_quantity_noop[shape0]", "unyt/tests/test_unyt_array.py::test_reshape_quantity_noop[None]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-07-11 11:28:12+00:00
bsd-3-clause
6,310
yukinarit__pyserde-208
diff --git a/pyproject.toml b/pyproject.toml index c19a5cf..51ec0ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,8 @@ pytest-cov = "*" pytest-watch = "*" pytest-flake8 = "*" coverage = "==4.5.4" -pdoc = { version = "~=8" } +pdoc = "~=11" +pygments = "<=2.11.2" mypy = { version = "==0.931", markers = "platform_python_implementation!='PyPy'" } more-itertools = "~=8.6.0" pre-commit = "==v2.10.1" diff --git a/serde/compat.py b/serde/compat.py index 8c1af36..b5e2143 100644 --- a/serde/compat.py +++ b/serde/compat.py @@ -121,6 +121,11 @@ def typename(typ) -> str: else: return 'Union' elif is_list(typ): + # Workaround for python 3.7. + # get_args for the bare List returns parameter T. + if typ is List: + return 'List' + args = type_args(typ) if args: et = typename(args[0]) @@ -128,6 +133,11 @@ def typename(typ) -> str: else: return 'List' elif is_set(typ): + # Workaround for python 3.7. + # get_args for the bare Set returns parameter T. + if typ is Set: + return 'Set' + args = type_args(typ) if args: et = typename(args[0]) @@ -135,6 +145,11 @@ def typename(typ) -> str: else: return 'Set' elif is_dict(typ): + # Workaround for python 3.7. + # get_args for the bare Dict returns parameter K, V. + if typ is Dict: + return 'Dict' + args = type_args(typ) if args and len(args) == 2: kt = typename(args[0]) diff --git a/serde/de.py b/serde/de.py index e76660b..3416505 100644 --- a/serde/de.py +++ b/serde/de.py @@ -562,7 +562,9 @@ class Renderer: res = self.default(arg, res) if self.custom and not arg.deserializer: - return self.custom_class_deserializer(arg, res) + # Rerender the code for default deserializer. + default = Renderer(self.func, self.cls, None).render(arg) + return self.custom_class_deserializer(arg, default) else: return res
yukinarit/pyserde
508c54d86a5e1fb954eea865e32ea426ee98f1cc
diff --git a/tests/test_compat.py b/tests/test_compat.py index f0819ff..8991865 100644 --- a/tests/test_compat.py +++ b/tests/test_compat.py @@ -1,5 +1,6 @@ import sys from dataclasses import dataclass +from datetime import datetime from typing import Dict, Generic, List, NewType, Optional, Set, Tuple, TypeVar, Union import serde @@ -67,9 +68,18 @@ def test_typename(): class Foo(Generic[T]): nested: Bar[T] - assert typename(Optional) == "Optional" assert typename(Foo[int]) == "Foo" assert typename(Foo) == "Foo" + assert typename(List[int]) == "List[int]" + assert typename(Optional) == "Optional" + assert typename(List) == "List" + assert typename(List[int]) == "List[int]" + assert typename(Tuple) == "Tuple" + assert typename(Tuple[int, str]) == "Tuple[int, str]" + assert typename(Dict) == "Dict" + assert typename(Dict[str, Foo]) == "Dict[str, Foo]" + assert typename(Set) == "Set" + assert typename(Set[int]) == "Set[int]" def test_iter_types(): @@ -79,6 +89,17 @@ def test_iter_types(): assert [Tuple, int, str, bool, float] == list(iter_types(Tuple[int, str, bool, float])) assert [PriOpt, Optional, int, Optional, str, Optional, float, Optional, bool] == list(iter_types(PriOpt)) + @serde.serde + class Foo: + a: int + b: datetime + c: datetime + d: Optional[str] = None + e: Union[str, int] = 10 + f: List[int] = serde.field(default_factory=list) + + assert [Foo, int, datetime, datetime, Optional, str, Union, str, int, List, int] == list(iter_types(Foo)) + def test_iter_unions(): assert [Union[str, int]] == list(iter_unions(Union[str, int])) diff --git a/tests/test_custom.py b/tests/test_custom.py index 060f046..dc75515 100644 --- a/tests/test_custom.py +++ b/tests/test_custom.py @@ -2,7 +2,7 @@ Tests for custom serializer/deserializer. """ from datetime import datetime -from typing import Optional, Union +from typing import List, Optional, Union import pytest @@ -81,14 +81,15 @@ def test_custom_class_serializer(): c: datetime d: Optional[str] = None e: Union[str, int] = 10 + f: List[int] = field(default_factory=list) dt = datetime(2021, 1, 1, 0, 0, 0) - f = Foo(10, dt, dt) + f = Foo(10, dt, dt, f=[1, 2, 3]) - assert to_json(f) == '{"a": 10, "b": "01/01/21", "c": "01/01/21", "d": null, "e": 10}' + assert to_json(f) == '{"a": 10, "b": "01/01/21", "c": "01/01/21", "d": null, "e": 10, "f": [1, 2, 3]}' assert f == from_json(Foo, to_json(f)) - assert to_tuple(f) == (10, '01/01/21', '01/01/21', None, 10) + assert to_tuple(f) == (10, '01/01/21', '01/01/21', None, 10, [1, 2, 3]) assert f == from_tuple(Foo, to_tuple(f)) def fallback(_, __):
ArgumentError for List when using custom deserializer

Hey, thanks for the fast fix on #190 ! While testing the fix I found the following:

```python
from serde import serde, SerdeSkip
from serde.json import from_json
from dataclasses import dataclass
from typing import Optional


def des(cls, o):
    raise SerdeSkip()


@serde(deserializer=des)
@dataclass
class Foo:
    a: list[str]


print(from_json(Foo, '{"a": []}'))       # -> Foo(a=[])
print(from_json(Foo, '{"a": ["foo"]}'))  # -> AttributeError: 'str' object has no attribute 'get'
```

Greetings
0.0
508c54d86a5e1fb954eea865e32ea426ee98f1cc
[ "tests/test_custom.py::test_custom_class_serializer" ]
[ "tests/test_compat.py::test_types", "tests/test_compat.py::test_typename", "tests/test_compat.py::test_iter_types", "tests/test_compat.py::test_iter_unions", "tests/test_compat.py::test_type_args", "tests/test_compat.py::test_union_args", "tests/test_compat.py::test_is_instance", "tests/test_compat.py::test_is_generic", "tests/test_custom.py::test_custom_field_serializer", "tests/test_custom.py::test_raise_error", "tests/test_custom.py::test_wrong_signature", "tests/test_custom.py::test_field_serialize_override_class_serializer", "tests/test_custom.py::test_override_by_default_serializer" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-03-21 15:34:35+00:00
mit
6,311
yukinarit__pyserde-229
diff --git a/examples/union_operator.py b/examples/union_operator.py index a6d2f68..cb78577 100644 --- a/examples/union_operator.py +++ b/examples/union_operator.py @@ -17,6 +17,7 @@ class Foo: a: int | str b: dict[str, int] | list[int] c: Bar | Baz + d: str | None = None # Should be treated as Optional def main(): diff --git a/serde/compat.py b/serde/compat.py index b5e2143..dc8b5a5 100644 --- a/serde/compat.py +++ b/serde/compat.py @@ -354,16 +354,16 @@ def is_union(typ) -> bool: True """ - is_union_type = False + # Python 3.10 Union operator e.g. str | int if sys.version_info[:2] >= (3, 10): try: - is_union_type = isinstance(typ, types.UnionType) and not is_opt(typ) + if isinstance(typ, types.UnionType): + return True except Exception: pass - typing_union = typing_inspect.is_union_type(typ) and not is_opt(typ) - - return is_union_type or typing_union + # typing.Union + return typing_inspect.is_union_type(typ) def is_opt(typ) -> bool: @@ -377,9 +377,22 @@ def is_opt(typ) -> bool: >>> is_opt(None.__class__) False """ + + # Python 3.10 Union operator e.g. str | None + is_union_type = False + if sys.version_info[:2] >= (3, 10): + try: + if isinstance(typ, types.UnionType): + is_union_type = True + except Exception: + pass + + # typing.Optional + is_typing_union = typing_inspect.is_optional_type(typ) + args = type_args(typ) if args: - return typing_inspect.is_optional_type(typ) and len(args) == 2 and not is_none(args[0]) and is_none(args[1]) + return (is_union_type or is_typing_union) and len(args) == 2 and not is_none(args[0]) and is_none(args[1]) else: return typ is Optional
yukinarit/pyserde
bd9039ef1ffac752f7566eb39fc2b3c0a18f7728
diff --git a/tests/test_compat.py b/tests/test_compat.py index 8991865..2f6ba0b 100644 --- a/tests/test_compat.py +++ b/tests/test_compat.py @@ -37,10 +37,10 @@ def test_types(): assert is_dict(Dict[str, int]) assert is_dict(Dict) assert is_opt(Optional[int]) + assert is_opt(Union[int, None]) assert is_union(Union[int, str]) assert is_union(Union[Optional[int], Optional[str]]) - assert is_opt(Optional[int]) - assert not is_union(Optional[int]) + assert is_union(Optional[int]) assert not is_opt(Union[Optional[int], Optional[str]]) assert is_union(Union[Optional[int], Optional[str]]) @@ -58,6 +58,11 @@ def test_types(): assert is_tuple(tuple[int, int, int]) assert is_dict(dict[str, int]) + if sys.version_info[:3] >= (3, 10, 0): + assert is_union(str | int) + assert is_union(str | None) + assert is_opt(str | None) + def test_typename(): @serde.serde
Python 3.10 union operator with None (e.g. " str | None") can't capture missing field

I'm running Python 3.10 and Pyserde 0.7.2. This

```python
@serde
class Test:
    x: str | None

print(from_json(Test, '{}'))
```

causes a crash

```
Traceback (most recent call last):
  File "/home/seho/.local/lib/python3.10/site-packages/serde/de.py", line 304, in from_obj
    return serde_scope.funcs[FROM_DICT](
  File "<string>", line 11, in from_dict
KeyError: 'x'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/seho/pts15_test/deleteme.py", line 11, in <module>
    print(from_json(Test, '{}'))
  File "/home/seho/.local/lib/python3.10/site-packages/serde/json.py", line 47, in from_json
    return from_dict(c, de.deserialize(s, **opts), reuse_instances=False)
  File "/home/seho/.local/lib/python3.10/site-packages/serde/de.py", line 376, in from_dict
    return from_obj(cls, o, named=True, reuse_instances=reuse_instances)
  File "/home/seho/.local/lib/python3.10/site-packages/serde/de.py", line 353, in from_obj
    raise SerdeError(e)
serde.compat.SerdeError: 'x'
```

While using `Optional[str]` works fine

```python
@serde
class Test:
    x: Optional[str]

print(from_json(Test, '{}'))
```

Output: `test(x=None)`
0.0
bd9039ef1ffac752f7566eb39fc2b3c0a18f7728
[ "tests/test_compat.py::test_types" ]
[ "tests/test_compat.py::test_typename", "tests/test_compat.py::test_iter_types", "tests/test_compat.py::test_iter_unions", "tests/test_compat.py::test_type_args", "tests/test_compat.py::test_union_args", "tests/test_compat.py::test_is_instance", "tests/test_compat.py::test_is_generic" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-05-16 14:07:02+00:00
mit
6,312
yukinarit__pyserde-264
diff --git a/serde/de.py b/serde/de.py index 240df8b..a88a2e7 100644 --- a/serde/de.py +++ b/serde/de.py @@ -648,14 +648,11 @@ class Renderer: if data.get("f") is not None else None' """ value = arg[0] - if has_default(arg): - return self.render(value) + if arg.iterbased: + exists = f'{arg.data} is not None' else: - if arg.iterbased: - exists = f'{arg.data} is not None' - else: - exists = f'{arg.datavar}.get("{arg.conv_name()}") is not None' - return f'({self.render(value)}) if {exists} else None' + exists = f'{arg.datavar}.get("{arg.conv_name()}") is not None' + return f'({self.render(value)}) if {exists} else None' def list(self, arg: DeField) -> str: """
yukinarit/pyserde
1f48c9ccd358f6d0eb5d0afd2ff87a0b47300ade
diff --git a/tests/test_union.py b/tests/test_union.py index be7317a..e6fa792 100644 --- a/tests/test_union.py +++ b/tests/test_union.py @@ -225,6 +225,29 @@ def test_optional_union_with_complex_types(): assert a_none == from_dict(A, to_dict(a_none, reuse_instances=True), reuse_instances=True) +def test_optional_complex_type_with_default(): + for T, default in [ + (IPv4Address, IPv4Address("127.0.0.1")), + (UUID, UUID("9c244009-c60d-452b-a378-b8afdc0c2d90")), + ]: + + @serde + class A: + id: Optional[T] = None + + a = A(default) + assert a == from_dict(A, to_dict(a, reuse_instances=False), reuse_instances=False) + assert a == from_dict(A, to_dict(a, reuse_instances=True), reuse_instances=True) + + a_none = A(None) + assert a_none == from_dict(A, to_dict(a_none, reuse_instances=False), reuse_instances=False) + assert a_none == from_dict(A, to_dict(a_none, reuse_instances=True), reuse_instances=True) + + a_default = A() + assert a_default == from_dict(A, to_dict(a_default, reuse_instances=False), reuse_instances=False) + assert a_default == from_dict(A, to_dict(a_default, reuse_instances=True), reuse_instances=True) + + def test_union_with_complex_types_in_containers(): @serde class A:
Unable to deserialize null value for type `Optional[UUID] = None`

Hi, pyserde seems to mishandle this very specific corner case. Minimum example:

```python
from serde import serde, to_dict, from_dict
from uuid import UUID
from typing import Optional
from dataclasses import dataclass


@serde
@dataclass
class NameId:
    name: str
    id: Optional[UUID] = None  # <====== UUID and `None` here is the problem


x = NameId("Fred", None)
j = to_dict(x)
y = from_dict(NameId, j)
```

Everything before the last line works fine. The last line gives the error below.

If I remove the default for `id` (i.e., remove `= None`), it works fine. If I change the type of `id` to `Optional[int] = None`, it works fine. So it's specific to `Optional[UUID] = None`. From the error, it seems like it's trying to parse `None` as a `UUID`.

Here's the error:

```
Traceback (most recent call last):
  File "/Users/kevin.squire/Library/Caches/pypoetry/virtualenvs/pyserde-Y2i1skxI-py3.10/lib/python3.10/site-packages/serde/de.py", line 338, in from_obj
    res = serde_scope.funcs[func_name](c, maybe_generic=maybe_generic, data=o, reuse_instances=reuse_instances)
  File "<string>", line 13, in from_dict
  File "/Users/kevin.squire/.pyenv/versions/3.10.5_x86/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/uuid.py", line 171, in __init__
    raise TypeError('one of the hex, bytes, bytes_le, fields, '
TypeError: one of the hex, bytes, bytes_le, fields, or int arguments must be given

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/Users/kevin.squire/tmp/pyserde/tmp/test.py", line 15, in <module>
    y = from_dict(NameId, j)
  File "/Users/kevin.squire/Library/Caches/pypoetry/virtualenvs/pyserde-Y2i1skxI-py3.10/lib/python3.10/site-packages/serde/de.py", line 414, in from_dict
    return from_obj(cls, o, named=True, reuse_instances=reuse_instances)
  File "/Users/kevin.squire/Library/Caches/pypoetry/virtualenvs/pyserde-Y2i1skxI-py3.10/lib/python3.10/site-packages/serde/de.py", line 391, in from_obj
    raise SerdeError(e)
serde.compat.SerdeError: one of the hex, bytes, bytes_le, fields, or int arguments must be given
```
0.0
1f48c9ccd358f6d0eb5d0afd2ff87a0b47300ade
[ "tests/test_union.py::test_optional_complex_type_with_default" ]
[ "tests/test_union.py::test_union", "tests/test_union.py::test_union_optional", "tests/test_union.py::test_union_containers", "tests/test_union.py::test_union_with_literal", "tests/test_union.py::test_union_with_complex_types", "tests/test_union.py::test_union_with_complex_types_and_reuse_instances", "tests/test_union.py::test_optional_union_with_complex_types", "tests/test_union.py::test_union_with_complex_types_in_containers", "tests/test_union.py::test_union_exception_if_nothing_matches", "tests/test_union.py::test_union_in_union", "tests/test_union.py::test_union_in_other_type", "tests/test_union.py::test_union_rename_all", "tests/test_union.py::test_union_with_list_of_other_class", "tests/test_union.py::test_union_with_union_in_nested_types", "tests/test_union.py::test_union_with_union_in_nested_tuple", "tests/test_union.py::test_generic_union", "tests/test_union.py::test_external_tagging", "tests/test_union.py::test_internal_tagging", "tests/test_union.py::test_adjacent_tagging", "tests/test_union.py::test_untagged" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-09-01 18:55:42+00:00
mit
6,313
yukinarit__pyserde-441
diff --git a/serde/core.py b/serde/core.py index 63449e2..7668d81 100644 --- a/serde/core.py +++ b/serde/core.py @@ -44,6 +44,7 @@ from .compat import ( is_list, is_literal, is_new_type_primitive, + is_any, is_opt, is_set, is_tuple, @@ -382,6 +383,8 @@ def is_instance(obj: Any, typ: Any) -> bool: return isinstance(obj, inner) else: return False + elif is_any(typ): + return True elif typ is Ellipsis: return True else:
yukinarit/pyserde
76e919cc5e2692eb2ba7d194af0655af898191ae
diff --git a/tests/test_union.py b/tests/test_union.py index af02d6d..f482028 100644 --- a/tests/test_union.py +++ b/tests/test_union.py @@ -2,7 +2,7 @@ import logging import sys from dataclasses import dataclass from ipaddress import IPv4Address -from typing import Dict, FrozenSet, Generic, List, NewType, Optional, Tuple, TypeVar, Union +from typing import Dict, FrozenSet, Generic, List, NewType, Optional, Tuple, TypeVar, Union, Any from uuid import UUID import pytest @@ -772,3 +772,21 @@ def test_union_frozenset_with_prim(): a: Union[FrozenSet[int], int] assert to_dict(Foo(frozenset({1}))) == {"a": {1}} + + +def test_union_with_any(): + @dataclass + class FooWithString: + foo: str + + @dataclass + class BarWithDict: + bar: Dict[str, Any] + + @serde(tagging=Untagged) + @dataclass + class Class: + foobars: List[Union[FooWithString, BarWithDict]] + + c = Class([FooWithString("string"), BarWithDict({"key": "value"})]) + assert c == from_json(Class, to_json(c))
serde.compat.SerdeError: Can not serialize with untagged union of dict[str, Any]

`0.12.3` on Python 3.11.4:

```py
from dataclasses import dataclass
from typing import Any
from serde import Untagged, field, serde
from serde.yaml import from_yaml, to_yaml


@dataclass
class FooWithString:
    foo: str


@dataclass
class BarWithDict:
    bar: dict[str, Any]  # remove [str, Any] to fix


@serde(tagging=Untagged)
@dataclass
class Klass:
    foobars: list[FooWithString | BarWithDict]


good_yaml = """
foobars:
  - foo: string
  - bar:
      key: value
"""

if __name__ == '__main__':
    config = from_yaml(Klass, good_yaml)
    print(to_yaml(config))
```

```
serde.compat.SerdeError: Can not serialize BarWithDict(bar={'key': 'value'}) of type BarWithDict for Union[FooWithString, BarWithDict]
```

---

Weirdly the more trivial case `list[BarWithDict]` works fine, as does just `BarWithDict`. It's something specifically caused by it being in an untagged union list 🤔
0.0
76e919cc5e2692eb2ba7d194af0655af898191ae
[ "tests/test_union.py::test_union_with_any" ]
[ "tests/test_union.py::test_union", "tests/test_union.py::test_union_optional", "tests/test_union.py::test_union_containers", "tests/test_union.py::test_union_with_literal", "tests/test_union.py::test_union_with_complex_types", "tests/test_union.py::test_union_with_complex_types_and_reuse_instances", "tests/test_union.py::test_optional_union_with_complex_types", "tests/test_union.py::test_optional_complex_type_with_default", "tests/test_union.py::test_union_with_complex_types_in_containers", "tests/test_union.py::test_union_exception_if_nothing_matches", "tests/test_union.py::test_union_in_union", "tests/test_union.py::test_union_in_other_type", "tests/test_union.py::test_union_rename_all", "tests/test_union.py::test_union_with_list_of_other_class", "tests/test_union.py::test_union_with_union_in_nested_types", "tests/test_union.py::test_union_with_union_in_nested_tuple", "tests/test_union.py::test_generic_union", "tests/test_union.py::test_external_tagging", "tests/test_union.py::test_internal_tagging", "tests/test_union.py::test_adjacent_tagging", "tests/test_union.py::test_untagged", "tests/test_union.py::test_newtype_and_untagged_union", "tests/test_union.py::test_union_directly", "tests/test_union.py::test_union_frozenset_with_prim" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-11-11 06:48:07+00:00
mit
6,314
yukinarit__pyserde-487
diff --git a/serde/de.py b/serde/de.py index 5451cf6..5674f0a 100644 --- a/serde/de.py +++ b/serde/de.py @@ -438,7 +438,10 @@ def from_obj(c: Type[T], o: Any, named: bool, reuse_instances: Optional[bool]) - try: thisfunc = functools.partial(from_obj, named=named, reuse_instances=reuse_instances) if is_dataclass_without_de(c): - deserialize(c) + # Do not automatically implement beartype if dataclass without serde decorator + # is passed, because it is surprising for users + # See https://github.com/yukinarit/pyserde/issues/480 + deserialize(c, type_check=disabled) res = deserializable_to_obj(c) elif is_deserializable(c): res = deserializable_to_obj(c) diff --git a/serde/se.py b/serde/se.py index eca1dcc..a69544f 100644 --- a/serde/se.py +++ b/serde/se.py @@ -366,7 +366,10 @@ def to_obj( if o is None: return None if is_dataclass_without_se(o): - serialize(type(o)) + # Do not automatically implement beartype if dataclass without serde decorator + # is passed, because it is surprising for users + # See https://github.com/yukinarit/pyserde/issues/480 + serialize(type(o), type_check=disabled) return serializable_to_obj(o) elif is_serializable(o): return serializable_to_obj(o)
yukinarit/pyserde
f4ab3800f4121a1be96d72fb1e237a1542972015
diff --git a/tests/test_type_check.py b/tests/test_type_check.py index 86caad6..9f3dd63 100644 --- a/tests/test_type_check.py +++ b/tests/test_type_check.py @@ -1,3 +1,4 @@ +from dataclasses import dataclass import datetime import pathlib from beartype.roar import BeartypeCallHintViolation @@ -15,6 +16,7 @@ from beartype.typing import ( import pytest import serde +import serde.json from . import data @@ -100,6 +102,18 @@ def test_type_check_strict(T: Any, data: Any, exc: bool) -> None: serde.from_dict(C, d) +def test_type_check_disabled_for_dataclass_without_serde() -> None: + @dataclass + class Foo: + value: int + + f = Foo("100") # type: ignore + data = serde.json.to_json(f) + assert f == serde.json.from_json(Foo, data) + + f = Foo("100") # type: ignore + + def test_uncoercible() -> None: @serde.serde(type_check=serde.coerce) class Foo:
Bear type checking "leaks" to classes without the @serde decorator

Hi,

With update 0.14, it seems that bear type checking is being applied to classes passed as arguments to `serde.json.to_json`, even if they do not have the `@serde` decorator applied to them.

In general I prefer not to use the `@serde` decorator on my model classes, instead using `serde` only at the I/O layer with the `serde.json.from_json` and `serde.json.to_json` functions. However, it seems that after calling `to_json` on a dataclass without the `@serde` decorator, bear type checking is applied to that class from that point onward, which is surprising.

Example:

```python
from dataclasses import dataclass
import serde.json


@dataclass
class Foo:
    value: int


# This passes.
f = Foo("100")

data = serde.json.to_json(f, cls=Foo)

# After to_json(), this starts to fail with:
# beartype.roar.BeartypeCallHintParamViolation: Method __main__.Foo.__init__() parameter value='100' violates type hint <class 'int'>, as str '100' not instance of int.
f = Foo("100")
```

This is surprising to the user, even more so because model classes start to validate their types *after* some I/O has been performed, which might happen at different points in the application.

A workaround for now is to decorate `Foo` with `@serde(type_check=disabled)`; however, this is not ideal given that the functions `to_json` and `from_json` do not convey that they might change the runtime behavior of the class being passed as a parameter.

> [!Note]
> This example is simplified, but in my code it was more complex, where I had an attribute declared as `tuple[int, ...]` but would accept any `Sequence[int]` at runtime, being converted to `tuple[int, ...]` during `__post_init__` to ensure the constructed object would have the correct runtime type. The reason for this is that the `Sequence[int]` comes from an external source, and sometimes would be provided as `list[int]` or `tuple[int, ...]`.
0.0
f4ab3800f4121a1be96d72fb1e237a1542972015
[ "tests/test_type_check.py::test_type_check_disabled_for_dataclass_without_serde" ]
[ "tests/test_type_check.py::test_type_check_strict[int-10-False]", "tests/test_type_check.py::test_type_check_strict[int-10.0-True]", "tests/test_type_check.py::test_type_check_strict[int-10-True]", "tests/test_type_check.py::test_type_check_strict[int-True-False]", "tests/test_type_check.py::test_type_check_strict[float-10-True0]", "tests/test_type_check.py::test_type_check_strict[float-10.0-False]", "tests/test_type_check.py::test_type_check_strict[float-10-True1]", "tests/test_type_check.py::test_type_check_strict[float-True-True]", "tests/test_type_check.py::test_type_check_strict[str-10-True]", "tests/test_type_check.py::test_type_check_strict[str-10.0-True]", "tests/test_type_check.py::test_type_check_strict[str-10-False]", "tests/test_type_check.py::test_type_check_strict[str-True-True]", "tests/test_type_check.py::test_type_check_strict[bool-10-True0]", "tests/test_type_check.py::test_type_check_strict[bool-10.0-True]", "tests/test_type_check.py::test_type_check_strict[bool-10-True1]", "tests/test_type_check.py::test_type_check_strict[bool-True-False]", "tests/test_type_check.py::test_type_check_strict[list-data16-False]", "tests/test_type_check.py::test_type_check_strict[list-data17-True]", "tests/test_type_check.py::test_type_check_strict[list-data18-False]", "tests/test_type_check.py::test_type_check_strict[list-data19-True]", "tests/test_type_check.py::test_type_check_strict[list-data20-True]", "tests/test_type_check.py::test_type_check_strict[list-data21-False]", "tests/test_type_check.py::test_type_check_strict[list-data22-True]", "tests/test_type_check.py::test_type_check_strict[list-data23-False]", "tests/test_type_check.py::test_type_check_strict[list-data24-True]", "tests/test_type_check.py::test_type_check_strict[list-data25-False]", "tests/test_type_check.py::test_type_check_strict[list-data26-True]", "tests/test_type_check.py::test_type_check_strict[list-data27-False]", "tests/test_type_check.py::test_type_check_strict[dict-data28-False]", "tests/test_type_check.py::test_type_check_strict[dict-data30-False]", "tests/test_type_check.py::test_type_check_strict[dict-data31-True]", "tests/test_type_check.py::test_type_check_strict[set-data32-False]", "tests/test_type_check.py::test_type_check_strict[set-data33-False]", "tests/test_type_check.py::test_type_check_strict[set-data34-True]", "tests/test_type_check.py::test_type_check_strict[tuple-data35-False]", "tests/test_type_check.py::test_type_check_strict[tuple-data36-True]", "tests/test_type_check.py::test_type_check_strict[tuple-data37-False]", "tests/test_type_check.py::test_type_check_strict[tuple-data38-True]", "tests/test_type_check.py::test_type_check_strict[tuple-data39-False]", "tests/test_type_check.py::test_type_check_strict[tuple-data40-True]", "tests/test_type_check.py::test_type_check_strict[tuple-data41-False]", "tests/test_type_check.py::test_type_check_strict[tuple-data42-False]", "tests/test_type_check.py::test_type_check_strict[E-E.S-False]", "tests/test_type_check.py::test_type_check_strict[E-IE.V0-True]", "tests/test_type_check.py::test_type_check_strict[T45-10-False]", "tests/test_type_check.py::test_type_check_strict[T46-foo-False]", "tests/test_type_check.py::test_type_check_strict[T47-10.0-True]", "tests/test_type_check.py::test_type_check_strict[T48-data48-False]", "tests/test_type_check.py::test_type_check_strict[date-data49-False]", "tests/test_type_check.py::test_type_check_strict[Path-data50-False]", "tests/test_type_check.py::test_type_check_strict[Path-foo-True]", 
"tests/test_type_check.py::test_uncoercible", "tests/test_type_check.py::test_coerce" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2024-03-09 15:09:41+00:00
mit
6,315
zalando-stups__pierone-cli-33
diff --git a/.gitignore b/.gitignore index 1e365e8..e60d986 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ htmlcov/ virtualenv *.sw* .cache/ +.tox/ diff --git a/pierone/cli.py b/pierone/cli.py index 467ff32..1af5790 100644 --- a/pierone/cli.py +++ b/pierone/cli.py @@ -1,20 +1,18 @@ import datetime import os import re - -import click - -import requests import tarfile import tempfile import time -import zign.api -from clickclick import error, AliasedGroup, print_table, OutputFormat, UrlType -from .api import docker_login, request, get_latest_tag, DockerImage +import click import pierone +import requests import stups_cli.config +import zign.api +from clickclick import AliasedGroup, OutputFormat, UrlType, error, print_table +from .api import DockerImage, docker_login, get_latest_tag, request KEYRING_KEY = 'pierone' @@ -24,6 +22,48 @@ output_option = click.option('-o', '--output', type=click.Choice(['text', 'json' help='Use alternative output format') url_option = click.option('--url', help='Pier One URL', metavar='URI') +clair_url_option = click.option('--clair-url', help='Clair URL', metavar='CLAIR_URI') + +CVE_STYLES = { + 'TOO_OLD': { + 'bold': True, + 'fg': 'red' + }, + 'NOT_PROCESSED_YET': { + 'bold': True, + 'fg': 'red' + }, + 'COULDNT_FIGURE_OUT': { + 'bold': True, + 'fg': 'red' + }, + 'CRITICAL': { + 'bold': True, + 'fg': 'red' + }, + 'HIGH': { + 'bold': True, + 'fg': 'red' + }, + 'MEDIUM': { + 'fg': 'yellow' + }, + 'LOW': { + 'fg': 'yellow' + }, + 'NEGLIGIBLE': { + 'fg': 'yellow' + }, + 'UNKNOWN': { + 'fg': 'yellow' + }, + 'PENDING': { + 'fg': 'yellow' + }, + 'NO_CVES_FOUND': { + 'fg': 'green' + } +} TEAM_PATTERN_STR = r'[a-z][a-z0-9-]+' TEAM_PATTERN = re.compile(r'^{}$'.format(TEAM_PATTERN_STR)) @@ -54,6 +94,19 @@ def parse_time(s: str) -> float: return None +def parse_severity(value, clair_id_exists): + '''Parse severity values to displayable values''' + if value is None and clair_id_exists: + return 'NOT_PROCESSED_YET' + elif value is None: + return 'TOO_OLD' + + value = re.sub('^clair:', '', value) + value = re.sub('(?P<upper_letter>(?<=[a-z])[A-Z])', '_\g<upper_letter>', value) + + return value.upper() + + def print_version(ctx, param, value): if not value or ctx.resilient_parsing: return @@ -82,6 +135,28 @@ def set_pierone_url(config: dict, url: str) -> None: return url +def set_clair_url(config: dict, url: str) -> None: + '''Read Clair URL from cli, from config file or from stdin.''' + url = url or config.get('clair_url') + + while not url: + url = click.prompt('Please enter the Clair URL', type=UrlType()) + + try: + requests.get(url, timeout=5) + except: + error('Could not reach {}'.format(url)) + url = None + + if '://' not in url: + # issue 63: gracefully handle URLs without scheme + url = 'https://{}'.format(url) + + config['clair_url'] = url + stups_cli.config.store_config(config, 'pierone') + return url + + @click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS) @click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True, help='Print the current version number and exit.') @@ -147,6 +222,19 @@ def get_tags(url, team, art, access_token): return r.json() +def get_clair_features(url, layer_id, access_token): + if layer_id is None: + return [] + + r = request(url, '/v1/layers/{}?vulnerabilities&features'.format(layer_id), access_token) + if r.status_code == 404: + # empty list of tags (layer does not exist) + return [] + else: + r.raise_for_status() + return r.json()['Layer']['Features'] + + @cli.command() 
@click.argument('team', callback=validate_team) @url_option @@ -184,14 +272,69 @@ def tags(config, team: str, artifact, url, output): 'artifact': art, 'tag': row['name'], 'created_by': row['created_by'], - 'created_time': parse_time(row['created'])} + 'created_time': parse_time(row['created']), + 'severity_fix_available': parse_severity( + row.get('severity_fix_available'), row.get('clair_id', False)), + 'severity_no_fix_available': parse_severity( + row.get('severity_no_fix_available'), row.get('clair_id', False))} for row in r]) # sorts are guaranteed to be stable, i.e. tags will be sorted by time (as returned from REST service) rows.sort(key=lambda row: (row['team'], row['artifact'])) with OutputFormat(output): - print_table(['team', 'artifact', 'tag', 'created_time', 'created_by'], rows, - titles={'created_time': 'Created', 'created_by': 'By'}) + titles = { + 'created_time': 'Created', + 'created_by': 'By', + 'severity_fix_available': 'Fixable CVE Severity', + 'severity_no_fix_available': 'Unfixable CVE Severity' + } + print_table(['team', 'artifact', 'tag', 'created_time', 'created_by', + 'severity_fix_available', 'severity_no_fix_available'], + rows, titles=titles, styles=CVE_STYLES) + + [email protected]() [email protected]('team', callback=validate_team) [email protected]('artifact') [email protected]('tag') +@url_option +@clair_url_option +@output_option [email protected]_obj +def cves(config, team, artifact, tag, url, clair_url, output): + '''List all CVE's found by Clair service for a specific artifact tag''' + set_pierone_url(config, url) + set_clair_url(config, clair_url) + + rows = [] + token = get_token() + for artifact_tag in get_tags(config.get('url'), team, artifact, token): + if artifact_tag['name'] == tag: + installed_software = get_clair_features(config.get('clair_url'), artifact_tag.get('clair_id'), token) + for software_pkg in installed_software: + for cve in software_pkg.get('Vulnerabilities', []): + rows.append({ + 'cve': cve['Name'], + 'severity': cve['Severity'].upper(), + 'affected_feature': '{}:{}'.format(software_pkg['Name'], + software_pkg['Version']), + 'fixing_feature': cve.get( + 'FixedBy') and '{}:{}'.format(software_pkg['Name'], + cve['FixedBy']), + 'link': cve['Link'], + }) + severity_rating = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'NEGLIGIBLE', 'UNKNOWN', 'PENDING'] + rows.sort(key=lambda row: severity_rating.index(row['severity'])) + with OutputFormat(output): + titles = { + 'cve': 'CVE', + 'severity': 'Severity', + 'affected_feature': 'Affected Feature', + 'fixing_feature': 'Fixing Feature', + 'link': 'Link' + } + print_table(['cve', 'severity', 'affected_feature', 'fixing_feature', 'link'], + rows, titles=titles, styles=CVE_STYLES) @cli.command() diff --git a/tox.ini b/tox.ini index aa079ec..4644fe1 100644 --- a/tox.ini +++ b/tox.ini @@ -1,2 +1,8 @@ [flake8] max-line-length=120 + +[tox] +envlist=py34,py35 + +[testenv] +commands=python setup.py test
zalando-stups/pierone-cli
903f8e27f3e084fd9116929139a1ccd7f700f42f
diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 diff --git a/tests/fixtures/clair_response.json b/tests/fixtures/clair_response.json new file mode 100644 index 0000000..2638daa --- /dev/null +++ b/tests/fixtures/clair_response.json @@ -0,0 +1,70 @@ +{ + "Layer": { + "Name": "sha256:0000000000000000000000000000000000000000000000000000000000000000", + "NamespaceName": "ubuntu:16.04", + "ParentName": "sha256:0000000000000000000000000000000000000000000000000000000000000000", + "IndexedByVersion": 2, + "Features": [ + { + "Name": "python3.5", + "NamespaceName": "ubuntu:16.04", + "Version": "3.5.1-10", + "AddedBy": "sha256:0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "Name": "python-pip", + "NamespaceName": "ubuntu:16.04", + "Version": "8.1.1-2", + "Vulnerabilities": [ + { + "Name": "CVE-2013-5123", + "NamespaceName": "ubuntu:16.04", + "Description": "The mirroring support (-M, --use-mirrors) was implemented without any sort of authenticity checks and is downloaded over plaintext HTTP. Further more by default it will dynamically discover the list of available mirrors by querying a DNS entry and extrapolating from that data. It does not attempt to use any sort of method of securing this querying of the DNS like DNSSEC. Software packages are downloaded over these insecure links, unpacked, and then typically the setup.py python file inside of them is executed.", + "Link": "http://people.ubuntu.com/~ubuntu-security/cve/CVE-2013-5123", + "Severity": "Medium" + }, + { + "Name": "CVE-2014-8991", + "NamespaceName": "ubuntu:16.04", + "Description": "pip 1.3 through 1.5.6 allows local users to cause a denial of service (prevention of package installation) by creating a /tmp/pip-build-* file for another user.", + "Link": "http://people.ubuntu.com/~ubuntu-security/cve/CVE-2014-8991", + "Severity": "Low", + "Metadata": { + "NVD": { + "CVSSv2": { + "Score": 2.1, + "Vectors": "AV:L/AC:L/Au:N/C:N/I:N" + } + } + } + } + ], + "AddedBy": "sha256:0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "Name": "openssl", + "NamespaceName": "ubuntu:16.04", + "Version": "1.0.2g-1ubuntu4", + "Vulnerabilities": [ + { + "Name": "CVE-2016-2108", + "NamespaceName": "ubuntu:16.04", + "Description": "The ASN.1 implementation in OpenSSL before 1.0.1o and 1.0.2 before 1.0.2c allows remote attackers to execute arbitrary code or cause a denial of service (buffer underflow and memory corruption) via an ANY field in crafted serialized data, aka the \"negative zero\" issue.", + "Link": "http://people.ubuntu.com/~ubuntu-security/cve/CVE-2016-2108", + "Severity": "High", + "Metadata": { + "NVD": { + "CVSSv2": { + "Score": 10, + "Vectors": "AV:N/AC:L/Au:N/C:C/I:C" + } + } + }, + "FixedBy": "1.0.2g-1ubuntu4.1" + } + ], + "AddedBy": "sha256:0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } +} diff --git a/tests/test_api.py b/tests/test_api.py index 5cb2fc7..3548e01 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -1,9 +1,11 @@ import json import os from unittest.mock import MagicMock -import yaml -from pierone.api import docker_login, DockerImage, get_latest_tag, Unauthorized, image_exists + import pytest +import yaml +from pierone.api import (DockerImage, Unauthorized, docker_login, + get_latest_tag, image_exists) def test_docker_login(monkeypatch, tmpdir): @@ -12,22 +14,22 @@ def test_docker_login(monkeypatch, tmpdir): response.status_code = 200 response.json.return_value = {'access_token': '12377'} 
monkeypatch.setattr('requests.get', MagicMock(return_value=response)) - token = docker_login('https://pierone.example.org', 'services', 'mytok', - 'myuser', 'mypass', 'https://token.example.org', use_keyring=False) + docker_login('https://pierone.example.org', 'services', 'mytok', + 'myuser', 'mypass', 'https://token.example.org', use_keyring=False) path = os.path.expanduser('~/.docker/config.json') with open(path) as fd: data = yaml.safe_load(fd) - assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org') + assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org') def test_docker_login_service_token(monkeypatch, tmpdir): monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) monkeypatch.setattr('tokens.get', lambda x: '12377') - token = docker_login('https://pierone.example.org', None, 'mytok', 'myuser', 'mypass', 'https://token.example.org') + docker_login('https://pierone.example.org', None, 'mytok', 'myuser', 'mypass', 'https://token.example.org') path = os.path.expanduser('~/.docker/config.json') with open(path) as fd: data = yaml.safe_load(fd) - assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org') + assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org') def test_keep_dockercfg_entries(monkeypatch, tmpdir): @@ -49,12 +51,12 @@ def test_keep_dockercfg_entries(monkeypatch, tmpdir): with open(path, 'w') as fd: json.dump(existing_data, fd) - token = docker_login('https://pierone.example.org', 'services', 'mytok', - 'myuser', 'mypass', 'https://token.example.org', use_keyring=False) + docker_login('https://pierone.example.org', 'services', 'mytok', + 'myuser', 'mypass', 'https://token.example.org', use_keyring=False) with open(path) as fd: data = yaml.safe_load(fd) - assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths', {}).get('https://pierone.example.org') - assert existing_data.get(key) == data.get(key) + assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths', {}).get('https://pierone.example.org') + assert existing_data.get(key) == data.get(key) def test_get_latest_tag(monkeypatch): diff --git a/tests/test_cli.py b/tests/test_cli.py index 6f58d15..6282253 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,9 +1,9 @@ import json import os -from click.testing import CliRunner +import re from unittest.mock import MagicMock -import yaml -import zign.api + +from click.testing import CliRunner from pierone.cli import cli @@ -40,6 +40,7 @@ def test_login_given_url_option(monkeypatch, tmpdir): runner = CliRunner() config = {} + def store(data, section): config.update(**data) @@ -50,9 +51,9 @@ def test_login_given_url_option(monkeypatch, tmpdir): monkeypatch.setattr('requests.get', lambda x, timeout: response) with runner.isolated_filesystem(): - result = runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n') + runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n') assert config == {'url': 'https://pieroneurl'} - result = runner.invoke(cli, ['login', '--url', 'someotherregistry'], catch_exceptions=False) + runner.invoke(cli, ['login', '--url', 'someotherregistry'], catch_exceptions=False) with open(os.path.join(str(tmpdir), '.docker/config.json')) as fd: data = json.load(fd) assert 
data['auths']['https://pieroneurl']['auth'] == 'b2F1dGgyOnRvazEyMw==' @@ -65,7 +66,7 @@ def test_scm_source(monkeypatch, tmpdir): response.json.return_value = {'url': 'git:somerepo', 'revision': 'myrev123'} runner = CliRunner() - monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url':'foobar'}) + monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar'}) monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123')) monkeypatch.setattr('pierone.cli.get_tags', MagicMock(return_value={})) monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) @@ -75,12 +76,13 @@ def test_scm_source(monkeypatch, tmpdir): assert 'myrev123' in result.output assert 'git:somerepo' in result.output + def test_image(monkeypatch, tmpdir): response = MagicMock() response.json.return_value = [{'name': '1.0', 'team': 'stups', 'artifact': 'kio'}] runner = CliRunner() - monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url':'foobar'}) + monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar'}) monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123')) monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response)) @@ -93,16 +95,130 @@ def test_image(monkeypatch, tmpdir): def test_tags(monkeypatch, tmpdir): response = MagicMock() - response.json.return_value = [{'name': '1.0', 'created_by': 'myuser', 'created': '2015-08-20T08:14:59.432Z'}] + response.json.return_value = [ + # Former pierone payload + { + 'name': '1.0', + 'created_by': 'myuser', + 'created': '2015-08-20T08:14:59.432Z' + }, + # New pierone payload with clair but no information about CVEs -- old images + { + "name": "1.1", + "created": "2016-05-19T15:23:41.065Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": None, + "severity_fix_available": None, + "severity_no_fix_available": None + }, + # New pierone payload with clair but no information about CVEs -- still processing + { + "name": "1.1", + "created": "2016-05-19T15:23:41.065Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": "sha256:here", + "severity_fix_available": None, + "severity_no_fix_available": None + }, + # New pierone payload with clair but could not figure out + { + "name": "1.1", + "created": "2016-05-19T15:23:41.065Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": "sha256:here", + "severity_fix_available": "clair:CouldntFigureOut", + "severity_no_fix_available": "clair:CouldntFigureOut" + }, + # New pierone payload with clair with no CVEs found + { + "name": "1.1", + "created": "2016-05-19T15:23:41.065Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": "sha256:here", + "severity_fix_available": "clair:NoCVEsFound", + "severity_no_fix_available": "clair:NoCVEsFound" + }, + # New pierone payload with clair input and info about CVEs + { + "name": "1.2", + "created": "2016-05-23T13:29:17.753Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": "sha256:here", + "severity_fix_available": "High", + "severity_no_fix_available": "Medium" + } + ] runner = CliRunner() - monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url':'foobar'}) + monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar'}) monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123')) monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) 
monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response)) with runner.isolated_filesystem(): result = runner.invoke(cli, ['tags', 'myteam', 'myart'], catch_exceptions=False) assert '1.0' in result.output + assert 'Fixable CVE Severity' in result.output + assert 'Unfixable CVE Severity' in result.output + assert 'TOO_OLD' in result.output + assert 'NOT_PROCESSED_YET' in result.output + assert 'NO_CVES_FOUND' in result.output + assert re.search('HIGH\s+MEDIUM', result.output), 'Should how information about CVEs' + + +def test_cves(monkeypatch, tmpdir): + pierone_service_payload = [ + # Former pierone payload + { + 'name': '1.0', + 'created_by': 'myuser', + 'created': '2015-08-20T08:14:59.432Z' + }, + # New pierone payload with clair but no information about CVEs + { + "name": "1.1", + "created": "2016-05-19T15:23:41.065Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": None, + "severity_fix_available": None, + "severity_no_fix_available": None + }, + # New pierone payload with clair input and info about CVEs + { + "name": "1.2", + "created": "2016-05-23T13:29:17.753Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": "sha256:here", + "severity_fix_available": "High", + "severity_no_fix_available": "Medium" + } + ] + + with open(os.path.join(os.path.dirname(__file__), + 'fixtures', 'clair_response.json'), 'r') as fixture: + clair_service_payload = json.loads(fixture.read()) + + response = MagicMock() + response.json.side_effect = [ + pierone_service_payload, + clair_service_payload + ] + + runner = CliRunner() + monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar', 'clair_url': 'barfoo'}) + monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123')) + monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) + monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response)) + with runner.isolated_filesystem(): + result = runner.invoke(cli, ['cves', 'myteam', 'myart', '1.2'], catch_exceptions=False) + assert 'CVE-2013-5123' in result.output + assert re.match('[^\n]+\n[^\n]+HIGH', result.output), 'Results should be ordered by highest priority' def test_latest(monkeypatch, tmpdir):
Display Clair security information.

PierOne now supports Clair for vulnerability scanning. PierOne now exposes the following three pieces of information for each tag:

- clair_id
- severity_fix_available
- severity_no_fix_available

The PierOne CLI should now also enhance the `tags` subcommand with this information and provide a new `cves` subcommand to display full CVE reports.

    $ pierone tags foo bar
    Team | Artifact | Tag | Created | By      | Fixable CVE Severity | Unfixable CVE Severity |
    -----|----------|-----|---------|---------|----------------------|------------------------|
    foo  | bar      | 1.0 | 5d ago  | example | **Critical**         | Medium                 |
    foo  | bar      | 1.1 | 2d ago  | example | **Critical**         | Medium                 |
    foo  | bar      | 2.0 | 1d ago  | example | None                 | Medium                 |

`High` and `Critical` severities should be highlighted.

    $ pierone cves foo bar 1.0
    CVE           | Severity | Affected Feature | Fixing Feature   | Link
    --------------|----------|------------------|------------------|------
    CVE-2014-9471 | Low      | coreutils:8.23-4 | coreutils:9.23-5 | https://security-tracker.debian.org/tracker/CVE-2014-9471

Again, `High` and `Critical` need to be highlighted, and the whole table should be sorted by severity. The PierOne source contains an ordered list of the possible severity values.

The information for this output can be retrieved via the [Clair API](https://github.com/coreos/clair/blob/master/api/v1/README.md#get-layersname) using the Clair ID provided by PierOne. For this, the PierOne CLI will need to learn about the Clair API's endpoint.
0.0
903f8e27f3e084fd9116929139a1ccd7f700f42f
[ "tests/test_cli.py::test_tags", "tests/test_cli.py::test_cves" ]
[ "tests/test_api.py::test_docker_login_service_token", "tests/test_api.py::test_get_latest_tag", "tests/test_api.py::test_get_latest_tag_IOException", "tests/test_api.py::test_get_latest_tag_non_json", "tests/test_api.py::test_unauthorized", "tests/test_api.py::test_image_exists", "tests/test_api.py::test_image_exists_IOException", "tests/test_api.py::test_image_exists_but_other_version", "tests/test_api.py::test_image_not_exists", "tests/test_cli.py::test_version", "tests/test_cli.py::test_login", "tests/test_cli.py::test_login_given_url_option", "tests/test_cli.py::test_scm_source", "tests/test_cli.py::test_image", "tests/test_cli.py::test_latest", "tests/test_cli.py::test_latest_not_found", "tests/test_cli.py::test_url_without_scheme" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-05-26 16:17:11+00:00
apache-2.0
6,316
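The record above asks for the `cves` output to be sorted by severity using an ordered list of possible values. A minimal sketch of such a sort, with a hypothetical ordering since the record does not spell the real list out:

```python
# Hypothetical severity ranking, most to least severe; the actual ordered list
# lives in the Pier One source and may differ.
SEVERITY_ORDER = ["Critical", "High", "Medium", "Low", "Negligible", "Unknown"]


def sort_by_severity(vulnerabilities):
    """Return CVE entries ordered from most to least severe."""
    def rank(vulnerability):
        severity = vulnerability.get("Severity", "Unknown")
        # Unknown severities sort last instead of raising.
        return SEVERITY_ORDER.index(severity) if severity in SEVERITY_ORDER else len(SEVERITY_ORDER)
    return sorted(vulnerabilities, key=rank)


print(sort_by_severity([{"Name": "CVE-2014-9471", "Severity": "Low"},
                        {"Name": "CVE-2016-2108", "Severity": "High"}]))
```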
zalando-stups__pierone-cli-37
diff --git a/pierone/cli.py b/pierone/cli.py index 1af5790..50dba86 100644 --- a/pierone/cli.py +++ b/pierone/cli.py @@ -232,7 +232,8 @@ def get_clair_features(url, layer_id, access_token): return [] else: r.raise_for_status() - return r.json()['Layer']['Features'] + + return r.json()['Layer'].get('Features', []) @cli.command()
zalando-stups/pierone-cli
991c05e9c7496b2aac071d85d0a9ca6b8afcf9dd
diff --git a/tests/test_cli.py b/tests/test_cli.py index 6282253..087d27d 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -221,6 +221,61 @@ def test_cves(monkeypatch, tmpdir): assert re.match('[^\n]+\n[^\n]+HIGH', result.output), 'Results should be ordered by highest priority' +def test_no_cves_found(monkeypatch, tmpdir): + pierone_service_payload = [ + # Former pierone payload + { + 'name': '1.0', + 'created_by': 'myuser', + 'created': '2015-08-20T08:14:59.432Z' + }, + # New pierone payload with clair but no information about CVEs + { + "name": "1.1", + "created": "2016-05-19T15:23:41.065Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": None, + "severity_fix_available": None, + "severity_no_fix_available": None + }, + # New pierone payload with clair input and info about CVEs + { + "name": "1.2", + "created": "2016-05-23T13:29:17.753Z", + "created_by": "myuser", + "image": "sha256:here", + "clair_id": "sha256:here", + "severity_fix_available": "High", + "severity_no_fix_available": "Medium" + } + ] + + no_cves_clair_payload = { + "Layer": { + "Name": "sha256:0000000000000000000000000000000000000000000000000000000000000000", + "NamespaceName": "ubuntu:16.04", + "ParentName": "sha256:0000000000000000000000000000000000000000000000000000000000000000", + "IndexedByVersion": 2 + } + } + + response = MagicMock() + response.json.side_effect = [ + pierone_service_payload, + no_cves_clair_payload + ] + + runner = CliRunner() + monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar', 'clair_url': 'barfoo'}) + monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123')) + monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) + monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response)) + with runner.isolated_filesystem(): + result = runner.invoke(cli, ['cves', 'myteam', 'myart', '1.2'], catch_exceptions=False) + assert re.match('^[^\n]+\n$', result.output), 'No results should be shown' + + def test_latest(monkeypatch, tmpdir): response = MagicMock() response.json.return_value = [
pierone fails with backtrace when the CVE status is COULDNT_FIGURE_OUT ``` Traceback (most recent call last): File "/usr/local/bin/pierone", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python3.4/dist-packages/pierone/cli.py", line 485, in main cli() File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 696, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 1060, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.4/dist-packages/click/decorators.py", line 27, in new_func return f(get_current_context().obj, *args, **kwargs) File "/usr/local/lib/python3.4/dist-packages/pierone/cli.py", line 313, in cves installed_software = get_clair_features(config.get('clair_url'), artifact_tag.get('clair_id'), token) File "/usr/local/lib/python3.4/dist-packages/pierone/cli.py", line 235, in get_clair_features return r.json()['Layer']['Features'] KeyError: 'Features' ```
0.0
991c05e9c7496b2aac071d85d0a9ca6b8afcf9dd
[ "tests/test_cli.py::test_no_cves_found" ]
[ "tests/test_cli.py::test_version", "tests/test_cli.py::test_login", "tests/test_cli.py::test_login_given_url_option", "tests/test_cli.py::test_scm_source", "tests/test_cli.py::test_image", "tests/test_cli.py::test_tags", "tests/test_cli.py::test_cves", "tests/test_cli.py::test_latest", "tests/test_cli.py::test_latest_not_found", "tests/test_cli.py::test_url_without_scheme" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2016-05-31 08:47:53+00:00
apache-2.0
6,317
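The fix in the record above treats `Features` as optional in the Clair layer payload. A standalone sketch of that defensive lookup, over an illustrative payload shape:

```python
def extract_features(clair_layer_response: dict) -> list:
    """Return the layer's feature list, or [] when Clair reports none.

    Layers that Clair could not analyse (e.g. status COULDNT_FIGURE_OUT) come
    back without a 'Features' key, so a plain ['Layer']['Features'] lookup
    raises KeyError.
    """
    return clair_layer_response.get("Layer", {}).get("Features", [])


# A layer without features now yields an empty list instead of crashing.
print(extract_features({"Layer": {"Name": "sha256:0000", "IndexedByVersion": 2}}))
```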
zalando-stups__pierone-cli-49
diff --git a/.travis.yml b/.travis.yml index e417a33..7c746c5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,7 @@ python: install: - pip install -r requirements.txt - pip install coveralls + - pip install flake8 # forcing installation of flake8, might be removed after https://gitlab.com/pycqa/flake8/issues/164 gets fixed. script: - python setup.py test - python setup.py flake8 diff --git a/pierone/cli.py b/pierone/cli.py index 90bb5c2..8918a42 100644 --- a/pierone/cli.py +++ b/pierone/cli.py @@ -8,7 +8,9 @@ import pierone import requests import stups_cli.config import zign.api -from clickclick import AliasedGroup, OutputFormat, UrlType, error, print_table +from clickclick import (AliasedGroup, OutputFormat, UrlType, error, + fatal_error, print_table) +from requests import RequestException from .api import (DockerImage, Unauthorized, docker_login, get_image_tags, get_latest_tag, parse_time, request) @@ -76,6 +78,17 @@ def print_version(ctx, param, value): ctx.exit() +def validate_pierone_url(url: str) -> None: + ping_url = url.rstrip('/') + '/swagger.json' + try: + response = requests.get(ping_url, timeout=5) + response.raise_for_status() + if 'Pier One API' not in response.text: + fatal_error('ERROR: Did not find a valid Pier One registry at {}'.format(url)) + except RequestException: + fatal_error('ERROR: Could not reach {}'.format(ping_url)) + + def set_pierone_url(config: dict, url: str) -> None: '''Read Pier One URL from cli, from config file or from stdin.''' url = url or config.get('url') @@ -93,6 +106,7 @@ def set_pierone_url(config: dict, url: str) -> None: # issue 63: gracefully handle URLs without scheme url = 'https://{}'.format(url) + validate_pierone_url(url) config['url'] = url return url
zalando-stups/pierone-cli
9f99c8f5a054c35b623c0601e66da0c15fdb578a
diff --git a/tests/test_cli.py b/tests/test_cli.py index e76073c..0bdd2fe 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -3,8 +3,17 @@ import os import re from unittest.mock import MagicMock +import pytest from click.testing import CliRunner from pierone.cli import cli +from requests import RequestException + + [email protected](autouse=True) +def valid_pierone_url(monkeypatch): + response = MagicMock() + response.text = 'Pier One API' + monkeypatch.setattr('requests.get', lambda *args, **kw: response) def test_version(monkeypatch): @@ -16,22 +25,47 @@ def test_version(monkeypatch): def test_login(monkeypatch, tmpdir): - response = MagicMock() - runner = CliRunner() monkeypatch.setattr('stups_cli.config.load_config', lambda x: {}) monkeypatch.setattr('pierone.api.get_named_token', MagicMock(return_value={'access_token': 'tok123'})) monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) - monkeypatch.setattr('requests.get', lambda x, timeout: response) with runner.isolated_filesystem(): result = runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n') + assert 'Storing Docker client configuration' in result.output + assert result.output.rstrip().endswith('OK') with open(os.path.join(str(tmpdir), '.docker/config.json')) as fd: data = json.load(fd) assert data['auths']['https://pieroneurl']['auth'] == 'b2F1dGgyOnRvazEyMw==' - assert 'Storing Docker client configuration' in result.output - assert result.output.rstrip().endswith('OK') + + +def test_invalid_url_for_login(monkeypatch, tmpdir): + runner = CliRunner() + response = MagicMock() + + monkeypatch.setattr('stups_cli.config.load_config', lambda x: {}) + monkeypatch.setattr('pierone.api.get_named_token', MagicMock(return_value={'access_token': 'tok123'})) + monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) + + # Missing Pier One header + response.text = 'Not valid API' + monkeypatch.setattr('requests.get', lambda *args, **kw: response) + + with runner.isolated_filesystem(): + result = runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n') + assert 'ERROR: Did not find a valid Pier One registry at https://pieroneurl' in result.output + assert result.exit_code == 1 + assert not os.path.exists(os.path.join(str(tmpdir), '.docker/config.json')) + + # Not a valid header + response.raise_for_status = MagicMock(side_effect=RequestException) + monkeypatch.setattr('requests.get', lambda *args, **kw: response) + with runner.isolated_filesystem(): + result = runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n') + assert 'ERROR: Could not reach https://pieroneurl' in result.output + assert result.exit_code == 1 + assert not os.path.exists(os.path.join(str(tmpdir), '.docker/config.json')) def test_login_arg_user(monkeypatch, tmpdir): @@ -95,8 +129,6 @@ def test_login_env_user(monkeypatch, tmpdir): def test_login_given_url_option(monkeypatch, tmpdir): - response = MagicMock() - runner = CliRunner() config = {} @@ -108,7 +140,6 @@ def test_login_given_url_option(monkeypatch, tmpdir): monkeypatch.setattr('stups_cli.config.store_config', store) monkeypatch.setattr('pierone.api.get_named_token', MagicMock(return_value={'access_token': 'tok123'})) monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) - monkeypatch.setattr('requests.get', lambda x, timeout: response) with runner.isolated_filesystem(): runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n')
Pierone login accepts any URL

It would be nice to validate that the pierone URL is actually valid, for example by pinging the address to see whether it works and showing an error if it does not. The current behaviour leads users who made a typo in the pierone URL to think they are logged in, only to get a not very helpful error from `docker push` later.

### Current behaviour

Example of what currently happens:

```
$ pierone login --url registry.does-not-exist.example.com
Getting OAuth2 token "pierone".. OK
Storing Docker client configuration in /home/master/.docker/config.json.. OK
```

Then trying to push the image using the docker CLI:

```
$ docker push pierone.opensource.zalan.do/bus/hello:b17
The push refers to a repository [pierone.opensource.zalan.do/bus/hello]
9c445b8a75e0: Preparing
8a48ff634f1d: Preparing
...
19429b698a22: Waiting
9436069b92a3: Waiting
no basic auth credentials
```

This leads users to think there is a problem with the Pierone registry or with Docker, which is misleading.

### Suggested behaviour

When trying to log in to pierone with the URL of a non-pierone server:

```
$ pierone login --url registry.does-not-exist.example.com
ERROR: Not found a valid Pierone registry at registry.does-not-exist.example.com
```
0.0
9f99c8f5a054c35b623c0601e66da0c15fdb578a
[ "tests/test_cli.py::test_invalid_url_for_login" ]
[ "tests/test_cli.py::test_version", "tests/test_cli.py::test_login", "tests/test_cli.py::test_login_arg_user", "tests/test_cli.py::test_login_zign_user", "tests/test_cli.py::test_login_env_user", "tests/test_cli.py::test_login_given_url_option", "tests/test_cli.py::test_scm_source", "tests/test_cli.py::test_image", "tests/test_cli.py::test_tags", "tests/test_cli.py::test_tags_versions_limit", "tests/test_cli.py::test_cves", "tests/test_cli.py::test_no_cves_found", "tests/test_cli.py::test_latest", "tests/test_cli.py::test_latest_not_found", "tests/test_cli.py::test_url_without_scheme" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-07-19 12:50:26+00:00
apache-2.0
6,318
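A sketch of the pre-flight check the record above introduces: probe the registry's `swagger.json` and fail fast when it does not look like a Pier One API. The marker string and timeout follow the patch; raising `ValueError` instead of exiting the CLI is a simplification for this sketch.

```python
import requests
from requests import RequestException


def validate_pierone_url(url: str) -> None:
    """Raise ValueError unless `url` answers like a Pier One registry."""
    ping_url = url.rstrip("/") + "/swagger.json"
    try:
        response = requests.get(ping_url, timeout=5)
        response.raise_for_status()
    except RequestException as error:
        raise ValueError("Could not reach {}".format(ping_url)) from error
    # The registry's Swagger document is expected to identify itself.
    if "Pier One API" not in response.text:
        raise ValueError("Did not find a valid Pier One registry at {}".format(url))
```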
zalando-stups__pierone-cli-61
diff --git a/pierone/api.py b/pierone/api.py index 35542be..9b0c76a 100644 --- a/pierone/api.py +++ b/pierone/api.py @@ -71,6 +71,9 @@ def docker_login_with_token(url, access_token): basic_auth = codecs.encode('oauth2:{}'.format(access_token).encode('utf-8'), 'base64').strip().decode('utf-8') if 'auths' not in dockercfg: dockercfg['auths'] = {} + if 'credsStore' in dockercfg: + del dockercfg['credsStore'] + dockercfg['auths'][url] = {'auth': basic_auth, 'email': '[email protected]'} with Action('Storing Docker client configuration in {}..'.format(path)):
zalando-stups/pierone-cli
0afce92aedf654855ad35b90623410e6d6c261dd
diff --git a/tests/test_api.py b/tests/test_api.py index 62e1f0a..3ee83be 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -21,6 +21,29 @@ def test_docker_login(monkeypatch, tmpdir): assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org') +def test_docker_login_with_credsstore(monkeypatch, tmpdir): + monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir))) + monkeypatch.setattr('pierone.api.get_token', MagicMock(return_value='12377')) + path = os.path.expanduser('~/.docker/config.json') + os.makedirs(os.path.dirname(path)) + with open(path, 'w') as fd: + json.dump({ + "auths": { + "https://pierone.stups.zalan.do": { + "auth": "xxx", + "email": "[email protected]" + } + }, + "credsStore": "osxkeychain" + }, fd) + docker_login('https://pierone.example.org', 'services', 'mytok', + 'myuser', 'mypass', 'https://token.example.org', use_keyring=False) + with open(path) as fd: + data = yaml.safe_load(fd) + assert {'auth': 'b2F1dGgyOjEyMzc3', + 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org') + assert 'credsStore' not in data + def test_docker_login_service_token(monkeypatch, tmpdir): monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
no basic auth credentials

Today I had an auth issue. While trying to do `docker push` I was getting the following log:

    ➜ pierone login
    Getting OAuth2 token "pierone".. OK
    Storing Docker client configuration in /Users/whoever/.docker/config.json.. OK
    ➜ docker push myrepo/...
    The push refers to a repository [myrepo/...]
    a5f591aacc10: Preparing
    ...
    cb11ba605400: Waiting
    no basic auth credentials

Recreating `~/config.json` saved me. There was a `"credsStore": "osxkeychain"` setting in the previous version of `~/config.json` which was causing me trouble.

BTW: macOS 10.12.3, pierone cli 1.1.27, docker 17.06.0-ce, build 02c1d87
0.0
0afce92aedf654855ad35b90623410e6d6c261dd
[ "tests/test_api.py::test_docker_login_with_credsstore" ]
[ "tests/test_api.py::test_docker_login", "tests/test_api.py::test_docker_login_service_token", "tests/test_api.py::test_docker_login_with_iid", "tests/test_api.py::test_keep_dockercfg_entries", "tests/test_api.py::test_get_latest_tag", "tests/test_api.py::test_get_latest_tag_IOException", "tests/test_api.py::test_get_latest_tag_non_json", "tests/test_api.py::test_image_exists", "tests/test_api.py::test_image_exists_IOException", "tests/test_api.py::test_image_exists_but_other_version", "tests/test_api.py::test_image_not_exists", "tests/test_api.py::test_get_image_tags", "tests/test_api.py::test_get_image_tag", "tests/test_api.py::test_get_image_tag_that_does_not_exist" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2017-08-25 13:13:09+00:00
apache-2.0
6,319
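A sketch of the workaround the record above ships: when storing registry credentials in a Docker config file, drop any `credsStore` setting so the embedded auth entry is actually used. The placeholder email matches the one in the patch; error handling is kept minimal.

```python
import base64
import json
import os


def store_registry_auth(registry_url: str, access_token: str, path: str) -> None:
    """Write basic-auth credentials for `registry_url` into a Docker config file,
    removing any 'credsStore' entry so Docker reads the inline auth."""
    try:
        with open(path) as config_file:
            config = json.load(config_file)
    except FileNotFoundError:
        config = {}

    # A configured credential store (e.g. "osxkeychain") makes the Docker CLI
    # ignore the inline "auths" entries, which is what broke the push above.
    config.pop("credsStore", None)

    auth = base64.b64encode("oauth2:{}".format(access_token).encode()).decode()
    config.setdefault("auths", {})[registry_url] = {"auth": auth,
                                                    "email": "[email protected]"}

    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    with open(path, "w") as config_file:
        json.dump(config, config_file, indent=2)
```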
zalando-stups__senza-280
diff --git a/senza/manaus/acm.py b/senza/manaus/acm.py index 0c16faa..ad918e8 100644 --- a/senza/manaus/acm.py +++ b/senza/manaus/acm.py @@ -80,16 +80,16 @@ class ACMCertificate: arn = certificate['CertificateArn'] subject_alternative_name = certificate['SubjectAlternativeNames'] domain_validation_options = certificate['DomainValidationOptions'] - serial = certificate['Serial'] subject = certificate['Subject'] - issuer = certificate['Issuer'] created_at = certificate['CreatedAt'] - issued_at = certificate['IssuedAt'] status = certificate['Status'] - not_before = certificate['NotBefore'] - not_after = certificate['NotAfter'] signature_algorithm = certificate['SignatureAlgorithm'] in_use_by = certificate['InUseBy'] + serial = certificate.get('Serial') + issuer = certificate.get('Issuer') + issued_at = certificate.get('IssuedAt') + not_before = certificate.get('NotBefore') + not_after = certificate.get('NotAfter') revoked_at = certificate.get('RevokedAt') revocation_reason = certificate.get('RevocationReason')
zalando-stups/senza
46c3172d27a4e02375f71a3aee408e73c668b5e0
diff --git a/tests/test_manaus/test_acm.py b/tests/test_manaus/test_acm.py index 13691ed..51e12d4 100644 --- a/tests/test_manaus/test_acm.py +++ b/tests/test_manaus/test_acm.py @@ -85,6 +85,24 @@ CERT2 = {'CertificateArn': 'arn:aws:acm:eu-west-1:cert2', '*.senza.aws.example.net', '*.app.example.net']} +CERT_VALIDATION_TIMED_OUT = { + 'KeyAlgorithm': 'RSA-2048', + 'DomainName': 'alpha.example.org', + 'InUseBy': [], + 'CreatedAt': datetime(2016, 7, 11, 15, 15, 30), + 'SubjectAlternativeNames': ['alpha.example.org'], + 'SignatureAlgorithm': 'SHA256WITHRSA', + 'Status': 'VALIDATION_TIMED_OUT', + 'DomainValidationOptions': [{'DomainName': 'alpha.example.org', + 'ValidationEmails': ['[email protected]', + '[email protected]', + '[email protected]', + '[email protected]', + '[email protected]'], + 'ValidationDomain': 'alpha.example.org'}], + 'CertificateArn': 'arn:aws:acm:eu-central-1:123123:certificate/f8a0fa1a-381b-44b6-ab10-1b94ba1480a1', + 'Subject': 'CN=alpha.example.org'} + def test_certificate_valid(): certificate1 = ACMCertificate.from_boto_dict(CERT1) @@ -108,6 +126,9 @@ def test_certificate_valid(): assert not certificate1_revoked.is_valid(when=datetime(2013, 4, 2, 10, 11, 12, tzinfo=timezone.utc)) + cert_invalid = ACMCertificate.from_boto_dict(CERT_VALIDATION_TIMED_OUT) + assert not cert_invalid.is_valid() + def test_certificate_comparison(): cert2 = CERT1.copy()
ACM Cert lookup fails with KeyError ``` Generating Cloud Formation template.. EXCEPTION OCCURRED: 'Serial' Unknown Error: 'Serial'. Please create an issue with the content of /tmp/senza-traceback-078eseqg $ cat /tmp/senza-traceback-078eseqg Traceback (most recent call last): File "/usr/local/lib/python3.5/dist-packages/senza/error_handling.py", line 69, in __call__ self.function(*args, **kwargs) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 696, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1060, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 555, in create data = create_cf_template(definition, region, version, parameter, force, parameter_file) File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 638, in create_cf_template data = evaluate(definition.copy(), args, account_info, force) File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 239, in evaluate definition = componentfn(definition, configuration, args, info, force, account_info) File "/usr/local/lib/python3.5/dist-packages/senza/components/weighted_dns_elastic_load_balancer.py", line 29, in component_weighted_dns_elastic_load_balancer return component_elastic_load_balancer(definition, configuration, args, info, force, account_info) File "/usr/local/lib/python3.5/dist-packages/senza/components/elastic_load_balancer.py", line 110, in component_elastic_load_balancer listeners = configuration.get('Listeners') or get_listeners(subdomain, main_zone, configuration) File "/usr/local/lib/python3.5/dist-packages/senza/components/elastic_load_balancer.py", line 48, in get_listeners reverse=True) File "/usr/local/lib/python3.5/dist-packages/senza/manaus/acm.py", line 173, in get_certificates certificate = ACMCertificate.get_by_arn(arn) File "/usr/local/lib/python3.5/dist-packages/senza/manaus/acm.py", line 110, in get_by_arn return cls.from_boto_dict(certificate) File "/usr/local/lib/python3.5/dist-packages/senza/manaus/acm.py", line 83, in from_boto_dict serial = certificate['Serial'] KeyError: 'Serial' ``` The cert has status "'VALIDATION_TIMED_OUT" in the error case.
0.0
46c3172d27a4e02375f71a3aee408e73c668b5e0
[ "tests/test_manaus/test_acm.py::test_certificate_valid" ]
[ "tests/test_manaus/test_acm.py::test_certificate_comparison", "tests/test_manaus/test_acm.py::test_certificate_get_by_arn", "tests/test_manaus/test_acm.py::test_certificate_matches", "tests/test_manaus/test_acm.py::test_get_certificates", "tests/test_manaus/test_acm.py::test_arn_is_acm_certificate" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2016-07-26 14:24:47+00:00
apache-2.0
6,320
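The fix in the record above swaps hard indexing for `dict.get` on fields that ACM omits when a certificate never completed validation. A small sketch of that pattern over an illustrative payload:

```python
def summarise_certificate(certificate: dict) -> dict:
    """Pick out fields from an ACM describe_certificate payload, tolerating the
    ones ACM omits for certificates that never became valid.

    Certificates in states such as VALIDATION_TIMED_OUT carry no 'Serial',
    'Issuer', 'IssuedAt', 'NotBefore' or 'NotAfter'.
    """
    return {
        "arn": certificate["CertificateArn"],      # always present
        "status": certificate["Status"],           # always present
        "serial": certificate.get("Serial"),       # optional
        "not_after": certificate.get("NotAfter"),  # optional
    }


print(summarise_certificate({"CertificateArn": "arn:aws:acm:eu-central-1:123:certificate/abc",
                             "Status": "VALIDATION_TIMED_OUT"}))
```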
zalando-stups__senza-282
diff --git a/senza/components/elastic_load_balancer.py b/senza/components/elastic_load_balancer.py index 347b515..5644c13 100644 --- a/senza/components/elastic_load_balancer.py +++ b/senza/components/elastic_load_balancer.py @@ -2,6 +2,7 @@ import click from clickclick import fatal_error from senza.aws import resolve_security_groups +from ..cli import AccountArguments, TemplateArguments from ..manaus import ClientError from ..manaus.iam import IAM, IAMServerCertificate from ..manaus.acm import ACM, ACMCertificate @@ -23,13 +24,14 @@ def get_load_balancer_name(stack_name: str, stack_version: str): return '{}-{}'.format(stack_name[:l], stack_version) -def get_listeners(subdomain, main_zone, configuration): +def get_listeners(subdomain, main_zone, configuration, + account_info: AccountArguments): ssl_cert = configuration.get('SSLCertificateId') if ACMCertificate.arn_is_acm_certificate(ssl_cert): # check if certificate really exists try: - ACMCertificate.get_by_arn(ssl_cert) + ACMCertificate.get_by_arn(account_info.Region, ssl_cert) except ClientError as e: error_msg = e.response['Error']['Message'] fatal_error(error_msg) @@ -44,7 +46,8 @@ def get_listeners(subdomain, main_zone, configuration): iam_pattern = main_zone.lower().rstrip('.').replace('.', '-') name = '{sub}.{zone}'.format(sub=subdomain, zone=main_zone.rstrip('.')) - acm_certificates = sorted(ACM.get_certificates(domain_name=name), + acm = ACM(account_info.Region) + acm_certificates = sorted(acm.get_certificates(domain_name=name), reverse=True) else: iam_pattern = '' @@ -79,9 +82,13 @@ def get_listeners(subdomain, main_zone, configuration): ] -def component_elastic_load_balancer(definition, configuration, args, info, force, account_info): +def component_elastic_load_balancer(definition, + configuration: dict, + args: TemplateArguments, + info: dict, + force, + account_info: AccountArguments): lb_name = configuration["Name"] - # domains pointing to the load balancer subdomain = '' main_zone = None @@ -107,7 +114,7 @@ def component_elastic_load_balancer(definition, configuration, args, info, force subdomain = domain['Subdomain'] main_zone = domain['Zone'] # type: str - listeners = configuration.get('Listeners') or get_listeners(subdomain, main_zone, configuration) + listeners = configuration.get('Listeners') or get_listeners(subdomain, main_zone, configuration, account_info) health_check_protocol = "HTTP" allowed_health_check_protocols = ("HTTP", "TCP", "UDP", "SSL") diff --git a/senza/manaus/acm.py b/senza/manaus/acm.py index ad918e8..d28fe26 100644 --- a/senza/manaus/acm.py +++ b/senza/manaus/acm.py @@ -101,11 +101,11 @@ class ACMCertificate: revoked_at, revocation_reason) @classmethod - def get_by_arn(cls, arn: str) -> "ACMCertificate": + def get_by_arn(cls, region: str, arn: str) -> "ACMCertificate": """ Gets a ACMCertificate based on ARN alone """ - client = boto3.client('acm') + client = boto3.client('acm', region) certificate = client.describe_certificate(CertificateArn=arn)['Certificate'] return cls.from_boto_dict(certificate) @@ -156,21 +156,26 @@ class ACM: See http://boto3.readthedocs.io/en/latest/reference/services/acm.html """ - @staticmethod - def get_certificates(valid_only: bool=True, + def __init__(self, region=str): + self.region = region + + def get_certificates(self, + *, + valid_only: bool=True, domain_name: Optional[str]=None) -> Iterator[ACMCertificate]: """ Gets certificates from ACM. 
By default it returns all valid certificates + :param region: AWS region :param valid_only: Return only valid certificates :param domain_name: Return only certificates that match the domain """ # TODO implement pagination - client = boto3.client('acm') + client = boto3.client('acm', self.region) certificates = client.list_certificates()['CertificateSummaryList'] for summary in certificates: arn = summary['CertificateArn'] - certificate = ACMCertificate.get_by_arn(arn) + certificate = ACMCertificate.get_by_arn(self.region, arn) if valid_only and not certificate.is_valid(): pass elif domain_name is not None and not certificate.matches(domain_name):
zalando-stups/senza
56b109cbf40fe05f508580ad2fce9d07e60075e6
diff --git a/tests/test_manaus/test_acm.py b/tests/test_manaus/test_acm.py index 51e12d4..f022ff2 100644 --- a/tests/test_manaus/test_acm.py +++ b/tests/test_manaus/test_acm.py @@ -148,7 +148,8 @@ def test_certificate_get_by_arn(monkeypatch): m_client.describe_certificate.return_value = {'Certificate': CERT1} monkeypatch.setattr('boto3.client', m_client) - certificate1 = ACMCertificate.get_by_arn('arn:aws:acm:eu-west-1:cert') + certificate1 = ACMCertificate.get_by_arn('dummy-region', + 'arn:aws:acm:eu-west-1:cert') assert certificate1.domain_name == '*.senza.example.com' assert certificate1.is_valid(when=datetime(2016, 4, 5, 12, 14, 14, tzinfo=timezone.utc)) @@ -183,7 +184,7 @@ def test_get_certificates(monkeypatch): tzinfo=timezone.utc) monkeypatch.setattr('senza.manaus.acm.datetime', m_datetime) - acm = ACM() + acm = ACM('dummy-region') certificates_default = list(acm.get_certificates()) assert len(certificates_default) == 1 # Cert2 is excluded because it's REVOKED assert certificates_default[0].arn == 'arn:aws:acm:eu-west-1:cert1'
NoRegionError: You must specify a region. When trying to run `senza create` I am getting this error: ``` $ senza create --region eu-central-1 --force ../hello-flask/hello.yaml v56 ImageVersion=bus56 --stacktrace-visible Generating Cloud Formation template.. EXCEPTION OCCURRED: You must specify a region. Traceback (most recent call last): File "/Users/rcaricio/.virtualenvs/lizzy-init/bin/senza", line 11, in <module> sys.exit(main()) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/cli.py", line 1492, in main HandleExceptions(cli)() File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/error_handling.py", line 99, in __call__ self.die_unknown_error(e) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/error_handling.py", line 57, in die_unknown_error raise e File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/error_handling.py", line 69, in __call__ self.function(*args, **kwargs) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/click/core.py", line 696, in main rv = self.invoke(ctx) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/click/core.py", line 1060, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/cli.py", line 555, in create data = create_cf_template(definition, region, version, parameter, force, parameter_file) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/cli.py", line 638, in create_cf_template data = evaluate(definition.copy(), args, account_info, force) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/cli.py", line 239, in evaluate definition = componentfn(definition, configuration, args, info, force, account_info) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/components/weighted_dns_elastic_load_balancer.py", line 29, in component_weighted_dns_elastic_load_balancer return component_elastic_load_balancer(definition, configuration, args, info, force, account_info) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/components/elastic_load_balancer.py", line 110, in component_elastic_load_balancer listeners = configuration.get('Listeners') or get_listeners(subdomain, main_zone, configuration) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/components/elastic_load_balancer.py", line 48, in get_listeners reverse=True) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/senza/manaus/acm.py", line 169, in get_certificates client = boto3.client('acm') File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/boto3/__init__.py", line 79, in client return _get_default_session().client(*args, **kwargs) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/boto3/session.py", line 250, in client aws_session_token=aws_session_token, config=config) File 
"/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/botocore/session.py", line 818, in create_client client_config=config, api_version=api_version) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/botocore/client.py", line 69, in create_client verify, credentials, scoped_config, client_config, endpoint_bridge) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/botocore/client.py", line 199, in _get_client_args service_name, region_name, endpoint_url, is_secure) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/botocore/client.py", line 322, in resolve service_name, region_name) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/botocore/regions.py", line 122, in construct_endpoint partition, service_name, region_name) File "/Users/rcaricio/.virtualenvs/lizzy-init/lib/python3.5/site-packages/botocore/regions.py", line 135, in _endpoint_for_partition raise NoRegionError() botocore.exceptions.NoRegionError: You must specify a region. ``` Senza version `1.0.91` Does not happen with older versions of Senza though.
0.0
56b109cbf40fe05f508580ad2fce9d07e60075e6
[ "tests/test_manaus/test_acm.py::test_certificate_get_by_arn", "tests/test_manaus/test_acm.py::test_get_certificates" ]
[ "tests/test_manaus/test_acm.py::test_arn_is_acm_certificate", "tests/test_manaus/test_acm.py::test_certificate_valid", "tests/test_manaus/test_acm.py::test_certificate_comparison", "tests/test_manaus/test_acm.py::test_certificate_matches" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-07-27 07:31:57+00:00
apache-2.0
6,321
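The record above fixes a `NoRegionError` by threading the target region into every boto3 client it creates. A minimal sketch of that idea; the service, field names, and `region_name` parameter follow the public boto3 ACM API, and pagination is ignored for brevity:

```python
import boto3


def list_certificate_arns(region: str) -> list:
    """List ACM certificate ARNs with a client bound to an explicit region,
    so the call works even when no default region is configured."""
    client = boto3.client("acm", region_name=region)
    summaries = client.list_certificates()["CertificateSummaryList"]
    return [summary["CertificateArn"] for summary in summaries]
```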
zalando-stups__senza-304
diff --git a/senza/cli.py b/senza/cli.py index f863831..fdb07f6 100755 --- a/senza/cli.py +++ b/senza/cli.py @@ -35,14 +35,16 @@ from .aws import (StackReference, get_account_alias, get_account_id, from .components import evaluate_template, get_component from .components.stups_auto_configuration import find_taupage_image from .error_handling import HandleExceptions -from .exceptions import VPCError +from .manaus.ec2 import EC2 +from .manaus.exceptions import VPCError from .manaus.route53 import Route53, Route53Record from .patch import patch_auto_scaling_group from .respawn import get_auto_scaling_group, respawn_auto_scaling_group from .stups.piu import Piu from .templates import get_template_description, get_templates from .templates._helper import get_mint_bucket_name -from .traffic import change_version_traffic, get_records, print_version_traffic, resolve_to_ip_addresses +from .traffic import (change_version_traffic, get_records, + print_version_traffic, resolve_to_ip_addresses) from .utils import (camel_case_to_underscore, ensure_keys, named_value, pystache_render) @@ -316,22 +318,21 @@ class AccountArguments: @property def VpcID(self): if self.__VpcID is None: - ec2 = boto3.resource('ec2', self.Region) - vpc_list = list() - for vpc in ec2.vpcs.all(): # don't use the list from blow. .all() use a internal pageing! - if vpc.is_default: - self.__VpcID = vpc.vpc_id - break - vpc_list.append(vpc) - else: - if len(vpc_list) == 1: - # Use the only one VPC if no default VPC found - self.__VpcID = vpc_list[0].vpc_id - elif len(vpc_list) > 1: - raise VPCError('Multiple VPCs are only supported if one ' - 'VPC is the default VPC (IsDefault=true)!') - else: - raise VPCError('Can\'t find any VPC!') + ec2 = EC2(self.Region) + try: + vpc = ec2.get_default_vpc() + except VPCError as error: + if sys.stdin.isatty() and error.number_of_vpcs: + # if running in interactive terminal and there are VPCs + # to choose from + vpcs = ec2.get_all_vpcs() + options = [(vpc.vpc_id, str(vpc)) for vpc in vpcs] + print("Can't find a default VPC") + vpc = choice("Select VPC to use", + options=options) + else: # if not running in interactive terminal (e.g Jenkins) + raise + self.__VpcID = vpc.vpc_id return self.__VpcID @property diff --git a/senza/exceptions.py b/senza/exceptions.py index 822221f..cdf4812 100644 --- a/senza/exceptions.py +++ b/senza/exceptions.py @@ -1,6 +1,6 @@ class SenzaException(Exception): """ - Base class for Senza execeptions + Base class for Senza exceptions """ @@ -11,15 +11,6 @@ class InvalidState(SenzaException): """ -class VPCError(SenzaException, AttributeError): - """ - Error raised when there are issues with VPCs configuration - """ - - def __init__(self, message): - super().__init__(message) - - class PiuNotFound(SenzaException, FileNotFoundError): """ Error raised when piu executable is not found diff --git a/senza/manaus/ec2.py b/senza/manaus/ec2.py new file mode 100644 index 0000000..9ea2600 --- /dev/null +++ b/senza/manaus/ec2.py @@ -0,0 +1,84 @@ +from collections import OrderedDict +from typing import Dict, List, Iterator + +import boto3 + +from .exceptions import VPCError + + +class EC2VPC: + + """ + See: + http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#vpc + """ + + def __init__(self, + vpc_id: str, + is_default: bool, + tags: List[Dict[str, str]]): + self.vpc_id = vpc_id + self.is_default = is_default + self.tags = OrderedDict([(t['Key'], t['Value']) for t in tags]) # type: Dict[str, str] + + self.name = self.tags.get('Name', self.vpc_id) + + def __str__(self): + 
return '{name} ({vpc_id})'.format_map(vars(self)) + + def __repr__(self): + return '<EC2VPC: {name} ({vpc_id})>'.format_map(vars(self)) + + @classmethod + def from_boto_vpc(cls, vpc) -> "EC2VPC": + """ + Converts an ec2.VPC as returned by resource.vpcs.all() + + See: + http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#vpc + """ + + return cls(vpc.vpc_id, vpc.is_default, vpc.tags) + + +class EC2: + + def __init__(self, region: str): + self.region = region + + def get_all_vpcs(self) -> Iterator[EC2VPC]: + """ + Get all VPCs from the account + """ + resource = boto3.resource('ec2', self.region) + + for vpc in resource.vpcs.all(): + yield EC2VPC.from_boto_vpc(vpc) + + def get_default_vpc(self) -> EC2VPC: + """ + Get one VPC from the account, either the default or, if only one + exists, that one. + """ + resource = boto3.resource('ec2', self.region) + + number_of_vpcs = 0 + # We shouldn't use the list with .all() because it has internal paging! + for vpc_number, vpc in enumerate(resource.vpcs.all(), start=1): + number_of_vpcs = vpc_number + + if vpc.is_default: + return EC2VPC.from_boto_vpc(vpc) + + if vpc_number == 1: + first_vpc = vpc + + if number_of_vpcs == 0: + raise VPCError("Can't find any VPC!", number_of_vpcs) + elif number_of_vpcs == 1: + # Use the only one VPC if it's not the default VPC found + return EC2VPC.from_boto_vpc(first_vpc) + else: + raise VPCError("Multiple VPCs are only supported if one " + "VPC is the default VPC (IsDefault=true)!", + number_of_vpcs) diff --git a/senza/manaus/exceptions.py b/senza/manaus/exceptions.py index 08b63be..d07b441 100644 --- a/senza/manaus/exceptions.py +++ b/senza/manaus/exceptions.py @@ -1,6 +1,6 @@ class ManausException(Exception): """ - Base class for Manaus execeptions + Base class for Manaus exceptions """ @@ -36,3 +36,13 @@ class RecordNotFound(ManausException): def __init__(self, name: str): super().__init__('Route 53 Record not found: {}'.format(name)) + + +class VPCError(ManausException, AttributeError): + """ + Error raised when there are issues with VPCs configuration + """ + + def __init__(self, message: str, number_of_vpcs: int=None): + super().__init__(message) + self.number_of_vpcs = number_of_vpcs diff --git a/senza/manaus/route53.py b/senza/manaus/route53.py index 0075eb0..6aab215 100644 --- a/senza/manaus/route53.py +++ b/senza/manaus/route53.py @@ -112,8 +112,9 @@ class Route53HostedZone: 'ResourceRecordSet': record.boto_dict} change_batch['Changes'].append(change) - client.change_resource_record_sets(HostedZoneId=self.id, - ChangeBatch=change_batch) + if change_batch['Changes']: + client.change_resource_record_sets(HostedZoneId=self.id, + ChangeBatch=change_batch) return change_batch
zalando-stups/senza
87feda79265966aa5d6a67f3a652e2f0d7961e64
diff --git a/tests/test_manaus/test_ec2.py b/tests/test_manaus/test_ec2.py new file mode 100644 index 0000000..36f1588 --- /dev/null +++ b/tests/test_manaus/test_ec2.py @@ -0,0 +1,95 @@ +from unittest.mock import MagicMock + +import pytest +from senza.manaus.ec2 import EC2, EC2VPC +from senza.manaus.exceptions import VPCError + + +def test_from_boto_vpc(): + mock_vpc = MagicMock() + mock_vpc.vpc_id = 'vpc-id' + mock_vpc.is_default = True + mock_vpc.tags = [{'Key': 'mykey', 'Value': 'myvalue'}, + {'Key': 'theanswer', 'Value': '42'}, + {'Key': 'Name', 'Value': 'my-vpc'}] + vpc = EC2VPC.from_boto_vpc(mock_vpc) + + assert vpc.vpc_id == 'vpc-id' + assert vpc.is_default + assert vpc.tags['mykey'] == 'myvalue' + assert vpc.tags['theanswer'] == '42' + assert vpc.name == 'my-vpc' + + +def test_get_default_vpc(monkeypatch): + mock_vpc1 = MagicMock() + mock_vpc1.vpc_id = 'vpc-id1' + mock_vpc1.is_default = True + mock_vpc1.tags = [] + + mock_vpc2 = MagicMock() + mock_vpc2.vpc_id = 'vpc-id2' + mock_vpc2.is_default = False + mock_vpc2.tags = [] + + mock_vpc3 = MagicMock() + mock_vpc3.vpc_id = 'vpc-id3' + mock_vpc3.is_default = False + mock_vpc3.tags = [] + + m_resource = MagicMock() + m_resource.return_value = m_resource + monkeypatch.setattr('boto3.resource', m_resource) + + ec2 = EC2('eu-test-1') + + # return default vpc + m_resource.vpcs.all.return_value = [mock_vpc1, mock_vpc2] + vpc1 = ec2.get_default_vpc() + assert vpc1.vpc_id == 'vpc-id1' + + # ony one, non default + m_resource.vpcs.all.return_value = [mock_vpc2] + vpc2 = ec2.get_default_vpc() + assert vpc2.vpc_id == 'vpc-id2' + + # no vpcs + m_resource.vpcs.all.return_value = [] + with pytest.raises(VPCError) as exc_info: + ec2.get_default_vpc() + assert str(exc_info.value) == "Can't find any VPC!" + + # no vpcs + m_resource.vpcs.all.return_value = [mock_vpc2, mock_vpc3] + with pytest.raises(VPCError) as exc_info: + ec2.get_default_vpc() + + assert str(exc_info.value) == ("Multiple VPCs are only supported if one " + "VPC is the default VPC (IsDefault=true)!") + + +def test_get_all_vpc(monkeypatch): + mock_vpc1 = MagicMock() + mock_vpc1.vpc_id = 'vpc-id1' + mock_vpc1.is_default = True + mock_vpc1.tags = [] + + mock_vpc2 = MagicMock() + mock_vpc2.vpc_id = 'vpc-id2' + mock_vpc2.is_default = False + mock_vpc2.tags = [] + + mock_vpc3 = MagicMock() + mock_vpc3.vpc_id = 'vpc-id3' + mock_vpc3.is_default = False + mock_vpc3.tags = [] + + m_resource = MagicMock() + m_resource.return_value = m_resource + monkeypatch.setattr('boto3.resource', m_resource) + + ec2 = EC2('eu-test-1') + + m_resource.vpcs.all.return_value = [mock_vpc1, mock_vpc2, mock_vpc3] + vpcs = list(ec2.get_all_vpcs()) + assert len(vpcs) == 3 diff --git a/tests/test_manaus/test_route53.py b/tests/test_manaus/test_route53.py index 2441ba1..24c5441 100644 --- a/tests/test_manaus/test_route53.py +++ b/tests/test_manaus/test_route53.py @@ -209,6 +209,12 @@ def test_hosted_zone_upsert(monkeypatch): ChangeBatch={'Changes': expected_changes, 'Comment': 'test'}) + m_client.change_resource_record_sets.reset_mock() + change_batch2 = hosted_zone.upsert([], comment="test") + assert change_batch2['Comment'] == "test" + assert change_batch2['Changes'] == [] + m_client.change_resource_record_sets.assert_not_called() + def test_hosted_zone_create(monkeypatch): m_client = MagicMock()
Only call the API for Route53 config if there are changes to be made

Only call the AWS API when there are actual changes to be made. Users are reporting this exception:

```
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed: Invalid length for parameter ChangeBatch.Changes, value: 0, valid range: 1-inf
```

The error comes from https://github.com/zalando-stups/senza/blob/master/senza/manaus/route53.py#L114-L115
0.0
87feda79265966aa5d6a67f3a652e2f0d7961e64
[ "tests/test_manaus/test_ec2.py::test_from_boto_vpc", "tests/test_manaus/test_ec2.py::test_get_default_vpc", "tests/test_manaus/test_ec2.py::test_get_all_vpc", "tests/test_manaus/test_route53.py::test_hosted_zone_from_boto_dict", "tests/test_manaus/test_route53.py::test_record_from_boto_dict", "tests/test_manaus/test_route53.py::test_route53_hosted_zones", "tests/test_manaus/test_route53.py::test_route53_hosted_zones_paginated", "tests/test_manaus/test_route53.py::test_get_records", "tests/test_manaus/test_route53.py::test_route53_record_boto_dict", "tests/test_manaus/test_route53.py::test_hosted_zone_upsert", "tests/test_manaus/test_route53.py::test_hosted_zone_create", "tests/test_manaus/test_route53.py::test_hosted_zone_delete", "tests/test_manaus/test_route53.py::test_to_alias", "tests/test_manaus/test_route53.py::test_convert_domain_records_to_alias", "tests/test_manaus/test_route53.py::test_hosted_zone_get_by_domain_name", "tests/test_manaus/test_route53.py::test_hosted_zone_get_by_id", "tests/test_manaus/test_route53.py::test_get_by_domain_name" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-08-09 13:36:53+00:00
apache-2.0
6,322
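A sketch of the guard added in the record above: only call the Route 53 API when the change batch actually contains changes. The client is passed in rather than constructed, to keep the sketch small.

```python
def upsert_records(client, hosted_zone_id: str, changes: list, comment: str = "") -> dict:
    """Submit a Route 53 change batch, skipping the API call when it is empty.

    change_resource_record_sets rejects a ChangeBatch with zero changes
    (ParamValidationError: valid range 1-inf), so an empty batch is simply
    returned without calling AWS.
    """
    change_batch = {"Comment": comment, "Changes": changes}
    if changes:
        client.change_resource_record_sets(HostedZoneId=hosted_zone_id,
                                            ChangeBatch=change_batch)
    return change_batch
```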
zalando-stups__senza-349
diff --git a/senza/manaus/ec2.py b/senza/manaus/ec2.py index 9ea2600..6dee960 100644 --- a/senza/manaus/ec2.py +++ b/senza/manaus/ec2.py @@ -1,5 +1,5 @@ from collections import OrderedDict -from typing import Dict, List, Iterator +from typing import Dict, List, Iterator, Optional import boto3 @@ -16,9 +16,10 @@ class EC2VPC: def __init__(self, vpc_id: str, is_default: bool, - tags: List[Dict[str, str]]): + tags: Optional[List[Dict[str, str]]]): self.vpc_id = vpc_id self.is_default = is_default + tags = tags or [] # type: List[Dict[str, str]] self.tags = OrderedDict([(t['Key'], t['Value']) for t in tags]) # type: Dict[str, str] self.name = self.tags.get('Name', self.vpc_id)
zalando-stups/senza
e0331771ea0cc64d3ba5896f31d954f832a82ba9
diff --git a/tests/test_manaus/test_ec2.py b/tests/test_manaus/test_ec2.py index 36f1588..4dd7ae6 100644 --- a/tests/test_manaus/test_ec2.py +++ b/tests/test_manaus/test_ec2.py @@ -37,6 +37,11 @@ def test_get_default_vpc(monkeypatch): mock_vpc3.is_default = False mock_vpc3.tags = [] + mock_vpc4 = MagicMock() + mock_vpc4.vpc_id = 'vpc-id4' + mock_vpc4.is_default = True + mock_vpc4.tags = None + m_resource = MagicMock() m_resource.return_value = m_resource monkeypatch.setattr('boto3.resource', m_resource) @@ -59,11 +64,16 @@ def test_get_default_vpc(monkeypatch): ec2.get_default_vpc() assert str(exc_info.value) == "Can't find any VPC!" - # no vpcs + # multiple vpcs m_resource.vpcs.all.return_value = [mock_vpc2, mock_vpc3] with pytest.raises(VPCError) as exc_info: ec2.get_default_vpc() + # no tags in vpc return default vpc + m_resource.vpcs.all.return_value = [mock_vpc4, mock_vpc2] + vpc3 = ec2.get_default_vpc() + assert vpc3.vpc_id == 'vpc-id4' + assert str(exc_info.value) == ("Multiple VPCs are only supported if one " "VPC is the default VPC (IsDefault=true)!")
Better error message for "create" and VPC tags When trying to create a stack with a VPC that has no tags the user gets the following message: ``` senza create deploy-definition.yaml 1 0.1 Generating Cloud Formation template.. EXCEPTION OCCURRED: 'NoneType' object is not iterable Unknown Error: 'NoneType' object is not iterable. Please create an issue with the content of /var/folders/yd/p61l98fn2g9fffwgjs819gr1sprr6d/T/senza-traceback-xgrqlxbj ``` In /var/folders/yd/p61l98fn2g9fffwgjs819gr1sprr6d/T/senza-traceback-xgrqlxbj: ``` Traceback (most recent call last): File "/usr/local/lib/python3.5/site-packages/senza/error_handling.py", line 76, in __call__ self.function(*args, **kwargs) File "/usr/local/lib/python3.5/site-packages/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.5/site-packages/click/core.py", line 696, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.5/site-packages/click/core.py", line 1060, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.5/site-packages/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.5/site-packages/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.5/site-packages/senza/cli.py", line 663, in create data = create_cf_template(definition, region, version, parameter, force, parameter_file) File "/usr/local/lib/python3.5/site-packages/senza/cli.py", line 746, in create_cf_template data = evaluate(definition.copy(), args, account_info, force) File "/usr/local/lib/python3.5/site-packages/senza/cli.py", line 242, in evaluate definition = componentfn(definition, configuration, args, info, force, account_info) File "/usr/local/lib/python3.5/site-packages/senza/components/stups_auto_configuration.py", line 31, in component_stups_auto_configuration vpc_id = configuration.get('VpcId', account_info.VpcID) File "/usr/local/lib/python3.5/site-packages/senza/cli.py", line 329, in VpcID vpc = ec2.get_default_vpc() File "/usr/local/lib/python3.5/site-packages/senza/manaus/ec2.py", line 71, in get_default_vpc return EC2VPC.from_boto_vpc(vpc) File "/usr/local/lib/python3.5/site-packages/senza/manaus/ec2.py", line 41, in from_boto_vpc return cls(vpc.vpc_id, vpc.is_default, vpc.tags) File "/usr/local/lib/python3.5/site-packages/senza/manaus/ec2.py", line 22, in __init__ self.tags = OrderedDict([(t['Key'], t['Value']) for t in tags]) # type: Dict[str, str] TypeError: 'NoneType' object is not iterable ``` The error message should be more descriptive.
0.0
e0331771ea0cc64d3ba5896f31d954f832a82ba9
[ "tests/test_manaus/test_ec2.py::test_get_default_vpc" ]
[ "tests/test_manaus/test_ec2.py::test_from_boto_vpc", "tests/test_manaus/test_ec2.py::test_get_all_vpc" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2016-09-12 07:22:12+00:00
apache-2.0
6,323
zalando-stups__senza-365
diff --git a/senza/manaus/cloudformation.py b/senza/manaus/cloudformation.py
index b8529be..a53b2e9 100644
--- a/senza/manaus/cloudformation.py
+++ b/senza/manaus/cloudformation.py
@@ -124,6 +124,11 @@ class CloudFormationStack:
         for resource in resources:
             resource_type = resource["ResourceType"]
             if resource_type == ResourceType.route53_record_set:
+                physical_resource_id = resource.get('PhysicalResourceId')
+                if physical_resource_id is None:
+                    # if there is no Physical Resource Id we can't fetch the
+                    # record
+                    continue
                 records = Route53.get_records(name=resource['PhysicalResourceId'])
                 for record in records:
                     if (record.set_identifier is None or
zalando-stups/senza
fe537a4234d2dd978ef0ff04fba8e5507dad203d
diff --git a/tests/test_manaus/test_cloudformation.py b/tests/test_manaus/test_cloudformation.py
index f700c77..44b868a 100644
--- a/tests/test_manaus/test_cloudformation.py
+++ b/tests/test_manaus/test_cloudformation.py
@@ -99,6 +99,12 @@ def test_cf_resources(monkeypatch):
                  'PhysicalResourceId': 'myapp1.example.com',
                  'ResourceStatus': 'CREATE_COMPLETE',
                  'ResourceType': 'AWS::Route53::RecordSet'},
+                {'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 3,
+                                                  45, 70000,
+                                                  tzinfo=timezone.utc),
+                 'LogicalResourceId': 'ThisWillBeIgnored',
+                 'ResourceStatus': 'CREATE_COMPLETE',
+                 'ResourceType': 'AWS::Route53::RecordSet'},
                 {'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 3,
                                                   43, 871000,
                                                   tzinfo=timezone.utc),
Unknown Error: 'PhysicalResourceId'
I got a `senza delete` error:

Unknown Error: 'PhysicalResourceId'. Please create an issue with the content of /tmp/senza-traceback-8ecz_cyz

****************************************senza-traceback-8ecz_cyz****************************************************

```
Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/senza/error_handling.py", line 82, in __call__
    self.function(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 716, in __call__
    return self.main(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 696, in main
    rv = self.invoke(ctx)
  File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1060, in invoke
    return _process_result(sub_ctx.command.invoke(sub_ctx))
  File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 889, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 534, in invoke
    return callback(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 835, in delete
    for r in stack.resources:
  File "/usr/local/lib/python3.5/dist-packages/senza/manaus/cloudformation.py", line 127, in resources
    records = Route53.get_records(name=resource['PhysicalResourceId'])
KeyError: 'PhysicalResourceId'
```
0.0
fe537a4234d2dd978ef0ff04fba8e5507dad203d
[ "tests/test_manaus/test_cloudformation.py::test_cf_resources" ]
[ "tests/test_manaus/test_cloudformation.py::test_get_by_stack_name", "tests/test_manaus/test_cloudformation.py::test_get_stacks", "tests/test_manaus/test_cloudformation.py::test_get_by_stack_name_not_found", "tests/test_manaus/test_cloudformation.py::test_template", "tests/test_manaus/test_cloudformation.py::test_stack_update" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2016-09-28 08:02:15+00:00
apache-2.0
6,324
zalando-stups__senza-372
diff --git a/senza/aws.py b/senza/aws.py
index 1284ba3..c7f0468 100644
--- a/senza/aws.py
+++ b/senza/aws.py
@@ -55,13 +55,12 @@ def is_status_complete(status: str):
 def get_security_group(region: str, sg_name: str):
     ec2 = boto3.resource('ec2', region)
     try:
-        sec_groups = list(ec2.security_groups.filter(
-            Filters=[{'Name': 'group-name', 'Values': [sg_name]}]
-        ))
-        if not sec_groups:
-            return None
-        # FIXME: What if we have 2 VPC, with a SG with the same name?!
-        return sec_groups[0]
+        # first try by tag name then by group-name (cannot be changed)
+        for _filter in [{'Name': 'tag:Name', 'Values': [sg_name]}, {'Name': 'group-name', 'Values': [sg_name]}]:
+            sec_groups = list(ec2.security_groups.filter(Filters=[_filter]))
+            if sec_groups:
+                # FIXME: What if we have 2 VPC, with a SG with the same name?!
+                return sec_groups[0]
     except ClientError as e:
         error_code = extract_client_error_code(e)
         if error_code == 'InvalidGroup.NotFound':
zalando-stups/senza
56e263195218e3fe052e95221b2d9528c4343264
diff --git a/tests/test_aws.py b/tests/test_aws.py
index 4ca762a..8dd5b44 100644
--- a/tests/test_aws.py
+++ b/tests/test_aws.py
@@ -14,6 +14,21 @@ def test_get_security_group(monkeypatch):
     assert results == get_security_group('myregion', 'group_inexistant')


+def test_get_security_group_by_tag_name(monkeypatch):
+
+    def mock_filter(Filters):
+        if Filters[0]['Name'] == 'tag:Name' and Filters[0]['Values'] == ['my-sg']:
+            sg = MagicMock()
+            sg.id = 'sg-123'
+            return [sg]
+
+    ec2 = MagicMock()
+    ec2.security_groups.filter = mock_filter
+    monkeypatch.setattr('boto3.resource', MagicMock(return_value=ec2))
+
+    assert get_security_group('myregion', 'my-sg').id == 'sg-123'
+
+
 def test_resolve_security_groups(monkeypatch):
     ec2 = MagicMock()
     ec2.security_groups.filter = MagicMock(side_effect=[
Lookup SecurityGroups by the tag "Name" rather than GroupName
Both AWS API and CloudFormation allow to refer to a security group by its name if the operation runs in EC2 Classic or the default VPC. Unfortunately it uses the `GroupName` attribute that is automatically generated by AWS if the SG is a part of CloudFormation stack.

It would be a good idea to extend Senza to lookup SG during the CF template generation phase and to use the _tag_ `Name` instead. The tag can be set by another ("system") Senza stack definition, thus allowing the cross-stack references.

Another option would be to support the new cross-stack references that are recently introduced by Amazon: https://aws.amazon.com/blogs/aws/aws-cloudformation-update-yaml-cross-stack-references-simplified-substitution/
0.0
56e263195218e3fe052e95221b2d9528c4343264
[ "tests/test_aws.py::test_get_security_group_by_tag_name" ]
[ "tests/test_aws.py::test_get_security_group", "tests/test_aws.py::test_resolve_security_groups", "tests/test_aws.py::test_create", "tests/test_aws.py::test_encrypt", "tests/test_aws.py::test_list_kms_keys", "tests/test_aws.py::test_get_vpc_attribute", "tests/test_aws.py::test_get_account_id", "tests/test_aws.py::test_get_account_alias", "tests/test_aws.py::test_resolve_referenced_resource", "tests/test_aws.py::test_resolve_referenced_resource_with_update_complete_status", "tests/test_aws.py::test_resolve_referenced_output_when_stack_is_in_update_complete_status", "tests/test_aws.py::test_parse_time", "tests/test_aws.py::test_required_capabilities", "tests/test_aws.py::test_resolve_topic_arn", "tests/test_aws.py::test_matches_any", "tests/test_aws.py::test_get_tag" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2016-09-30 08:21:15+00:00
apache-2.0
6,325
zalando-stups__senza-374
diff --git a/senza/aws.py b/senza/aws.py
index 1284ba3..c7f0468 100644
--- a/senza/aws.py
+++ b/senza/aws.py
@@ -55,13 +55,12 @@ def is_status_complete(status: str):
 def get_security_group(region: str, sg_name: str):
     ec2 = boto3.resource('ec2', region)
     try:
-        sec_groups = list(ec2.security_groups.filter(
-            Filters=[{'Name': 'group-name', 'Values': [sg_name]}]
-        ))
-        if not sec_groups:
-            return None
-        # FIXME: What if we have 2 VPC, with a SG with the same name?!
-        return sec_groups[0]
+        # first try by tag name then by group-name (cannot be changed)
+        for _filter in [{'Name': 'tag:Name', 'Values': [sg_name]}, {'Name': 'group-name', 'Values': [sg_name]}]:
+            sec_groups = list(ec2.security_groups.filter(Filters=[_filter]))
+            if sec_groups:
+                # FIXME: What if we have 2 VPC, with a SG with the same name?!
+                return sec_groups[0]
     except ClientError as e:
         error_code = extract_client_error_code(e)
         if error_code == 'InvalidGroup.NotFound':
diff --git a/senza/subcommands/root.py b/senza/subcommands/root.py
index 9162122..e5dab09 100644
--- a/senza/subcommands/root.py
+++ b/senza/subcommands/root.py
@@ -6,6 +6,7 @@ from typing import Optional
 import click
 import requests
 import senza
+import sys
 from clickclick import AliasedGroup, warning

 from ..arguments import GLOBAL_OPTIONS, region_option
@@ -81,6 +82,8 @@ def check_senza_version(current_version: str):
     Checks if senza is updated and prints a warning with instructions to
     update if it's not.
     """
+    if not sys.stdout.isatty():
+        return
     current_version = LooseVersion(current_version)
     try:
         latest_version = get_latest_version()
zalando-stups/senza
56e263195218e3fe052e95221b2d9528c4343264
diff --git a/tests/test_aws.py b/tests/test_aws.py
index 4ca762a..8dd5b44 100644
--- a/tests/test_aws.py
+++ b/tests/test_aws.py
@@ -14,6 +14,21 @@ def test_get_security_group(monkeypatch):
     assert results == get_security_group('myregion', 'group_inexistant')


+def test_get_security_group_by_tag_name(monkeypatch):
+
+    def mock_filter(Filters):
+        if Filters[0]['Name'] == 'tag:Name' and Filters[0]['Values'] == ['my-sg']:
+            sg = MagicMock()
+            sg.id = 'sg-123'
+            return [sg]
+
+    ec2 = MagicMock()
+    ec2.security_groups.filter = mock_filter
+    monkeypatch.setattr('boto3.resource', MagicMock(return_value=ec2))
+
+    assert get_security_group('myregion', 'my-sg').id == 'sg-123'
+
+
 def test_resolve_security_groups(monkeypatch):
     ec2 = MagicMock()
     ec2.security_groups.filter = MagicMock(side_effect=[
diff --git a/tests/test_subcommands/test_root.py b/tests/test_subcommands/test_root.py
index 796d6c9..86f5a4f 100644
--- a/tests/test_subcommands/test_root.py
+++ b/tests/test_subcommands/test_root.py
@@ -34,8 +34,23 @@ def mock_warning(monkeypatch):
     return mock


+@fixture()
+def mock_tty(monkeypatch):
+    # check_senza_version only prints if we have a TTY
+    monkeypatch.setattr('sys.stdout.isatty', lambda: True)
+
+
+def test_check_senza_version_notty(monkeypatch, mock_get_app_dir, mock_get, mock_warning):
+    with TemporaryDirectory() as temp_dir:
+        mock_get_app_dir.return_value = temp_dir
+        monkeypatch.setattr("senza.subcommands.root.__file__",
+                            '/home/someuser/pymodules/root.py')
+        check_senza_version("0.40")
+        mock_warning.assert_not_called()
+
+
 def test_check_senza_version(monkeypatch,
-                             mock_get_app_dir, mock_get, mock_warning):
+                             mock_get_app_dir, mock_get, mock_warning, mock_tty):
     with TemporaryDirectory() as temp_dir_1:
         mock_get_app_dir.return_value = temp_dir_1

@@ -72,7 +87,7 @@ def test_check_senza_version(monkeypatch,
     )


-def test_check_senza_version_timeout(mock_get_app_dir, mock_get, mock_warning):
+def test_check_senza_version_timeout(mock_get_app_dir, mock_get, mock_warning, mock_tty):
     with TemporaryDirectory() as temp_dir:
         mock_get_app_dir.return_value = temp_dir
         mock_get.side_effect = Timeout
@@ -83,7 +98,8 @@ def test_check_senza_version_timeout(mock_get_app_dir, mock_get, mock_warning):

 def test_check_senza_version_outdated_cache(monkeypatch,  # noqa: F811
                                             mock_get_app_dir, mock_get,
-                                            mock_warning):
+                                            mock_warning,
+                                            mock_tty):
     monkeypatch.setattr("senza.subcommands.root.__file__",
                         '/usr/pymodules/root.py')
     with TemporaryDirectory() as temp_dir:
@@ -106,7 +122,8 @@ def test_check_senza_version_outdated_cache(monkeypatch,  # noqa: F811

 def test_check_senza_version_exception(monkeypatch,
                                        mock_get_app_dir, mock_get,
-                                       mock_warning):
+                                       mock_warning,
+                                       mock_tty):
     mock_sentry = MagicMock()
     monkeypatch.setattr("senza.subcommands.root.sentry", mock_sentry)
     with TemporaryDirectory() as temp_dir:
Senza version warning should not be printed for non-TTYs (piping result to awk etc)
The Senza version check currently destroys popular shell scripting such as:

```
senza li -o tsv | tail -n +2 | awk '{ print $1 }'
```

We should only print the warning if `sys.stdout` is a TTY.
0.0
56e263195218e3fe052e95221b2d9528c4343264
[ "tests/test_aws.py::test_get_security_group_by_tag_name", "tests/test_subcommands/test_root.py::test_check_senza_version_notty" ]
[ "tests/test_aws.py::test_get_security_group", "tests/test_aws.py::test_resolve_security_groups", "tests/test_aws.py::test_create", "tests/test_aws.py::test_encrypt", "tests/test_aws.py::test_list_kms_keys", "tests/test_aws.py::test_get_vpc_attribute", "tests/test_aws.py::test_get_account_id", "tests/test_aws.py::test_get_account_alias", "tests/test_aws.py::test_resolve_referenced_resource", "tests/test_aws.py::test_resolve_referenced_resource_with_update_complete_status", "tests/test_aws.py::test_resolve_referenced_output_when_stack_is_in_update_complete_status", "tests/test_aws.py::test_parse_time", "tests/test_aws.py::test_required_capabilities", "tests/test_aws.py::test_resolve_topic_arn", "tests/test_aws.py::test_matches_any", "tests/test_aws.py::test_get_tag", "tests/test_subcommands/test_root.py::test_check_senza_version", "tests/test_subcommands/test_root.py::test_check_senza_version_timeout", "tests/test_subcommands/test_root.py::test_check_senza_version_outdated_cache", "tests/test_subcommands/test_root.py::test_check_senza_version_exception", "tests/test_subcommands/test_root.py::test_version" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2016-09-30 08:47:52+00:00
apache-2.0
6,326
zalando-stups__senza-397
diff --git a/senza/subcommands/root.py b/senza/subcommands/root.py
index a121009..e163658 100644
--- a/senza/subcommands/root.py
+++ b/senza/subcommands/root.py
@@ -99,9 +99,9 @@ def check_senza_version(current_version: str):
     if latest_version is not None and current_version < latest_version:
         if __file__.startswith('/home'):
             # if it's installed in the user folder
-            cmd = "pip install --upgrade stups-senza"
+            cmd = "pip3 install --upgrade stups-senza"
         else:
-            cmd = "sudo pip install --upgrade stups-senza"
+            cmd = "sudo pip3 install --upgrade stups-senza"
         warning("Your senza version ({current}) is outdated. "
                 "Please install the new one using '{cmd}'".format(current=current_version,
                                                                   cmd=cmd))
zalando-stups/senza
7d3726dec5badf48bab03bcee60eee43281b512c
diff --git a/tests/test_subcommands/test_root.py b/tests/test_subcommands/test_root.py
index 86f5a4f..c16be12 100644
--- a/tests/test_subcommands/test_root.py
+++ b/tests/test_subcommands/test_root.py
@@ -71,7 +71,7 @@ def test_check_senza_version(monkeypatch,
         check_senza_version("0.40")
     mock_warning.assert_called_once_with(
         "Your senza version (0.40) is outdated. "
-        "Please install the new one using 'pip install --upgrade stups-senza'"
+        "Please install the new one using 'pip3 install --upgrade stups-senza'"
     )

     with TemporaryDirectory() as temp_dir_4:
@@ -83,7 +83,7 @@ def test_check_senza_version(monkeypatch,
     mock_warning.assert_called_once_with(
         "Your senza version (0.40) is outdated. "
         "Please install the new one using "
-        "'sudo pip install --upgrade stups-senza'"
+        "'sudo pip3 install --upgrade stups-senza'"
     )


@@ -115,7 +115,7 @@ def test_check_senza_version_outdated_cache(monkeypatch,  # noqa: F811
     mock_warning.assert_called_once_with(
         "Your senza version (0.40) is outdated. "
         "Please install the new one using "
-        "'sudo pip install --upgrade stups-senza'"
+        "'sudo pip3 install --upgrade stups-senza'"
     )
Discrepancy between README and error messages
README.md states:
`sudo pip3 install --upgrade stups-senza`

But if you have an old version of senza installed, the reported message is:
`...Please install the new one using 'sudo pip install --upgrade stups-senza'`

Note that `pip3` is specified in README and `pip` is specified in the error message.
0.0
7d3726dec5badf48bab03bcee60eee43281b512c
[ "tests/test_subcommands/test_root.py::test_check_senza_version", "tests/test_subcommands/test_root.py::test_check_senza_version_outdated_cache" ]
[ "tests/test_subcommands/test_root.py::test_check_senza_version_notty", "tests/test_subcommands/test_root.py::test_check_senza_version_timeout", "tests/test_subcommands/test_root.py::test_check_senza_version_exception", "tests/test_subcommands/test_root.py::test_version" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2016-10-14 14:42:48+00:00
apache-2.0
6,327
zalando-stups__senza-414
diff --git a/senza/components/coreos_auto_configuration.py b/senza/components/coreos_auto_configuration.py
new file mode 100644
index 0000000..707fdaf
--- /dev/null
+++ b/senza/components/coreos_auto_configuration.py
@@ -0,0 +1,23 @@
+import requests
+
+from senza.components.subnet_auto_configuration import component_subnet_auto_configuration
+from senza.utils import ensure_keys
+
+
+def find_coreos_image(release_channel: str, region: str):
+    '''Find the latest CoreOS AMI'''
+
+    response = requests.get('https://coreos.com/dist/aws/aws-{}.json'.format(release_channel), timeout=5)
+    response.raise_for_status()
+    data = response.json()
+    return data[region]['hvm']
+
+
+def component_coreos_auto_configuration(definition, configuration, args, info, force, account_info):
+    ami_id = find_coreos_image(configuration.get('ReleaseChannel') or 'stable', args.region)
+    configuration = ensure_keys(configuration, "Images", 'LatestCoreOSImage', args.region)
+    configuration["Images"]['LatestCoreOSImage'][args.region] = ami_id
+
+    component_subnet_auto_configuration(definition, configuration, args, info, force, account_info)
+
+    return definition
zalando-stups/senza
6cc75c6fdeb0ad9d9066e4658331ba9270a16f06
diff --git a/tests/test_components.py b/tests/test_components.py
index e9a3f7c..d109065 100644
--- a/tests/test_components.py
+++ b/tests/test_components.py
@@ -11,6 +11,7 @@ from senza.components.auto_scaling_group import (component_auto_scaling_group,
                                                  normalize_asg_success,
                                                  normalize_network_threshold,
                                                  to_iso8601_duration)
+from senza.components.coreos_auto_configuration import component_coreos_auto_configuration
 from senza.components.elastic_load_balancer import (component_elastic_load_balancer,
                                                     get_load_balancer_name)
 from senza.components.elastic_load_balancer_v2 import component_elastic_load_balancer_v2
@@ -1094,3 +1095,29 @@ def test_component_subnet_auto_configuration(monkeypatch):
     }
     result = component_subnet_auto_configuration(definition, configuration, args, info, False, MagicMock())
     assert ['subnet-1', 'subnet-2'] == result['Mappings']['ServerSubnets']['foo']['Subnets']
+
+
+def test_component_coreos_auto_configuration(monkeypatch):
+    configuration = {
+        'ReleaseChannel': 'gamma'
+    }
+    info = {'StackName': 'foobar', 'StackVersion': '0.1'}
+    definition = {"Resources": {}}
+
+    args = MagicMock()
+    args.region = "foo"
+
+    subnet1 = MagicMock()
+    subnet1.id = 'subnet-1'
+
+    ec2 = MagicMock()
+    ec2.subnets.filter.return_value = [subnet1]
+
+    get = MagicMock()
+    get.return_value.json.return_value = {'foo': {'hvm': 'ami-007'}}
+
+    monkeypatch.setattr('boto3.resource', lambda *args: ec2)
+    monkeypatch.setattr('requests.get', get)
+    result = component_coreos_auto_configuration(definition, configuration, args, info, False, MagicMock())
+    assert 'ami-007' == result['Mappings']['Images']['foo']['LatestCoreOSImage']
+
CoreOS Auto Configuration Component
We are currently testing [Kubernetes on AWS](https://github.com/zalando-incubator/kubernetes-on-aws) with CoreOS AMIs. We take the AMI ID from https://coreos.com/dist/aws/aws-stable.json and put it into the Senza definition YAML manually.

It would be better to have a Senza component to automatically find and use the latest CoreOS AMI (similar to what the `StupsAutoConfiguration` component does for Taupage).
0.0
6cc75c6fdeb0ad9d9066e4658331ba9270a16f06
[ "tests/test_components.py::test_invalid_component", "tests/test_components.py::test_component_iam_role", "tests/test_components.py::test_get_merged_policies", "tests/test_components.py::test_component_load_balancer_healthcheck", "tests/test_components.py::test_component_load_balancer_idletimeout", "tests/test_components.py::test_component_load_balancer_cert_arn", "tests/test_components.py::test_component_load_balancer_http_only", "tests/test_components.py::test_component_load_balancer_listeners_ssl", "tests/test_components.py::test_component_load_balancer_namelength", "tests/test_components.py::test_component_stups_auto_configuration", "tests/test_components.py::test_component_stups_auto_configuration_vpc_id", "tests/test_components.py::test_component_redis_node", "tests/test_components.py::test_component_redis_cluster", "tests/test_components.py::test_component_taupage_auto_scaling_group_user_data_without_ref", "tests/test_components.py::test_component_taupage_auto_scaling_group_user_data_with_ref", "tests/test_components.py::test_component_taupage_auto_scaling_group_user_data_with_lists_and_empty_dict", "tests/test_components.py::test_component_auto_scaling_group_configurable_properties", "tests/test_components.py::test_component_auto_scaling_group_custom_tags", "tests/test_components.py::test_component_auto_scaling_group_configurable_properties2", "tests/test_components.py::test_component_auto_scaling_group_metric_type", "tests/test_components.py::test_component_auto_scaling_group_optional_metric_type", "tests/test_components.py::test_to_iso8601_duration", "tests/test_components.py::test_normalize_asg_success", "tests/test_components.py::test_normalize_network_threshold", "tests/test_components.py::test_check_application_id", "tests/test_components.py::test_check_application_version", "tests/test_components.py::test_get_load_balancer_name", "tests/test_components.py::test_weighted_dns_load_balancer_v2", "tests/test_components.py::test_max_description_length", "tests/test_components.py::test_template_parameters", "tests/test_components.py::test_component_load_balancer_default_internal_scheme", "tests/test_components.py::test_component_load_balancer_v2_default_internal_scheme", "tests/test_components.py::test_component_load_balancer_v2_target_group_vpc_id", "tests/test_components.py::test_component_subnet_auto_configuration", "tests/test_components.py::test_component_coreos_auto_configuration" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2016-11-03 15:08:53+00:00
apache-2.0
6,328
zalando-stups__senza-428
diff --git a/senza/aws.py b/senza/aws.py
index 0f87a7e..e07f65d 100644
--- a/senza/aws.py
+++ b/senza/aws.py
@@ -9,6 +9,7 @@ import yaml
 from botocore.exceptions import ClientError
 from click import FileError

+from .exceptions import SecurityGroupNotFound
 from .manaus.boto_proxy import BotoClientProxy
 from .manaus.utils import extract_client_error_code
 from .stack_references import check_file_exceptions
@@ -108,14 +109,14 @@ def resolve_security_group(security_group, region: str):
     if isinstance(security_group, dict):
         sg = resolve_referenced_resource(security_group, region)
         if not sg:
-            raise ValueError('Referenced Security Group "{}" does not exist'.format(security_group))
+            raise SecurityGroupNotFound(security_group)
         return sg
     elif security_group.startswith('sg-'):
         return security_group
     else:
         sg = get_security_group(region, security_group)
         if not sg:
-            raise ValueError('Security Group "{}" does not exist'.format(security_group))
+            raise SecurityGroupNotFound(security_group)
         return sg.id
diff --git a/senza/error_handling.py b/senza/error_handling.py
index e56378b..32ffb02 100644
--- a/senza/error_handling.py
+++ b/senza/error_handling.py
@@ -15,7 +15,7 @@ from clickclick import fatal_error
 from raven import Client

 from .configuration import configuration
-from .exceptions import InvalidDefinition, PiuNotFound
+from .exceptions import InvalidDefinition, PiuNotFound, SecurityGroupNotFound
 from .manaus.exceptions import (ELBNotFound, HostedZoneNotFound, InvalidState,
                                 RecordNotFound)
 from .manaus.utils import extract_client_error_code
@@ -112,7 +112,7 @@ class HandleExceptions:
             sys.stdout.flush()
             if is_credentials_expired_error(client_error):
                 die_fatal_error('AWS credentials have expired.\n'
-                                'Use the "mai" command line tool to get a new '
+                                'Use the "zaws" command line tool to get a new '
                                 'temporary access key.')
             elif is_access_denied_error(client_error):
                 die_fatal_error(
@@ -136,6 +136,10 @@ class HandleExceptions:
         except (ELBNotFound, HostedZoneNotFound, RecordNotFound,
                 InvalidDefinition, InvalidState) as error:
             die_fatal_error(error)
+        except SecurityGroupNotFound as error:
+            message = ("{}\nRun `senza init` to (re-)create "
+                       "the security group.").format(error)
+            die_fatal_error(message)
         except Exception as unknown_exception:  # Catch All
             self.die_unknown_error(unknown_exception)
diff --git a/senza/exceptions.py b/senza/exceptions.py
index 5b57980..ff84c81 100644
--- a/senza/exceptions.py
+++ b/senza/exceptions.py
@@ -41,3 +41,15 @@ class InvalidDefinition(SenzaException):
     def __str__(self):
         return ("{path} is not a valid senza definition: "
                 "{reason}".format_map(vars(self)))
+
+
+class SecurityGroupNotFound(SenzaException):
+    """
+    Exception raised when a Security Group is not found
+    """
+
+    def __init__(self, security_group: str):
+        self.security_group = security_group
+
+    def __str__(self):
+        return 'Security Group "{}" does not exist.'.format(self.security_group)
zalando-stups/senza
a72ed3ba8f330170d7dc9e923bd18294a03186af
diff --git a/tests/test_error_handling.py b/tests/test_error_handling.py
index 2df4395..c3c367f 100644
--- a/tests/test_error_handling.py
+++ b/tests/test_error_handling.py
@@ -7,7 +7,7 @@ import botocore.exceptions
 import senza.error_handling
 import yaml
 from pytest import fixture, raises
-from senza.exceptions import PiuNotFound
+from senza.exceptions import PiuNotFound, SecurityGroupNotFound
 from senza.manaus.exceptions import ELBNotFound, InvalidState


@@ -225,6 +225,18 @@ def test_yaml_error(capsys):
     assert 'Please quote all variable values' in err


+def test_sg_not_found(capsys):
+    func = MagicMock(side_effect=SecurityGroupNotFound('my-app'))
+
+    with raises(SystemExit):
+        senza.error_handling.HandleExceptions(func)()
+
+    out, err = capsys.readouterr()
+
+    assert err == ('Security Group "my-app" does not exist.\n'
+                   'Run `senza init` to (re-)create the security group.\n')
+
+
 def test_unknown_error(capsys, mock_tempfile, mock_raven):
     senza.error_handling.sentry = senza.error_handling.setup_sentry(None)
     func = MagicMock(side_effect=Exception("something"))
Exception in senza create without senza init
senza throws exception when security group is missing. It can check this condition and suggest using "senza init" to create the group.

```
EXCEPTION OCCURRED: Security Group "app-something" does not exist
Unknown Error: Security Group "app-something" does not exist.
Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/senza/error_handling.py", line 104, in __call__
    self.function(*args, **kwargs)
  File "/home/someuser/.local/lib/python3.5/site-packages/click/core.py", line 716, in __call__
    return self.main(*args, **kwargs)
  File "/home/someuser/.local/lib/python3.5/site-packages/click/core.py", line 696, in main
    rv = self.invoke(ctx)
  File "/home/someuser/.local/lib/python3.5/site-packages/click/core.py", line 1060, in invoke
    return _process_result(sub_ctx.command.invoke(sub_ctx))
  File "/home/someuser/.local/lib/python3.5/site-packages/click/core.py", line 889, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/home/someuser/.local/lib/python3.5/site-packages/click/core.py", line 534, in invoke
    return callback(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 576, in create
    data = create_cf_template(definition, region, version, parameter, force, parameter_file)
  File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 659, in create_cf_template
    data = evaluate(definition.copy(), args, account_info, force)
  File "/usr/local/lib/python3.5/dist-packages/senza/cli.py", line 238, in evaluate
    definition = componentfn(definition, configuration, args, info, force, account_info)
  File "/usr/local/lib/python3.5/dist-packages/senza/components/taupage_auto_scaling_group.py", line 81, in component_taupage_auto_scaling_group
    definition = component_auto_scaling_group(definition, configuration, args, info, force, account_info)
  File "/usr/local/lib/python3.5/dist-packages/senza/components/auto_scaling_group.py", line 99, in component_auto_scaling_group
    resolve_security_groups(configuration["SecurityGroups"], args.region)
  File "/usr/local/lib/python3.5/dist-packages/senza/aws.py", line 126, in resolve_security_groups
    result.append(resolve_security_group(security_group, region))
  File "/usr/local/lib/python3.5/dist-packages/senza/aws.py", line 119, in resolve_security_group
    raise ValueError('Security Group "{}" does not exist'.format(security_group))
ValueError: Security Group "app-something" does not exist
```
0.0
a72ed3ba8f330170d7dc9e923bd18294a03186af
[ "tests/test_error_handling.py::test_store_exception", "tests/test_error_handling.py::test_store_nested_exception", "tests/test_error_handling.py::test_missing_credentials", "tests/test_error_handling.py::test_access_denied", "tests/test_error_handling.py::test_expired_credentials", "tests/test_error_handling.py::test_unknown_ClientError_raven", "tests/test_error_handling.py::test_unknown_ClientError_no_stack_trace", "tests/test_error_handling.py::test_piu_not_found", "tests/test_error_handling.py::test_elb_not_found", "tests/test_error_handling.py::test_invalid_state", "tests/test_error_handling.py::test_validation", "tests/test_error_handling.py::test_sg_not_found", "tests/test_error_handling.py::test_unknown_error", "tests/test_error_handling.py::test_unknown_error_sentry" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-01-06 08:35:04+00:00
apache-2.0
6,329
zalando-stups__senza-521
diff --git a/requirements.txt b/requirements.txt
index 4b612cc..969c68d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,6 +7,6 @@ dnspython>=1.15.0
 stups-pierone>=1.0.34
 boto3>=1.3.0
 botocore>=1.4.10
-pytest>=2.7.3
+pytest>=3.6.3
 raven
 typing
diff --git a/setup.py b/setup.py
index b038fc5..4d36574 100755
--- a/setup.py
+++ b/setup.py
@@ -131,7 +131,7 @@ def setup_package():
         install_requires=install_reqs,
         setup_requires=['flake8'],
         cmdclass=cmdclass,
-        tests_require=['pytest-cov', 'pytest', 'mock', 'responses'],
+        tests_require=['pytest-cov', 'pytest>=3.6.3', 'mock', 'responses'],
         command_options=command_options,
         entry_points={'console_scripts': CONSOLE_SCRIPTS,
                       'senza.templates': ['bgapp = senza.templates.bgapp',
diff --git a/spotinst/components/elastigroup.py b/spotinst/components/elastigroup.py
index aa916e9..8280ed4 100644
--- a/spotinst/components/elastigroup.py
+++ b/spotinst/components/elastigroup.py
@@ -20,7 +20,12 @@ from spotinst import MissingSpotinstAccount
 SPOTINST_LAMBDA_FORMATION_ARN = 'arn:aws:lambda:{}:178579023202:function:spotinst-cloudformation'
 SPOTINST_API_URL = 'https://api.spotinst.io'
-ELASTIGROUP_DEFAULT_STRATEGY = {"risk": 100, "availabilityVsCost": "balanced", "utilizeReservedInstances": True}
+ELASTIGROUP_DEFAULT_STRATEGY = {
+    "risk": 100,
+    "availabilityVsCost": "balanced",
+    "utilizeReservedInstances": True,
+    "fallbackToOd": True,
+}
 ELASTIGROUP_DEFAULT_PRODUCT = "Linux/UNIX"
@@ -33,7 +38,7 @@ def component_elastigroup(definition, configuration, args, info, force, account_
     """
     definition = ensure_keys(ensure_keys(definition, "Resources"), "Mappings", "Senza", "Info")
     if "SpotinstAccessToken" not in definition["Mappings"]["Senza"]["Info"]:
-        raise click.UsageError("You have to specificy your SpotinstAccessToken attribute inside the SenzaInfo "
+        raise click.UsageError("You have to specify your SpotinstAccessToken attribute inside the SenzaInfo "
                                "to be able to use Elastigroups")
     configuration = ensure_keys(configuration, "Elastigroup")
@@ -332,6 +337,7 @@ def extract_load_balancer_name(configuration, elastigroup_config: dict):
     if "ElasticLoadBalancer" in configuration:
         load_balancer_refs = configuration.pop("ElasticLoadBalancer")
+        health_check_type = "ELB"
         if isinstance(load_balancer_refs, str):
             load_balancers.append({
                 "name": {"Ref": load_balancer_refs},
@@ -344,6 +350,7 @@ def extract_load_balancer_name(configuration, elastigroup_config: dict):
                 "type": "CLASSIC"
             })
     if "ElasticLoadBalancerV2" in configuration:
+        health_check_type = "TARGET_GROUP"
         load_balancer_refs = configuration.pop("ElasticLoadBalancerV2")
         if isinstance(load_balancer_refs, str):
             load_balancers.append({
@@ -358,16 +365,13 @@ def extract_load_balancer_name(configuration, elastigroup_config: dict):
             })
     if len(load_balancers) > 0:
-        # use ELB health check by default when there are LBs
-        health_check_type = "ELB"
         launch_spec_config["loadBalancersConfig"] = {"loadBalancers": load_balancers}
-        if "healthCheckType" in launch_spec_config:
-            health_check_type = launch_spec_config["healthCheckType"]
-        elif "HealthCheckType" in configuration:
-            health_check_type = configuration["HealthCheckType"]
+        health_check_type = launch_spec_config.get("healthCheckType",
+                                                   configuration.get("HealthCheckType", health_check_type))
+        grace_period = launch_spec_config.get("healthCheckGracePeriod",
+                                              configuration.get('HealthCheckGracePeriod', 300))
         launch_spec_config["healthCheckType"] = health_check_type
-        grace_period = launch_spec_config.get("healthCheckGracePeriod",
-                                              configuration.get('HealthCheckGracePeriod', 300))
         launch_spec_config["healthCheckGracePeriod"] = grace_period
@@ -432,20 +436,16 @@ def extract_instance_types(configuration, elastigroup_config):
     are no SpotAlternatives the Elastigroup will have the same ondemand type as spot alternative
     If there's already a compute.instanceTypes config it will be left untouched
     """
-    elastigroup_config = ensure_keys(ensure_keys(elastigroup_config, "strategy"), "compute")
+    elastigroup_config = ensure_keys(elastigroup_config, "compute")
     compute_config = elastigroup_config["compute"]
-    instance_type = configuration.pop("InstanceType", None)
+
+    if "InstanceType" not in configuration:
+        raise click.UsageError("You need to specify the InstanceType attribute to be able to use Elastigroups")
+    instance_type = configuration.pop("InstanceType")
     spot_alternatives = configuration.pop("SpotAlternatives", None)
     if "instanceTypes" not in compute_config:
-        if not (instance_type or spot_alternatives):
-            raise click.UsageError("You have to specify one of InstanceType or SpotAlternatives")
         instance_types = {}
-        strategy = elastigroup_config["strategy"]
-        if instance_type:
-            instance_types.update({"ondemand": instance_type})
-            strategy.update({"fallbackToOd": True})
-        else:
-            strategy.update({"fallbackToOd": False})
+        instance_types.update({"ondemand": instance_type})
         if spot_alternatives:
             instance_types.update({"spot": spot_alternatives})
         else:
zalando-stups/senza
d5477538a198df36914cdd2dbe9e10accb4dec5f
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py
index f77ccc1..022c0a8 100644
--- a/tests/test_elastigroup.py
+++ b/tests/test_elastigroup.py
@@ -442,7 +442,7 @@ def test_load_balancers():
                 "healthCheckGracePeriod": 300,
             }}},
         },
-        { # 1 application load balancer from Taupage, healthcheck type set to ELB (default grace period)
+        { # 1 application load balancer from Taupage, healthcheck type set to TARGET_GROUP (default grace period)
            "input": {"ElasticLoadBalancerV2": "bar"},
            "given_config": {},
            "expected_config": {"compute": {"launchSpecification": {
@@ -451,11 +451,12 @@ def test_load_balancers():
                        {"arn": {"Ref": "barTargetGroup"}, "type": "TARGET_GROUP"},
                    ],
                },
-                "healthCheckType": "ELB",
+                "healthCheckType": "TARGET_GROUP",
                "healthCheckGracePeriod": 300,
            }}},
        },
-        { # multiple application load balancers from Taupage, healthcheck type set to ELB (default grace period)
+        { # multiple application load balancers from Taupage, healthcheck type set to TARGET_GROUP
+        # (default grace period)
            "input": {"ElasticLoadBalancerV2": ["foo", "bar"]},
            "given_config": {},
            "expected_config": {"compute": {"launchSpecification": {
@@ -465,11 +466,11 @@ def test_load_balancers():
                        {"arn": {"Ref": "fooTargetGroup"}, "type": "TARGET_GROUP"},
                        {"arn": {"Ref": "barTargetGroup"}, "type": "TARGET_GROUP"},
                    ],
                },
-                "healthCheckType": "ELB",
+                "healthCheckType": "TARGET_GROUP",
                "healthCheckGracePeriod": 300,
            }}},
        },
-        { # mixed load balancers from Taupage, healthcheck type set to ELB and custom Taupage grace period
+        { # mixed load balancers from Taupage, healthcheck type set to TARGET_GROUP and custom Taupage grace period
            "input": {
                "ElasticLoadBalancer": "foo",
                "ElasticLoadBalancerV2": "bar",
@@ -483,7 +484,7 @@ def test_load_balancers():
                        {"arn": {"Ref": "barTargetGroup"}, "type": "TARGET_GROUP"},
                    ],
                },
-                "healthCheckType": "ELB",
+                "healthCheckType": "TARGET_GROUP",
                "healthCheckGracePeriod": 42,
            }}},
        },
@@ -598,9 +599,11 @@ def test_extract_security_group_ids(monkeypatch):
         assert test_case["expected_sgs"] == got["compute"]["launchSpecification"].get("securityGroupIds")


-def test_missing_instance_types():
+def test_missing_instance_type():
     with pytest.raises(click.UsageError):
         extract_instance_types({}, {})
+    with pytest.raises(click.UsageError):
+        extract_instance_types({"SpotAlternatives": ["foo", "bar", "baz"]}, {})


 def test_extract_instance_types():
@@ -608,20 +611,12 @@ def test_extract_instance_types():
         { # minimum accepted behavior, on demand instance type from typical Senza
             "input": {"InstanceType": "foo"},
             "given_config": {},
-            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}},
-                                "strategy": {"fallbackToOd": True}},
+            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}}},
         },
         { # both on demand instance type from typical Senza and spot alternatives specified
             "input": {"InstanceType": "foo", "SpotAlternatives": ["bar", "baz"]},
             "given_config": {},
-            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}},
-                                "strategy": {"fallbackToOd": True}},
-        },
-        { # only spot alternatives specified
-            "input": {"SpotAlternatives": ["foo", "bar"]},
-            "given_config": {},
-            "expected_config": {"compute": {"instanceTypes": {"spot": ["foo", "bar"]}},
-                                "strategy": {"fallbackToOd": False}},
+            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}}},
         },
     ]
     for test_case in test_cases:
Fix Elastigroup healthCheckType
The current implementation always sets the Elastigroup's healthCheckType to "ELB" if there are any load balancers, regardless of their types. The [API clearly states](https://api.spotinst.com/elastigroup/amazon-web-services-2/create/#compute.launchSpecification.healthCheckType) that ELB is for classic ELBs and TARGET_GROUP should be used for ALBs.

See [Spotinst's recommendation](https://github.com/zalando-stups/senza/pull/516#pullrequestreview-136726224).
0.0
d5477538a198df36914cdd2dbe9e10accb4dec5f
[ "tests/test_elastigroup.py::test_extract_instance_types", "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_missing_instance_type" ]
[ "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_extract_instance_profile", "tests/test_elastigroup.py::test_standard_tags", "tests/test_elastigroup.py::test_component_elastigroup_defaults", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_prediction_strategy", "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_extract_image_id", "tests/test_elastigroup.py::test_spotinst_account_resolution" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-07-14 17:31:04+00:00
apache-2.0
6,330
zalando-stups__senza-522
diff --git a/spotinst/components/elastigroup.py b/spotinst/components/elastigroup.py
index aa916e9..dca20f4 100644
--- a/spotinst/components/elastigroup.py
+++ b/spotinst/components/elastigroup.py
@@ -20,7 +20,12 @@ from spotinst import MissingSpotinstAccount
 SPOTINST_LAMBDA_FORMATION_ARN = 'arn:aws:lambda:{}:178579023202:function:spotinst-cloudformation'
 SPOTINST_API_URL = 'https://api.spotinst.io'
-ELASTIGROUP_DEFAULT_STRATEGY = {"risk": 100, "availabilityVsCost": "balanced", "utilizeReservedInstances": True}
+ELASTIGROUP_DEFAULT_STRATEGY = {
+    "risk": 100,
+    "availabilityVsCost": "balanced",
+    "utilizeReservedInstances": True,
+    "fallbackToOd": True,
+}
 ELASTIGROUP_DEFAULT_PRODUCT = "Linux/UNIX"
@@ -432,20 +437,16 @@ def extract_instance_types(configuration, elastigroup_config):
     are no SpotAlternatives the Elastigroup will have the same ondemand type as spot alternative
     If there's already a compute.instanceTypes config it will be left untouched
     """
-    elastigroup_config = ensure_keys(ensure_keys(elastigroup_config, "strategy"), "compute")
+    elastigroup_config = ensure_keys(elastigroup_config, "compute")
     compute_config = elastigroup_config["compute"]
-    instance_type = configuration.pop("InstanceType", None)
+
+    if "InstanceType" not in configuration:
+        raise click.UsageError("You need to specify the InstanceType attribute to be able to use Elastigroups")
+    instance_type = configuration.pop("InstanceType")
     spot_alternatives = configuration.pop("SpotAlternatives", None)
     if "instanceTypes" not in compute_config:
-        if not (instance_type or spot_alternatives):
-            raise click.UsageError("You have to specify one of InstanceType or SpotAlternatives")
         instance_types = {}
-        strategy = elastigroup_config["strategy"]
-        if instance_type:
-            instance_types.update({"ondemand": instance_type})
-            strategy.update({"fallbackToOd": True})
-        else:
-            strategy.update({"fallbackToOd": False})
+        instance_types.update({"ondemand": instance_type})
         if spot_alternatives:
             instance_types.update({"spot": spot_alternatives})
         else:
zalando-stups/senza
d5477538a198df36914cdd2dbe9e10accb4dec5f
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py
index f77ccc1..0fcb9e4 100644
--- a/tests/test_elastigroup.py
+++ b/tests/test_elastigroup.py
@@ -598,9 +598,11 @@ def test_extract_security_group_ids(monkeypatch):
         assert test_case["expected_sgs"] == got["compute"]["launchSpecification"].get("securityGroupIds")


-def test_missing_instance_types():
+def test_missing_instance_type():
     with pytest.raises(click.UsageError):
         extract_instance_types({}, {})
+    with pytest.raises(click.UsageError):
+        extract_instance_types({"SpotAlternatives": ["foo", "bar", "baz"]}, {})


 def test_extract_instance_types():
@@ -608,20 +610,12 @@ def test_extract_instance_types():
         { # minimum accepted behavior, on demand instance type from typical Senza
             "input": {"InstanceType": "foo"},
             "given_config": {},
-            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}},
-                                "strategy": {"fallbackToOd": True}},
+            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}}},
         },
         { # both on demand instance type from typical Senza and spot alternatives specified
             "input": {"InstanceType": "foo", "SpotAlternatives": ["bar", "baz"]},
             "given_config": {},
-            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}},
-                                "strategy": {"fallbackToOd": True}},
-        },
-        { # only spot alternatives specified
-            "input": {"SpotAlternatives": ["foo", "bar"]},
-            "given_config": {},
-            "expected_config": {"compute": {"instanceTypes": {"spot": ["foo", "bar"]}},
-                                "strategy": {"fallbackToOd": False}},
+            "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}}},
         },
     ]
     for test_case in test_cases:
Make Elastigroup's On Demand not optional
The current implementation allows users not to specify the `InstanceType` attribute which would later be translated to the Elastigroup's `ondemand` attribute. [This attribute is mandatory according to the API](https://api.spotinst.com/elastigroup/amazon-web-services-2/create/#compute.instanceTypes.ondemand) and [Spotinst's recommendations](https://github.com/zalando-stups/senza/pull/516#pullrequestreview-136726224). The stack would not be created.

The fallbackToOd should always be set to True given that `ondemand` is mandatory.
0.0
d5477538a198df36914cdd2dbe9e10accb4dec5f
[ "tests/test_elastigroup.py::test_extract_instance_types", "tests/test_elastigroup.py::test_missing_instance_type" ]
[ "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_standard_tags", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_extract_instance_profile", "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_component_elastigroup_defaults", "tests/test_elastigroup.py::test_spotinst_account_resolution", "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_extract_image_id", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_prediction_strategy" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2018-07-14 18:04:16+00:00
apache-2.0
6,331
zalando-stups__senza-535
diff --git a/senza/components/elastigroup.py b/senza/components/elastigroup.py
index 7d39d6b..301359a 100644
--- a/senza/components/elastigroup.py
+++ b/senza/components/elastigroup.py
@@ -64,7 +64,7 @@ def component_elastigroup(definition, configuration, args, info, force, account_
     extract_instance_profile(args, definition, configuration, elastigroup_config)
     # cfn definition
     access_token = _extract_spotinst_access_token(definition)
-    config_name = configuration["Name"] + "Config"
+    config_name = configuration["Name"]
     definition["Resources"][config_name] = {
         "Type": "Custom::elastigroup",
         "Properties": {
zalando-stups/senza
935f4111323f6b98ff136ae44a0d57825ac763c7
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py
index 339bf4e..b98621e 100644
--- a/tests/test_elastigroup.py
+++ b/tests/test_elastigroup.py
@@ -44,7 +44,7 @@ def test_component_elastigroup_defaults(monkeypatch):

     result = component_elastigroup(definition, configuration, args, info, False, mock_account_info)

-    properties = result["Resources"]["eg1Config"]["Properties"]
+    properties = result["Resources"]["eg1"]["Properties"]
     assert properties["accountId"] == 'act-12345abcdef'
     assert properties["group"]["capacity"] == {"target": 1, "minimum": 1, "maximum": 1}
     instance_types = properties["group"]["compute"]["instanceTypes"]
Spotinst elastigroup adds "Config" to resource name
This invalidates many existing Senza templates. It should keep the original name.
0.0
935f4111323f6b98ff136ae44a0d57825ac763c7
[ "tests/test_elastigroup.py::test_component_elastigroup_defaults" ]
[ "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_extract_instance_profile", "tests/test_elastigroup.py::test_spotinst_account_resolution", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_prediction_strategy", "tests/test_elastigroup.py::test_extract_instance_types", "tests/test_elastigroup.py::test_standard_tags", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_extract_image_id", "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_missing_instance_type" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2018-09-04 14:15:05+00:00
apache-2.0
6,332
zalando-stups__senza-565
diff --git a/senza/components/elastigroup.py b/senza/components/elastigroup.py
index 42e9e6d..391ee91 100644
--- a/senza/components/elastigroup.py
+++ b/senza/components/elastigroup.py
@@ -138,7 +138,7 @@ def component_elastigroup(definition, configuration, args, info, force, account_
     extract_user_data(configuration, elastigroup_config, info, force, account_info)
     extract_load_balancer_name(configuration, elastigroup_config)
     extract_public_ips(configuration, elastigroup_config)
-    extract_image_id(elastigroup_config)
+    extract_image_id(configuration, elastigroup_config)
     extract_security_group_ids(configuration, elastigroup_config, args)
     extract_instance_types(configuration, elastigroup_config)
     extract_autoscaling_capacity(configuration, elastigroup_config)
@@ -497,7 +497,7 @@ def extract_public_ips(configuration, elastigroup_config):
     ]


-def extract_image_id(elastigroup_config: dict):
+def extract_image_id(configuration, elastigroup_config: dict):
     """
     This function identifies whether a senza formatted AMI mapping is configured,
     if so it transforms it into a Spotinst Elastigroup AMI API configuration
@@ -506,7 +506,8 @@ def extract_image_id(elastigroup_config: dict):
     launch_spec_config = elastigroup_config["compute"]["launchSpecification"]

     if "imageId" not in launch_spec_config.keys():
-        launch_spec_config["imageId"] = {"Fn::FindInMap": ["Images", {"Ref": "AWS::Region"}, "LatestTaupageImage"]}
+        image_key = configuration.get("Image", "LatestTaupageImage")
+        launch_spec_config["imageId"] = {"Fn::FindInMap": ["Images", {"Ref": "AWS::Region"}, image_key]}
zalando-stups/senza
235c2a4b25bdd9dffdc36e0a2bbe16704788a407
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py
index 78725db..7213a8b 100644
--- a/tests/test_elastigroup.py
+++ b/tests/test_elastigroup.py
@@ -62,7 +62,8 @@ def test_component_elastigroup_defaults(monkeypatch):
     assert {'tagKey': 'StackName', 'tagValue': 'foobar'} in tags
     assert {'tagKey': 'StackVersion', 'tagValue': '0.1'} in tags
     assert properties["group"]["compute"]["product"] == ELASTIGROUP_DEFAULT_PRODUCT
-    assert properties["group"]["compute"]["subnetIds"] == {"Fn::FindInMap": ["ServerSubnets", {"Ref": "AWS::Region"}, "Subnets"]}
+    assert properties["group"]["compute"]["subnetIds"] == {
+        "Fn::FindInMap": ["ServerSubnets", {"Ref": "AWS::Region"}, "Subnets"]}
     assert properties["group"]["region"] == "reg1"
     assert properties["group"]["strategy"] == ELASTIGROUP_DEFAULT_STRATEGY
@@ -666,19 +667,28 @@ def test_public_ips():

 def test_extract_image_id():
     test_cases = [
         { # default behavior - set latest taupage image
+            "input": {},
             "given_config": {},
             "expected_config": {"compute": {"launchSpecification": {
                 "imageId": {"Fn::FindInMap": ["Images", {"Ref": "AWS::Region"}, "LatestTaupageImage"]}
             }}},
         },
         { # leave imageId untouched
+            "input": {},
             "given_config": {"compute": {"launchSpecification": {"imageId": "fake-id"}}},
             "expected_config": {"compute": {"launchSpecification": {"imageId": "fake-id"}}},
         },
+        { # use specified image from the Senza mapping
+            "input": {"Image": "Foo"},
+            "given_config": {},
+            "expected_config": {"compute": {"launchSpecification": {
+                "imageId": {"Fn::FindInMap": ["Images", {"Ref": "AWS::Region"}, "Foo"]}
+            }}},
+        }
     ]
     for test_case in test_cases:
         got = test_case["given_config"]
-        extract_image_id(got)
+        extract_image_id(test_case["input"], got)
         assert test_case["expected_config"] == got
Allow Elastigroup component to use custom Taupage AMI
Currently, Elastigroup components [always use the latest production Taupage AMI](https://github.com/zalando-stups/senza/blob/235c2a4b25bdd9dffdc36e0a2bbe16704788a407/senza/components/elastigroup.py#L509). When the attribute `Image` is specified that setting should be honored, allowing users to easily specify one of the other [existing options](https://stups.readthedocs.io/en/latest/components/senza.html#senza-stupsautoconfiguration) - `LatestTaupageStagingImage` or `LatestTaupageDevImage`

The current workaround is to customize it using the `Elastigroup` attribute:

```yaml
Elastigroup:
  compute:
    launchSpecification:
      imageId: <ami-id>
```

Which requires users to know in advance the AMI ID and hardcode it.
0.0
235c2a4b25bdd9dffdc36e0a2bbe16704788a407
[ "tests/test_elastigroup.py::test_extract_image_id" ]
[ "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_spotinst_account_resolution", "tests/test_elastigroup.py::test_extract_instance_profile", "tests/test_elastigroup.py::test_patch_cross_stack_policy_errors", "tests/test_elastigroup.py::test_component_elastigroup_defaults", "tests/test_elastigroup.py::test_prediction_strategy", "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_extract_instance_types", "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_missing_instance_type", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_patch_cross_stack_policy", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_multiple_target_groups", "tests/test_elastigroup.py::test_raw_user_data_and_base64_encoding_cf_function_used", "tests/test_elastigroup.py::test_standard_tags" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2019-05-22 15:10:01+00:00
apache-2.0
6,333
zalando-stups__zign-14
diff --git a/zign/api.py b/zign/api.py
index d98ed9a..065afae 100644
--- a/zign/api.py
+++ b/zign/api.py
@@ -100,6 +100,11 @@ def get_named_token(scope, realm, name, user, password, url=None,
     if existing_token:
         return existing_token

+    if name and not realm:
+        access_token = get_service_token(name, scope)
+        if access_token:
+            return {'access_token': access_token}
+
     config = get_config()

     url = url or config.get('url')
@@ -153,6 +158,21 @@ def is_user_scope(scope: str):
     return scope in set(['uid', 'cn'])


+def get_service_token(name: str, scopes: list):
+    '''Get service token (tokens lib) if possible, otherwise return None'''
+    tokens.manage(name, scopes)
+    try:
+        access_token = tokens.get(name)
+    except tokens.ConfigurationError:
+        # will be thrown if configuration is missing (e.g. OAUTH2_ACCESS_TOKEN_URL)
+        access_token = None
+    except tokens.InvalidCredentialsError:
+        # will be thrown if $CREDENTIALS_DIR/*.json cannot be read
+        access_token = None
+
+    return access_token
+
+
 def get_token(name: str, scopes: list):
     '''Get an OAuth token, either from Token Service
        or directly from OAuth provider (using the Python tokens library)'''
@@ -163,14 +183,7 @@ def get_token(name: str, scopes: list):
     if token:
         return token['access_token']

-    tokens.manage(name, scopes)
-    try:
-        access_token = tokens.get(name)
-    except tokens.ConfigurationError:
-        access_token = None
-    except tokens.InvalidCredentialsError:
-        access_token = None
-
+    access_token = get_service_token(name, scopes)
     if access_token:
         return access_token
zalando-stups/zign
46f296b8952b518c9f93d398ef890d5d4001a37a
diff --git a/tests/test_api.py b/tests/test_api.py
index ad3cc20..e6e2f6c 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -1,4 +1,5 @@
 import pytest
+import time
 import tokens

 import zign.api
@@ -72,3 +73,18 @@ def test_get_token_fallback_success(monkeypatch):
     monkeypatch.setattr('zign.api.get_new_token', lambda *args, **kwargs: {'access_token': 'tt77'})

     assert zign.api.get_token('mytok', ['myscope']) == 'tt77'
+
+
+def test_get_named_token_existing(monkeypatch):
+    existing = {'mytok': {'access_token': 'tt77', 'creation_time': time.time() - 10, 'expires_in': 3600}}
+    monkeypatch.setattr('zign.api.get_tokens', lambda: existing)
+    tok = zign.api.get_named_token(scope=['myscope'], realm=None, name='mytok', user='myusr', password='mypw')
+    assert tok['access_token'] == 'tt77'
+
+
+def test_get_named_token_services(monkeypatch):
+    response = MagicMock(status_code=401)
+    monkeypatch.setattr('requests.get', MagicMock(return_value=response))
+    monkeypatch.setattr('tokens.get', lambda x: 'svcmytok123')
+    tok = zign.api.get_named_token(scope=['myscope'], realm=None, name='mytok', user='myusr', password='mypw')
+    assert tok['access_token'] == 'svcmytok123'
Transparently get service tokens via "tokens" library if possible
We already have the `zign.api.get_token` function and we should consider getting service tokens transparently also when using `zign token` directly.
0.0
46f296b8952b518c9f93d398ef890d5d4001a37a
[ "tests/test_api.py::test_get_named_token_services" ]
[ "tests/test_api.py::test_get_new_token_invalid_json", "tests/test_api.py::test_get_new_token_missing_access_token", "tests/test_api.py::test_get_token_existing", "tests/test_api.py::test_get_token_configuration_error", "tests/test_api.py::test_get_token_service_success", "tests/test_api.py::test_get_token_fallback_success", "tests/test_api.py::test_get_named_token_existing" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2016-04-19 17:40:40+00:00
apache-2.0
6,334