Dataset schema (column: type, observed range):
- instance_id: string, length 12–53
- patch: string, length 278–33.9k
- repo: string, length 7–48
- base_commit: string, length 40 (full commit SHA)
- hints_text: string, 189 distinct values
- test_patch: string, length 212–264k
- problem_statement: string, length 23–37.7k
- version: string, 1 distinct value
- FAIL_TO_PASS: sequence, length 1–902
- PASS_TO_PASS: sequence, length 0–7.6k
- created_at: string, length 25 (timestamp)
- __index_level_0__: int64, values 1.89k–4.13k
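For orientation, here is a minimal sketch of loading and inspecting one row of a dataset with this schema via the Hugging Face `datasets` library; the dataset path below is a hypothetical placeholder, not this dataset's published name.

```python
# Hypothetical loading sketch; "org/swe-bench-style" is a placeholder path,
# not the real identifier of this dataset.
from datasets import load_dataset

ds = load_dataset("org/swe-bench-style", split="train")

row = ds[0]
print(row["instance_id"])      # e.g. "nagisc007__storybuilder-224"
print(row["repo"], row["base_commit"])
print(row["FAIL_TO_PASS"])     # tests the gold patch must turn from fail to pass
```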
nagisc007__storybuilder-224
diff --git a/builder/master.py b/builder/master.py index 3a72f96..00a1f2a 100644 --- a/builder/master.py +++ b/builder/master.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Story manager class. """ -from .sbutils import assert_isint, assert_isstr +from .sbutils import assert_isint, assert_islist, assert_isstr from .action import _BaseAction, ActionGroup, TagAction from .enums import GroupType, LangType, TagType from .subject import Subject, Day, Item, Person, Stage, Word @@ -119,34 +119,44 @@ class Master(dict): return ActionGroup(lang=lang, group_type=GroupType.SCENE, *tmp) def set_days(self, li: list): + assert_islist(li) + for v in li: self.append_day(v[0], v[1:]) return self - def set_db(self, persons: list, stages: list, days: list, items: list, words: list): - self.set_persons(persons) - self.set_stages(stages) - self.set_days(days) - self.set_items(items) - self.set_words(words) + def set_db(self, persons: list=None, stages: list=None, days: list=None, items: list=None, words: list=None): + self.set_persons(persons if persons else []) + self.set_stages(stages if stages else []) + self.set_days(days if days else []) + self.set_items(items if items else []) + self.set_words(words if words else []) return self def set_items(self, li: list): + assert_islist(li) + for v in li: self.append_item(v[0], v[1:]) return self def set_persons(self, li: list): + assert_islist(li) + for v in li: self.append_person(v[0], v[1:]) return self def set_stages(self, li: list): + assert_islist(li) + for v in li: self.append_stage(v[0], v[1:]) return self def set_words(self, li: list): + assert_islist(li) + for v in li: self.append_word(v[0], v[1:]) return self diff --git a/builder/sbutils.py b/builder/sbutils.py index c5e1219..d05a510 100644 --- a/builder/sbutils.py +++ b/builder/sbutils.py @@ -37,6 +37,12 @@ def assert_isint(ins) -> bool: def assert_islist(ins) -> bool: + assert isinstance(ins, list) or isinstance(ins, tuple), _ASSERT_MSG.format(_instance_name_if(ins), "list or tuple") + + return True + + +def assert_islist_strict(ins) -> bool: assert isinstance(ins, list), _ASSERT_MSG.format(_instance_name_if(ins), "list") return True
nagisc007/storybuilder
b0219f7a7883f24de99ee8269d1e60b83130deed
diff --git a/tests/test_master.py b/tests/test_master.py index 5691f7b..86e4e64 100644 --- a/tests/test_master.py +++ b/tests/test_master.py @@ -289,6 +289,26 @@ class MasterTest(unittest.TestCase): for k, exp in expected: self.assertIsInstance(ma[k], exp) + def test_set_db_when_lacked(self): + data_p = (("p1", "Taro", 17, "male", "student"),) + data_s = (("s1", "stage1"),) + data_d = (("d1", "day1"),) + data_i = (("i1", "item1"),) + data_w = (("w1", "word1"),) + data = [ + (data_p, data_s, data_d, data_i, data_w), + (data_p, data_s, data_d, data_i, None), + (data_p, data_s, data_d, None, None), + (data_p, data_s, None, None, None), + (data_p, None, None, None, None), + (None, None, None, None, None), + ] + + for p, s, d, i, w in data: + with self.subTest(p=p, s=s, d=d, i=i, w=w): + ma = Master('test') + self.assertIsInstance(ma.set_db(p, s, d, i, w), Master) + def test_set_items(self): data = ( ("t1", "item1"),
Need to fix: nothing error in storyDB (fails on empty elements) === **Description** An error occurs when an argument to StoryDB contains an empty tuple. Validate the input so that such entries are never created.
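To make the fix concrete: the gold patch gives every section of `set_db` a `None` default and coerces it to an empty list, which is exactly what the new `test_set_db_when_lacked` exercises. A usage sketch, with the import path assumed from the patched module:

```python
# Trailing DB sections may now be passed as None without raising.
from builder.master import Master  # module path assumed from the patch

ma = Master('test')
ma.set_db(
    (("p1", "Taro", 17, "male", "student"),),  # persons
    (("s1", "stage1"),),                       # stages
    None, None, None,                          # days, items, words left empty
)
```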
0.0
[ "tests/test_master.py::MasterTest::test_set_db_when_lacked" ]
[ "tests/test_master.py::MasterTest::test_append_day", "tests/test_master.py::MasterTest::test_append_item", "tests/test_master.py::MasterTest::test_append_person", "tests/test_master.py::MasterTest::test_append_stage", "tests/test_master.py::MasterTest::test_append_word", "tests/test_master.py::MasterTest::test_attributes", "tests/test_master.py::MasterTest::test_br", "tests/test_master.py::MasterTest::test_break_symbol", "tests/test_master.py::MasterTest::test_combine", "tests/test_master.py::MasterTest::test_comment", "tests/test_master.py::MasterTest::test_hr", "tests/test_master.py::MasterTest::test_scene", "tests/test_master.py::MasterTest::test_set_days", "tests/test_master.py::MasterTest::test_set_db", "tests/test_master.py::MasterTest::test_set_items", "tests/test_master.py::MasterTest::test_set_persons", "tests/test_master.py::MasterTest::test_set_stages", "tests/test_master.py::MasterTest::test_set_words", "tests/test_master.py::MasterTest::test_story", "tests/test_master.py::MasterTest::test_title" ]
2019-04-12 01:40:04+00:00
4,093
nagisc007__storybuilder-231
diff --git a/builder/tools.py b/builder/tools.py index 07349c4..f196f9a 100644 --- a/builder/tools.py +++ b/builder/tools.py @@ -66,7 +66,7 @@ def output_story(story: ActionGroup, filename: str, is_action_data: bool=False, is_info_data: bool=False, is_out_as_file: bool=False, is_out_chars: bool=False, pri_filter: int=Action.MIN_PRIORITY, - is_debug: bool=False): + is_debug: bool=False): # pragma: no cover '''Output a story. Args: @@ -99,7 +99,8 @@ def output_story(story: ActionGroup, filename: str, is_action_data: bool=False, ret = _output_story_to_file( story_data, filename, suffix, is_debug) if is_info_data: - _output_story_to_file(["info data", "wip"], + _output_story_to_file( + _story_info_data_converted(story, is_debug), filename, "_i", is_debug) return ret else: @@ -108,7 +109,8 @@ def output_story(story: ActionGroup, filename: str, is_action_data: bool=False, story_data += _story_flags_info_converted(story) ret = _output_story_to_console(story_data, is_debug) if is_info_data: - _output_story_to_console(["info data", "wip"], + _output_story_to_console( + _story_info_data_converted(story, is_debug), is_debug) return ret @@ -522,8 +524,9 @@ def _story_info_data_converted(story: ActionGroup, is_debug: bool) -> list: assert_isclass(story, ActionGroup) assert_isbool(is_debug) - flags = _story_flags_info_converted(story) - return ["## Flags\n"] + flags + chars = ["## Characters\n", "- Total: {}".format(_count_descriptions(story))] + flags = ["## Flags\n"] + _story_flags_info_converted(story) + return chars + flags def _story_title_of(act: TagAction, level: int) -> str:
nagisc007/storybuilder
2a384c6fc03336ac50edb24e2281ed3b8529bab6
diff --git a/tests/test_tools.py b/tests/test_tools.py index 0dbddb0..10f0880 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -486,6 +486,24 @@ class PrivateMethodsTest(unittest.TestCase): with self.subTest(v=v, expected=expected): self.assertEqual(tools._story_flags_info_converted(v), expected) + def test_story_info_data_converted(self): + data = [ + (self.ma.story("test", + self.taro.be(Flag("apple"))), + False, + ["## Characters\n", "- Total: 0", + "## Flags\n", "[apple]:apple"]), + (self.ma.story("test", + self.taro.be().d("test apple")), + False, + ["## Characters\n", "- Total: 9", + "## Flags\n"]), + ] + + for v, isdbg, expected in data: + with self.subTest(v=v, isdbg=isdbg, expected=expected): + self.assertEqual(tools._story_info_data_converted(v, isdbg), expected) + def test_story_title_of(self): ma = Master('test')
Implement: info output (no output currently) === **Description** The info output currently has no content, so fix that.
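Condensed from the gold patch: the previously stubbed info output now reports a character total ahead of the existing flags section. `_count_descriptions` and `_story_flags_info_converted` are helpers from the same builder/tools.py module, so this fragment is not standalone:

```python
# The patched builder/tools.py builds the info data like this
# (input asserts omitted for brevity):
def _story_info_data_converted(story, is_debug):
    chars = ["## Characters\n",
             "- Total: {}".format(_count_descriptions(story))]
    flags = ["## Flags\n"] + _story_flags_info_converted(story)
    return chars + flags
```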
0.0
[ "tests/test_tools.py::PrivateMethodsTest::test_story_info_data_converted" ]
[ "tests/test_tools.py::PublicMethodsTest::test_output_info", "tests/test_tools.py::PrivateMethodsTest::test_action_info_as_eng", "tests/test_tools.py::PrivateMethodsTest::test_action_info_as_jpn", "tests/test_tools.py::PrivateMethodsTest::test_action_of_by_tag", "tests/test_tools.py::PrivateMethodsTest::test_action_of_by_type", "tests/test_tools.py::PrivateMethodsTest::test_break_symbol_of", "tests/test_tools.py::PrivateMethodsTest::test_comment_of", "tests/test_tools.py::PrivateMethodsTest::test_count_desc_at_action", "tests/test_tools.py::PrivateMethodsTest::test_count_desc_in_group", "tests/test_tools.py::PrivateMethodsTest::test_count_descriptions", "tests/test_tools.py::PrivateMethodsTest::test_desc_excepted_symbols", "tests/test_tools.py::PrivateMethodsTest::test_desc_head_of", "tests/test_tools.py::PrivateMethodsTest::test_desc_str_replaced_tag", "tests/test_tools.py::PrivateMethodsTest::test_description_of_by_tag", "tests/test_tools.py::PrivateMethodsTest::test_description_of_by_type", "tests/test_tools.py::PrivateMethodsTest::test_extra_chopped", "tests/test_tools.py::PrivateMethodsTest::test_flag_info_of", "tests/test_tools.py::PrivateMethodsTest::test_flags_if", "tests/test_tools.py::PrivateMethodsTest::test_hr_of", "tests/test_tools.py::PrivateMethodsTest::test_list_head_inserted", "tests/test_tools.py::PrivateMethodsTest::test_output_story_to_console", "tests/test_tools.py::PrivateMethodsTest::test_output_story_to_file", "tests/test_tools.py::PrivateMethodsTest::test_output_with_linenumber", "tests/test_tools.py::PrivateMethodsTest::test_scene_title_of", "tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action", "tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action_in_group", "tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description", "tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description_in_group", "tests/test_tools.py::PrivateMethodsTest::test_story_data_converted", "tests/test_tools.py::PrivateMethodsTest::test_story_flags_info_converted", "tests/test_tools.py::PrivateMethodsTest::test_story_title_of", "tests/test_tools.py::PrivateMethodsTest::test_test_head_if" ]
2019-04-13 01:30:58+00:00
4,094
nagisc007__storybuilder-354
diff --git a/builder/buildtool.py b/builder/buildtool.py index 5c005fe..08449e3 100644 --- a/builder/buildtool.py +++ b/builder/buildtool.py @@ -271,7 +271,7 @@ def _manupaper_rows_from_(val, lang: em.LangType, columns: int) -> int: elif isinstance(val, act.TagAction): return 0 elif isinstance(val, act.Action): - return utl.int_ceiled(_descs_count_from_(val, lang), columns) + return _descs_count_from_(val, lang) else: return 0 @@ -279,7 +279,10 @@ def _manupaper_rows_from_(val, lang: em.LangType, columns: int) -> int: def _manupaper_rows_from_in(vals: [act.ActionGroup, list, tuple], lang: em.LangType, columns: int) -> int: group = vals.actions if isinstance(vals, act.ActionGroup) else vals - return sum([_manupaper_rows_from_(v, lang, columns) for v in group]) + if isinstance(vals, act.ActionGroup) and vals.group_type is em.GroupType.COMBI: + return utl.int_ceiled(sum(_manupaper_rows_from_(v, lang, columns) for v in group), columns) + else: + return sum([utl.int_ceiled(_manupaper_rows_from_(v, lang, columns), columns) for v in group]) def _maintitle_from(story: list) -> list:
nagisc007/storybuilder
d711a90a6034bb61c176ccc809b367450c469fee
diff --git a/tests/test_buildtool.py b/tests/test_buildtool.py index c794c9b..b873256 100644 --- a/tests/test_buildtool.py +++ b/tests/test_buildtool.py @@ -157,6 +157,25 @@ class PrivateMethodsTest(unittest.TestCase): with self.subTest(v=v, lang=lang, expected=expected): self.assertEqual(btl._estimated_description_count_from(v, lang), expected) + def test_manupaper_rows_from_in(self): + from builder import world + w = world.World("test") + data = [ + ((self.taro.be().d("test"), self.taro.be().d("apple"),), + em.LangType.JPN, 20, 2), + ((w.combine( + self.taro.be().d("test"), self.taro.be().d("apple"), + ),), + em.LangType.JPN, 20, 1), + (w.combine( + self.taro.be().d("test apple"), self.taro.be().d("test orange"), + ), + em.LangType.JPN, 20, 2), + ] + for v, lang, columns, expected in data: + with self.subTest(v=v, lang=lang, columns=columns, expected=expected): + self.assertEqual(btl._manupaper_rows_from_in(v, lang, columns), expected) + def test_output_to_console(self): data = [ (["test"], False,
Check: character count when combined (are lines and character counts counted correctly on combine?) === **Description** Verify the behavior when actions are combined, and fix it if it is wrong.
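The counting rule the patch settles on, restated as a standalone sketch. Character counts are abstracted to plain integers here; the real code derives them per action via `_descs_count_from_`:

```python
import math

def rows_for(counts, columns, combined):
    """Manuscript rows consumed by a list of per-action character counts."""
    if combined:
        # combined actions share lines: round up once over the summed count
        return math.ceil(sum(counts) / columns)
    # separate actions each start a new line: round up per action
    return sum(math.ceil(c / columns) for c in counts)

# Mirrors the new test at 20 columns: "test" (4 chars) + "apple" (5 chars)
assert rows_for([4, 5], 20, combined=True) == 1   # one shared row
assert rows_for([4, 5], 20, combined=False) == 2  # one row each
```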
0.0
[ "tests/test_buildtool.py::PrivateMethodsTest::test_manupaper_rows_from_in" ]
[ "tests/test_buildtool.py::PrivateMethodsTest::test_action_from", "tests/test_buildtool.py::PrivateMethodsTest::test_acttypes_percents_from", "tests/test_buildtool.py::PrivateMethodsTest::test_charcount_from", "tests/test_buildtool.py::PrivateMethodsTest::test_contents_title_from", "tests/test_buildtool.py::PrivateMethodsTest::test_descs_count_from", "tests/test_buildtool.py::PrivateMethodsTest::test_descs_from", "tests/test_buildtool.py::PrivateMethodsTest::test_estimated_description_count_from", "tests/test_buildtool.py::PrivateMethodsTest::test_output_to_console", "tests/test_buildtool.py::PrivateMethodsTest::test_output_to_file" ]
2019-06-17 00:05:20+00:00
4,095
nagisc007__storybuilder-52
diff --git a/builder/tools.py b/builder/tools.py index 406dfd6..2a990d8 100644 --- a/builder/tools.py +++ b/builder/tools.py @@ -17,15 +17,15 @@ def build_action_strings(story, is_debug=False): for s in story: if s.act_type is ActType.SYMBOL: - act_str.append("\n## {}\n\n".format(s.action)) + act_str.append("\n## {}\n\n".format(_action_with_act_word_if_selected(s))) elif s.act_type is ActType.DESC or s.act_type is ActType.ACT: - act_str.append("{}\n".format(s.action)) + act_str.append("{}\n".format(_action_with_act_word_if_selected(s))) elif s.act_type is ActType.TELL: - act_str.append("「{}」\n".format(s.action)) + act_str.append("「{}」{}\n".format(s.action, s.act_word)) elif s.act_type is ActType.THINK: - act_str.append("{}\n".format(s.action)) + act_str.append("{}\n".format(_action_with_act_word_if_selected(s))) elif s.act_type is ActType.TEST and is_debug: - act_str.append("> TEST:{}\n".format(s.action)) + act_str.append("> TEST:{}\n".format(_action_with_act_word_if_selected(s))) else: pass @@ -100,3 +100,12 @@ def _description_selected(act): return act.description return act.action + +def _action_with_act_word_if_selected(act): + '''Action string created with selecting act word. + + Returns: + str: action string. + ''' + return "{}{}".format(act.action, act.act_word) if act.with_act else act.action +
nagisc007/storybuilder
04f2d2f06e4f6ef6468d45e327b94f21f01c1567
diff --git a/tests/test_tools.py b/tests/test_tools.py index 9b6da20..aec9622 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -12,7 +12,7 @@ from builder.tools import build_description_strings from builder.tools import output from builder.tools import output_md from builder.tools import _description_selected - +from builder.tools import _action_with_act_word_if_selected class TestTools(unittest.TestCase): @@ -41,6 +41,13 @@ class TestTools(unittest.TestCase): self.assertEqual(_description_selected(normal_act), "go to room") + def test__action_with_act_word_if_selected(self): + noselected = Act(self.taro, ActType.ACT, Behavior.GO, "to room", " go", with_act=False) + selected = Act(self.taro, ActType.ACT, Behavior.GO, "to room", " go", with_act=True) + + self.assertEqual(_action_with_act_word_if_selected(noselected), "to room") + self.assertEqual(_action_with_act_word_if_selected(selected), "to room go") + def test__description_selected_with_description(self): normal_act = Act(self.taro, ActType.ACT, Behavior.GO, "go to room") act_with_desc = normal_act.desc("walk so slowly to the room")
Need to fix: output actions with act word (act_word is not displayed) === **Description** Following the spec change, fix the action output to use act_word. **To Reproduce** nothing **Expected behavior** nothing **Current behavior** nothing **Tasks** - [ ] fix output with act_word - [ ] fix output_md with act_word - [ ] pass tests **Note** nothing
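The helper this patch adds to builder/tools.py, with its behavior pinned by the new test; the namedtuple is a stand-in for the real Act class, holding just the fields the helper reads:

```python
from collections import namedtuple

Act = namedtuple("Act", "action act_word with_act")  # stand-in for builder's Act

def _action_with_act_word_if_selected(act):
    """Return the action string, appending act_word when with_act is set."""
    return "{}{}".format(act.action, act.act_word) if act.with_act else act.action

# Expectations from the new test case:
assert _action_with_act_word_if_selected(Act("to room", " go", False)) == "to room"
assert _action_with_act_word_if_selected(Act("to room", " go", True)) == "to room go"
```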
0.0
[ "tests/test_tools.py::TestTools::test__action_with_act_word_if_selected", "tests/test_tools.py::TestTools::test__description_selected", "tests/test_tools.py::TestTools::test__description_selected_with_description", "tests/test_tools.py::TestTools::test_build_action_strings", "tests/test_tools.py::TestTools::test_build_description_strings", "tests/test_tools.py::TestTools::test_output", "tests/test_tools.py::TestTools::test_output_md" ]
[]
2019-03-14 04:34:40+00:00
4,096
nagisc007__storybuilder-54
diff --git a/builder/person.py b/builder/person.py index aae74b0..9a6c312 100644 --- a/builder/person.py +++ b/builder/person.py @@ -85,47 +85,47 @@ class Person(BasePerson): # sub actions def agree(self, action, act_word="頷く", with_act=True, with_sub=False): - return reply(action, act_word, with_act, with_sub) + return self.reply(action, act_word, with_act, with_sub) def ask(self, action, act_word="尋ねる", with_act=True, with_sub=False): - return hear(action, act_word, with_act, with_sub) + return self.hear(action, act_word, with_act, with_sub) def brow(self, action, act_word="眉を顰める", with_act=True, with_sub=False): - return feel(action, act_word, with_act, with_sub) + return self.feel(action, act_word, with_act, with_sub) def call(self, action, act_word="呼ぶ", with_act=True, with_sub=False): - return speak(action, act_word, with_act, with_sub) + return self.speak(action, act_word, with_act, with_sub) def check(self, action, act_word="確認する", with_act=True, with_sub=False): - return hear(action, act_word, with_act, with_sub) + return self.hear(action, act_word, with_act, with_sub) def confuse(self, action, act_word="困惑する", with_act=True, with_sub=False): - return feel(action, act_word, with_act, with_sub) + return self.feel(action, act_word, with_act, with_sub) def explain(self, action, act_word="説明する", with_act=True, with_sub=False): - return talk(action, act_word, with_act, with_sub) + return self.talk(action, act_word, with_act, with_sub) def gaze(self, action, act_word="見つめる", with_act=True, with_sub=False): - return see(action, act_word, with_act, with_sub) + return self.see(action, act_word, with_act, with_sub) def growl(self, action, act_word="唸る", with_act=True, with_sub=False): - return speak(action, act_word, with_act, with_sub) + return self.speak(action, act_word, with_act, with_sub) def maon(self, action, act_word="呻く", with_act=True, with_sub=False): - return speak(action, act_word, with_act, with_sub) + return self.speak(action, act_word, with_act, with_sub) def oppose(self, action, act_word="反対する", with_act=True, with_sub=False): - return reply(action, act_word, with_act, with_sub) + return self.reply(action, act_word, with_act, with_sub) def surprise(self, action, act_word="驚く", with_act=True, with_sub=False): - return feel(action, act_word, with_act, with_sub) + return self.feel(action, act_word, with_act, with_sub) def stare(self, action, act_word="睨む", with_act=True, with_sub=False): - return see(action, act_word, with_act, with_sub) + return self.see(action, act_word, with_act, with_sub) def take(self, action, act_word="連れて行く", with_act=True, with_sub=False): - return go(action, act_word, with_act, with_sub) + return self.go(action, act_word, with_act, with_sub) def visit(self, action, act_word="訪れる", with_act=True, with_sub=False): - return go(action, act_word, with_act, with_sub) + return self.go(action, act_word, with_act, with_sub)
nagisc007/storybuilder
aaaca7a475d697fa159dffff4bbe629a686577bb
diff --git a/tests/test_base.py b/tests/test_base.py index be4be77..fe622ea 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -63,6 +63,9 @@ class TitleTest(unittest.TestCase): class PersonTest(unittest.TestCase): + def setUp(self): + self.psn = Person("Taro", 15, "male", "student", "I am", "he is a man") + def test_attributes(self): psn = Person("Taro", 15, "male", "student", "I am", "he is a man") @@ -154,6 +157,14 @@ class PersonTest(unittest.TestCase): self.assertEqual(acted.action, "forget his home work") self.assertEqual(acted.behavior, Behavior.RESULT) + def test_sub_act_check(self): + acted = self.psn.check("note") + + self.assertTrue(isinstance(acted, Act)) + self.assertEqual(acted.act_type, ActType.ACT) + self.assertEqual(acted.action, "note") + self.assertEqual(acted.behavior, Behavior.HEAR) + class StageTest(unittest.TestCase):
Need to fix: forgot "self" keyword in Person's sub acts (self was missing from the sub acts in Person, so fix it) === **Description** The sub acts call already-defined methods such as see, but without self those names resolve as undefined. **To Reproduce** nothing **Expected behavior** nothing **Current behavior** nothing **Tasks** - [ ] fix all sub acts in Person class - [ ] implement sub act test - [ ] pass tests **Note** nothing
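A minimal reproduction of the bug class fixed here: inside a method, calling a sibling method by bare name looks the name up as a global, so it raises NameError at call time rather than at definition time.

```python
class Person:
    def hear(self, action):
        return ("HEAR", action)

    def check_broken(self, action):
        return hear(action)        # NameError: name 'hear' is not defined

    def check_fixed(self, action):
        return self.hear(action)   # the one-word fix applied across the patch

assert Person().check_fixed("note") == ("HEAR", "note")
```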
0.0
[ "tests/test_base.py::PersonTest::test_sub_act_check" ]
[ "tests/test_base.py::SubjectTest::test_feature_look", "tests/test_base.py::ActTest::test_attributes", "tests/test_base.py::ActTest::test_desc", "tests/test_base.py::TitleTest::test_attributes", "tests/test_base.py::PersonTest::test_act", "tests/test_base.py::PersonTest::test_act_with_behavior", "tests/test_base.py::PersonTest::test_attributes", "tests/test_base.py::PersonTest::test_must", "tests/test_base.py::PersonTest::test_reply", "tests/test_base.py::PersonTest::test_result", "tests/test_base.py::PersonTest::test_tell", "tests/test_base.py::PersonTest::test_think", "tests/test_base.py::PersonTest::test_want", "tests/test_base.py::StageTest::test_attributes", "tests/test_base.py::StageTest::test_look", "tests/test_base.py::ItemTest::test_attributes", "tests/test_base.py::ItemTest::test_look", "tests/test_base.py::DayTimeTest::test_attributes", "tests/test_base.py::DayTimeTest::test_look" ]
2019-03-14 12:22:57+00:00
4,097
nagisc007__storybuilder-79
diff --git a/builder/tools.py b/builder/tools.py index c36e60b..1c8ebbe 100644 --- a/builder/tools.py +++ b/builder/tools.py @@ -99,21 +99,33 @@ def _story_converted_as_action_in_group(group: ActionGroup, level: int, is_debug if isinstance(a, ActionGroup): tmp.extend(_story_converted_as_action_in_group(a, level + 1, is_debug)) else: - tmp.append(_action_str_by_type(a, level, is_debug)) + tmp.append(_action_str_by_type(a, group.lang, level, is_debug)) return tmp -def _action_str_by_type(act: Action, level: int, is_debug: bool) -> str: +def _action_str_by_type(act: Action, lang: LangType, level: int, is_debug: bool) -> str: if act.act_type == ActType.ACT: - return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note) + if lang == LangType.JPN: + return "{: 8}{: 8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note) + else: + return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note) elif act.act_type == ActType.EXPLAIN: - return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note) + if lang == LangType.JPN: + return "{: 8}:{: 8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note) + else: + return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note) elif act.act_type == ActType.TAG: return _action_str_by_tag(act, level) elif act.act_type == ActType.TELL: - return "{:8}:{:8}:「{}」:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note) + if lang == LangType.JPN: + return "{: 8}:{: 8}:「{}」:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note) + else: + return "{:8}:{:8}:「{}」:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note) elif act.act_type == ActType.TEST and is_debug: - return "> {:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note) + if lang == LangType.JPN: + return "> {: 8}:{: 8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note) + else: + return "> {:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note) else: return "" @@ -142,9 +154,9 @@ def _story_converted_as_description_in_group(group: ActionGroup, level: int, is_ def _description_str_by_type(act: Action, lang: LangType, level: int, is_debug: bool) -> str: if act.act_type in (ActType.ACT, ActType.EXPLAIN): - return "{}{}{}".format(" " if lang == LangType.JPN else " ", act.description, "。" if lang == LangType.JPN else ".") + return "{}{}{}".format(_paragraphtop_by_lang(lang), act.description, _period_by_lang(lang)) elif act.act_type == ActType.TELL: - return "{}{}{}".format("「" if lang == LangType.JPN else '"', act.description, "」" if lang == LangType.JPN else '"') + return "{}{}{}".format(_double_quatation_by_lang(lang), act.description, 
_double_quatation_by_lang(lang, False)) elif act.act_type == ActType.TAG: return _description_str_by_tag(act, level, is_debug) elif act.act_type == ActType.TEST and is_debug: @@ -161,3 +173,20 @@ def _description_str_by_tag(act: Action, level: int, is_debug: bool) -> str: else: return "" +def _period_by_lang(lang: LangType) -> str: + return "。" if lang == LangType.JPN else ". " + + +def _comma_by_lang(lang: LangType) -> str: + return "、" if lang == LangType.JPN else ", " + + +def _paragraphtop_by_lang(lang: LangType) -> str: + return " " if lang == LangType.JPN else " " + + +def _double_quatation_by_lang(lang: LangType, is_top: bool=True) -> str: + if is_top: + return "「" if lang == LangType.JPN else ' "' + else: + return "」" if lang == LangType.JPN else '" '
nagisc007/storybuilder
6f5020584b00ffd715c5a4728bf3488c7daec9c6
diff --git a/tests/test_tools.py b/tests/test_tools.py index 6722214..837b1f2 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -84,15 +84,15 @@ class BasicMethodTest(unittest.TestCase): def test__story_converted_as_description(self): self.assertEqual(tools._story_converted_as_description(self.story, False), ["# Taro and Hanako", - " a cute girl come in.", - '"Nice to meet you"', - '"I\'m not fine"']) + " a cute girl come in. ", + ' "Nice to meet you" ', + ' "I\'m not fine" ']) def test__story_converted_as_description_in_group(self): self.assertEqual(tools._story_converted_as_description_in_group(self.story, 1, False), ["# Taro and Hanako", - " a cute girl come in.", - '"Nice to meet you"', - '"I\'m not fine"']) + " a cute girl come in. ", + ' "Nice to meet you" ', + ' "I\'m not fine" '])
Need to fix: action output with Japanese spaces (use full-width spaces for Japanese) === **Description** In the action output, half-width spaces throw off the alignment that was carefully set up, so detect the language and adjust the spacing accordingly.
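Condensed from the helpers the patch adds to builder/tools.py: punctuation and leading spacing switch between full-width (Japanese) and half-width forms by language. The LangType enum here is a stand-in for builder.enums.LangType:

```python
from enum import Enum

class LangType(Enum):   # stand-in for builder.enums.LangType
    JPN = 0
    ENG = 1

def _paragraphtop_by_lang(lang):
    # U+3000 full-width space keeps Japanese column alignment intact
    return "\u3000" if lang == LangType.JPN else " "

def _period_by_lang(lang):
    return "。" if lang == LangType.JPN else ". "

def _double_quatation_by_lang(lang, is_top=True):  # spelling as in the patch
    if is_top:
        return "「" if lang == LangType.JPN else ' "'
    return "」" if lang == LangType.JPN else '" '
```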
0.0
[ "tests/test_tools.py::BasicMethodTest::test__story_converted_as_description", "tests/test_tools.py::BasicMethodTest::test__story_converted_as_description_in_group" ]
[ "tests/test_tools.py::BasicMethodTest::test__output_story_to_console", "tests/test_tools.py::BasicMethodTest::test__output_story_to_file_as_action", "tests/test_tools.py::BasicMethodTest::test__output_story_to_file_as_description", "tests/test_tools.py::BasicMethodTest::test__story_converted_as_action", "tests/test_tools.py::BasicMethodTest::test__story_converted_as_action_in_group", "tests/test_tools.py::BasicMethodTest::test__story_data_converted", "tests/test_tools.py::BasicMethodTest::test_build_to_story", "tests/test_tools.py::BasicMethodTest::test_options_parsed", "tests/test_tools.py::BasicMethodTest::test_output_story" ]
2019-03-20 10:22:54+00:00
4,098
nalepae__pandarallel-188
diff --git a/docs/docs/index.md b/docs/docs/index.md index 5a3993c..32054cd 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -96,7 +96,7 @@ Computer used for this benchmark: For those given examples, parallel operations run approximately 4x faster than the standard operations (except for `series.map` which runs only 3.2x faster). -## When should I user `pandas`, `pandarallel` or `pyspark`? +## When should I use `pandas`, `pandarallel` or `pyspark`? According to [`pandas` documentation](https://pandas.pydata.org/): diff --git a/pandarallel/core.py b/pandarallel/core.py index f96359c..8eaaf12 100644 --- a/pandarallel/core.py +++ b/pandarallel/core.py @@ -235,7 +235,7 @@ def parallelize_with_memory_file_system( progresses_length = [len(chunk_) * multiplicator_factor for chunk_ in chunks] work_extra = data_type.get_work_extra(data) - reduce_extra = data_type.get_reduce_extra(data) + reduce_extra = data_type.get_reduce_extra(data, user_defined_function_kwargs) show_progress_bars = progress_bars_type != ProgressBarsType.No @@ -376,7 +376,7 @@ def parallelize_with_pipe( progresses_length = [len(chunk_) * multiplicator_factor for chunk_ in chunks] work_extra = data_type.get_work_extra(data) - reduce_extra = data_type.get_reduce_extra(data) + reduce_extra = data_type.get_reduce_extra(data, user_defined_function_kwargs) show_progress_bars = progress_bars_type != ProgressBarsType.No diff --git a/pandarallel/data_types/dataframe.py b/pandarallel/data_types/dataframe.py index 5e2e067..4454c6f 100644 --- a/pandarallel/data_types/dataframe.py +++ b/pandarallel/data_types/dataframe.py @@ -4,6 +4,7 @@ import pandas as pd from ..utils import chunk from .generic import DataType +from ..utils import _get_axis_int, _opposite_axis_int class DataFrame: @@ -13,13 +14,9 @@ class DataFrame: nb_workers: int, data: pd.DataFrame, **kwargs ) -> Iterator[pd.DataFrame]: user_defined_function_kwargs = kwargs["user_defined_function_kwargs"] - axis = user_defined_function_kwargs.get("axis", 0) - if axis not in {0, 1, "index", "columns"}: - raise ValueError(f"No axis named {axis} for object type DataFrame") - - axis_int = {0: 0, 1: 1, "index": 0, "columns": 1}[axis] - opposite_axis_int = 1 - axis_int + axis_int = _get_axis_int(user_defined_function_kwargs) + opposite_axis_int = _opposite_axis_int(axis_int) for chunk_ in chunk(data.shape[opposite_axis_int], nb_workers): yield data.iloc[chunk_] if axis_int == 1 else data.iloc[:, chunk_] @@ -38,11 +35,19 @@ class DataFrame: **user_defined_function_kwargs, ) + @staticmethod + def get_reduce_extra(data: Any, user_defined_function_kwargs) -> Dict[str, Any]: + return {"axis": _get_axis_int(user_defined_function_kwargs)} + @staticmethod def reduce( datas: Iterable[pd.DataFrame], extra: Dict[str, Any] ) -> pd.DataFrame: - return pd.concat(datas, copy=False) + if isinstance(datas[0], pd.Series): + axis = 0 + else: + axis = _opposite_axis_int(extra["axis"]) + return pd.concat(datas, copy=False, axis=axis) class ApplyMap(DataType): @staticmethod diff --git a/pandarallel/data_types/dataframe_groupby.py b/pandarallel/data_types/dataframe_groupby.py index f6d119f..184088b 100644 --- a/pandarallel/data_types/dataframe_groupby.py +++ b/pandarallel/data_types/dataframe_groupby.py @@ -40,7 +40,7 @@ class DataFrameGroupBy: return [compute_result(key, df) for key, df in data] @staticmethod - def get_reduce_extra(data: PandasDataFrameGroupBy) -> Dict[str, Any]: + def get_reduce_extra(data: PandasDataFrameGroupBy, user_defined_function_kwargs) -> Dict[str, Any]: return {"df_groupby": 
data} @staticmethod diff --git a/pandarallel/data_types/generic.py b/pandarallel/data_types/generic.py index 3b50383..34dfeb4 100644 --- a/pandarallel/data_types/generic.py +++ b/pandarallel/data_types/generic.py @@ -24,7 +24,7 @@ class DataType(ABC): ... @staticmethod - def get_reduce_extra(data: Any) -> Dict[str, Any]: + def get_reduce_extra(data: Any, user_defined_function_kwargs) -> Dict[str, Any]: return dict() @staticmethod diff --git a/pandarallel/utils.py b/pandarallel/utils.py index c83e93e..64c85b8 100644 --- a/pandarallel/utils.py +++ b/pandarallel/utils.py @@ -87,6 +87,18 @@ def get_pandas_version() -> Tuple[int, int]: return int(major_str), int(minor_str) +def _get_axis_int(user_defined_function_kwargs): + axis = user_defined_function_kwargs.get("axis", 0) + + if axis not in {0, 1, "index", "columns"}: + raise ValueError(f"No axis named {axis} for object type DataFrame") + + axis_int = {0: 0, 1: 1, "index": 0, "columns": 1}[axis] + return axis_int + +def _opposite_axis_int(axis: int): + return 1 - axis + class WorkerStatus(int, Enum): Running = 0 Success = 1
nalepae/pandarallel
bb0f50faf9bd3e8e548736040d297613d4482eaa
diff --git a/tests/test_pandarallel.py b/tests/test_pandarallel.py index 6cbdc8d..03d5cb4 100644 --- a/tests/test_pandarallel.py +++ b/tests/test_pandarallel.py @@ -135,6 +135,26 @@ def func_dataframe_groupby_expanding_apply(request): )[request.param] [email protected](params=("named", "anonymous")) +def func_dataframe_apply_axis_0_no_reduce(request): + def func(x): + return x + + return dict( + named=func, anonymous=lambda x: x + )[request.param] + + [email protected](params=("named", "anonymous")) +def func_dataframe_apply_axis_1_no_reduce(request): + def func(x): + return x**2 + + return dict( + named=func, anonymous=lambda x: x**2 + )[request.param] + + @pytest.fixture def pandarallel_init(progress_bar, use_memory_fs): pandarallel.initialize( @@ -290,3 +310,29 @@ def test_dataframe_groupby_expanding_apply( .parallel_apply(func_dataframe_groupby_expanding_apply, raw=False) ) res.equals(res_parallel) + +def test_dataframe_axis_0_no_reduction( + pandarallel_init, func_dataframe_apply_axis_0_no_reduce, df_size +): + df = pd.DataFrame( + dict(a=np.random.randint(1, 10, df_size), b=np.random.randint(1, 10, df_size), c=np.random.randint(1, 10, df_size)) + ) + res = df.apply(func_dataframe_apply_axis_0_no_reduce) + + res_parallel = df.parallel_apply(func_dataframe_apply_axis_0_no_reduce) + + assert res.equals(res_parallel) + +def test_dataframe_axis_1_no_reduction( + pandarallel_init, func_dataframe_apply_axis_1_no_reduce, df_size +): + df = pd.DataFrame( + dict(a=np.random.randint(1, 10, df_size), b=np.random.randint(1, 10, df_size), c=np.random.randint(1, 10, df_size)) + ) + + res = df.apply(func_dataframe_apply_axis_1_no_reduce, axis=1) + + res_parallel = df.parallel_apply(func_dataframe_apply_axis_1_no_reduce, axis=1) + + assert res.equals(res_parallel) +
parallel_apply(..., axis=0) returns duplicated rows. This behavior is different from pandas.
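A minimal sketch of the failure mode fixed here: a non-reducing axis=0 apply splits the frame by columns across workers, so the partial results must be concatenated back along the column axis. Concatenating along rows, the old behavior, stacks the chunks and duplicates the index:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
chunks = [df[["a", "b"]], df[["c"]]]               # per-worker column chunks
results = [c.apply(lambda x: x) for c in chunks]   # identity: no reduction

wrong = pd.concat(results, axis=0)   # 4 rows, index duplicated (the bug)
fixed = pd.concat(results, axis=1)   # reassembles the original frame

assert fixed.equals(df)
assert len(wrong) == 2 * len(df)
```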
0.0
[ "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1-True-False]" ]
[ "tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1-True-False]", "tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[False-None]", "tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[False-False]", "tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[True-None]", "tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[True-False]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1000-False-None]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1000-True-None]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1-False-None]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1-True-None]", "tests/test_pandarallel.py::test_dataframe_applymap[named-1-True-False]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-False-None]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-True-None]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-False-None]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-True-None]", "tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-True-False]", "tests/test_pandarallel.py::test_series_map[named-1000-False-None]", "tests/test_pandarallel.py::test_series_map[named-1000-False-False]", "tests/test_pandarallel.py::test_series_map[named-1000-True-None]", "tests/test_pandarallel.py::test_series_map[named-1000-True-False]", "tests/test_pandarallel.py::test_series_map[named-1-False-None]", "tests/test_pandarallel.py::test_series_map[named-1-False-False]", "tests/test_pandarallel.py::test_series_map[named-1-True-None]", "tests/test_pandarallel.py::test_series_map[named-1-True-False]", "tests/test_pandarallel.py::test_series_map[anonymous-1000-False-None]", "tests/test_pandarallel.py::test_series_map[anonymous-1000-False-False]", 
"tests/test_pandarallel.py::test_series_map[anonymous-1000-True-None]", "tests/test_pandarallel.py::test_series_map[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_series_map[anonymous-1-False-None]", "tests/test_pandarallel.py::test_series_map[anonymous-1-False-False]", "tests/test_pandarallel.py::test_series_map[anonymous-1-True-None]", "tests/test_pandarallel.py::test_series_map[anonymous-1-True-False]", "tests/test_pandarallel.py::test_series_apply[named-1000-False-None]", "tests/test_pandarallel.py::test_series_apply[named-1000-False-False]", "tests/test_pandarallel.py::test_series_apply[named-1000-True-None]", "tests/test_pandarallel.py::test_series_apply[named-1000-True-False]", "tests/test_pandarallel.py::test_series_apply[named-1-False-None]", "tests/test_pandarallel.py::test_series_apply[named-1-False-False]", "tests/test_pandarallel.py::test_series_apply[named-1-True-None]", "tests/test_pandarallel.py::test_series_apply[named-1-True-False]", "tests/test_pandarallel.py::test_series_apply[anonymous-1000-False-None]", "tests/test_pandarallel.py::test_series_apply[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_series_apply[anonymous-1000-True-None]", "tests/test_pandarallel.py::test_series_apply[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_series_apply[anonymous-1-False-None]", "tests/test_pandarallel.py::test_series_apply[anonymous-1-False-False]", "tests/test_pandarallel.py::test_series_apply[anonymous-1-True-None]", "tests/test_pandarallel.py::test_series_apply[anonymous-1-True-False]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1000-False-None]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1000-False-False]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1000-True-None]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1000-True-False]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1-False-None]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1-False-False]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1-True-None]", "tests/test_pandarallel.py::test_series_rolling_apply[named-1-True-False]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-False-None]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-True-None]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-False-None]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-False-False]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-True-None]", "tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-True-False]", 
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-False-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-True-None]", "tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1000-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1000-True-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1-False-False]", "tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1-True-False]" ]
2022-07-21 17:16:22+00:00
4,099
namaggarwal__splitwise-68
diff --git a/README.md b/README.md index 41a155f..c80c8fd 100644 --- a/README.md +++ b/README.md @@ -395,6 +395,17 @@ print("content:", comment.getContent()) print("errors:", errors) ``` +### Get Notifications + +You can use ```getNotifications()``` to get recent Notifications. It returns an array of ```Notification``` object. + +```python +sObj = Splitwise(Config.consumer_key,Config.consumer_secret) +sObj.setAccessToken(session['access_token']) +id = 982430660 +notifications = sObj.getNotifications() +``` + ## Objects ### User @@ -582,7 +593,27 @@ Methods: 7. getDeletedAt(id) - Returns datetime at which comment was deleted 8. getUser() - Returns a ```User``` object containing user details +### Notification + +Methods: + +1. getId() - Returns the id +2. getContent() - Returns message +3. getImageShape() - Returns comment type +4. getImageType() - Returns relation type of the comment +5. source - Returns source object +6. getCreatedAt() - Returns datetime at which notification was created +7. getCreatedBy() - Returns id of user who created notification + +### Source + +Used with Notifications. + +Methods: +1. getId() - Returns the id +2. getType() - Returns type. Use in combination with ID to fetch structured data +3. getUrl() - Returns url ## Sample Application diff --git a/splitwise/__init__.py b/splitwise/__init__.py index a3d4ed3..b2e1029 100644 --- a/splitwise/__init__.py +++ b/splitwise/__init__.py @@ -25,6 +25,7 @@ from splitwise.group import Group from splitwise.category import Category from splitwise.expense import Expense from splitwise.comment import Comment +from splitwise.notification import Notification from splitwise.error import SplitwiseError from requests_oauthlib import OAuth1, OAuth2Session, OAuth2 from requests import Request, sessions @@ -103,6 +104,8 @@ class Splitwise(object): "api/"+SPLITWISE_VERSION+"/get_comments" CREATE_COMMENT_URL = SPLITWISE_BASE_URL + \ "api/"+SPLITWISE_VERSION+"/create_comment" + GET_NOTIFICATIONS_URL = SPLITWISE_BASE_URL + \ + "api/"+SPLITWISE_VERSION+"/get_notifications" debug = False @@ -867,3 +870,30 @@ class Splitwise(object): errors = SplitwiseError(content['errors']) return comment, errors + + # TODO: Implement optional args + def getNotifications(self, updated_since=None, limit=None): + """ + Get notifications. + + Args: + updated_since(string): Optional. ISO8601 Timestamp. + limit(long): Optional. 
Defaults to 0 + + Returns: + :obj:`splitwise.notification.Notifications`: Object representing Notifications + """ + + try: + content = self.__makeRequest(Splitwise.GET_NOTIFICATIONS_URL) + except SplitwiseNotAllowedException as e: + e.setMessage("You are not allowed to fetch notifications") + raise + + content = json.loads(content) + notifications = [] + if "notifications" in content: + for n in content["notifications"]: + notifications.append(Notification(n)) + + return notifications diff --git a/splitwise/notification.py b/splitwise/notification.py new file mode 100644 index 0000000..156dc32 --- /dev/null +++ b/splitwise/notification.py @@ -0,0 +1,125 @@ +class Notification(object): + """ + Contains notification details + Attributes: + id(long): Notification Id + content(string): Notification content + type(long): Notification type + image_url(string): Url + image_shape(string): Shape of image + created_at(datetime): Notification creation datetime + created_by(long): User Id of the notification creator + source: The related thing (ie, Expense) + """ + def __init__(self, data=None): + """ + Args: + data(:obj:`json`, optional): JSON object representing comment object + """ + + if data: + self.id = data["id"] + self.content = data["content"] + self.type = data["type"] + self.created_at = data["created_at"] + self.created_by = data["created_by"] + self.image_shape = data["image_shape"] + self.image_url = data["image_url"] + self.source = Source(data["source"]) + + def getId(self): + """ Returns Notification's Id + Returns: + str: Notification's Id + """ + + return self.id + + def getContent(self): + """ Returns message + Returns: + str: Content of the notification - text and HTML. + """ + + return self.content + + def getType(self): + """ Returns Notification type + Returns: + long: Notification type + """ + + return self.type + + def getCreatedBy(self): + """ Returns id who triggered Notification was created + Returns: + long: Notification's creator id + """ + + return self.created_by + + def getCreatedAt(self): + """ Returns datetime at which notification was created + Returns: + datetime: Notification's creation date + """ + + return self.created_at + + def getImageShape(self): + """ Returns shape of image + Returns: + string: Image shape, ie square + """ + + return self.image_shape + + def getImageUrl(self): + """ Returns url of image + Returns: + string: Image url + """ + + return self.image_url + + +class Source(object): + """ + Contains made on an expense + Attributes: + id(long): Notification Source Id + type(long): Notification Source type + url(string): Url + """ + def __init__(self, data=None): + """ + Args: + data(:obj:`json`, optional): JSON object representing source object + """ + + if data: + self.id = data["id"] + self.type = data["type"] + self.url = data["url"] + + def getType(self): + """ Returns Notification Source's Type + Returns: + str: Notification Source's Type, ie Expense + """ + return self.type + + def getId(self): + """ Returns Notification Source's Id + Returns: + long: Notification Source's Id + """ + return self.id + + def getUrl(self): + """ Returns Notification Source's Url + Returns: + str: Notification Source's Url + """ + return self.url
namaggarwal/splitwise
c87e120fed91b0994665182bac0447ccd62954ae
diff --git a/tests/test_getNotifications.py b/tests/test_getNotifications.py new file mode 100755 index 0000000..100f426 --- /dev/null +++ b/tests/test_getNotifications.py @@ -0,0 +1,40 @@ +from splitwise import Splitwise +import unittest +try: + from unittest.mock import patch +except ImportError: # Python 2 + from mock import patch + + +@patch('splitwise.Splitwise._Splitwise__makeRequest') +class GetNotifications(unittest.TestCase): + + def setUp(self): + self.sObj = Splitwise('consumerkey', 'consumersecret') + + def test_getNotifications_success(self, mockMakeRequest): + mockMakeRequest.return_value = '{"notifications": [{"id": 32514315,"type": 0,"created_at": "2019-08-24T14:15:22Z","created_by": 2,"source": {"type": "Expense","id": 865077,"url": "string"},"image_url": "https://s3.amazonaws.com/splitwise/uploads/notifications/v2/0-venmo.png","image_shape": "square","content": "<strong>You</strong> paid <strong>Jon H.</strong>.<br><font color=\\\"#5bc5a7\\\">You paid $23.45</font>"}]}' # noqa: E501 + notifications = self.sObj.getNotifications() # TODO: Coverage of updated_after, limit: 0 arguments + mockMakeRequest.assert_called_with("https://secure.splitwise.com/api/v3.0/get_notifications") + + self.assertEqual(notifications[0].getId(), 32514315) + self.assertEqual(notifications[0].getType(), 0) # TODO: Constants? + self.assertEqual(notifications[0].getCreatedAt(), "2019-08-24T14:15:22Z") + self.assertEqual(notifications[0].getCreatedBy(), 2) # TODO: Users? + self.assertEqual(notifications[0].getImageUrl(), + "https://s3.amazonaws.com/splitwise/uploads/notifications/v2/0-venmo.png") + self.assertEqual(notifications[0].getImageShape(), "square") + self.assertEqual(notifications[0].getContent(), + "<strong>You</strong> paid <strong>Jon H.</strong>.<br><font color=\"#5bc5a7\">You paid $23.45</font>") # noqa: 501 + + self.assertEqual(notifications[0].source.getType(), "Expense") + self.assertEqual(notifications[0].source.getId(), 865077) + self.assertEqual(notifications[0].source.getUrl(), "string") + + def test_getNotifications_exception(self, mockMakeRequest): + mockMakeRequest.side_effect = Exception( + "Invalid response %s. Please check your consumer key and secret." % 404) + with self.assertRaises(Exception): + self.sObj.getNotifications() + mockMakeRequest.assert_called_with( + "https://secure.splitwise.com/api/v3.0/get_notifications") diff --git a/tests/test_updateExpense.py b/tests/test_updateExpense.py index 5e64774..8736f78 100644 --- a/tests/test_updateExpense.py +++ b/tests/test_updateExpense.py @@ -93,7 +93,8 @@ class UpdateExpenseTestCase(unittest.TestCase): expenseRes, errors = self.sObj.updateExpense(expense) mockMakeRequest.assert_called_with( "https://secure.splitwise.com/api/v3.0/update_expense/1010906976", "POST", - {'cost': '13', 'description': 'Testing', 'details': 'Full details of the expense', 'group_id': 19433671, 'split_equally': True}, files=None) + {'cost': '13', 'description': 'Testing', 'details': 'Full details of the expense', + 'group_id': 19433671, 'split_equally': True}, files=None) self.assertIsNone(errors) self.assertEqual(expenseRes.getId(), 1010906976) self.assertEqual(expenseRes.getGroupId(), 19433671) @@ -318,4 +319,4 @@ Sorry for the trouble!' users.append(user2) expense.setUsers(users) with self.assertRaises(SplitwiseBadRequestException): - expenseRes, errors = self.sObj.updateExpense(expense) \ No newline at end of file + expenseRes, errors = self.sObj.updateExpense(expense)
Add support for get_notifications https://dev.splitwise.com/#tag/notifications ``` { "notifications": [ { "id": 32514315, "type": 0, "created_at": "2019-08-24T14:15:22Z", "created_by": 2, "source": { "type": "Expense", "id": 865077, "url": "string" }, "image_url": "https://s3.amazonaws.com/splitwise/uploads/notifications/v2/0-venmo.png", "image_shape": "square", "content": "<strong>You</strong> paid <strong>Jon H.</strong>.<br><font color=\\\"#5bc5a7\\\">You paid $23.45</font>" } ] } ``` Enables use cases like: https://github.com/sriramsv/custom_component_splitwise/issues/4
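The README addition in this patch shows the intended call pattern; note that `Config` and `session` below are placeholders carried over from that README snippet, not objects defined here:

```python
from splitwise import Splitwise

sObj = Splitwise(Config.consumer_key, Config.consumer_secret)
sObj.setAccessToken(session['access_token'])

for n in sObj.getNotifications():
    print(n.getId(), n.getContent())
    print("  source:", n.source.getType(), n.source.getId())
```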
0.0
[ "tests/test_getNotifications.py::GetNotifications::test_getNotifications_exception", "tests/test_getNotifications.py::GetNotifications::test_getNotifications_success" ]
[ "tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_error", "tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_exception", "tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_missingExpenseId_Exception", "tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_split_equally_success", "tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_split_equally_with_receipt_success", "tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_split_manually_success" ]
2022-03-20 06:20:08+00:00
4,100
nanonyme__simplecpreprocessor-11
diff --git a/simplecpreprocessor.py b/simplecpreprocessor.py index d6d025b..53da5fb 100644 --- a/simplecpreprocessor.py +++ b/simplecpreprocessor.py @@ -104,6 +104,7 @@ IFDEF = "ifdef" IFNDEF = "ifndef" ELSE = "else" SKIP_FILE = object() +TOKEN = re.compile(r"\b\w+\b") class Preprocessor(object): @@ -228,27 +229,22 @@ class Preprocessor(object): self.process_define(define, old_line_num) def process_source_line(self, line, line_num): - matches = set() - line = self._recursive_transform(line, matches) + line = self._recursive_transform(line, set()) return line + self.line_ending - def _recursive_transform(self, line, matches): - original_line = line - new_matches = set() + def _recursive_transform(self, line, seen): def transform_word(match): word = match.group(0) - if word in matches: + if word in seen: return word else: - new_matches.add(word) - return self.defines.get(word, word) - line = re.sub(r"\b\w+\b", transform_word, line) - matches.update(new_matches) - if original_line == line: - return line - else: - return self._recursive_transform(line, matches) + local_seen = {word} + local_seen.update(seen) + word = self.defines.get(word, word) + return self._recursive_transform(word, local_seen) + + return TOKEN.sub(transform_word, line) def skip_file(self, name): item = self.include_once.get(name)
nanonyme/simplecpreprocessor
766d87b952e0ad6f573ce396aff41cc13521e79e
diff --git a/test_simplecpreprocessor.py b/test_simplecpreprocessor.py index 71ae36d..8a4f33b 100644 --- a/test_simplecpreprocessor.py +++ b/test_simplecpreprocessor.py @@ -97,6 +97,14 @@ class TestSimpleCPreprocessor(ProfilerMixin, unittest.TestCase): expected_list = ["(4 + (2 * x))\n", "(2 * (4 + y))\n"] self.run_case(f_obj, expected_list) + def test_define_indirect_self_reference_multiple(self): + f_obj = FakeFile("header.h", ["#define I 1\n", + "#define J I + 2\n", + "#define K I + J\n", + "I\n", "J\n", "K\n"]) + expected_list = ["1\n", "1 + 2\n", "1 + 1 + 2\n"] + self.run_case(f_obj, expected_list) + def test_partial_match(self): f_obj = FakeFile("header.h", [ "#define FOO\n",
Macro expansion produces wrong results

See this commit for details: https://github.com/nanonyme/simplecpreprocessor/commit/c3a1d4617fa0977bbc0f6bd0098b2d74219489bf
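The fix in the patch above replaces a fixpoint rewrite of the whole line with path-aware recursion: each expansion path tracks which macro names it has already substituted, so indirect references expand fully while self-references stop. A minimal standalone sketch of that idea reproduces the expected test output:

```
import re

TOKEN = re.compile(r"\b\w+\b")

def expand(line, defines, seen=frozenset()):
    """Recursively substitute macros, refusing to re-expand a name
    already used on the current expansion path (prevents loops)."""
    def substitute(match):
        word = match.group(0)
        if word in seen or word not in defines:
            return word
        return expand(defines[word], defines, seen | {word})
    return TOKEN.sub(substitute, line)

defines = {"I": "1", "J": "I + 2", "K": "I + J"}
print(expand("K", defines))  # -> "1 + 1 + 2"
```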
0.0
[ "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_indirect_self_reference_multiple" ]
[ "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_blank_define", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_complex_ignore", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_indirect_self_reference", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_inside_ifndef", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_parens", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_simple_self_referential", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_undefine", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_with_comment", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_expand_size_t", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_extra_endif_causes_error", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fulfilled_ifdef_define_allowed", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifdef_noskip", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifdef_skip", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifndef_noskip", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifndef_skip", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_handler_existing_file", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_handler_missing_file", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_else_defined", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_else_undefined", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_file_guard", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_left_open_causes_error", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_unfulfilled_define_ignored", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_with_comment", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_else_defined", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_else_undefined", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_fulfilled_define_allowed", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_left_open_causes_error", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_unfulfilled_define_ignored", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ignore_include_path", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_local_file_with_subdirectory", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_missing_local_file", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_with_path_list", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_with_path_list_with_subdirectory", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_lines_normalize_custom", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_lines_normalized", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_multiline_define", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_no_fullfile_guard_ifdef", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_no_fullfile_guard_ifndef", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_partial_match", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_platform_constants", 
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_pragma_once", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_repeated_macro", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_space_macro_indentation", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_tab_macro_indentation", "test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_unexpected_macro_gives_parse_error" ]
2018-06-24 08:39:38+00:00
4,101
napalm-automation-community__napalm-s350-39
diff --git a/napalm_s350/s350.py b/napalm_s350/s350.py index 23c371a..206249f 100644 --- a/napalm_s350/s350.py +++ b/napalm_s350/s350.py @@ -406,14 +406,17 @@ class S350Driver(NetworkDriver): match = re.match(r'^--------- -+ .*$', line) if match: header = False + fields_end = self._get_lldp_neighbors_fields_end(line) continue - line_elems = line.split() + line_elems = self._get_lldp_neighbors_line_to_fields(line, fields_end) - # long system name owerflow to the other line - if len(line_elems) == 1: - # complete remote name - remote_name = remote_name + line_elems[0] + # info owerflow to the other line + if line_elems[0] == '' or line_elems[4] == '' or line_elems[5] == '' : + # complete owerflown fields + local_port = local_port + line_elems[0] + remote_port = remote_port + line_elems[2] + remote_name = remote_name + line_elems[3] # then reuse old values na rewrite previous entry else: local_port = line_elems[0] @@ -429,6 +432,26 @@ class S350Driver(NetworkDriver): return neighbors + def _get_lldp_neighbors_line_to_fields(self, line, fields_end): + """ dynamic fields lenghts """ + line_elems={} + index=0 + f_start=0 + for f_end in fields_end: + line_elems[index] = line[f_start:f_end].strip() + index += 1 + f_start = f_end + return line_elems + + def _get_lldp_neighbors_fields_end(self, dashline): + """ fields length are diferent device to device, detect them on horizontal lin """ + + fields_end=[m.start() for m in re.finditer(' ', dashline)] + #fields_position.insert(0,0) + fields_end.append(len(dashline)) + + return fields_end + def _get_lldp_line_value(self, line): """ Safe-ish method to get the value from an 'lldp neighbors $IF' line.
napalm-automation-community/napalm-s350
efc48421d4d975d790feaaa73a6151372ce5b10a
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json index 942f033..591a9c5 100644 --- a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json +++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json @@ -4,7 +4,7 @@ "port" : "74:ac:aa:ff:1a:5f" } ], "gi1/0/35" : [ { - "hostname" : "O", + "hostname" : "", "port" : "34:e6:aa:ff:53:a8" } ], "te1/0/1" : [ { diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json index 51fea79..34b31f9 100644 --- a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json +++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json @@ -4,7 +4,7 @@ "port" : "74:ac:aa:ff:1a:5f" } ], "gi1/0/35" : [ { - "hostname" : "O", + "hostname" : "", "port" : "34:e6:aa:ff:53:a8" } ], "te1/0/1" : [ { diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/expected_result.json new file mode 100644 index 0000000..5683619 --- /dev/null +++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/expected_result.json @@ -0,0 +1,10 @@ +{ + "gi7" : [ { + "hostname" : "switchc23cf0", + "port" : "gi8" + } ], + "gi8" : [ { + "hostname" : "iai-sti0067.example.net", + "port" : "9c:eb:e8:52:bd:7c" + } ] +} diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/show_lldp_neighbors.txt b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/show_lldp_neighbors.txt new file mode 100644 index 0000000..7da8dbe --- /dev/null +++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/show_lldp_neighbors.txt @@ -0,0 +1,12 @@ + +System capability legend: +B - Bridge; R - Router; W - Wlan Access Point; T - telephone; +D - DOCSIS Cable Device; H - Host; r - Repeater; +TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other + + Port Device ID Port ID System Name Capabilities TTL +--------- ----------------- ------------- ----------------- ------------ ----- +gi7 00:a5:bf:c2:3c:f0 gi8 switchc23cf0 B, R 91 +gi8 f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti0067.examp B, W 115 + d:7c le.net + diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/expected_result.json new file mode 100644 index 0000000..8df5b44 --- /dev/null +++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/expected_result.json @@ -0,0 +1,1 @@ +{"gi7": [{"hostname": "switchc23cf0", "port": "gi8"}], "gi8": [{"hostname": "iai-sti006", "port": "9c:eb:e8:52:bd:7c"}]} diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/show_lldp_neighbors.txt b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/show_lldp_neighbors.txt new file mode 100644 index 0000000..cc12440 --- /dev/null +++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/show_lldp_neighbors.txt @@ -0,0 +1,12 @@ + +System capability legend: +B - Bridge; R - Router; W - Wlan Access Point; T - telephone; +D - DOCSIS Cable Device; H - Host; r - Repeater; +TP - Two Ports MAC Relay; S - 
S-VLAN; C - C-VLAN; O - Other + + Port Device ID Port ID System Name Capabilities TTL +--------- ----------------- ------------- ----------------- ------------ ----- +gi7 00:a5:bf:c2:3c:f0 gi8 switchc23cf0 B, R 91 +gi8 f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti006 B, W 115 + d:7c +
parsing show lldp neighbors

I am working my way through the development branch and came across the parser for lldp neighbors. I don't think it works reliably. I have seen cases where both the system name and the port ID are wrapped onto a continuation line. Examples:

```
switchf5bcdb#show lldp neighbors

System capability legend:
B - Bridge; R - Router; W - Wlan Access Point; T - telephone;
D - DOCSIS Cable Device; H - Host; r - Repeater;
TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other

  Port      Device ID         Port ID       System Name       Capabilities TTL
--------- ----------------- ------------- ----------------- ------------ -----
gi7       00:a5:bf:c2:3c:f0 gi8           switchc23cf0      B, R         91
gi8       f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti006        B, W         115
                            d:7c
```

```
System capability legend:
B - Bridge; R - Router; W - Wlan Access Point; T - telephone;
D - DOCSIS Cable Device; H - Host; r - Repeater;
TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other

  Port      Device ID           Port ID           System Name      Capabilities TTL
--------- ----------------- ----------------- ----------------- ------------ -----
te1/0/1   a0:f8:49:f2:4b:b6 te1/0/22          xx-rackswitch9    B, R         100
te1/0/2   a0:f8:49:f2:4b:b6 te2/0/22          xx-rackswitch9    B, R         105
te1/0/22  00:25:45:03:a5:c0 Te1/2             scn-0123-r004-3.t B, R         98
                                              mn.xxx.xxx.xxx
```

Lines: https://github.com/napalm-automation-community/napalm-s350/blob/657bcc570ddcb0115e017fc2819aa38f95bde177/napalm_s350/s350.py#L360-L396
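The merged patch solves this by deriving column boundaries from the dashed separator line and slicing every row, including continuation rows, at those offsets; a continuation row then shows up with an empty Port field, which signals that its fragments belong to the previous entry. A small self-contained sketch of that technique, with a hand-aligned example row (the exact spacing is illustrative):

```
import re

def field_ends(dashline):
    # A single space separates each run of dashes; each space marks
    # where one fixed-width column ends.
    ends = [m.start() for m in re.finditer(' ', dashline)]
    ends.append(len(dashline))
    return ends

def split_row(line, ends):
    fields, start = [], 0
    for end in ends:
        fields.append(line[start:end].strip())
        start = end
    return fields

sep = "--------- ----------------- ------------- ----------------- ------------ -----"
row = "gi8       f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti006        B, W         115"
print(split_row(row, field_ends(sep)))
# -> ['gi8', 'f2:18:98:43:be:9b', '9c:eb:e8:52:b', 'iai-sti006', 'B, W', '115']
```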
0.0
[ "test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG3xx-long_port_and_sysname]", "test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG350X-48P-K9-long_systemname]", "test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG3xx-long_port_id]", "test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG350X-48P-K9]" ]
[ "test/unit/test_getters.py::TestGetter::test_is_alive[normal]", "test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors_detail[SG350X-48P-K9]", "test/unit/test_getters.py::TestGetter::test_get_arp_table[SG300-10]", "test/unit/test_getters.py::TestGetter::test_get_arp_table[SG350X-48P-K9]", "test/unit/test_getters.py::TestGetter::test_get_arp_table_with_vrf[SG300-10]", "test/unit/test_getters.py::TestGetter::test_get_arp_table_with_vrf[SG350X-48P-K9]", "test/unit/test_getters.py::TestGetter::test_get_ntp_servers[SG350X-48P-K9]", "test/unit/test_getters.py::TestGetter::test_get_interfaces_ip[SG350X-48P-K9]", "test/unit/test_getters.py::TestGetter::test_get_config[SG300-10]", "test/unit/test_getters.py::TestGetter::test_get_config[SG350X-48P-K9]", "test/unit/test_getters.py::TestGetter::test_get_config_sanitized[SG300-10]", "test/unit/test_getters.py::TestGetter::test_get_config_sanitized[SG350X-48P-K9]" ]
2020-11-05 16:24:08+00:00
4,102
napjon__krisk-39
diff --git a/krisk/plot/__init__.py b/krisk/plot/__init__.py index 14e24b8..e702c4a 100644 --- a/krisk/plot/__init__.py +++ b/krisk/plot/__init__.py @@ -8,7 +8,8 @@ def bar(df, how='count', stacked=False, annotate=None, - full=False): + full=False, + trendline=False): """ Parameters ---------- @@ -26,10 +27,13 @@ def bar(df, stacked: Boolean, default to False. Whether to stacked category on top of the other categories. annotate: string, {'all',True} default to None - if True, annotate value on top of the plot element. If stacked is also True, annotate the last - category. if 'all' and stacked, annotate all category + if True, annotate value on top of the plot element. If stacked is also True, annotate the + last category. if 'all' and stacked, annotate all category full: boolean, default to False. If true, set to full area stacked chart. Only work if stacked is True. + trendline: boolean, default to False. + If true, add line that connected the bars. Only work if not category, category but stacked, + or not full. Returns ------- @@ -39,6 +43,7 @@ def bar(df, # TODO: add optional argument trendline return make_chart(df,type='bar',x=x,y=y,c=c,how=how,stacked=stacked,full=full, + trendline=trendline, annotate='top' if annotate == True else annotate) diff --git a/krisk/plot/bar_line.py b/krisk/plot/bar_line.py index 057ed1d..1101415 100644 --- a/krisk/plot/bar_line.py +++ b/krisk/plot/bar_line.py @@ -51,6 +51,20 @@ def set_bar_line_chart(chart, df, x, c, **kwargs): if kwargs['annotate'] == 'top': series[-1]['label'] = d_annotate + if kwargs['type'] == 'bar' and kwargs['trendline']: + trendline = {'name':'trendline', 'type': 'line'} + + if c and kwargs['stacked']: + trendline['data'] = [0] * len(series[-1]['data']) + trendline['stack'] = c + elif c is None: + trendline['data'] = series[0]['data'] + else: + raise AssertionError('Trendline must either stacked category, or not category') + + series.append(trendline) + + # TODO: make annotate receive all kinds supported in echarts.
napjon/krisk
b7489f45df16b6805b2f576d696dabc1a3bc5235
diff --git a/krisk/tests/data/bar_year_pop_mean_continent_trendline.json b/krisk/tests/data/bar_year_pop_mean_continent_trendline.json new file mode 100644 index 0000000..89aa040 --- /dev/null +++ b/krisk/tests/data/bar_year_pop_mean_continent_trendline.json @@ -0,0 +1,152 @@ +{ + "legend": { + "data": [ + "Africa", + "Americas", + "Asia", + "Europe", + "Oceania" + ] + }, + "title": { + "text": "" + }, + "yAxis": {}, + "series": [ + { + "stack": "continent", + "type": "bar", + "data": [ + 9279525, + 10270856, + 11000948, + 12760499, + 14760787, + 17152804, + 20033753, + 23254956, + 26298373, + 29072015, + 31287142, + 33333216 + ], + "name": "Africa" + }, + { + "stack": "continent", + "type": "bar", + "data": [ + 17876956, + 19610538, + 21283783, + 22934225, + 24779799, + 26983828, + 29341374, + 31620918, + 33958947, + 36203463, + 38331121, + 40301927 + ], + "name": "Americas" + }, + { + "stack": "continent", + "type": "bar", + "data": [ + 8425333, + 9240934, + 10267083, + 11537966, + 13079460, + 14880372, + 12881816, + 13867957, + 16317921, + 22227415, + 25268405, + 31889923 + ], + "name": "Asia" + }, + { + "stack": "continent", + "type": "bar", + "data": [ + 1282697, + 1476505, + 1728137, + 1984060, + 2263554, + 2509048, + 2780097, + 3075321, + 3326498, + 3428038, + 3508512, + 3600523 + ], + "name": "Europe" + }, + { + "stack": "continent", + "type": "bar", + "data": [ + 8691212, + 9712569, + 10794968, + 11872264, + 13177000, + 14074100, + 15184200, + 16257249, + 17481977, + 18565243, + 19546792, + 20434176 + ], + "name": "Oceania" + }, + { + "stack": "continent", + "data": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "type": "line", + "name": "trendline" + } + ], + "tooltip": { + "axisPointer": { + "type": "" + } + }, + "xAxis": { + "data": [ + 1952, + 1957, + 1962, + 1967, + 1972, + 1977, + 1982, + 1987, + 1992, + 1997, + 2002, + 2007 + ] + } +} \ No newline at end of file diff --git a/krisk/tests/data/bar_year_pop_mean_trendline.json b/krisk/tests/data/bar_year_pop_mean_trendline.json new file mode 100644 index 0000000..15ef467 --- /dev/null +++ b/krisk/tests/data/bar_year_pop_mean_trendline.json @@ -0,0 +1,68 @@ +{ + "legend": { + "data": [] + }, + "title": { + "text": "" + }, + "yAxis": {}, + "series": [ + { + "type": "bar", + "data": [ + 9111144.6, + 10062280.4, + 11014983.8, + 12217802.8, + 13612120.0, + 15120030.4, + 16044248.0, + 17615280.2, + 19476743.2, + 21899234.8, + 23588394.4, + 25911953.0 + ], + "name": "year" + }, + { + "data": [ + 9111144.6, + 10062280.4, + 11014983.8, + 12217802.8, + 13612120.0, + 15120030.4, + 16044248.0, + 17615280.2, + 19476743.2, + 21899234.8, + 23588394.4, + 25911953.0 + ], + "type": "line", + "name": "trendline" + } + ], + "tooltip": { + "axisPointer": { + "type": "" + } + }, + "xAxis": { + "data": [ + 1952, + 1957, + 1962, + 1967, + 1972, + 1977, + 1982, + 1987, + 1992, + 1997, + 2002, + 2007 + ] + } +} \ No newline at end of file diff --git a/krisk/tests/test_plot.py b/krisk/tests/test_plot.py index 41aed67..bdce2e5 100644 --- a/krisk/tests/test_plot.py +++ b/krisk/tests/test_plot.py @@ -77,6 +77,20 @@ def test_full_bar_line(gapminder): assert bar.option == line.option == true_option + +def test_trendline(gapminder): + + p = kk.bar(gapminder,'year',how='mean',y='pop',trendline=True) + assert p.get_option() == read_option_tests('bar_year_pop_mean_trendline.json') + + p = kk.bar(gapminder,'year',how='mean',y='pop',trendline=True,c='continent',stacked=True) + assert p.get_option() == 
read_option_tests('bar_year_pop_mean_continent_trendline.json') + + try: + kk.bar(gapminder,'year',how='mean',y='pop',trendline=True,c='continent') + except AssertionError: + pass + def test_hist(gapminder): true_option = read_option_tests('hist_x.json')
Add trendline parameter for bar chart

Add a trendline that shows the changes across the bars in a bar chart.
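A minimal sketch of what the patch does: the trendline is just an extra 'line'-type ECharts series appended after the bar series, riding on top of the stack when a stacked category is present. The function name and error message wording here are mine, not krisk's:

```
def add_trendline(series, category=None, stacked=False):
    """Append a line series tracing the bars, mirroring the patch rules:
    allowed when there is no category, or when the category is stacked."""
    trendline = {'name': 'trendline', 'type': 'line'}
    if category and stacked:
        # Stacked on top of the category stack, so zeros trace the total.
        trendline['data'] = [0] * len(series[-1]['data'])
        trendline['stack'] = category
    elif category is None:
        trendline['data'] = series[0]['data']
    else:
        raise AssertionError('Trendline requires no category, or a stacked category')
    series.append(trendline)

series = [{'name': 'year', 'type': 'bar', 'data': [9111144.6, 10062280.4]}]
add_trendline(series)
```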
0.0
[ "krisk/tests/test_plot.py::test_trendline" ]
[ "krisk/tests/test_plot.py::test_bar", "krisk/tests/test_plot.py::test_line", "krisk/tests/test_plot.py::test_full_bar_line", "krisk/tests/test_plot.py::test_scatter" ]
2016-09-02 13:48:46+00:00
4,103
napjon__krisk-53
diff --git a/krisk/plot/api.py b/krisk/plot/api.py index 281688f..dd4437d 100644 --- a/krisk/plot/api.py +++ b/krisk/plot/api.py @@ -9,7 +9,10 @@ def bar(df, stacked=False, annotate=None, full=False, - trendline=False): + trendline=False, + sort_on='index', + sort_c_on=None, + ascending=True): """ Parameters ---------- @@ -17,33 +20,42 @@ def bar(df, data to be used for the chart x: string columns to be used as category axis - y: string, default to None + y: string, default None if None, use count of category value. otherwise aggregate based on y columns - category: string, default to None + category: string, default None another grouping columns inside x-axis - how: string, default to None + how: string, default None to be passed to pd.group_by(x).aggregate(how). Can be mean,median, or any reduced operations. - stacked: Boolean, default to False. + stacked: Boolean, default False. Whether to stacked category on top of the other categories. - annotate: string, {'all',True} default to None + annotate: string, {'all',True} default None if True, annotate value on top of the plot element. If stacked is also True, annotate the last category. if 'all' and stacked, annotate all category - full: boolean, default to False. + full: boolean, default False. If true, set to full area stacked chart. Only work if stacked is True. - trendline: boolean, default to False. + trendline: boolean, default False. If true, add line that connected the bars. Only work if not category, category but stacked, - or not full. + or not full. + sort_on: {'index', 'values', int, 'count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'}, + default 'index'. + Add sort mode. Only work when c is None. + If index, sort index on lexicographical order. use as s.sort_index() + if values, sort based on values. Use as s.sort_values() + If string, deviation from value provided by pd.Series.describe() + if integer, treat as value and deviate from that value + sort_c_on: string, default None. + specify a category as basis sort value if c is specified. Must be specified when use + sort_on other than default value. + ascending: boolean, default True + sort ascending vs. descending Returns ------- Chart Object """ - - # TODO: add optional argument trendline - return make_chart(df,type='bar',x=x,y=y,c=c,how=how,stacked=stacked,full=full, - trendline=trendline, + trendline=trendline, sort_on=sort_on, sort_c_on=sort_c_on, ascending=ascending, annotate='top' if annotate == True else annotate) @@ -56,7 +68,10 @@ def line(df, area=False, annotate=None, full=False, - smooth=False): + smooth=False, + sort_on='index', + sort_c_on=None, + ascending=True): """ Parameters ---------- @@ -64,29 +79,41 @@ def line(df, data to be used for the chart x: string columns to be used as category axis - y: string, default to None + y: string, default None if None, use count of category value. otherwise aggregate based on y columns - c: string, default to None + c: string, default None category column inside x-axis - how: string, default to None + how: string, default None to be passed to pd.group_by(x).aggregate(how). Can be mean,median, or any reduced operations. - stacked: Boolean, default to False. + stacked: Boolean, default False. Whether to stacked category on top of the other categories. - annotate: string, {'all',True} default to None + annotate: string, {'all',True} default None if True, annotate value on top of the plot element. If stacked is also True, annotate the last category. 
if 'all' and stacked, annotate all category - full: boolean, default to False. + full: boolean, default False. If true, set to full area stacked chart. Only work if stacked is True. - smooth: boolean, default to False. + smooth: boolean, default False. If true, smooth the line. - + sort_on: {'index', 'values', int, 'count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'}, + default 'index'. + Add sort mode. Only work when c is None. + If index, sort index on lexicographical order. use as s.sort_index() + if values, sort based on values. Use as s.sort_values() + If string, deviation from value provided by pd.Series.describe() + if integer, treat as value and deviate from that value + sort_c_on: string, default None. + specify a category as basis sort value if c is specified. Must be specified when use + sort_on other than default value. + ascending: boolean, default True + sort ascending vs. descending + Returns ------- Chart Object """ return make_chart(df,type='line',x=x,y=y,c=c,how=how,stacked=stacked,area=area,full=full, - smooth=smooth, + smooth=smooth, sort_on=sort_on, sort_c_on=sort_c_on, ascending=ascending, annotate='top' if annotate == True else annotate) @@ -105,18 +132,18 @@ def hist(df, data to be used for the chart x: string columns to be used as category axis - c: string, default to None + c: string, default None another grouping columns inside x-axis - bins: int, default to 10 + bins: int, default 10 Set number of bins in histogram - normed: boolean, default to False + normed: boolean, default False Whether normalize the histogram - stacked: Boolean, default to False. + stacked: Boolean, default False. Whether to stacked category on top of the other categories. - annotate: string, {'all',True} default to None + annotate: string, {'all',True} default None if True, annotate value on top of the plot element. If stacked is also True, annotate the last category. if 'all' and stacked, annotate all category - density: boolean, default to False. + density: boolean, default False. 
Whether to add density to the plot Returns @@ -136,12 +163,12 @@ def scatter(df, x, y, s=None, c=None, saturate=None, size_px=(10, 70)): data to be used for the chart x,y: string, columns in pd.DataFrame Used as coordinate in scatter chart - s: string, columns in pd.DataFrame default to None + s: string, columns in pd.DataFrame default None Used as sizing value of the scatter points - c: string, default to None + c: string, default None column used as grouping color category saturation - size_px: tuple, default to (10,70) + size_px: tuple, default (10,70) boundary size, lower and upper limit in pixel for min-max scatter points @@ -149,5 +176,4 @@ def scatter(df, x, y, s=None, c=None, saturate=None, size_px=(10, 70)): ------- Chart Object """ - #TODO add saturation return make_chart(df,type='scatter',x=x,y=y,s=s,c=c,saturate=saturate,size_px=size_px) diff --git a/krisk/plot/bar_line.py b/krisk/plot/bar_line.py index 73ffa54..931a83e 100644 --- a/krisk/plot/bar_line.py +++ b/krisk/plot/bar_line.py @@ -95,11 +95,7 @@ def set_bar_line_chart(chart, df, x, c, **kwargs): else: raise AssertionError('Density must either stacked category, or not category') - series.append(density) - - - - + series.append(density) def get_bar_line_data(df, x, c, y, **kwargs): @@ -119,11 +115,29 @@ def get_bar_line_data(df, x, c, y, **kwargs): else: data = df[x].value_counts() + #Specify sort_on and order method + sort_on = kwargs['sort_on'] + descr_keys = pd.Series([0]).describe().keys().tolist() + + if isinstance(sort_on, str): + assert sort_on in ['index','values'] + descr_keys + + if sort_on == 'index': + data.sort_index(inplace=True, ascending=kwargs['ascending']) + else: + if sort_on != 'values': + val_deviation = data.describe().loc[sort_on] if isinstance(sort_on, str) else sort_on + data = data - val_deviation + if c: + assert kwargs['sort_c_on'] is not None + data.sort_values(kwargs['sort_c_on'], inplace=True, ascending=kwargs['ascending']) + else: + data.sort_values(inplace=True, ascending=kwargs['ascending']) + # Stacked when category if c and kwargs['stacked'] and kwargs['full']: data = data.div(data.sum(1),axis=0) - return data
napjon/krisk
19fb69026ff1339649fac5ad82548ccbdc7b8d19
diff --git a/krisk/tests/test_plot.py b/krisk/tests/test_plot.py index ebbd7f5..26fc661 100644 --- a/krisk/tests/test_plot.py +++ b/krisk/tests/test_plot.py @@ -103,6 +103,25 @@ def test_full_bar_line(gapminder): assert remove_name_label(bar).option == remove_name_label(line).option == true_option +def test_sort_bar_line(gapminder): + p = kk.line(gapminder,'year', y='pop', how='mean',c='continent', sort_on='mean', sort_c_on='Americas') + + assert p.option['xAxis']['data'] == [1952, 1957, 1962, 1967, 1972, 1977, 1982, 1987, 1992, 1997, 2002, 2007] + assert p.option['legend']['data'] == ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] + assert p.option['series'][0] == {'data': [-10595881.167, + -9604550.167, + -8874458.167, + -7114907.167, + -5114619.167, + -2722602.167, + 158346.833, + 3379549.833, + 6422966.833, + 9196608.833, + 11411735.833, + 13457809.833], + 'name': 'Africa', + 'type': 'line'} def test_hist(gapminder):
Add Bar Mode for Sequential, Diverging, and Qualitative

Add 'mode' as a keyword parameter for the bar chart:

* If None, treat the bar mode as "Qualitative" and sort in lexicographical order.
* If Boolean, treat the bar as "Sequential"; the flag sets whether the sort is ascending.
* If string, either 'median' or 'mean': sort by deviation from that value.
* If integer, treat it as a value and sort by deviation from that value.
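A hedged pandas sketch of the proposed sort semantics, close to what the merged patch implements; `sort_series` is an illustrative name:

```
import pandas as pd

def sort_series(data, sort_on='index', ascending=True):
    """Apply the sort modes described above to an aggregated Series."""
    if sort_on == 'index':
        return data.sort_index(ascending=ascending)
    if sort_on != 'values':
        # 'mean', 'std', '50%', ... come from describe(); an int/float
        # is treated as a literal reference value to deviate from.
        deviation = data.describe()[sort_on] if isinstance(sort_on, str) else sort_on
        data = data - deviation
    return data.sort_values(ascending=ascending)

s = pd.Series([30, 10, 20], index=['c', 'a', 'b'])
print(sort_series(s, sort_on='mean'))  # values shifted by the mean (20), then sorted
```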
0.0
[ "krisk/tests/test_plot.py::test_sort_bar_line" ]
[ "krisk/tests/test_plot.py::test_bar", "krisk/tests/test_plot.py::test_trendline", "krisk/tests/test_plot.py::test_line", "krisk/tests/test_plot.py::test_smooth_line", "krisk/tests/test_plot.py::test_full_bar_line", "krisk/tests/test_plot.py::test_scatter" ]
2016-10-12 05:48:29+00:00
4,104
napjon__krisk-58
diff --git a/krisk/plot/make_chart.py b/krisk/plot/make_chart.py index 924e015..68e3c41 100644 --- a/krisk/plot/make_chart.py +++ b/krisk/plot/make_chart.py @@ -43,6 +43,9 @@ def make_chart(df, **kwargs): if kwargs.get('y', None): chart.set_ylabel(kwargs['y']) + if kwargs['type'] == 'line': + chart.set_tooltip_style(trigger='axis',axis_pointer='shadow') + if kwargs['type'] in ['bar', 'line', 'hist']: set_bar_line_chart(chart, df, **kwargs)
napjon/krisk
8497da2333a8265b2e19c87dcec3bae20b8d4059
diff --git a/krisk/tests/test_plot.py b/krisk/tests/test_plot.py index 50435da..13b78e3 100644 --- a/krisk/tests/test_plot.py +++ b/krisk/tests/test_plot.py @@ -92,6 +92,8 @@ def test_line(gapminder): annotate='all') opt = read_option_tests('line.json') assert_barline_data(p, opt) + assert p.option['tooltip']['axisPointer']['type'] == 'shadow' + assert p.option['tooltip']['trigger'] == 'axis' def test_smooth_line(gapminder): @@ -134,6 +136,7 @@ def test_sort_bar_line(gapminder): 'name': 'Africa', 'type': 'line'} + def test_hist(gapminder): p1 = kk.hist(gapminder,'lifeExp',bins=10) opt1 = read_option_tests('hist_x.json')
Line plots should default to tooltip trigger 'axis' and axis pointer 'shadow'
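A small sketch of the resulting default, expressed as a plain dict transformation over the ECharts option; `apply_line_tooltip_defaults` is an illustrative helper, not krisk's API:

```
def apply_line_tooltip_defaults(option, chart_type):
    """Give line charts an axis-triggered tooltip with a shadow pointer,
    matching the defaults added in the patch above."""
    if chart_type == 'line':
        option.setdefault('tooltip', {})
        option['tooltip']['trigger'] = 'axis'
        option['tooltip'].setdefault('axisPointer', {})['type'] = 'shadow'
    return option

print(apply_line_tooltip_defaults({'tooltip': {'axisPointer': {'type': ''}}}, 'line'))
```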
0.0
[ "krisk/tests/test_plot.py::test_line" ]
[ "krisk/tests/test_plot.py::test_bar", "krisk/tests/test_plot.py::test_trendline", "krisk/tests/test_plot.py::test_smooth_line", "krisk/tests/test_plot.py::test_full_bar_line", "krisk/tests/test_plot.py::test_sort_bar_line", "krisk/tests/test_plot.py::test_scatter" ]
2016-10-17 08:32:31+00:00
4,105
napjon__krisk-63
diff --git a/krisk/util.py b/krisk/util.py index db8bcb3..f798d93 100644 --- a/krisk/util.py +++ b/krisk/util.py @@ -25,9 +25,9 @@ def init_notebook(): from IPython.display import Javascript return Javascript(""" require.config({ - baseUrl : "//cdn.rawgit.com/napjon/krisk/master/krisk/static", + baseUrl : "https://cdn.rawgit.com/napjon/krisk/master/krisk/static", paths: { - echarts: "//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min" + echarts: "https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min" } }); """) \ No newline at end of file diff --git a/notebooks/Intro.ipynb b/notebooks/Intro.ipynb index fba28c7..792c2b8 100644 --- a/notebooks/Intro.ipynb +++ b/notebooks/Intro.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "collapsed": false }, @@ -19,9 +19,9 @@ "application/javascript": [ "\n", " require.config({\n", - " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", + " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", " paths: {\n", - " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", + " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", " }\n", " });\n", " " @@ -30,7 +30,7 @@ "<IPython.core.display.Javascript object>" ] }, - "execution_count": 2, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -38,7 +38,7 @@ "source": [ "import pandas as pd\n", "import krisk.plot as kk\n", - "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n", + "# Use this when you want to nbconvert the notebook (used by nbviewer)\n", "from krisk import init_notebook; init_notebook()" ] }, @@ -2944,8 +2944,9 @@ } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python [default]", "language": "python", "name": "python3" }, @@ -2959,7 +2960,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.5.1" } }, "nbformat": 4, diff --git a/notebooks/declarative-visualization.ipynb b/notebooks/declarative-visualization.ipynb index d59fa2f..d4bc7fb 100644 --- a/notebooks/declarative-visualization.ipynb +++ b/notebooks/declarative-visualization.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "metadata": { "collapsed": false }, @@ -19,9 +19,9 @@ "application/javascript": [ "\n", " require.config({\n", - " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", + " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", " paths: {\n", - " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", + " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", " }\n", " });\n", " " @@ -30,13 +30,13 @@ "<IPython.core.display.Javascript object>" ] }, - "execution_count": 3, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Only used this for nbviewer purposes (online js fetch). 
Don't use it.\n", + "# Use this when you want to nbconvert the notebook (used by nbviewer)\n", "from krisk import init_notebook; init_notebook()" ] }, diff --git a/notebooks/legend-title-toolbox.ipynb b/notebooks/legend-title-toolbox.ipynb index 26df8c2..a12b0c4 100644 --- a/notebooks/legend-title-toolbox.ipynb +++ b/notebooks/legend-title-toolbox.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "collapsed": false }, @@ -12,9 +12,9 @@ "application/javascript": [ "\n", " require.config({\n", - " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", + " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", " paths: {\n", - " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", + " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", " }\n", " });\n", " " @@ -23,7 +23,7 @@ "<IPython.core.display.Javascript object>" ] }, - "execution_count": 2, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -31,7 +31,7 @@ "source": [ "import pandas as pd\n", "import krisk.plot as kk\n", - "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n", + "# Use this when you want to nbconvert the notebook (used by nbviewer)\n", "from krisk import init_notebook; init_notebook()" ] }, @@ -1456,8 +1456,9 @@ } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python [default]", "language": "python", "name": "python3" }, @@ -1471,7 +1472,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.5.1" } }, "nbformat": 4, diff --git a/notebooks/resync-reproducible.ipynb b/notebooks/resync-reproducible.ipynb index c52c887..61a760a 100644 --- a/notebooks/resync-reproducible.ipynb +++ b/notebooks/resync-reproducible.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "collapsed": false }, @@ -12,9 +12,9 @@ "application/javascript": [ "\n", " require.config({\n", - " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", + " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", " paths: {\n", - " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", + " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", " }\n", " });\n", " " @@ -23,7 +23,7 @@ "<IPython.core.display.Javascript object>" ] }, - "execution_count": 2, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -34,7 +34,7 @@ "\n", "import pandas as pd\n", "import krisk.plot as kk\n", - "# Only used this for nbviewer purposes (online js fetch). 
Don't use it.\n", + "# Use this when you want to nbconvert the notebook (used by nbviewer)\n", "from krisk import init_notebook; init_notebook()" ] }, @@ -484,8 +484,9 @@ } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python [default]", "language": "python", "name": "python3" }, @@ -499,7 +500,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.5.1" } }, "nbformat": 4, diff --git a/notebooks/themes-colors.ipynb b/notebooks/themes-colors.ipynb index d73dcfa..269afc5 100644 --- a/notebooks/themes-colors.ipynb +++ b/notebooks/themes-colors.ipynb @@ -19,9 +19,9 @@ "application/javascript": [ "\n", " require.config({\n", - " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", + " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n", " paths: {\n", - " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", + " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n", " }\n", " });\n", " " @@ -38,7 +38,7 @@ "source": [ "import krisk.plot as kk\n", "import pandas as pd\n", - "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n", + "# Use this when you want to nbconvert the notebook (used by nbviewer)\n", "from krisk import init_notebook; init_notebook()" ] }, @@ -2394,8 +2394,9 @@ } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python [default]", "language": "python", "name": "python3" }, @@ -2409,7 +2410,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.5.1" } }, "nbformat": 4,
napjon/krisk
5d5980a518bb36fd7b1624fc36c2fbaa5c247e9b
diff --git a/krisk/tests/test_template.py b/krisk/tests/test_template.py index e5ef7e5..c25097f 100644 --- a/krisk/tests/test_template.py +++ b/krisk/tests/test_template.py @@ -21,9 +21,9 @@ def test_init_nb(): js_data = init_notebook().data js_init_template = """ require.config({{ - baseUrl : "//cdn.rawgit.com/napjon/krisk/master/krisk/static", + baseUrl : "https://cdn.rawgit.com/napjon/krisk/master/krisk/static", paths: {{ - echarts: "//cdnjs.cloudflare.com/ajax/libs/echarts/{VER}/echarts.min" + echarts: "https://cdnjs.cloudflare.com/ajax/libs/echarts/{VER}/echarts.min" }} }}); """
nbconvert not supported

**Reproduce:**
1. Use Intro.ipynb
2. Remove `from krisk import init_notebook; init_notebook()`, since the notebook is not going to be published to nbviewer.
3. Use nbconvert to export the current page as HTML.

**Output:**
The JavaScript part fails to render. Console errors:
`require.min.js:8 Uncaught Error: Script error for: dark`
`require.min.js:8 Failed to load resource: net::ERR_FILE_NOT_FOUND`
...

**Expected Output:**
Charts should be shown after exporting, just as when the notebook runs.

**Possible solution:**
It seems that after exporting, the page still tries to retrieve the source JS files from the local server and the current folder. Would it be possible to define the source files' location via Python commands?

**ENV:**
Python 3.5
Jupyter Notebook 4.2.1
Echarts 3.2.1 (built-in)
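The patch resolves this by pinning the require.js URLs to `https:`; protocol-relative `//` URLs resolve against `file://` when nbconverted HTML is opened from disk, so require.js cannot fetch ECharts. A sketch of the corrected helper, mirroring the patched `init_notebook` (the version constant is an assumption for illustration):

```
from IPython.display import Javascript

ECHARTS_VERSION = "3.2.1"  # assumed pin, matching the diff above

def init_notebook():
    # Absolute https URLs keep working after `jupyter nbconvert --to html`;
    # protocol-relative "//" would resolve against file:// on disk.
    return Javascript("""
    require.config({{
        baseUrl : "https://cdn.rawgit.com/napjon/krisk/master/krisk/static",
        paths: {{
            echarts: "https://cdnjs.cloudflare.com/ajax/libs/echarts/{ver}/echarts.min"
        }}
    }});
    """.format(ver=ECHARTS_VERSION))
```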
0.0
[ "krisk/tests/test_template.py::test_init_nb" ]
[ "krisk/tests/test_template.py::test_html" ]
2016-11-05 07:19:17+00:00
4,106
nathan-v__aws_okta_keyman-72
diff --git a/aws_okta_keyman/aws.py b/aws_okta_keyman/aws.py index 80484db..91e961b 100644 --- a/aws_okta_keyman/aws.py +++ b/aws_okta_keyman/aws.py @@ -179,7 +179,6 @@ class Session(object): multiple_accounts = False first_account = '' formatted_roles = [] - i = 0 for role in self.assertion.roles(): account = role['role'].split(':')[4] role_name = role['role'].split(':')[5].split('/')[1] @@ -187,20 +186,27 @@ class Session(object): 'account': account, 'role_name': role_name, 'arn': role['role'], - 'principle': role['principle'], - 'roleIdx': i + 'principle': role['principle'] }) if first_account == '': first_account = account elif first_account != account: multiple_accounts = True - i = i + 1 if multiple_accounts: formatted_roles = self.account_ids_to_names(formatted_roles) - self.roles = sorted(formatted_roles, - key=lambda k: (k['account'], k['role_name'])) + formatted_roles = sorted(formatted_roles, + key=lambda k: (k['account'], k['role_name'])) + + # set the role role index after sorting + i = 0 + for role in formatted_roles: + role['roleIdx'] = i + i = i + 1 + + self.roles = formatted_roles + return self.roles def assume_role(self, print_only=False):
nathan-v/aws_okta_keyman
9dee43b01e5650f12ba9fdc33ce911a10fa723ca
diff --git a/aws_okta_keyman/test/aws_test.py b/aws_okta_keyman/test/aws_test.py index 60be405..f4f1de1 100644 --- a/aws_okta_keyman/test/aws_test.py +++ b/aws_okta_keyman/test/aws_test.py @@ -271,10 +271,15 @@ class TestSession(unittest.TestCase): def test_assume_role_preset(self, mock_write): mock_write.return_value = None assertion = mock.Mock() - assertion.roles.return_value = [{'arn': '', 'principle': ''}] + + roles = [{'role': '::::1:role/role1', 'principle': '', 'arn': '1'}, + {'role': '::::1:role/role2', 'principle': '', 'arn': '2'}, + {'role': '::::1:role/role3', 'principle': '', 'arn': '3'}] + + assertion.roles.return_value = roles session = aws.Session('BogusAssertion') - session.role = 0 - session.roles = [{'arn': '', 'principle': ''}] + session.role = 1 + session.roles = roles session.assertion = assertion sts = {'Credentials': {'AccessKeyId': 'AKI', @@ -296,6 +301,13 @@ class TestSession(unittest.TestCase): mock_write.assert_has_calls([ mock.call() ]) + session.sts.assert_has_calls([ + mock.call.assume_role_with_saml( + RoleArn='2', + PrincipalArn='', + SAMLAssertion=mock.ANY, + DurationSeconds=3600) + ]) @mock.patch('aws_okta_keyman.aws.Session._print_creds') @mock.patch('aws_okta_keyman.aws.Session._write') @@ -420,23 +432,29 @@ class TestSession(unittest.TestCase): self.assertEqual(ret, expected) def test_available_roles(self): - roles = [{'role': '::::1:role/role', 'principle': ''}, - {'role': '::::1:role/role', 'principle': ''}] + roles = [{'role': '::::1:role/role1', 'principle': ''}, + {'role': '::::1:role/role3', 'principle': ''}, + {'role': '::::1:role/role2', 'principle': ''}] session = aws.Session('BogusAssertion') session.assertion = mock.MagicMock() session.assertion.roles.return_value = roles - expected = [ - {'account': '1', 'role_name': 'role', - 'principle': '', 'arn': '::::1:role/role', - 'roleIdx': 0}, - {'account': '1', 'role_name': 'role', - 'principle': '', 'arn': '::::1:role/role', - 'roleIdx': 1} - ] result = session.available_roles() print(result) + + expected = [ + {'account': '1', 'role_name': 'role1', + 'principle': '', 'arn': '::::1:role/role1', + 'roleIdx': 0}, + {'account': '1', 'role_name': 'role2', + 'principle': '', 'arn': '::::1:role/role2', + 'roleIdx': 1}, + {'account': '1', 'role_name': 'role3', + 'principle': '', 'arn': '::::1:role/role3', + 'roleIdx': 2} + ] + self.assertEqual(expected, result) def test_available_roles_multiple_accounts(self): @@ -453,9 +471,9 @@ class TestSession(unittest.TestCase): session.account_ids_to_names.return_value = roles_full expected = [ {'account': '1', 'role_name': 'role', - 'principle': '', 'arn': '::::1:role/role'}, + 'principle': '', 'arn': '::::1:role/role', 'roleIdx': 0}, {'account': '2', 'role_name': 'role', - 'principle': '', 'arn': '::::2:role/role'} + 'principle': '', 'arn': '::::2:role/role', 'roleIdx': 1} ] result = session.available_roles()
Role Selection In Windows

I've attached the response I received while choosing a role. The issue is that it picks the wrong role: in the output below I chose role 2, which should be **aws-exp-exp3**, but it assumed a different one. Please check.

12:56:37 (INFO) AWS Okta Keyman 🔐 v0.8.3
Password:
12:56:45 (WARNING) Okta Verify Push being sent...
12:56:45 (INFO) Waiting for MFA success...
12:56:48 (INFO) Waiting for MFA success...
12:56:50 (INFO) Successfully authed Rishabh Ag
12:56:51 (INFO) Getting SAML Assertion from example
12:56:51 (WARNING) Application-level MFA present; re-authenticating Okta
12:56:52 (WARNING) Okta Verify Push being sent...
12:56:52 (INFO) Waiting for MFA success...
12:56:55 (INFO) Waiting for MFA success...
12:56:57 (INFO) Getting SAML Assertion from example
12:56:59 (INFO) Starting AWS session for us-east-1
12:56:59 (WARNING) Multiple AWS roles found; please select one
Account Role
[0] 123456789009 aws-exp-exp1
[1] 123456789009 aws-exp-exp2
[2] 123456789009 aws-exp-exp3
Selection: 2
12:57:02 (INFO) Assuming role: arn:aws:iam::123456789009:role/aws-exp-exp2
12:57:04 (INFO) Wrote profile "default" to C:\Users\Rishabh/.aws\credentials 💾
12:57:04 (INFO) Current time is 2020-07-15 07:27:04.316262
12:57:04 (INFO) Session expires at 2020-07-15 08:27:03+00:00 ⏳
12:57:04 (INFO) All done! 👍
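The root cause, per the merged patch, is that `roleIdx` was assigned before the role list was sorted for display, so the printed menu index no longer matched the ARN actually assumed. A minimal sketch of the fix, numbering the roles only after sorting, with hypothetical data:

```
def index_roles(formatted_roles):
    """Sort roles for display first, then number them, so the menu
    selection maps back to the right ARN (the fix in the patch above)."""
    formatted_roles = sorted(formatted_roles,
                             key=lambda k: (k['account'], k['role_name']))
    for idx, role in enumerate(formatted_roles):
        role['roleIdx'] = idx
    return formatted_roles

roles = [{'account': '1', 'role_name': 'role3', 'arn': '3'},
         {'account': '1', 'role_name': 'role1', 'arn': '1'}]
print(index_roles(roles))  # role1 gets index 0, role3 gets index 1
```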
0.0
[ "aws_okta_keyman/test/aws_test.py::TestSession::test_available_roles", "aws_okta_keyman/test/aws_test.py::TestSession::test_available_roles_multiple_accounts" ]
[ "aws_okta_keyman/test/aws_test.py::TestCredentials::test_add_profile", "aws_okta_keyman/test/aws_test.py::TestCredentials::test_add_profile_missing_file_creates_new", "aws_okta_keyman/test/aws_test.py::TestSession::test_account_ids_to_names_call_failed", "aws_okta_keyman/test/aws_test.py::TestSession::test_account_ids_to_names_map", "aws_okta_keyman/test/aws_test.py::TestSession::test_account_names_from_html", "aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role", "aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_duration_rejected", "aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_multiple", "aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_preset", "aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_print", "aws_okta_keyman/test/aws_test.py::TestSession::test_export_creds_to_var_string", "aws_okta_keyman/test/aws_test.py::TestSession::test_generate_aws_console_url", "aws_okta_keyman/test/aws_test.py::TestSession::test_get_account_name_map", "aws_okta_keyman/test/aws_test.py::TestSession::test_get_account_name_map_error", "aws_okta_keyman/test/aws_test.py::TestSession::test_init_folder_exists", "aws_okta_keyman/test/aws_test.py::TestSession::test_init_folder_missing", "aws_okta_keyman/test/aws_test.py::TestSession::test_is_valid_false", "aws_okta_keyman/test/aws_test.py::TestSession::test_is_valid_false_missing_expiration", "aws_okta_keyman/test/aws_test.py::TestSession::test_is_valid_true", "aws_okta_keyman/test/aws_test.py::TestSession::test_print_creds", "aws_okta_keyman/test/aws_test.py::TestSession::test_write" ]
2020-07-15 22:16:14+00:00
4,107
nathandines__SPF2IP-3
diff --git a/SPF2IP.py b/SPF2IP.py index e6210f3..b95903e 100644 --- a/SPF2IP.py +++ b/SPF2IP.py @@ -29,14 +29,22 @@ def dns_request_unicode(hostname,record_type,*args,**kwargs): value = value.decode('utf-8') output.append(value) elif record_type == "MX": - value = entry.exchange - if type(value) is not unicode: - value = value.__str__().encode('utf-8').decode('utf-8') + try: + value = entry.exchange.decode('utf-8') + except AttributeError as err: + if err.args[0] == "'Name' object has no attribute 'decode'": + value = unicode(entry.exchange) + else: + raise output.append(value) elif record_type == "TXT": - value = ''.join([str(ent) for ent in entry.strings]) - if type(value) is not unicode: - value = value.decode('utf-8') + value_array = [] + for ent in entry.strings: + if type(ent) is not unicode: + value_array.append(ent.decode('utf-8')) + else: + value_array.append(ent) + value = ''.join(value_array) output.append(value) return output diff --git a/setup.py b/setup.py index 3b958d6..3561be0 100755 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ with open(path.join(here, 'README.rst'), encoding='utf-8') as f: setup( name='SPF2IP', - version='1.0.4', + version='1.0.5', description='Python module to get IP addresses from an SPF record', long_description=long_description,
nathandines/SPF2IP
7e3593a6f322c39a02c1c0f4a108b046ec6c1a20
diff --git a/test_SPF2IP.py b/test_SPF2IP.py index 54caff5..6e88918 100644 --- a/test_SPF2IP.py +++ b/test_SPF2IP.py @@ -125,11 +125,13 @@ dns_records = { class fakedns: def __init__(self,value,record_type): if record_type == 'TXT': - self.strings = value + self.strings = [] + for entry in value: + self.strings.append(entry.encode('utf-8')) elif record_type == 'A' or record_type == 'AAAA': - self.address = value + self.address = value.encode('utf-8') elif record_type == 'MX': - self.exchange = value + self.exchange = value.encode('utf-8') def fake_dns_resolver(hostname,record_type): try: dns_records[hostname]
Broken on 3.6

The current code always returns an empty answer on Python 3.6. This fixes it for me:

```
diff --git a/SPF2IP.py b/SPF2IP.py
index e6210f3..84683ff 100644
--- a/SPF2IP.py
+++ b/SPF2IP.py
@@ -34,7 +34,7 @@ def dns_request_unicode(hostname,record_type,*args,**kwargs):
         value = value.__str__().encode('utf-8').decode('utf-8')
       output.append(value)
     elif record_type == "TXT":
-      value = ''.join([str(ent) for ent in entry.strings])
+      value = ''.join([str(ent, encoding='ascii') for ent in entry.strings])
       if type(value) is not unicode:
         value = value.decode('utf-8')
       output.append(value)
```

I only know python3 so I can't give good advice on making code that works on both 2 and 3. But a friend of mine's package has this function that you might find useful:

```
def to_native_str(value, encoding='utf-8'):
    if isinstance(value, str):
        return value

    if six.PY3 and isinstance(value, six.binary_type): #pragma: no cover
        return value.decode(encoding)
    elif six.PY2 and isinstance(value, six.text_type): #pragma: no cover
        return value.encode(encoding)
```
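The quoted helper references `six` without importing it; below is a self-contained variant plus a usage sketch (assuming `six` is installed, and assuming, as the issue reports, that dnspython hands back `bytes` chunks on Python 3):

```
import six

def to_native_str(value, encoding='utf-8'):
    """Return `value` as the platform-native str type on both Pythons."""
    if isinstance(value, str):
        return value
    if six.PY3 and isinstance(value, six.binary_type):   # bytes on Py3
        return value.decode(encoding)
    if six.PY2 and isinstance(value, six.text_type):     # unicode on Py2
        return value.encode(encoding)
    return value

# TXT records arrive as bytes chunks on Python 3; join them safely:
chunks = [b'v=spf1 ', b'include:example.com -all']
txt = ''.join(to_native_str(c) for c in chunks)
```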
0.0
[ "test_SPF2IP.py::SPF2IPTestCases::test_included_list_is_string_list", "test_SPF2IP.py::SPF2IPTestCases::test_included_loop", "test_SPF2IP.py::SPF2IPTestCases::test_ip4_results", "test_SPF2IP.py::SPF2IPTestCases::test_ip6_results", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_ip4", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_ip6", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_external", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_external_slash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_slash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_external", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_external_slash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_slash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external_slash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_slash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_external", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_external_longslash", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_slash", "test_SPF2IP.py::SPF2IPTestCases::test_spf_list_is_string_list_with_prefix", "test_SPF2IP.py::SPF2IPTestCases::test_spf_list_split_spf" ]
[ "test_SPF2IP.py::SPF2IPTestCases::test_dns_query_method_output", "test_SPF2IP.py::SPF2IPTestCases::test_domain_without_spf_results", "test_SPF2IP.py::SPF2IPTestCases::test_included_invalid_spf", "test_SPF2IP.py::SPF2IPTestCases::test_included_without_includes", "test_SPF2IP.py::SPF2IPTestCases::test_included_without_spf", "test_SPF2IP.py::SPF2IPTestCases::test_nonexistent_domain_results", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_empty", "test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external_longslash", "test_SPF2IP.py::SPF2IPTestCases::test_spf_list_invalid_spf", "test_SPF2IP.py::SPF2IPTestCases::test_spf_list_without_spf" ]
2018-01-19 22:24:43+00:00
4,108
ncclient__ncclient-314
diff --git a/ncclient/manager.py b/ncclient/manager.py index 3618291..09a8aaf 100644 --- a/ncclient/manager.py +++ b/ncclient/manager.py @@ -89,32 +89,47 @@ def make_device_handler(device_params): return handler_obj +def _extract_device_params(kwds): + if "device_params" in kwds: + device_params = kwds["device_params"] + del kwds["device_params"] + else: + device_params = None + return device_params + +def _extract_manager_params(kwds): + if "manager_params" in kwds: + manager_params = kwds["manager_params"] + del kwds["manager_params"] + else: + manager_params = {} + return manager_params + def connect_ssh(*args, **kwds): """ Initialize a :class:`Manager` over the SSH transport. For documentation of arguments see :meth:`ncclient.transport.SSHSession.connect`. The underlying :class:`ncclient.transport.SSHSession` is created with - :data:`CAPABILITIES`. It is first instructed to - :meth:`~ncclient.transport.SSHSession.load_known_hosts` and then - all the provided arguments are passed directly to its implementation - of :meth:`~ncclient.transport.SSHSession.connect`. + :data:`CAPABILITIES`. It is first instructed to + :meth:`~ncclient.transport.SSHSession.load_known_hosts` and then + all the provided arguments are passed directly to its implementation + of :meth:`~ncclient.transport.SSHSession.connect`. - To invoke advanced vendor related operation add device_params = - {'name':'<vendor_alias>'} in connection paramerers. For the time, - 'junos' and 'nexus' are supported for Juniper and Cisco Nexus respectively. + To customize the :class:`Manager`, add a `manager_params` dictionnary in connection + parameters (e.g. `manager_params={'timeout': 60}` for a bigger RPC timeout paramater) - A custom device handler can be provided with device_params = - {'handler':<handler class>} in connection paramerers. + To invoke advanced vendor related operation add + `device_params={'name': '<vendor_alias>'}` in connection parameters. For the time, + 'junos' and 'nexus' are supported for Juniper and Cisco Nexus respectively. + + A custom device handler can be provided with + `device_params={'handler':<handler class>}` in connection parameters. """ - # Extract device parameter dict, if it was passed into this function. Need to - # remove it from kwds, since the session.connect() doesn't like extra stuff in - # there. - if "device_params" in kwds: - device_params = kwds["device_params"] - del kwds["device_params"] - else: - device_params = None + # Extract device parameter and manager parameter dictionaries, if they were passed into this function. + # Remove them from kwds (which should keep only session.connect() parameters). + device_params = _extract_device_params(kwds) + manager_params = _extract_manager_params(kwds) device_handler = make_device_handler(device_params) device_handler.add_additional_ssh_connect_params(kwds) @@ -130,17 +145,16 @@ def connect_ssh(*args, **kwds): if session.transport: session.close() raise - return Manager(session, device_handler, **kwds) + return Manager(session, device_handler, **manager_params) def connect_ioproc(*args, **kwds): - if "device_params" in kwds: - device_params = kwds["device_params"] - del kwds["device_params"] + device_params = _extract_device_params(kwds) + manager_params = _extract_manager_params(kwds) + + if device_params: import_string = 'ncclient.transport.third_party.' 
import_string += device_params['name'] + '.ioproc' third_party_import = __import__(import_string, fromlist=['IOProc']) - else: - device_params = None device_handler = make_device_handler(device_params) @@ -149,7 +163,7 @@ def connect_ioproc(*args, **kwds): session = third_party_import.IOProc(device_handler) session.connect() - return Manager(session, device_handler, **kwds) + return Manager(session, device_handler, **manager_params) def connect(*args, **kwds): @@ -184,7 +198,7 @@ class Manager(object): # __metaclass__ = OpExecutor - def __init__(self, session, device_handler, timeout=30, *args, **kwargs): + def __init__(self, session, device_handler, timeout=30): self._session = session self._async_mode = False self._timeout = timeout diff --git a/ncclient/transport/ssh.py b/ncclient/transport/ssh.py index cf911ec..00a72a5 100644 --- a/ncclient/transport/ssh.py +++ b/ncclient/transport/ssh.py @@ -349,7 +349,8 @@ class SSHSession(Session): ssh_config = "~/.ssh/config" if sys.platform != "win32" else "~/ssh/config" if ssh_config is not None: config = paramiko.SSHConfig() - config.parse(open(os.path.expanduser(ssh_config))) + with open(os.path.expanduser(ssh_config)) as ssh_config_file_obj: + config.parse(ssh_config_file_obj) # Save default Paramiko SSH port so it can be reverted paramiko_default_ssh_port = paramiko.config.SSH_PORT
ncclient/ncclient
2b75f2c6a06bd2a5d1be67b01bb65c5ffd2e2d7a
diff --git a/test/unit/test_manager.py b/test/unit/test_manager.py index 8509379..55a9859 100644 --- a/test/unit/test_manager.py +++ b/test/unit/test_manager.py @@ -11,6 +11,12 @@ class TestManager(unittest.TestCase): m = MagicMock() mock_ssh.return_value = m conn = self._mock_manager() + m.connect.assert_called_once_with(host='10.10.10.10', + port=22, + username='user', + password='password', + hostkey_verify=False, allow_agent=False, + timeout=3) self.assertEqual(conn._session, m) self.assertEqual(conn._timeout, 10) @@ -62,7 +68,7 @@ class TestManager(unittest.TestCase): username='user', password='password', timeout=10, - hostkey_verify=False, + hostkey_verify=False, allow_agent=False, ssh_config=ssh_config_path) @@ -90,9 +96,10 @@ class TestManager(unittest.TestCase): port=22, username='user', password='password', - timeout=10, + timeout=3, + hostkey_verify=False, device_params={'local': True, 'name': 'junos'}, - hostkey_verify=False) + manager_params={'timeout': 10}) self.assertEqual(mock_connect.called, 1) self.assertEqual(conn._timeout, 10) self.assertEqual(conn._device_handler.device_params, {'local': True, 'name': 'junos'}) @@ -182,9 +189,10 @@ class TestManager(unittest.TestCase): port=22, username='user', password='password', - timeout=10, + timeout=3, + hostkey_verify=False, allow_agent=False, device_params={'name': 'junos'}, - hostkey_verify=False, allow_agent=False) + manager_params={'timeout': 10}) return conn @patch('socket.fromfd')
connect_ssh timeout parameter used for conflicting purposes

The `timeout` keyword parameter of `manager.connect_ssh` ends up being used for two rather conflicting purposes:

1. It is passed as a keyword argument to `transport.SSHSession.connect`, where it serves as a connection timeout. This matches the documentation of the `manager.connect_ssh` function.
2. It is also passed as a keyword argument to `manager.Manager()`, where it serves as the synchronous RPC timeout.

If one wants to set the SSH connection timeout to a small value (e.g. 3 seconds, a sensible option for not waiting long on a host that is down), this results in an RPC timeout of 3 seconds too, which is not enough for retrieving large configurations.

I see two ways of fixing this:

* a) Discontinue the `timeout` parameter of `manager.Manager()` and rely solely on the `Manager.timeout` property for modifying its default value. This would be similar to how other `Manager` attributes are handled (e.g. `async_mode` or `raise_mode`).
* b) Rename the `transport.SSHSession.connect` timeout parameter to `connect_timeout`, and dispatch the parameters accordingly in `manager.connect_ssh`. This would however require extending the documentation of `manager.connect` to explain that it receives arguments that are used by either `transport.SSHSession.connect` (e.g. `connect_timeout` and all the others) or `Manager` (e.g. `timeout`).

IMHO option *(a)* is cleaner, as the `connect_ssh` method keeps its current behaviour of passing all non-`device_params` arguments to `transport.SSHSession.connect`. I would be happy to contribute a patch once we agree on the approach.
0.0
[ "test/unit/test_manager.py::TestManager::test_manager_connected", "test/unit/test_manager.py::TestManager::test_ssh2", "test/unit/test_manager.py::TestManager::test_manager_channel_id", "test/unit/test_manager.py::TestManager::test_ioproc", "test/unit/test_manager.py::TestManager::test_manager_channel_session_id", "test/unit/test_manager.py::TestManager::test_manager_server_capability", "test/unit/test_manager.py::TestManager::test_ssh", "test/unit/test_manager.py::TestManager::test_manager_client_capability", "test/unit/test_manager.py::TestManager::test_manager_channel_name" ]
[ "test/unit/test_manager.py::TestManager::test_outbound_manager_connected", "test/unit/test_manager.py::TestManager::test_connect_ssh_with_hostkey_ecdsa", "test/unit/test_manager.py::TestManager::test_manager_locked", "test/unit/test_manager.py::TestManager::test_connect_ioproc", "test/unit/test_manager.py::TestManager::test_connect_ssh_with_hostkey_rsa", "test/unit/test_manager.py::TestManager::test_connect_ssh_with_hostkey_ed25519", "test/unit/test_manager.py::TestManager::test_connect_outbound_ssh", "test/unit/test_manager.py::TestManager::test_make_device_handler", "test/unit/test_manager.py::TestManager::test_connect_with_ssh_config", "test/unit/test_manager.py::TestManager::test_connect_ssh", "test/unit/test_manager.py::TestManager::test_make_device_handler_provided_handler" ]
2019-05-03 14:53:15+00:00
4,109
ncclient__ncclient-420
diff --git a/examples/ericsson_nc_prefix_example.py b/examples/ericsson_nc_prefix_example.py
new file mode 100644
index 0000000..29e569a
--- /dev/null
+++ b/examples/ericsson_nc_prefix_example.py
@@ -0,0 +1,59 @@
+#! /usr/bin/env python
+#
+# Connect to the NETCONF server passed on the command line and
+# set a device_params to turn on/off the namespace prefix "nc".
+# if you want to verify the result, you can print the request that
+# was sent. For brevity and clarity of the examples, we omit proper
+# exception handling.
+#
+# $ ./ericsson_nc_prefix_example.py host username password
+
+import sys, os, warnings
+warnings.simplefilter("ignore", DeprecationWarning)
+from ncclient import manager
+
+
+def ericsson_connect(host, port, user, password, device_params):
+    return manager.connect(host=host,
+                           port=port,
+                           username=user,
+                           password=password,
+                           device_params=device_params,
+                           hostkey_verify=False)
+
+
+def enable_nc_prefix(host, user, password):
+    # add a parameter 'with_ns' to turn on/off 'nc'
+    device_params = {'name': 'ericsson', 'with_ns': True}
+    with ericsson_connect(host,
+                          port=22,
+                          user=user,
+                          password=password,
+                          device_params=device_params) as m:
+
+        ret = m.get_config(source="running").data_xml
+        print(ret)
+
+
+def disable_nc_prefix(host, user, password):
+    # add a parameter 'with_ns' to turn on/off 'nc'
+    device_params = {'name': 'ericsson', 'with_ns': False}
+    with ericsson_connect(host,
+                          port=22,
+                          user=user,
+                          password=password,
+                          device_params=device_params) as m:
+
+        ret = m.get_config(source="running").data_xml
+        print(ret)
+
+
+def demo(host, user, password):
+    enable_nc_prefix(host, user, password)
+    print("#"*50)
+    disable_nc_prefix(host, user, password)
+
+
+if __name__ == '__main__':
+    demo(sys.argv[1], sys.argv[2], sys.argv[3])
+
diff --git a/ncclient/devices/ericsson.py b/ncclient/devices/ericsson.py
new file mode 100644
index 0000000..c161526
--- /dev/null
+++ b/ncclient/devices/ericsson.py
@@ -0,0 +1,45 @@
+"""
+Handler for Ericsson device specific information.
+
+Note that for proper import, the classname has to be:
+
+    "<Devicename>DeviceHandler"
+
+...where <Devicename> is something like "Default", "Ericsson", etc.
+
+All device-specific handlers derive from the DefaultDeviceHandler, which implements the
+generic information needed for interaction with a Netconf server.
+
+"""
+from ncclient.xml_ import BASE_NS_1_0
+from ncclient.operations.errors import OperationError
+from .default import DefaultDeviceHandler
+
+
+class EricssonDeviceHandler(DefaultDeviceHandler):
+    """
+    Ericsson handler for device specific information.
+
+    """
+    _EXEMPT_ERRORS = []
+
+    def __init__(self, device_params):
+        super(EricssonDeviceHandler, self).__init__(device_params)
+
+    def get_xml_base_namespace_dict(self):
+        return {None: BASE_NS_1_0}
+
+    def get_xml_extra_prefix_kwargs(self):
+        d = {}
+        if self.check_device_params() is False:
+            d.update(self.get_xml_base_namespace_dict())
+        return {"nsmap": d}
+
+    def check_device_params(self):
+        value = self.device_params.get('with_ns')
+        if value in [True, False]:
+            return value
+        elif value is None:
+            return False
+        else:
+            raise OperationError('Invalid "with_ns" value: %s' % value)
ncclient/ncclient
3380f1140791f4a8de5d303f919f1e4cc9532f32
diff --git a/test/unit/devices/test_ericsson.py b/test/unit/devices/test_ericsson.py new file mode 100644 index 0000000..1970d5e --- /dev/null +++ b/test/unit/devices/test_ericsson.py @@ -0,0 +1,72 @@ +import unittest +from ncclient import manager +from ncclient.devices.ericsson import * +from ncclient.operations.rpc import * +from ncclient.capabilities import Capabilities +import ncclient.transport + +class TestEricssonDevice(unittest.TestCase): + + def setUp(self): + self.device_handler = manager.make_device_handler({'name': 'ericsson'}) + + def test_rpc_default(self): + # It is a switch for user to turn on/off "nc" prefix, the "nc" prefix is disable by default + session = ncclient.transport.SSHSession(self.device_handler) + obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0) + + expected = """<?xml version="1.0" encoding="UTF-8"?><rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="%s"><get-config><source><running/></source></get-config></rpc>""" % obj.id + + node = new_ele("get-config") + child = sub_ele(node, "source") + sub_ele(child, "running") + + rpc_node = obj._wrap(node) + self.assertEqual(rpc_node, expected) + + def test_rpc_disable_nc_prefix(self): + # It is a switch for user to turn on/off "nc" prefix + self.device_handler = manager.make_device_handler({'name': 'ericsson', 'with_ns': False}) + session = ncclient.transport.SSHSession(self.device_handler) + obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0) + + expected = """<?xml version="1.0" encoding="UTF-8"?><rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="%s"><get-config><source><running/></source></get-config></rpc>""" % obj.id + + node = new_ele("get-config") + child = sub_ele(node, "source") + sub_ele(child, "running") + + # It is a switch for user to turn on/off "nc" prefix + rpc_node = obj._wrap(node) + self.assertEqual(rpc_node, expected) + + def test_rpc_enable_nc_prefix(self): + # It is a switch for user to turn on/off "nc" prefix + self.device_handler = manager.make_device_handler({'name': 'ericsson', 'with_ns': True}) + session = ncclient.transport.SSHSession(self.device_handler) + obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0) + + expected = """<?xml version="1.0" encoding="UTF-8"?><nc:rpc xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="%s"><nc:get-config><nc:source><nc:running/></nc:source></nc:get-config></nc:rpc>""" % obj.id + + node = new_ele("get-config") + child = sub_ele(node, "source") + sub_ele(child, "running") + + rpc_node = obj._wrap(node) + self.assertEqual(rpc_node, expected) + + def test_rpc_enable_nc_prefix_exception(self): + # invalid value in "with_ns" + self.device_handler = manager.make_device_handler({'name': 'ericsson', 'with_ns': "Invalid_value"}) + session = ncclient.transport.SSHSession(self.device_handler) + obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0) + + node = new_ele("get-config") + child = sub_ele(node, "source") + sub_ele(child, "running") + + self.assertRaises(OperationError, obj._wrap, node) + +suite = unittest.TestSuite() +unittest.TextTestRunner().run(suite) +
XML namespace tag nc:

Hi, I don't think this is an issue with ncclient, but I can't find an option to turn off XML namespace tagging. ncclient sends this RPC:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<nc:rpc xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:cdad1414-956b-47e7-8efc-fee92888475c">
  <nc:get-config>
    <nc:source>
      <nc:running />
    </nc:source>
  </nc:get-config>
</nc:rpc>
```

But my device (Ericsson Telecom Packet Core elements) does not support XML namespace tags, and expects the request to be formatted as:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:cdad1414-956b-47e7-8efc-fee92888475c">
  <get-config>
    <source>
      <running />
    </source>
  </get-config>
</rpc>
```

How can I turn off **nc:**?
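The patch above adds an `EricssonDeviceHandler` plus a `with_ns` switch in `device_params` for exactly this. A minimal usage sketch (host and credentials are placeholders):

```python
from ncclient import manager

# device_params={'name': 'ericsson', 'with_ns': False} selects the new
# Ericsson handler and drops the "nc:" prefix from outgoing RPCs.
with manager.connect(
    host="192.0.2.1",                 # placeholder host
    port=22,
    username="user",
    password="password",
    hostkey_verify=False,
    device_params={"name": "ericsson", "with_ns": False},
) as m:
    print(m.get_config(source="running").data_xml)
```

`with_ns=True` keeps the prefixed form, and any other value raises an `OperationError`, as the unit tests above verify.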
0.0
[ "test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_default", "test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_disable_nc_prefix", "test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_enable_nc_prefix", "test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_enable_nc_prefix_exception" ]
[]
2020-08-12 12:27:05+00:00
4,110
ncclient__ncclient-485
diff --git a/ncclient/devices/default.py b/ncclient/devices/default.py index f9107b4..bac3195 100644 --- a/ncclient/devices/default.py +++ b/ncclient/devices/default.py @@ -242,16 +242,16 @@ class DefaultDeviceHandler(object): def handle_connection_exceptions(self, sshsession): return False - def handle_reply_parsing_error(self, root, reply): + def reply_parsing_error_transform(self, reply_cls): """ - Hook for working around bugs in replies from devices (the root emelent can be "fixed") + Hook for working around bugs in replies from devices (the root element can be "fixed") - :param root: the rpc reply root element - :param reply: the RPCReply object that is parsing 'root' + :param reply_cls: the RPCReply class that is parsing the reply 'root' xml element - :return: + :return: transform function for the 'root' xml element of the RPC reply in case the normal parsing fails """ - pass + # No transformation by default + return None def transform_reply(self): return False diff --git a/ncclient/devices/junos.py b/ncclient/devices/junos.py index 01e7501..d751560 100644 --- a/ncclient/devices/junos.py +++ b/ncclient/devices/junos.py @@ -39,7 +39,7 @@ class JunosDeviceHandler(DefaultDeviceHandler): def __init__(self, device_params): super(JunosDeviceHandler, self).__init__(device_params) - self.__reply_parsing_error_handler_by_cls = { + self.__reply_parsing_error_transform_by_cls = { GetSchemaReply: fix_get_schema_reply } @@ -95,13 +95,9 @@ class JunosDeviceHandler(DefaultDeviceHandler): c.exec_command("xml-mode netconf need-trailer") return True - def handle_reply_parsing_error(self, root, reply): - reply_class = type(reply) - - # Apply transform if found - transform_handler = self.__reply_parsing_error_handler_by_cls.get(reply_class) - if transform_handler is not None: - transform_handler(root) + def reply_parsing_error_transform(self, reply_cls): + # return transform function if found, else None + return self.__reply_parsing_error_transform_by_cls.get(reply_cls) def transform_reply(self): reply = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> diff --git a/ncclient/operations/rpc.py b/ncclient/operations/rpc.py index 2a6f32d..75f0839 100644 --- a/ncclient/operations/rpc.py +++ b/ncclient/operations/rpc.py @@ -144,9 +144,9 @@ class RPCReply(object): ERROR_CLS = RPCError "Subclasses can specify a different error class, but it should be a subclass of `RPCError`." - def __init__(self, raw, device_handler, huge_tree=False): + def __init__(self, raw, huge_tree=False, parsing_error_transform=None): self._raw = raw - self._device_handler = device_handler + self._parsing_error_transform = parsing_error_transform self._parsed = False self._root = None self._errors = [] @@ -171,8 +171,13 @@ class RPCReply(object): try: self._parsing_hook(root) except Exception as e: + if self._parsing_error_transform is None: + # re-raise as we have no workaround + exc_type, exc_value, exc_traceback = sys.exc_info() + six.reraise(exc_type, exc_value, exc_traceback) + # Apply device specific workaround and try again - self._device_handler.handle_reply_parsing_error(root, self) + self._parsing_error_transform(root) self._parsing_hook(root) self._parsed = True @@ -181,6 +186,9 @@ class RPCReply(object): "No-op by default. Gets passed the *root* element for the reply." pass + def set_parsing_error_transform(self, transform_function): + self._parsing_error_transform = transform_function + @property def xml(self): "*rpc-reply* element as returned." 
@@ -387,7 +395,14 @@ class RPC(object): def deliver_reply(self, raw): # internal use - self._reply = self.REPLY_CLS(raw, self._device_handler, huge_tree=self._huge_tree) + self._reply = self.REPLY_CLS(raw, huge_tree=self._huge_tree) + + # Set the reply_parsing_error transform outside the constructor, to keep compatibility for + # third party reply classes outside of ncclient + self._reply.set_parsing_error_transform( + self._device_handler.reply_parsing_error_transform(self.REPLY_CLS) + ) + self._event.set() def deliver_error(self, err):
ncclient/ncclient
7898dd9ec3404265418b05ef99e50bd670966084
diff --git a/test/unit/operations/test_rpc.py b/test/unit/operations/test_rpc.py index 3267531..249e876 100644 --- a/test/unit/operations/test_rpc.py +++ b/test/unit/operations/test_rpc.py @@ -121,7 +121,7 @@ xml7 = """<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"> class TestRPC(unittest.TestCase): def test_rpc_reply(self): - obj = RPCReply(xml4, self._mock_device_handler()) + obj = RPCReply(xml4) obj.parse() self.assertTrue(obj.ok) self.assertFalse(obj.error) @@ -129,11 +129,11 @@ class TestRPC(unittest.TestCase): self.assertTrue(obj._parsed) def test_rpc_reply_huge_text_node_exception(self): - obj = RPCReply(xml5_huge, self._mock_device_handler()) + obj = RPCReply(xml5_huge) self.assertRaises(etree.XMLSyntaxError, obj.parse) def test_rpc_reply_huge_text_node_workaround(self): - obj = RPCReply(xml5_huge, self._mock_device_handler(), huge_tree=True) + obj = RPCReply(xml5_huge, huge_tree=True) obj.parse() self.assertTrue(obj.ok) self.assertFalse(obj.error) @@ -145,7 +145,7 @@ class TestRPC(unittest.TestCase): def test_rpc_send(self, mock_thread, mock_send): device_handler, session = self._mock_device_handler_and_session() obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0) - reply = RPCReply(xml1, device_handler) + reply = RPCReply(xml1) obj._reply = reply node = new_ele("commit") sub_ele(node, "confirmed") @@ -171,7 +171,7 @@ class TestRPC(unittest.TestCase): def test_generic_rpc_send(self, mock_thread, mock_send): device_handler, session = self._mock_device_handler_and_session() obj = GenericRPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0) - reply = RPCReply(xml1, device_handler) + reply = RPCReply(xml1) obj._reply = reply rpc_command = 'edit-config' filters = ('subtree', '<top xmlns="urn:mod1"/>') @@ -206,7 +206,7 @@ class TestRPC(unittest.TestCase): raise_mode=RaiseMode.ALL, timeout=0, async_mode=True) - reply = RPCReply(xml1, device_handler) + reply = RPCReply(xml1) obj._reply = reply node = new_ele("commit") result = obj._request(node) @@ -217,7 +217,7 @@ class TestRPC(unittest.TestCase): def test_rpc_timeout_error(self, mock_thread, mock_send): device_handler, session = self._mock_device_handler_and_session() obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0) - reply = RPCReply(xml1, device_handler) + reply = RPCReply(xml1) obj.deliver_reply(reply) node = new_ele("commit") sub_ele(node, "confirmed") @@ -229,7 +229,7 @@ class TestRPC(unittest.TestCase): def test_rpc_rpcerror(self, mock_thread, mock_send): device_handler, session = self._mock_device_handler_and_session() obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0) - reply = RPCReply(xml1, device_handler) + reply = RPCReply(xml1) obj._reply = reply node = new_ele("commit") sub_ele(node, "confirmed") @@ -315,9 +315,6 @@ class TestRPC(unittest.TestCase): obj.huge_tree = False self.assertFalse(obj.huge_tree) - def _mock_device_handler(self): - return manager.make_device_handler({'name': 'default'}) - def _mock_device_handler_and_session(self): device_handler = manager.make_device_handler({'name': 'junos'}) capabilities = Capabilities(device_handler.get_capabilities())
TypeError: __init__() missing 1 required positional argument: 'device_handler'

Why was the new parameter added in the middle of the signature (and not at the end)? https://github.com/ncclient/ncclient/pull/452/files#diff-9f0edad4d5a881f4165c86a0b3a9116fe67d9b7cdda2d9a888fb98bcb09311feR147

Also, since device_handler is an optional parameter with a None value, if a user doesn't pass one, this line is bound to throw a NoneType exception: https://github.com/ncclient/ncclient/pull/452/files#diff-9f0edad4d5a881f4165c86a0b3a9116fe67d9b7cdda2d9a888fb98bcb09311feR175

```
File "/content/tmp/pyez/tests/unit/test_console.py", line 262, in test_load_console
    op = cu.load(xml, format="xml")
  File "/content/tmp/pyez/lib/jnpr/junos/utils/config.py", line 568, in load
    return try_load(rpc_contents, rpc_xattrs, ignore_warning=ignore_warning)
  File "/content/tmp/pyez/lib/jnpr/junos/utils/config.py", line 480, in try_load
    rpc_contents, ignore_warning=ignore_warning, **rpc_xattrs
  File "/content/tmp/pyez/lib/jnpr/junos/rpcmeta.py", line 288, in load_config
    return self._junos.execute(rpc, ignore_warning=ignore_warning)
  File "/content/tmp/pyez/lib/jnpr/junos/decorators.py", line 76, in wrapper
    return function(*args, **kwargs)
  File "/content/tmp/pyez/lib/jnpr/junos/decorators.py", line 31, in wrapper
    return function(*args, **kwargs)
  File "/content/tmp/pyez/lib/jnpr/junos/device.py", line 816, in execute
    filter_xml=kvargs.get("filter_xml"),
  File "/content/tmp/pyez/lib/jnpr/junos/decorators.py", line 117, in wrapper
    rsp = function(self, *args, **kwargs)
  File "/content/tmp/pyez/lib/jnpr/junos/console.py", line 279, in _rpc_reply
    reply = self._tty.nc.rpc(rpc_cmd)
  File "/content/tmp/pyez/lib/jnpr/junos/transport/tty_netconf.py", line 123, in rpc
    reply = RPCReply(rsp, huge_tree=self._tty._huge_tree)
TypeError: __init__() missing 1 required positional argument: 'device_handler'
```
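The patch above restores a `device_handler`-free constructor by moving the transform injection into `RPC.deliver_reply()`. A minimal sketch of the call pattern that third-party code such as PyEZ relies on (the raw reply string here is a made-up example):

```python
from ncclient.operations.rpc import RPCReply

raw = (
    '<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="1">'
    '<ok/></rpc-reply>'
)

# After the fix, RPCReply no longer requires a device handler; the
# parsing-error transform is injected later via set_parsing_error_transform().
reply = RPCReply(raw, huge_tree=False)
reply.parse()
print(reply.ok)  # True
```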
0.0
[ "test/unit/operations/test_rpc.py::TestRPC::test_generic_rpc_send", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_async", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_reply", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_reply_huge_text_node_exception", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_reply_huge_text_node_workaround", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_rpcerror", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_send", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_timeout_error" ]
[ "test/unit/operations/test_rpc.py::TestRPC::test_rpc_capability_error", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_huge_text_node_exception", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_huge_text_node_workaround", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_rpcerror_multiple_errors", "test/unit/operations/test_rpc.py::TestRPC::test_rpc_rpcerror_tag_to_attr" ]
2021-03-16 19:02:38+00:00
4,111
nens__threedi-modelchecker-11
diff --git a/CHANGES.rst b/CHANGES.rst
index ffa7937..6811dc4 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -5,7 +5,13 @@ Changelog of threedi-modelchecker
 0.8 (unreleased)
 ----------------
 
-- Nothing changed yet.
+- Removed threedigrid from requirements.
+
+- Configured extra checks: Pumpstation.lower_stop_level > Manhole.bottom_level.
+
+- Configured extra checks: Pipe.invert_level >= Manhole.bottom_level.
+
+- Added additional check type: QueryCheck.
 
 
 0.7 (2019-07-18)
diff --git a/requirements.txt b/requirements.txt
index b1441aa..d5ce1cd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,4 @@
 GeoAlchemy2==0.6.1
 SQLAlchemy==1.3.1
-threedigrid==1.0.10
 Click==7.0
 psycopg2
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 494f0e1..7f260cc 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,6 @@ install_requires = [
     "Click",
     "GeoAlchemy2>=0.6",
     "SQLAlchemy>=0.8",
-    "threedigrid",
 ]
 
 tests_require = [
diff --git a/threedi_modelchecker/checks/base.py b/threedi_modelchecker/checks/base.py
index 37fdce2..addf0f0 100644
--- a/threedi_modelchecker/checks/base.py
+++ b/threedi_modelchecker/checks/base.py
@@ -76,7 +76,7 @@ class GeneralCheck(BaseCheck):
     Either specify what is valid with `criterion_valid` or what is invalid
     with `criterion_invalid`.
 
-    The criterion should be a sqlalchemy.sql.expression.BinaryExpression (https://docs.sqlalchemy.org/en/13/core/sqlelement.html#sqlalchemy.sql.expression.BinaryExpression) # noqa
+    The criterion should be a sqlalchemy.sql.expression.BinaryExpression (https://docs.sqlalchemy.org/en/13/core/sqlelement.html#sqlalchemy.sql.expression.BinaryExpression) # noqa
     with operators being operators being column within `self.table.columns`
     """
 
@@ -143,6 +143,25 @@ class ConditionalCheck(BaseCheck):
         )
 
 
+class QueryCheck(BaseCheck):
+    """Specify a sqlalchemy.orm.Query object to return invalid instances
+
+    Provides more freedom than the GeneralCheck where you need to specify a
+    sqlalchemy.sql.expression.BinaryExpression. For example, QueryCheck allows joins
+    on multiple tables"""
+
+    def __init__(self, column, invalid, message):
+        super().__init__(column)
+        self.invalid = invalid
+        self.message = message
+
+    def get_invalid(self, session):
+        return list(self.invalid.with_session(session))
+
+    def description(self):
+        return self.message
+
+
 class ForeignKeyCheck(BaseCheck):
     """Check all values in `column` are in `reference_column`.
diff --git a/threedi_modelchecker/config.py b/threedi_modelchecker/config.py index e4c0908..e98d55e 100644 --- a/threedi_modelchecker/config.py +++ b/threedi_modelchecker/config.py @@ -2,8 +2,9 @@ from sqlalchemy import Integer from sqlalchemy import and_ from sqlalchemy import cast from sqlalchemy import or_ +from sqlalchemy.orm import Query -from .checks.base import ConditionalCheck +from .checks.base import ConditionalCheck, QueryCheck from .checks.base import GeneralCheck from .checks.base import NotNullCheck from .checks.factories import generate_enum_checks @@ -172,6 +173,22 @@ RANGE_CHECKS = [ column=models.Weir.friction_value, criterion_valid=models.Weir.friction_value >= 0, ), + GeneralCheck( + column=models.Manhole.bottom_level, + criterion_valid=models.Manhole.bottom_level >= models.Manhole.surface_level, + ), + GeneralCheck( + column=models.Manhole.bottom_level, + criterion_valid=models.Manhole.bottom_level >= models.Manhole.drain_level, + ), + GeneralCheck( + column=models.GlobalSetting.maximum_sim_time_step, + criterion_valid=models.GlobalSetting.maximum_sim_time_step >= models.GlobalSetting.sim_time_step, # noqa: E501 + ), + GeneralCheck( + column=models.GlobalSetting.sim_time_step, + criterion_valid=models.GlobalSetting.sim_time_step >= models.GlobalSetting.minimum_sim_time_step, # noqa: E501 + ), ] OTHER_CHECKS = [ @@ -321,6 +338,58 @@ CONDITIONAL_CHECKS = [ ]) ) ), + QueryCheck( + column=models.Pumpstation.lower_stop_level, + invalid=Query(models.Pumpstation).join( + models.ConnectionNode, + models.Pumpstation.connection_node_start_id == models.ConnectionNode.id + ).join( + models.Manhole + ).filter( + models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level, + ), + message="Pumpstation.lower_stop_level should be higher than " + "Manhole.bottom_level" + ), + QueryCheck( + column=models.Pumpstation.lower_stop_level, + invalid=Query(models.Pumpstation).join( + models.ConnectionNode, + models.Pumpstation.connection_node_end_id == models.ConnectionNode.id + ).join( + models.Manhole + ).filter( + models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level, + ), + message="Pumpstation.lower_stop_level should be higher than " + "Manhole.bottom_level" + ), + QueryCheck( + column=models.Pipe.invert_level_end_point, + invalid=Query(models.Pipe).join( + models.ConnectionNode, + models.Pipe.connection_node_end_id == models.ConnectionNode.id + ).join( + models.Manhole + ).filter( + models.Pipe.invert_level_end_point < models.Manhole.bottom_level, + ), + message="Pipe.invert_level_end_point should be higher or equal than " + "Manhole.bottom_level" + ), + QueryCheck( + column=models.Pipe.invert_level_start_point, + invalid=Query(models.Pipe).join( + models.ConnectionNode, + models.Pipe.connection_node_start_id == models.ConnectionNode.id + ).join( + models.Manhole + ).filter( + models.Pipe.invert_level_start_point < models.Manhole.bottom_level, # noqa: E501 + ), + message="Pipe.invert_level_start_point should be higher or equal than " + "Manhole.bottom_level" + ) ] diff --git a/threedi_modelchecker/threedi_model/models.py b/threedi_modelchecker/threedi_model/models.py index 63ce372..8d27a29 100644 --- a/threedi_modelchecker/threedi_model/models.py +++ b/threedi_modelchecker/threedi_model/models.py @@ -325,9 +325,9 @@ class Manhole(Base): shape = Column(String(4)) width = Column(Float) length = Column(Float) - surface_level = Column(Float) - bottom_level = Column(Float) - drain_level = Column(Float) + surface_level = Column(Float, nullable=False) + bottom_level = 
Column(Float, nullable=False) + drain_level = Column(Float, nullable=False) sediment_level = Column(Float) manhole_indicator = Column(Integer) calculation_type = Column(IntegerEnum(constants.CalculationTypeNode))
nens/threedi-modelchecker
e4ed25acb9b2256c31dcd6e1410c9276517f24ac
diff --git a/tests/test_checks_base.py b/tests/test_checks_base.py index f23d8e2..c9a8814 100644 --- a/tests/test_checks_base.py +++ b/tests/test_checks_base.py @@ -1,14 +1,18 @@ import factory import pytest -from sqlalchemy import cast +from sqlalchemy import cast, and_ from sqlalchemy import func from sqlalchemy import Integer +from sqlalchemy.orm import Query from tests import factories -from threedi_modelchecker.checks.base import EnumCheck, ConditionalCheck, GeneralCheck +from threedi_modelchecker.checks.base import ConditionalCheck +from threedi_modelchecker.checks.base import EnumCheck +from threedi_modelchecker.checks.base import GeneralCheck from threedi_modelchecker.checks.base import ForeignKeyCheck from threedi_modelchecker.checks.base import GeometryCheck from threedi_modelchecker.checks.base import GeometryTypeCheck +from threedi_modelchecker.checks.base import QueryCheck from threedi_modelchecker.checks.base import NotNullCheck from threedi_modelchecker.checks.base import TypeCheck from threedi_modelchecker.checks.base import UniqueCheck @@ -413,6 +417,106 @@ def test_conditional_check_advanced(session): assert invalids[0].storage_area == connection_node2.storage_area +def test_conditional_check_joining_criterion_valid(session): + # Joining on criterion valid fails because it takes the complement (negation) + # of the joins (instead of only the where statement (joins are in the where + # statement)). + connection_node1 = factories.ConnectionNodeFactory() + connection_node2 = factories.ConnectionNodeFactory() + manhole1 = factories.ManholeFactory( + connection_node=connection_node1, bottom_level=1.0 + ) + factories.ManholeFactory( + connection_node=connection_node2, bottom_level=-1.0 + ) + factories.PumpstationFactory( + connection_node_start=connection_node1, lower_stop_level=0.0 + ) + factories.PumpstationFactory( + connection_node_start=connection_node2, lower_stop_level=2.0 + ) + + check_lower_stop_level_gt_bottom_level_compliment = GeneralCheck( + column=models.Manhole.bottom_level, + criterion_valid=and_( + models.Pumpstation.connection_node_start_id == models.ConnectionNode.id, + models.Manhole.connection_node_id == models.ConnectionNode.id, + models.Pumpstation.lower_stop_level > models.Manhole.bottom_level, + ), + ) + invalids = check_lower_stop_level_gt_bottom_level_compliment.get_invalid(session) + assert len(invalids) != 1 # Note that 1 is what we actually want! 
+ assert invalids[0].id == manhole1.id + + +def test_query_check_with_joins(session): + connection_node1 = factories.ConnectionNodeFactory() + connection_node2 = factories.ConnectionNodeFactory() + factories.ManholeFactory( + connection_node=connection_node1, bottom_level=1.0 + ) + factories.ManholeFactory( + connection_node=connection_node2, bottom_level=-1.0 + ) + factories.PumpstationFactory( + connection_node_start=connection_node1, lower_stop_level=0.0 + ) + factories.PumpstationFactory( + connection_node_start=connection_node2, lower_stop_level=2.0 + ) + + query = Query(models.ConnectionNode).join( + models.Pumpstation.connection_node_start + ).join( + models.Manhole, models.ConnectionNode.id == models.Manhole.connection_node_id + ).filter( + models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level, + ) + check = QueryCheck( + column=models.Manhole.bottom_level, + invalid=query, + message="Pumpstation.lower_stop_level should be higher than " + "Manhole.bottom_level" + ) + invalids = check.get_invalid(session) + assert len(invalids) == 1 + assert invalids[0].id == connection_node1.id + + +def test_query_check_on_pumpstation(session): + connection_node1 = factories.ConnectionNodeFactory() + connection_node2 = factories.ConnectionNodeFactory() + factories.ManholeFactory( + connection_node=connection_node1, bottom_level=1.0 + ) + factories.ManholeFactory( + connection_node=connection_node2, bottom_level=-1.0 + ) + pumpstation_wrong = factories.PumpstationFactory( + connection_node_start=connection_node1, lower_stop_level=0.0 + ) + factories.PumpstationFactory( + connection_node_start=connection_node2, lower_stop_level=2.0 + ) + + query = Query(models.Pumpstation).join( + models.ConnectionNode, models.Pumpstation.connection_node_start_id == models.ConnectionNode.id # noqa: E501 + ).join( + models.Manhole, models.Manhole.connection_node_id == models.ConnectionNode.id + ).filter( + models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level, + ) + check = QueryCheck( + column=models.Pumpstation.lower_stop_level, + invalid=query, + message="Pumpstation lower_stop_level should be higher than Manhole " + "bottom_level" + ) + invalids = check.get_invalid(session) + assert len(invalids) == 1 + assert invalids[0].id == pumpstation_wrong.id + + def test_get_valid(session): factories.ConnectionNodeFactory(storage_area=1) factories.ConnectionNodeFactory(storage_area=2) diff --git a/tests/test_checks_factories.py b/tests/test_checks_factories.py index 64326d8..828076f 100644 --- a/tests/test_checks_factories.py +++ b/tests/test_checks_factories.py @@ -22,7 +22,7 @@ def test_gen_not_unique_checks(): def test_gen_not_null_checks(): not_null_checks = generate_not_null_checks(models.Manhole.__table__) - assert len(not_null_checks) == 4 + assert len(not_null_checks) == 7 not_null_check_columns = [check.column for check in not_null_checks] assert models.Manhole.id in not_null_check_columns assert models.Manhole.code in not_null_check_columns
Add additional checks

The following fields of v2_manhole should not be null:
- bottom level
- surface level
- drain level

**Some extra checks:**
- manhole.bottom_level > manhole.surface_level
- manhole.bottom_level > manhole.drain_level
- global_settings.max_timestep >= global_settings.timestep
- global_settings.timestep >= global_settings.min_timestep

warning: manhole.surface_level < manhole.drain_level

**Cross-table reference checks (might be more difficult):**
- pumpstation.lower_stop_level > manhole.bottom_level
- pipe.invert_level >= manhole.bottom_level

warning: weir.crest_level > manhole.bottom_level
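The cross-table checks are what motivated the new `QueryCheck` in the patch above, since a `GeneralCheck` criterion cannot safely express joins (see `test_conditional_check_joining_criterion_valid`). A sketch of one such check, mirroring the configured pumpstation rule (`session` is assumed to be an open SQLAlchemy session):

```python
from sqlalchemy.orm import Query
from threedi_modelchecker.checks.base import QueryCheck
from threedi_modelchecker.threedi_model import models

# Cross-table rule: a pumpstation's lower_stop_level must lie above the
# bottom_level of the manhole at its start connection node.
check = QueryCheck(
    column=models.Pumpstation.lower_stop_level,
    invalid=Query(models.Pumpstation).join(
        models.ConnectionNode,
        models.Pumpstation.connection_node_start_id == models.ConnectionNode.id,
    ).join(
        models.Manhole,
    ).filter(
        models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
    ),
    message="Pumpstation.lower_stop_level should be higher than "
            "Manhole.bottom_level",
)
invalid_rows = check.get_invalid(session)  # rows violating the rule
```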
0.0
[ "tests/test_checks_base.py::test_sqlalchemy_to_sqlite_type_with_custom_type", "tests/test_checks_factories.py::test_gen_foreign_key_checks", "tests/test_checks_factories.py::test_gen_not_unique_checks", "tests/test_checks_factories.py::test_gen_not_null_checks", "tests/test_checks_factories.py::test_gen_geometry_check", "tests/test_checks_factories.py::test_gen_enum_checks", "tests/test_checks_factories.py::test_gen_enum_checks_varcharenum" ]
[]
2019-09-04 08:27:07+00:00
4,112
nens__threedi-modelchecker-17
diff --git a/CHANGES.rst b/CHANGES.rst index a893a3e..8890c52 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -5,7 +5,7 @@ Changelog of threedi-modelchecker 0.9 (unreleased) ---------------- -- Nothing changed yet. +- Fixed some misconfigured checks, see https://github.com/nens/threedi-modelchecker/issues/10. 0.8 (2019-11-26) diff --git a/threedi_modelchecker/config.py b/threedi_modelchecker/config.py index e98d55e..d51cce5 100644 --- a/threedi_modelchecker/config.py +++ b/threedi_modelchecker/config.py @@ -173,14 +173,6 @@ RANGE_CHECKS = [ column=models.Weir.friction_value, criterion_valid=models.Weir.friction_value >= 0, ), - GeneralCheck( - column=models.Manhole.bottom_level, - criterion_valid=models.Manhole.bottom_level >= models.Manhole.surface_level, - ), - GeneralCheck( - column=models.Manhole.bottom_level, - criterion_valid=models.Manhole.bottom_level >= models.Manhole.drain_level, - ), GeneralCheck( column=models.GlobalSetting.maximum_sim_time_step, criterion_valid=models.GlobalSetting.maximum_sim_time_step >= models.GlobalSetting.sim_time_step, # noqa: E501 @@ -210,6 +202,7 @@ OTHER_CHECKS = [ Use0DFlowCheck() ] + CONDITIONAL_CHECKS = [ ConditionalCheck( criterion=(models.ConnectionNode.id == models.Manhole.connection_node_id), @@ -226,12 +219,6 @@ CONDITIONAL_CHECKS = [ < models.CrossSectionLocation.bank_level) ) ), - ConditionalCheck( - criterion=(models.GlobalSetting.timestep_plus == True), - check=NotNullCheck( - column=models.GlobalSetting.maximum_sim_time_step, - ) - ), ConditionalCheck( criterion=or_( models.GlobalSetting.initial_groundwater_level_file != None, @@ -374,7 +361,7 @@ CONDITIONAL_CHECKS = [ ).filter( models.Pipe.invert_level_end_point < models.Manhole.bottom_level, ), - message="Pipe.invert_level_end_point should be higher or equal than " + message="Pipe.invert_level_end_point should be higher than or equal to " "Manhole.bottom_level" ), QueryCheck( @@ -387,9 +374,36 @@ CONDITIONAL_CHECKS = [ ).filter( models.Pipe.invert_level_start_point < models.Manhole.bottom_level, # noqa: E501 ), - message="Pipe.invert_level_start_point should be higher or equal than " + message="Pipe.invert_level_start_point should be higher than or equal to " "Manhole.bottom_level" - ) + ), + QueryCheck( + column=models.Manhole.bottom_level, + invalid=Query(models.Manhole).filter( + models.Manhole.drain_level < models.Manhole.bottom_level, + models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED + ), + message="Manhole.drain_level >= Manhole.bottom_level when " + "Manhole.calculation_type is CONNECTED" + ), + QueryCheck( + column=models.Manhole.drain_level, + invalid=Query(models.Manhole).filter( + models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED, + models.Manhole.drain_level == None + ), + message="Manhole.drain_level cannot be null when Manhole.calculation_type is " + "CONNECTED" + ), + QueryCheck( + column=models.GlobalSetting.maximum_sim_time_step, + invalid=Query(models.GlobalSetting).filter( + models.GlobalSetting.timestep_plus == True, + models.GlobalSetting.maximum_sim_time_step == None + ), + message="GlobalSettings.maximum_sim_time_step cannot be null when " + "GlobalSettings.timestep_plus is True." 
+ ), ] diff --git a/threedi_modelchecker/threedi_model/models.py b/threedi_modelchecker/threedi_model/models.py index 284b1d4..342c9de 100644 --- a/threedi_modelchecker/threedi_model/models.py +++ b/threedi_modelchecker/threedi_model/models.py @@ -325,9 +325,9 @@ class Manhole(Base): shape = Column(String(4)) width = Column(Float) length = Column(Float) - surface_level = Column(Float, nullable=False) + surface_level = Column(Float) bottom_level = Column(Float, nullable=False) - drain_level = Column(Float, nullable=False) + drain_level = Column(Float) sediment_level = Column(Float) manhole_indicator = Column(Integer) calculation_type = Column(IntegerEnum(constants.CalculationTypeNode))
nens/threedi-modelchecker
5828888ad9d692b30db626f004c41be04537389f
diff --git a/tests/test_checks_base.py b/tests/test_checks_base.py
index c9a8814..11cfae2 100644
--- a/tests/test_checks_base.py
+++ b/tests/test_checks_base.py
@@ -599,3 +599,56 @@ def test_general_check_modulo_operator(session):
     invalid = modulo_check.get_invalid(session)
     assert len(invalid) == 1
     assert invalid[0].id == global_settings_remainder.id
+
+
+def test_query_check_manhole_drain_level_calc_type_2(session):
+    # manhole.drain_level can be null, but if manhole.calculation_type == 2 (Connected)
+    # then manhole.drain_level >= manhole.bottom_level
+    factories.ManholeFactory(drain_level=None)
+    factories.ManholeFactory(drain_level=1)
+    m3_error = factories.ManholeFactory(
+        drain_level=None,
+        calculation_type=constants.CalculationTypeNode.CONNECTED
+    )  # drain_level cannot be null when calculation_type is CONNECTED
+    m4_error = factories.ManholeFactory(
+        drain_level=1,
+        bottom_level=2,
+        calculation_type=constants.CalculationTypeNode.CONNECTED
+    )  # bottom_level >= drain_level when calculation_type is CONNECTED
+    factories.ManholeFactory(
+        drain_level=1,
+        bottom_level=0,
+        calculation_type=constants.CalculationTypeNode.CONNECTED
+    )
+    factories.ManholeFactory(
+        drain_level=None,
+        bottom_level=0,
+        calculation_type=constants.CalculationTypeNode.EMBEDDED
+    )
+
+    query_drn_lvl_st_bttm_lvl = Query(models.Manhole).filter(
+        models.Manhole.drain_level < models.Manhole.bottom_level,
+        models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED
+    )
+    query_invalid_not_null = Query(models.Manhole).filter(
+        models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED,
+        models.Manhole.drain_level == None
+    )
+    check_drn_lvl_gt_bttm_lvl = QueryCheck(
+        column=models.Manhole.bottom_level,
+        invalid=query_drn_lvl_st_bttm_lvl,
+        message="Manhole.drain_level >= Manhole.bottom_level when "
+                "Manhole.calculation_type is CONNECTED"
+    )
+    check_invalid_not_null = QueryCheck(
+        column=models.Manhole.drain_level,
+        invalid=query_invalid_not_null,
+        message="Manhole.drain_level cannot be null when Manhole.calculation_type is "
+                "CONNECTED"
+    )
+    errors1 = check_drn_lvl_gt_bttm_lvl.get_invalid(session)
+    errors2 = check_invalid_not_null.get_invalid(session)
+    assert len(errors1) == 1
+    assert len(errors2) == 1
+    assert m3_error.id == errors2[0].id
+    assert m4_error.id == errors1[0].id
diff --git a/tests/test_checks_factories.py b/tests/test_checks_factories.py
index 828076f..4086a31 100644
--- a/tests/test_checks_factories.py
+++ b/tests/test_checks_factories.py
@@ -22,7 +22,7 @@ def test_gen_not_null_checks():
     not_null_checks = generate_not_null_checks(models.Manhole.__table__)
-    assert len(not_null_checks) == 7
+    assert len(not_null_checks) == 5
     not_null_check_columns = [check.column for check in not_null_checks]
     assert models.Manhole.id in not_null_check_columns
     assert models.Manhole.code in not_null_check_columns
Add additional checks

The following fields of v2_manhole should not be null:
- bottom level
- surface level
- drain level

**Some extra checks:**
- manhole.bottom_level > manhole.surface_level
- manhole.bottom_level > manhole.drain_level
- global_settings.max_timestep >= global_settings.timestep
- global_settings.timestep >= global_settings.min_timestep

warning: manhole.surface_level < manhole.drain_level

**Cross-table reference checks (might be more difficult):**
- pumpstation.lower_stop_level > manhole.bottom_level
- pipe.invert_level >= manhole.bottom_level

warning: weir.crest_level > manhole.bottom_level
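This second round of changes relaxes the earlier blanket not-null constraints (only `bottom_level` stays required) and reformulates the conditional rules as `QueryCheck`s. A sketch of the drain-level rule the patch configures (`session` is assumed to be an open SQLAlchemy session):

```python
from sqlalchemy.orm import Query
from threedi_modelchecker.checks.base import QueryCheck
from threedi_modelchecker.threedi_model import constants, models

# drain_level may be null in general, but not when the manhole's
# calculation_type is CONNECTED.
check = QueryCheck(
    column=models.Manhole.drain_level,
    invalid=Query(models.Manhole).filter(
        models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED,
        models.Manhole.drain_level == None,  # noqa: E711, compiles to SQL 'IS NULL'
    ),
    message="Manhole.drain_level cannot be null when Manhole.calculation_type "
            "is CONNECTED",
)
invalid_rows = check.get_invalid(session)
```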
0.0
[ "tests/test_checks_factories.py::test_gen_not_null_checks" ]
[ "tests/test_checks_base.py::test_sqlalchemy_to_sqlite_type_with_custom_type", "tests/test_checks_factories.py::test_gen_foreign_key_checks", "tests/test_checks_factories.py::test_gen_not_unique_checks", "tests/test_checks_factories.py::test_gen_geometry_check", "tests/test_checks_factories.py::test_gen_enum_checks", "tests/test_checks_factories.py::test_gen_enum_checks_varcharenum" ]
2019-11-26 13:38:37+00:00
4,113
neo4j__neo4j-python-driver-498
diff --git a/neo4j/spatial/__init__.py b/neo4j/spatial/__init__.py index f7085fd4..36e8d66f 100644 --- a/neo4j/spatial/__init__.py +++ b/neo4j/spatial/__init__.py @@ -53,7 +53,7 @@ class Point(tuple): srid = None def __new__(cls, iterable): - return tuple.__new__(cls, iterable) + return tuple.__new__(cls, map(float, iterable)) def __repr__(self): return "POINT(%s)" % " ".join(map(str, self))
neo4j/neo4j-python-driver
48b989955e85ff718da4e604f029275a860ab0cb
diff --git a/tests/unit/spatial/__init__.py b/tests/unit/spatial/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/spatial/test_cartesian_point.py b/tests/unit/spatial/test_cartesian_point.py new file mode 100644 index 00000000..ee86e5b9 --- /dev/null +++ b/tests/unit/spatial/test_cartesian_point.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright (c) "Neo4j" +# Neo4j Sweden AB [http://neo4j.com] +# +# This file is part of Neo4j. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import struct +from unittest import TestCase + +from neo4j.data import DataDehydrator +from neo4j.packstream import Packer +from neo4j.spatial import CartesianPoint + + +class CartesianPointTestCase(TestCase): + + def test_alias(self): + x, y, z = 3.2, 4.0, -1.2 + p = CartesianPoint((x, y, z)) + self.assert_(hasattr(p, "x")) + self.assertEqual(p.x, x) + self.assert_(hasattr(p, "y")) + self.assertEqual(p.y, y) + self.assert_(hasattr(p, "z")) + self.assertEqual(p.z, z) + + def test_dehydration_3d(self): + coordinates = (1, -2, 3.1) + p = CartesianPoint(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB4Y" + + b"\xC9" + struct.pack(">h", 9157) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) + + def test_dehydration_2d(self): + coordinates = (.1, 0) + p = CartesianPoint(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB3X" + + b"\xC9" + struct.pack(">h", 7203) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) diff --git a/tests/unit/spatial/test_point.py b/tests/unit/spatial/test_point.py new file mode 100644 index 00000000..082f95c5 --- /dev/null +++ b/tests/unit/spatial/test_point.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright (c) "Neo4j" +# Neo4j Sweden AB [http://neo4j.com] +# +# This file is part of Neo4j. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import io +import struct +from unittest import TestCase + +from neo4j.data import DataDehydrator +from neo4j.packstream import Packer +from neo4j.spatial import ( + Point, + point_type, +) + + +class PointTestCase(TestCase): + + def test_wrong_type_arguments(self): + for argument in (("a", "b"), ({"x": 1.0, "y": 2.0})): + with self.subTest(): + with self.assertRaises(ValueError): + Point(argument) + + def test_number_arguments(self): + for argument in ((1, 2), (1.2, 2.1)): + with self.subTest(): + p = Point(argument) + assert tuple(p) == argument + + def test_dehydration(self): + MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234}) + coordinates = (.1, 0) + p = MyPoint(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB3X" + + b"\xC9" + struct.pack(">h", 1234) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) + + def test_immutable_coordinates(self): + MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234}) + coordinates = (.1, 0) + p = MyPoint(coordinates) + with self.assertRaises(AttributeError): + p.x = 2.0 + with self.assertRaises(AttributeError): + p.y = 2.0 + with self.assertRaises(TypeError): + p[0] = 2.0 + with self.assertRaises(TypeError): + p[1] = 2.0 diff --git a/tests/unit/spatial/test_wgs84_point.py b/tests/unit/spatial/test_wgs84_point.py new file mode 100644 index 00000000..8f725a58 --- /dev/null +++ b/tests/unit/spatial/test_wgs84_point.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright (c) "Neo4j" +# Neo4j Sweden AB [http://neo4j.com] +# +# This file is part of Neo4j. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import struct +from unittest import TestCase + +from neo4j.data import DataDehydrator +from neo4j.packstream import Packer +from neo4j.spatial import WGS84Point + + +class WGS84PointTestCase(TestCase): + + def test_alias(self): + x, y, z = 3.2, 4.0, -1.2 + p = WGS84Point((x, y, z)) + self.assert_(hasattr(p, "longitude")) + self.assertEqual(p.longitude, x) + self.assert_(hasattr(p, "latitude")) + self.assertEqual(p.latitude, y) + self.assert_(hasattr(p, "height")) + self.assertEqual(p.height, z) + + def test_dehydration_3d(self): + coordinates = (1, -2, 3.1) + p = WGS84Point(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB4Y" + + b"\xC9" + struct.pack(">h", 4979) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) + + def test_dehydration_2d(self): + coordinates = (.1, 0) + p = WGS84Point(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB3X" + + b"\xC9" + struct.pack(">h", 4326) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + )
Crash when using spatial type as parameters

Hi folks! I can't seem to get the spatial type working, just getting a crash (stack trace attached).

- Neo4j version: Enterprise 4.2.1
- Neo4j Mode: Single instance
- Python 3.9
- Driver version: driver 4.2.1
- Operating system: Ubuntu 20.04
- **Steps to reproduce**: run the following piece of code:

```
import neo4j
print(neo4j.__version__)
from neo4j.spatial import WGS84Point
from neo4j import GraphDatabase, basic_auth

driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", ""), encrypted=False)
point = WGS84Point(dict(x=2, y=48))
cypher = "WITH $point as point RETURN [point.x, point.y]"
params = {"point": point}
with driver.session() as session:
    session.run(cypher, params)
```

- Expected behavior: the query returns [2, 48] (or a meaningful error message in case of misuse)
- Actual behavior: `neo4j.exceptions.ServiceUnavailable: Failed to read from defunct connection IPv4Address(('localhost', 7687)) (IPv4Address(('127.0.0.1', 7687)))`

Full stack trace is attached. [neo_spatial.log](https://github.com/neo4j/neo4j-python-driver/files/5968103/neo_spatial.log)

Also, the only error in the server logs is:

```
2021-02-11 18:00:06.353+0000 ERROR Client triggered an unexpected error [Neo.DatabaseError.General.UnknownError]: Unknown statement ID: -1. Existing IDs: [], reference 1c40d17c-1c03-4b40-b422-ceeef7fc49ec.
```

- Additional information:
  - Same error with CartesianPoint
  - Doing similar things with DateTime works well (and basically, anything else is working just fine :) )
  - I haven't found an example using spatial types in the docs, but the example is taken from https://github.com/neo4j/neo4j-python-driver/blob/4.3/tests/integration/test_spatial_types.py (I think)
  - Replacing the query with `MATCH (n:Node) RETURN n.point` returns a `neo4j.spatial.WGS84Point` as expected
  - Same error as https://github.com/neo4j/neo4j-python-driver/issues/484, though I'm not sure the root cause is the same.
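The one-line fix above coerces every coordinate to `float` at construction time, so bad input fails fast with a `ValueError` instead of killing the connection during packing. The root cause in the reproduction script is that `Point` subclasses take an iterable of coordinates, and iterating `dict(x=2, y=48)` yields the keys `"x"` and `"y"`, not the numbers. A minimal sketch of the intended usage:

```python
from neo4j.spatial import WGS84Point

# WGS84Point takes a (longitude, latitude[, height]) iterable, not a mapping.
# WGS84Point(dict(x=2, y=48)) iterates the dict's *keys* ("x", "y"), which
# float() rejects; after the fix this raises ValueError immediately.
point = WGS84Point((2.0, 48.0))
print(point.longitude, point.latitude)  # 2.0 48.0
```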
0.0
[ "tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_2d", "tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_3d", "tests/unit/spatial/test_point.py::PointTestCase::test_dehydration", "tests/unit/spatial/test_point.py::PointTestCase::test_wrong_type_arguments", "tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_2d", "tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_3d" ]
[ "tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_alias", "tests/unit/spatial/test_point.py::PointTestCase::test_immutable_coordinates", "tests/unit/spatial/test_point.py::PointTestCase::test_number_arguments", "tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_alias" ]
2021-02-15 13:41:35+00:00
4,114
neo4j__neo4j-python-driver-510
diff --git a/neo4j/spatial/__init__.py b/neo4j/spatial/__init__.py index f7085fd4..36e8d66f 100644 --- a/neo4j/spatial/__init__.py +++ b/neo4j/spatial/__init__.py @@ -53,7 +53,7 @@ class Point(tuple): srid = None def __new__(cls, iterable): - return tuple.__new__(cls, iterable) + return tuple.__new__(cls, map(float, iterable)) def __repr__(self): return "POINT(%s)" % " ".join(map(str, self)) diff --git a/neo4j/work/simple.py b/neo4j/work/simple.py index ef1e6d8c..0ee46e7b 100644 --- a/neo4j/work/simple.py +++ b/neo4j/work/simple.py @@ -370,7 +370,7 @@ class Session(Workspace): with driver.session() as session: values = session.read_transaction(get_two_tx) - :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, \*args, \*\*kwargs)` + :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, *args, **kwargs)` :param args: arguments for the `transaction_function` :param kwargs: key word arguments for the `transaction_function` :return: a result as returned by the given unit of work @@ -395,7 +395,7 @@ class Session(Workspace): node_id = session.write_transaction(create_node_tx, "example") - :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, \*args, \*\*kwargs)` + :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, *args, **kwargs)` :param args: key word arguments for the `transaction_function` :param kwargs: key word arguments for the `transaction_function` :return: a result as returned by the given unit of work
neo4j/neo4j-python-driver
48b989955e85ff718da4e604f029275a860ab0cb
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index fd5a4a7a..952fa0c2 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -189,12 +189,8 @@ def service(request): if existing_service: NEO4J_SERVICE = existing_service else: - try: - NEO4J_SERVICE = Neo4jService(auth=NEO4J_AUTH, image=request.param, n_cores=NEO4J_CORES, n_replicas=NEO4J_REPLICAS) - NEO4J_SERVICE.start(timeout=300) - except urllib.error.HTTPError as error: - # pytest.skip(str(error)) - pytest.xfail(str(error) + " " + request.param) + NEO4J_SERVICE = Neo4jService(auth=NEO4J_AUTH, image=request.param, n_cores=NEO4J_CORES, n_replicas=NEO4J_REPLICAS) + NEO4J_SERVICE.start(timeout=300) yield NEO4J_SERVICE if NEO4J_SERVICE is not None: NEO4J_SERVICE.stop(timeout=300) diff --git a/tests/integration/examples/test_driver_introduction_example.py b/tests/integration/examples/test_driver_introduction_example.py index 5d592680..2496a27a 100644 --- a/tests/integration/examples/test_driver_introduction_example.py +++ b/tests/integration/examples/test_driver_introduction_example.py @@ -33,7 +33,7 @@ from neo4j.exceptions import ServiceUnavailable from neo4j._exceptions import BoltHandshakeError -# python -m pytest tests/integration/examples/test_aura_example.py -s -v +# python -m pytest tests/integration/examples/test_driver_introduction_example.py -s -v # tag::driver-introduction-example[] class App: @@ -91,10 +91,9 @@ class App: if __name__ == "__main__": - # Aura queries use an encrypted connection using the "neo4j+s" URI scheme bolt_url = "%%BOLT_URL_PLACEHOLDER%%" - user = "<Username for Neo4j Aura database>" - password = "<Password for Neo4j Aura database>" + user = "<Username for database>" + password = "<Password for database>" app = App(bolt_url, user, password) app.create_friendship("Alice", "David") app.find_person("Alice") diff --git a/tests/unit/spatial/__init__.py b/tests/unit/spatial/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/spatial/test_cartesian_point.py b/tests/unit/spatial/test_cartesian_point.py new file mode 100644 index 00000000..ee86e5b9 --- /dev/null +++ b/tests/unit/spatial/test_cartesian_point.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright (c) "Neo4j" +# Neo4j Sweden AB [http://neo4j.com] +# +# This file is part of Neo4j. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import io +import struct +from unittest import TestCase + +from neo4j.data import DataDehydrator +from neo4j.packstream import Packer +from neo4j.spatial import CartesianPoint + + +class CartesianPointTestCase(TestCase): + + def test_alias(self): + x, y, z = 3.2, 4.0, -1.2 + p = CartesianPoint((x, y, z)) + self.assert_(hasattr(p, "x")) + self.assertEqual(p.x, x) + self.assert_(hasattr(p, "y")) + self.assertEqual(p.y, y) + self.assert_(hasattr(p, "z")) + self.assertEqual(p.z, z) + + def test_dehydration_3d(self): + coordinates = (1, -2, 3.1) + p = CartesianPoint(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB4Y" + + b"\xC9" + struct.pack(">h", 9157) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) + + def test_dehydration_2d(self): + coordinates = (.1, 0) + p = CartesianPoint(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB3X" + + b"\xC9" + struct.pack(">h", 7203) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) diff --git a/tests/unit/spatial/test_point.py b/tests/unit/spatial/test_point.py new file mode 100644 index 00000000..082f95c5 --- /dev/null +++ b/tests/unit/spatial/test_point.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright (c) "Neo4j" +# Neo4j Sweden AB [http://neo4j.com] +# +# This file is part of Neo4j. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import io +import struct +from unittest import TestCase + +from neo4j.data import DataDehydrator +from neo4j.packstream import Packer +from neo4j.spatial import ( + Point, + point_type, +) + + +class PointTestCase(TestCase): + + def test_wrong_type_arguments(self): + for argument in (("a", "b"), ({"x": 1.0, "y": 2.0})): + with self.subTest(): + with self.assertRaises(ValueError): + Point(argument) + + def test_number_arguments(self): + for argument in ((1, 2), (1.2, 2.1)): + with self.subTest(): + p = Point(argument) + assert tuple(p) == argument + + def test_dehydration(self): + MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234}) + coordinates = (.1, 0) + p = MyPoint(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB3X" + + b"\xC9" + struct.pack(">h", 1234) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) + + def test_immutable_coordinates(self): + MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234}) + coordinates = (.1, 0) + p = MyPoint(coordinates) + with self.assertRaises(AttributeError): + p.x = 2.0 + with self.assertRaises(AttributeError): + p.y = 2.0 + with self.assertRaises(TypeError): + p[0] = 2.0 + with self.assertRaises(TypeError): + p[1] = 2.0 diff --git a/tests/unit/spatial/test_wgs84_point.py b/tests/unit/spatial/test_wgs84_point.py new file mode 100644 index 00000000..8f725a58 --- /dev/null +++ b/tests/unit/spatial/test_wgs84_point.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright (c) "Neo4j" +# Neo4j Sweden AB [http://neo4j.com] +# +# This file is part of Neo4j. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import struct +from unittest import TestCase + +from neo4j.data import DataDehydrator +from neo4j.packstream import Packer +from neo4j.spatial import WGS84Point + + +class WGS84PointTestCase(TestCase): + + def test_alias(self): + x, y, z = 3.2, 4.0, -1.2 + p = WGS84Point((x, y, z)) + self.assert_(hasattr(p, "longitude")) + self.assertEqual(p.longitude, x) + self.assert_(hasattr(p, "latitude")) + self.assertEqual(p.latitude, y) + self.assert_(hasattr(p, "height")) + self.assertEqual(p.height, z) + + def test_dehydration_3d(self): + coordinates = (1, -2, 3.1) + p = WGS84Point(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB4Y" + + b"\xC9" + struct.pack(">h", 4979) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + ) + + def test_dehydration_2d(self): + coordinates = (.1, 0) + p = WGS84Point(coordinates) + + dehydrator = DataDehydrator() + buffer = io.BytesIO() + packer = Packer(buffer) + packer.pack(dehydrator.dehydrate((p,))[0]) + self.assertEqual( + buffer.getvalue(), + b"\xB3X" + + b"\xC9" + struct.pack(">h", 4326) + + b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates)) + )
Prevent DeprecationWarnings for users

## Users get DeprecationWarnings

- Operating system: Windows 10

Install and run [nox](https://nox.thea.codes/en/stable/) on the attached archive [example_python.zip](https://github.com/neo4j/neo4j-python-driver/files/5842343/example_python.zip)

- Expected behavior: pylint should analyze the code and report user errors
- Actual behavior: pylint reports issues in the neo4j driver as well!

```
================================================= warnings summary =================================================
<unknown>:339
<unknown>:339: DeprecationWarning: invalid escape sequence \*
<unknown>:381
<unknown>:381: DeprecationWarning: invalid escape sequence \*
-- Docs: https://docs.pytest.org/en/stable/warnings.html
============================================= short test summary info ==============================================
FAILED example.py::PYLINT
```
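For context on where the warning comes from: a normal (non-raw) string literal containing an unrecognized escape such as `\*` makes CPython emit `DeprecationWarning: invalid escape sequence` at compile time (a `SyntaxWarning` on newer versions), which is exactly what the patch removes from the two docstrings. A small illustrative sketch, with the docstring text taken from the patch and everything else assumed:

```python
import warnings

# Compiling source whose string literal contains "\*" triggers the warning.
source = '"""tx_function(tx, \\*args, \\*\\*kwargs)"""'
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile(source, "<docstring>", "exec")
print([str(w.message) for w in caught])

# Two warning-free spellings: drop the backslashes (what the patch does),
# or use a raw string so the backslash stays literal.
plain = "tx_function(tx, *args, **kwargs)"
raw = r"tx_function(tx, \*args, \*\*kwargs)"
```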
0.0
[ "tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_2d", "tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_3d", "tests/unit/spatial/test_point.py::PointTestCase::test_dehydration", "tests/unit/spatial/test_point.py::PointTestCase::test_wrong_type_arguments", "tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_2d", "tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_3d" ]
[ "tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_alias", "tests/unit/spatial/test_point.py::PointTestCase::test_immutable_coordinates", "tests/unit/spatial/test_point.py::PointTestCase::test_number_arguments", "tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_alias" ]
2021-03-02 09:23:44+00:00
4,115
neo4j__neo4j-python-driver-803
diff --git a/neo4j/io/__init__.py b/neo4j/io/__init__.py index fb71d5ef..862895d7 100644 --- a/neo4j/io/__init__.py +++ b/neo4j/io/__init__.py @@ -756,9 +756,9 @@ class IOPool: + self.connections_reservations[address]) can_create_new_connection = (infinite_pool_size or pool_size < max_pool_size) - self.connections_reservations[address] += 1 - if can_create_new_connection: - return connection_creator + if can_create_new_connection: + self.connections_reservations[address] += 1 + return connection_creator def _acquire(self, address, deadline):
neo4j/neo4j-python-driver
2ecc4902b6047627fb5bd00c8efa1461d96bd9b2
diff --git a/tests/unit/io/test_neo4j_pool.py b/tests/unit/io/test_neo4j_pool.py index 5853ceac..73087534 100644 --- a/tests/unit/io/test_neo4j_pool.py +++ b/tests/unit/io/test_neo4j_pool.py @@ -35,6 +35,7 @@ from neo4j.conf import ( RoutingConfig, WorkspaceConfig ) +from neo4j._deadline import Deadline from neo4j.exceptions import ( ServiceUnavailable, SessionExpired @@ -271,3 +272,29 @@ def test_failing_opener_leaves_connections_in_use_alone(opener): pool.acquire(READ_ACCESS, 30, 60, "test_db", None) assert not cx1.closed() + + +def test__acquire_new_later_with_room(opener): + config = PoolConfig() + config.max_connection_pool_size = 1 + pool = Neo4jPool( + opener, config, WorkspaceConfig(), ROUTER_ADDRESS + ) + assert pool.connections_reservations[READER_ADDRESS] == 0 + creator = pool._acquire_new_later(READER_ADDRESS, Deadline(1)) + assert pool.connections_reservations[READER_ADDRESS] == 1 + assert callable(creator) + + +def test__acquire_new_later_without_room(opener): + config = PoolConfig() + config.max_connection_pool_size = 1 + pool = Neo4jPool( + opener, config, WorkspaceConfig(), ROUTER_ADDRESS + ) + _ = pool.acquire(READ_ACCESS, 30, 60, "test_db", None) + # pool is full now + assert pool.connections_reservations[READER_ADDRESS] == 0 + creator = pool._acquire_new_later(READER_ADDRESS, Deadline(1)) + assert pool.connections_reservations[READER_ADDRESS] == 0 + assert creator is None
Async driver keeping connections from pool open?

I encountered a new issue with the async driver version 5.0.0a2. The error states:

ERROR in app: Exception on request POST /recommend_contact
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1489, in handle_request
    return await self.full_dispatch_request(request_context)
  File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1514, in full_dispatch_request
    result = await self.handle_user_exception(error)
  File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 964, in handle_user_exception
    raise error
  File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1512, in full_dispatch_request
    result = await self.dispatch_request(request_context)
  File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1557, in dispatch_request
    return await self.ensure_async(handler)(**request_.view_args)
  File "/usr/local/lib/python3.7/site-packages/quart/views.py", line 57, in view
    return await current_app.ensure_async(self.dispatch_request)(*args, **kwargs)
  File "/usr/local/lib/python3.7/site-packages/quart_openapi/resource.py", line 71, in dispatch_request
    return await handler(*args, **kwargs)
  File "/api/app.py", line 119, in post
    recommendations = await asyncio.gather(*tasks)
  File "/api/app.py", line 108, in recommendation_task
    el['offsite'])
  File "/usr/local/lib/python3.7/site-packages/recommendation_engine-0.0.1-py3.7.egg/recommendation_engine/recommendation/recommender.py", line 32, in get_popular_contact
    ret = await self.graph_handler.run_query_async(graph_query, contact_id=contact_id, p=p, offsite=offsite)
  File "/usr/local/lib/python3.7/site-packages/recommendation_engine-0.0.1-py3.7.egg/recommendation_engine/graph_handler.py", line 636, in run_query_async
    return await session.read_transaction(self.run_transaction, query, **kwargs)
  File "/usr/local/lib/python3.7/site-packages/neo4j/_meta.py", line 73, in inner
    return await f(*args, **kwargs)
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 656, in read_transaction
    READ_ACCESS, transaction_function, *args, **kwargs
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 478, in _run_transaction
    timeout=timeout
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 392, in _open_transaction
    await self._connect(access_mode=access_mode)
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 122, in _connect
    await super()._connect(access_mode, **access_kwargs)
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/workspace.py", line 194, in _connect
    self._connection = await self._pool.acquire(**acquire_kwargs_)
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/io/_pool.py", line 403, in acquire
    self.address, deadline, liveness_check_timeout
  File "/usr/local/lib/python3.7/site-packages/neo4j/_async/io/_pool.py", line 216, in _acquire
    "{!r}s (timeout)".format(deadline.original_timeout)
neo4j.exceptions.ClientError: {code: None} {message: None}

I suspect the issue is connections being kept alive, or being kept alive for a prolonged period of time: when I checked "/usr/local/lib/python3.7/site-packages/neo4j/_async/io/_pool.py" I noticed the error is thrown when the pool is full and there are no available connections. Also, the code and message are shown as None, so I am not sure whether that is a bug as well. We are running Neo4j Version 4.4.3 Community edition as a single instance.
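The one-line fix above moves the reservation increment inside the capacity check. A minimal, self-contained sketch (hypothetical class and names, not the driver's code) of why the original ordering leaked reservations until every acquire timed out against a seemingly full pool:

```python
from collections import defaultdict

class PoolSketch:
    """Hypothetical stand-in for the pool's reservation bookkeeping."""

    def __init__(self, max_pool_size):
        self.max_pool_size = max_pool_size
        self.connections = defaultdict(list)
        self.connections_reservations = defaultdict(int)

    def _acquire_new_later(self, address):
        pool_size = (len(self.connections[address])
                     + self.connections_reservations[address])
        if pool_size < self.max_pool_size:
            # Fixed ordering: reserve only when a creator is handed out.
            self.connections_reservations[address] += 1
            return lambda: "new connection"   # stand-in for the real creator
        # Before the fix the counter was incremented even on this path,
        # so a full pool accumulated phantom reservations forever.
        return None

pool = PoolSketch(max_pool_size=1)
pool.connections["example:7687"].append("in-use connection")  # pool is full
assert pool._acquire_new_later("example:7687") is None
assert pool.connections_reservations["example:7687"] == 0     # no leak
```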
0.0
[ "tests/unit/io/test_neo4j_pool.py::test__acquire_new_later_without_room" ]
[ "tests/unit/io/test_neo4j_pool.py::test_acquires_new_routing_table_if_deleted", "tests/unit/io/test_neo4j_pool.py::test_acquires_new_routing_table_if_stale", "tests/unit/io/test_neo4j_pool.py::test_removes_old_routing_table", "tests/unit/io/test_neo4j_pool.py::test_chooses_right_connection_type[r]", "tests/unit/io/test_neo4j_pool.py::test_chooses_right_connection_type[w]", "tests/unit/io/test_neo4j_pool.py::test_reuses_connection", "tests/unit/io/test_neo4j_pool.py::test_closes_stale_connections[True]", "tests/unit/io/test_neo4j_pool.py::test_closes_stale_connections[False]", "tests/unit/io/test_neo4j_pool.py::test_does_not_close_stale_connections_in_use", "tests/unit/io/test_neo4j_pool.py::test_release_resets_connections", "tests/unit/io/test_neo4j_pool.py::test_release_does_not_resets_closed_connections", "tests/unit/io/test_neo4j_pool.py::test_release_does_not_resets_defunct_connections", "tests/unit/io/test_neo4j_pool.py::test_multiple_broken_connections_on_close", "tests/unit/io/test_neo4j_pool.py::test_failing_opener_leaves_connections_in_use_alone", "tests/unit/io/test_neo4j_pool.py::test__acquire_new_later_with_room" ]
2022-09-16 12:35:00+00:00
4,116
neogeny__TatSu-121
diff --git a/tatsu/contexts.py b/tatsu/contexts.py index 29024cb..8baf4df 100644 --- a/tatsu/contexts.py +++ b/tatsu/contexts.py @@ -557,7 +557,7 @@ class ParseContext(object): self._results[key] = result initial = self._pos - lastpos = initial + lastpos = initial - 1 while True: try: self._clear_recursion_errors()
neogeny/TatSu
bd7fea0ba31234ca49f6ce0b401677849483f329
diff --git a/test/grammar/left_recursion_test.py b/test/grammar/left_recursion_test.py index 6a64e5c..6c46c0c 100644 --- a/test/grammar/left_recursion_test.py +++ b/test/grammar/left_recursion_test.py @@ -535,7 +535,6 @@ class LeftRecursionTests(unittest.TestCase): assert ['a', ['a', 'a']] == parse(right_grammar, 'aaa') - @unittest.skip('bug in calculation of nullable') def test_nullable_void(self): left_grammar = ''' @@left_recursion :: True
Void doesn't end recursion

```python
def test_nullable_void(self):
    left_grammar = '''
        @@left_recursion :: True
        @@nameguard :: False

        start = A $ ;
        A =
            | A 'a'
            | ()
            ;
    '''
    assert [['a', 'a'], 'a'] == parse(left_grammar, 'aaa')  # warning: infinite recursion
```
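The fix itself is a one-character change: the left-recursion loop's `lastpos` now starts at `initial - 1` instead of `initial`. As a rough, generic sketch of PEG seed growing (explicitly not TatSu's actual loop): the loop keeps re-parsing while the match advances, and starting `lastpos` one position early lets a zero-length match from the nullable alternative register as progress exactly once, so `A = A 'a' | ()` can bottom out instead of recursing forever.

```python
# Generic seed-growing sketch under the stated assumption that
# parse_once() re-runs the rule body against the current memoized seed
# and returns (ast, new_position).
def grow_seed(parse_once, initial_pos):
    lastpos = initial_pos - 1        # the patched starting value
    best = None
    while True:
        ast, pos = parse_once()
        if pos <= lastpos:           # no further progress: keep the best seed
            return best
        best, lastpos = ast, pos     # a zero-length match (pos == initial_pos)
                                     # still counts the first time through
```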
0.0
[ "test/grammar/left_recursion_test.py::LeftRecursionTests::test_nullable_void" ]
[ "test/grammar/left_recursion_test.py::LeftRecursionTests::test_associativity", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_calc", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_calc_indirect", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_change_start_rule", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_direct_left_recursion", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_dropped_input_bug", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion_complex", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion_with_cut", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_bug", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_with_right_associativity", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_leftrec_with_void", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_nested_left_recursion", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_no_left_recursion", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_not_at_top_level", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_partial_input_bug", "test/grammar/left_recursion_test.py::LeftRecursionTests::test_with_gather" ]
2019-04-26 12:52:45+00:00
4,117
neogeny__TatSu-183
diff --git a/.pylintrc b/.pylintrc index 75a1593..280bbe7 100644 --- a/.pylintrc +++ b/.pylintrc @@ -10,7 +10,7 @@ extension-pkg-whitelist= # Add files or directories to the blacklist. They should be base names, not # paths. -ignore=CVS,bootstrap.py,model.py +ignore=CVS,bootstrap.py,model.py,tmp # Add files or directories matching the regex patterns to the blacklist. The # regex matches against base names, not paths. diff --git a/README.md b/README.md index e16689f..a3df3b6 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ [![license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/neogeny/tatsu/master/LICENSE.txt) [![pyversions](https://img.shields.io/pypi/pyversions/tatsu.svg)](https://pypi.python.org/pypi/tatsu) [![fury](https://badge.fury.io/py/TatSu.svg)](https://badge.fury.io/py/TatSu) [![circleci](https://circleci.com/gh/neogeny/TatSu.svg?style=shield)](https://circleci.com/gh/neogeny/TatSu) [![docs](https://readthedocs.org/projects/tatsu/badge/?version=stable)](http://tatsu.readthedocs.io/en/stable/) > *At least for the people who send me mail about a new language that they're designing, the general advice is: do it to learn about how to write a compiler. Don't have any expectations that anyone will use it, unless you hook up with some sort of organization in a position to push it hard. It's a lottery, and some can buy a lot of the tickets. There are plenty of beautiful languages (more beautiful than C) that didn't catch on. But someone does win the lottery, and doing a language at least teaches you something.* -> +> > [Dennis Ritchie](http://en.wikipedia.org/wiki/Dennis_Ritchie) (1941-2011) Creator of the [C](http://en.wikipedia.org/wiki/C_language) programming language and of [Unix](http://en.wikipedia.org/wiki/Unix) # 竜 **TatSu** @@ -44,21 +44,21 @@ $ pip install TatSu 竜 **TatSu** can be used as a library, much like [Python](http://python.org)'s `re`, by embedding grammars as strings and generating grammar models instead of generating [Python](http://python.org) code. - - `tatsu.compile(grammar, name=None, **kwargs)` - +- `tatsu.compile(grammar, name=None, **kwargs)` + Compiles the grammar and generates a *model* that can subsequently be used for parsing input with. - - `tatsu.parse(grammar, input, **kwargs)` - +- `tatsu.parse(grammar, input, **kwargs)` + Compiles the grammar and parses the given input producing an [AST](http://en.wikipedia.org/wiki/Abstract_syntax_tree) as result. The result is equivalent to calling: - + model = compile(grammar) ast = model.parse(input) - + Compiled grammars are cached for efficiency. - - `tatsu.to_python_sourcecode(grammar, name=None, filename=None, **kwargs)` - +- `tatsu.to_python_sourcecode(grammar, name=None, filename=None, **kwargs)` + Compiles the grammar to the [Python](http://python.org) sourcecode that implements the parser. This is an example of how to use 竜 **TatSu** as a library: @@ -150,7 +150,7 @@ For a detailed explanation of what 竜 **TatSu** is capable of, please see the [ ## Questions? -Please use the [\[tatsu\]](https://stackoverflow.com/tags/tatsu/info) tag on [StackOverflow](http://stackoverflow.com/tags/tatsu/info) for general Q\&A, and limit Github issues to bugs, enhancement proposals, and feature requests. +Please use the [\[tatsu\]](https://stackoverflow.com/tags/tatsu/info) tag on [StackOverflow](http://stackoverflow.com/tags/tatsu/info) for general Q&A, and limit Github issues to bugs, enhancement proposals, and feature requests. 
## Changes diff --git a/tatsu/contexts.py b/tatsu/contexts.py index a5af681..d40cf77 100644 --- a/tatsu/contexts.py +++ b/tatsu/contexts.py @@ -205,6 +205,7 @@ class ParseContext(object): semantics=None, trace=False, whitespace=None, + ignorecase=None, **kwargs): try: self.parseinfo = kwargs.pop('parseinfo', self.parseinfo) @@ -215,6 +216,7 @@ class ParseContext(object): semantics=semantics, trace=trace if trace is not None else self.trace, whitespace=whitespace if whitespace is not None else self.whitespace, + ignorecase=ignorecase, **kwargs ) rule = self._find_rule(rule_name) @@ -848,7 +850,7 @@ class ParseContext(object): def _check_name(self, name=None): if name is None: name = str(self.last_node) - if self.ignorecase or self._tokenizer.ignorecase: + if self.ignorecase or self.tokenizer.ignorecase: name = name.upper() if name in self.keywords: raise FailedKeywordSemantics('"%s" is a reserved word' % name) diff --git a/tatsu/grammars.py b/tatsu/grammars.py index 8a90a8d..bb61bec 100644 --- a/tatsu/grammars.py +++ b/tatsu/grammars.py @@ -954,6 +954,7 @@ class Grammar(Model): semantics=None, filename='Unknown', whitespace=None, + ignorecase=None, nameguard=None, namechars=None, left_recursion=None, @@ -983,6 +984,10 @@ class Grammar(Model): whitespace = directives.get('whitespace') self.whitespace = whitespace + if ignorecase is None: + ignorecase = directives.get('ignorecase') + self.ignorecase = ignorecase + if nameguard is None: nameguard = directives.get('nameguard') self.nameguard = nameguard @@ -1008,6 +1013,8 @@ class Grammar(Model): self.eol_comments_re = eol_comments_re self.keywords = keywords or set() + if ignorecase: + self.keywords = {k.upper() for k in self.keywords} self._adopt_children(rules) @@ -1082,6 +1089,7 @@ class Grammar(Model): trace=False, context=None, whitespace=None, + ignorecase=None, left_recursion=None, comments_re=None, eol_comments_re=None, @@ -1106,6 +1114,7 @@ class Grammar(Model): nameguard = notnone(nameguard, self.nameguard) namechars = notnone(namechars, self.namechars) whitespace = notnone(whitespace, self.whitespace) + ignorecase = notnone(ignorecase, self.ignorecase) if whitespace: whitespace = re.compile(whitespace) @@ -1116,6 +1125,7 @@ class Grammar(Model): semantics=semantics, trace=trace, whitespace=whitespace, + ignorecase=ignorecase, comments_re=comments_re, eol_comments_re=eol_comments_re, left_recursion=left_recursion, diff --git a/tox.ini b/tox.ini index f26f242..372bb77 100644 --- a/tox.ini +++ b/tox.ini @@ -26,7 +26,7 @@ deps = [flake8] ignore = N802, W504, W605 max-line-length = 200 -max-complexity = 10 +max-complexity = 16 exclude = parsers, docs,
neogeny/TatSu
7e3a58843d8a70b6dddaf70dfbcb2abaecad07cb
diff --git a/test/grammar/keyword_test.py b/test/grammar/keyword_test.py index d8574da..da65540 100644 --- a/test/grammar/keyword_test.py +++ b/test/grammar/keyword_test.py @@ -132,3 +132,29 @@ class KeywordTests(unittest.TestCase): self.fail('accepted keyword "%s" as name' % k) except FailedParse as e: self.assertTrue('"%s" is a reserved word' % k in str(e)) + + def test_ignorecase_keywords(self): + grammar = ''' + @@ignorecase :: True + @@keyword :: if + + start = rule ; + + @name + rule = @:word if_exp $ ; + + if_exp = 'if' digit ; + + word = /\w+/ ; + digit = /\d/ ; + ''' + + model = compile(grammar, 'test') + + model.parse('nonIF if 1', trace=True) + + with self.assertRaises(FailedParse): + model.parse('i rf if 1', trace=True) + + with self.assertRaises(FailedParse): + model.parse('IF if 1', trace=True) diff --git a/test/parser_equivalence_test.py b/test/parser_equivalence_test.py index dacaf60..337b608 100644 --- a/test/parser_equivalence_test.py +++ b/test/parser_equivalence_test.py @@ -3,6 +3,8 @@ import subprocess # noqa import py_compile # noqa from pathlib import Path +import pytest + from tatsu.tool import compile, gencode INPUT = """ @@ -38,11 +40,13 @@ GRAMMAR = """ ; """ + def test_model_parse(): model = compile(grammar=GRAMMAR) assert OUTPUT == model.parse(INPUT) [email protected]('work in progress') def test_codegen_parse(): init_filename = Path('./tmp/__init__.py') init_filename.touch(exist_ok=True) @@ -65,7 +69,7 @@ def test_codegen_parse(): # } # ).decode() # print(output) - from tmp.parser import UnknownParser + from tmp.parser import UnknownParser # pylint: disable=all output = UnknownParser().parse(INPUT) assert output == OUTPUT finally:
Ignorecase needs uppercase keywords when reserving

Reserving keywords and ignoring case does not work as expected:

```
@@ignorecase :: True
@@keyword :: if

@name
rule = /\w+/ if_exp $ ;
if_exp = 'if' /\d/ ;
```

**The 'if' is parsed as the rule while it should be an error.**

This is because in [contexts.py](https://github.com/neogeny/TatSu/blob/master/tatsu/contexts.py), either `upper` or `lower` should be applied to both the name and the keywords:

```
def _check_name(self, name=None):
    if name is None:
        name = str(self.last_node)
    if self.ignorecase or self._tokenizer.ignorecase:
        name = name.upper()
    if name in self.keywords:
        raise FailedKeywordSemantics('"%s" is a reserved word' % name)
```
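A minimal sketch of the fix the issue asks for, normalizing both sides of the comparison (a standalone function, not TatSu's code; the merged patch instead uppercases the keyword set once when the grammar is built):

```python
def check_name(name, keywords, ignorecase):
    """Reject reserved words, optionally ignoring case on both sides."""
    if ignorecase:
        name = name.upper()
        keywords = {k.upper() for k in keywords}
    if name in keywords:
        raise ValueError('"%s" is a reserved word' % name)

check_name("nonIF", {"if"}, ignorecase=True)      # accepted
try:
    check_name("IF", {"if"}, ignorecase=True)     # rejected in either case
except ValueError as exc:
    print(exc)
```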
0.0
[ "test/grammar/keyword_test.py::KeywordTests::test_ignorecase_keywords" ]
[ "test/grammar/keyword_test.py::KeywordTests::test_check_keywords", "test/grammar/keyword_test.py::KeywordTests::test_check_unicode_name", "test/grammar/keyword_test.py::KeywordTests::test_define_keywords", "test/grammar/keyword_test.py::KeywordTests::test_keywords_in_rule_names", "test/grammar/keyword_test.py::KeywordTests::test_python_keywords_in_rule_names", "test/grammar/keyword_test.py::KeywordTests::test_sparse_keywords", "test/parser_equivalence_test.py::test_model_parse" ]
2021-03-20 18:34:21+00:00
4,118
neogeny__TatSu-309
diff --git a/README.rst b/README.rst index eb57f92..683c25c 100644 --- a/README.rst +++ b/README.rst @@ -32,12 +32,10 @@ input, much like the `re`_ module does with regular expressions, or it can gener |TatSu| supports `left-recursive`_ rules in PEG_ grammars using the algorithm_ by *Laurent* and *Mens*. The generated AST_ has the expected left associativity. -Starting with version 5.9.0 |TatSu| requires Python 3.11 or later. -While no code in |TatSu| yet depends on new language or standard library features, +|TatSu| requires a maintained version of Python (3.11 at the moment). While no code +in |TatSu| yet depends on new language or standard library features, the authors don't want to be constrained by Python version comaptibility consideration when developing features that will be part future releases. -Therefore, to simplify version pinning for users of the library, -they decided to proactively bump the Python minimum required version to 3.10. |TatSu| releases in the 5.7 series closely track releases in the 5.8 series while maintaining compatibility with Python 3.8 and later. diff --git a/tatsu/contexts.py b/tatsu/contexts.py index 06458c4..f5d0f96 100644 --- a/tatsu/contexts.py +++ b/tatsu/contexts.py @@ -569,6 +569,7 @@ class ParseContext: @property def memokey(self): + self.tokenizer.eat_whitespace() return MemoKey(self._pos, self.rule, self.substate) def _memoize(self, key, memo): diff --git a/tatsu/tokenizing.py b/tatsu/tokenizing.py index 918f082..bc7077f 100644 --- a/tatsu/tokenizing.py +++ b/tatsu/tokenizing.py @@ -40,6 +40,9 @@ class Tokenizer: def token(self): return self.current + def eat_whitespace(self): + raise NotImplementedError + def next(self): raise NotImplementedError
neogeny/TatSu
51d9f70b4cf068142e719ebeabaeab766e0a9b92
diff --git a/test/parsing_test.py b/test/parsing_test.py index 70b79e3..5c8a5a9 100644 --- a/test/parsing_test.py +++ b/test/parsing_test.py @@ -1,9 +1,10 @@ # -*- coding: utf-8 -*- +import json import unittest import tempfile import tatsu -from tatsu.util import trim, eval_escapes +from tatsu.util import trim, eval_escapes, asjson from tatsu.grammars import EBNFBuffer @@ -115,6 +116,20 @@ class ParsingTests(unittest.TestCase): model = tatsu.compile(grammar=grammar) model.parse('4 + 5') + def test_skip_whitespace(self): + grammar = ''' + statement = 'FOO' subject $ ; + subject = name:id ; + id = /[a-z]+/ ; + ''' + model = tatsu.compile(grammar=grammar) + ast = model.parse('FOO' + ' ' * 3 + 'bar', parseinfo=True) + print(json.dumps(asjson(ast), indent=2)) + subject = ast[1] + assert subject['name'] == 'bar' + parseinfo = subject['parseinfo'] + assert parseinfo.pos == parseinfo.tokenizer.text.index('bar') + def suite(): return unittest.TestLoader().loadTestsFromTestCase(ParsingTests)
Surprising whitespace handling

Given this example

```python
import tatsu

parser = tatsu.compile(r'''
    statement = 'SELECT' 'FROM' table $ ;
    table = name:id ;
    id = /[a-z]+/ ;
''')

string = 'SELECT FROM foo'
value = parser.parse(string, parseinfo=True)
table = value[2]
assert table['name'] == 'foo'
parseinfo = table['parseinfo']
print(parseinfo.tokenizer.text)
print(f'{parseinfo.pos * " "}^')
```

I find the whitespace handling surprising. The whitespace between `FROM` and the table name is not skipped over before matching the `table` rule. This results in correct parsing but a wrong `parseinfo` position for the `table` rule. I would have expected whitespace to be skipped before attempting to match the `table` rule.
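With the patch above, the tokenizer eats whitespace before the rule's memo key (and hence its `parseinfo`) is built. A small check of the behaviour the patch establishes, reusing the grammar from the issue (requires a TatSu build that includes the fix; the extra spaces just make the offset visible):

```python
import tatsu

model = tatsu.compile(r'''
    statement = 'SELECT' 'FROM' table $ ;
    table = name:id ;
    id = /[a-z]+/ ;
''')
text = 'SELECT FROM   foo'
value = model.parse(text, parseinfo=True)
table = value[2]
parseinfo = table['parseinfo']
# With whitespace eaten before the memo key is built, pos points at the
# token itself rather than at the whitespace in front of it.
assert parseinfo.pos == text.index('foo')
```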
0.0
[ "test/parsing_test.py::ParsingTests::test_skip_whitespace" ]
[ "test/parsing_test.py::ParsingTests::test_escape_sequences", "test/parsing_test.py::ParsingTests::test_include", "test/parsing_test.py::ParsingTests::test_multiple_include", "test/parsing_test.py::ParsingTests::test_real_include", "test/parsing_test.py::ParsingTests::test_rule_capitalization", "test/parsing_test.py::ParsingTests::test_start", "test/parsing_test.py::ParsingTests::test_startrule_issue62" ]
2023-08-29 17:00:07+00:00
4,119
neovasili__tflens-34
diff --git a/README.md b/README.md index 4c1fad7..3b089dd 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,8 @@ optional arguments: according to the following pattern: bucket-name/tfstate-key -m FILTER_MODULE, --filter-module FILTER_MODULE Applies a regular expression to the module field in order to filter the resources list to output + -n FILTER_NAME, --filter-name FILTER_NAME + Applies a regular expression to the name field in order to filter the resources list to output ``` ### Examples @@ -93,6 +95,16 @@ View table of resources for a tfstate located in the file system in the director View filtered table of resources for a tfstate located in the file system in the directory: +```bash +➜ tflens --filter-name "current" + +| provider | type | mode | name | module | +|--------------|---------------------|---------|-------------------------------|--------| +| provider.aws | aws_caller_identity | data | current_user | test | +``` + +Or: + ```bash ➜ tflens --filter-module "test" @@ -101,6 +113,16 @@ View filtered table of resources for a tfstate located in the file system in the | provider.aws | aws_caller_identity | data | current_user | test | ``` +Or: + +```bash +➜ tflens --filter-name "current" --filter-module "test" + +| provider | type | mode | name | module | +|--------------|---------------------|---------|-------------------------------|--------| +| provider.aws | aws_caller_identity | data | current_user | test | +``` + View table of resources for a tfstate located in the file system in the `dev/terraform.tfstate.json` path: ```bash diff --git a/tflens/__main__.py b/tflens/__main__.py index 45b72cf..8991726 100644 --- a/tflens/__main__.py +++ b/tflens/__main__.py @@ -43,12 +43,21 @@ parser.add_argument('-m', '--filter-module', filter the resources list to output", default="") +parser.add_argument('-n', '--filter-name', + type=str, + action="store", + dest="filter_name", + help="Applies a regular expression to the name field in order to \ + filter the resources list to output", + default="") + args = parser.parse_args() ARGS_REMOTE = args.remote ARGS_FILE_LOCATION = args.file_location ARGS_OUTPUT = args.output ARGS_FILTER_MODULE = args.filter_module +ARGS_FILTER_NAME = args.filter_name if not ARGS_FILE_LOCATION: ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute()) @@ -61,6 +70,7 @@ def main(): tfstate_controller = remote_router[ARGS_REMOTE]( file_location=ARGS_FILE_LOCATION, + name_filter_expression=ARGS_FILTER_NAME, module_filter_expression=ARGS_FILTER_MODULE ) diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py index f4a58ec..7f0fec6 100644 --- a/tflens/controller/tfstate.py +++ b/tflens/controller/tfstate.py @@ -11,11 +11,17 @@ from tflens.helper.filter import TfStateFilterHelper class TfStateController(): - def __init__(self, tfstate_content: dict, module_filter_expression: str=None): + def __init__( + self, + tfstate_content: dict, + name_filter_expression: str=None, + module_filter_expression: str=None + ): self.__tfstate = TfState( content=tfstate_content ) self.__resources = TfStateFilterHelper( + name_filter_expression=name_filter_expression, module_filter_expression=module_filter_expression, resources=self.__tfstate.get_resources() ).apply_filter() @@ -43,24 +49,36 @@ class TfStateController(): class LocalTfStateController(TfStateController): - def __init__(self, file_location: str, module_filter_expression: str=None): + def __init__( + self, + file_location: str, + module_filter_expression: str=None, + name_filter_expression: 
str=None + ): self.__local_tfstate_service = LocalTfStateService( file_location=file_location ) super().__init__( tfstate_content=self.__local_tfstate_service.read_content(), + name_filter_expression=name_filter_expression, module_filter_expression=module_filter_expression ) class RemoteS3TfStateController(TfStateController): - def __init__(self, file_location: str, module_filter_expression: str=None): + def __init__( + self, + file_location: str, + module_filter_expression: str=None, + name_filter_expression: str=None + ): self.__remote_s3_tfstate_service = RemoteS3TfStateService( file_location=file_location ) super().__init__( tfstate_content=self.__remote_s3_tfstate_service.read_content(), + name_filter_expression=name_filter_expression, module_filter_expression=module_filter_expression ) diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py index 18343e2..d5cd16c 100644 --- a/tflens/helper/filter.py +++ b/tflens/helper/filter.py @@ -16,12 +16,26 @@ class ModuleFilterHelper(FilterHelper): def __init__(self, filter_expression: str, resource: TfStateResource): super().__init__( filter_expression=filter_expression, - object_attribute_value = resource.get_parent_module() + object_attribute_value=resource.get_parent_module() + ) + +class NameFilterHelper(FilterHelper): + + def __init__(self, filter_expression: str, resource: TfStateResource): + super().__init__( + filter_expression=filter_expression, + object_attribute_value=resource.get_name() ) class TfStateFilterHelper(): - def __init__(self, module_filter_expression: str=None, resources: list=None): + def __init__( + self, + name_filter_expression: str=None, + module_filter_expression: str=None, + resources: list=None + ): + self.__name_filter_expression = name_filter_expression self.__module_filter_expression = module_filter_expression self.__resources = resources @@ -29,13 +43,18 @@ class TfStateFilterHelper(): filtered_list = list() for resource in self.__resources or []: + pass_name_filter = True pass_module_filter = True + if self.__name_filter_expression: + filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource) + pass_name_filter = filter_helper.check_filter() + if self.__module_filter_expression: filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource) pass_module_filter = filter_helper.check_filter() - if pass_module_filter: + if pass_module_filter and pass_name_filter: filtered_list.append(resource) return filtered_list diff --git a/tflens/model/tfstate_resource.py b/tflens/model/tfstate_resource.py index 09c839f..be6a07a 100644 --- a/tflens/model/tfstate_resource.py +++ b/tflens/model/tfstate_resource.py @@ -27,6 +27,9 @@ class TfStateResource(): self.get_parent_module() ] + def get_name(self): + return self.__name + def get_parent_module(self): return self.__module.split('.')[1] if self.__module else '-'
neovasili/tflens
8de85543fed8503e823b64b112dbbb292fc90d04
diff --git a/tests/controllers_test.py b/tests/controllers_test.py index e6925f8..61504b4 100644 --- a/tests/controllers_test.py +++ b/tests/controllers_test.py @@ -92,6 +92,28 @@ class TestLocalTfStateController(unittest.TestCase): self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_show_resources_matching_name_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + name_filter_expression="current_user" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output) + + def test_local_show_resources_not_matching_name_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + name_filter_expression="Current_user" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_create_markdown_file(self): local_tfstate_controller = LocalTfStateController(self.existing_file) local_tfstate_controller.create_markdown_file()
[resources] Add filter resources by name support

We need to be able to filter resources shown in a table by the name field. The filter must be able to use regular expressions as the filter pattern.
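As a minimal sketch of what such a filter amounts to (hypothetical helper, not tflens's class layout), and one that generalizes directly to the type, provider, and mode filters requested in the following instances. Note that the last tflens patch in this section switches `check_filter` from `re.match` to a compiled `re.search`, so the expression can match anywhere in the field:

```python
import re

def filter_by_name(resources, pattern):
    """Keep resources whose name field matches the regular expression."""
    compiled = re.compile(pattern)
    return [r for r in resources if compiled.search(r["name"])]

resources = [{"name": "current_user"}, {"name": "other_user"}]
print(filter_by_name(resources, "current"))   # [{'name': 'current_user'}]
```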
0.0
[ "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter" ]
[ "tests/controllers_test.py::TestTfStateController::test_create_html_file", "tests/controllers_test.py::TestTfStateController::test_create_markdown_file", "tests/controllers_test.py::TestTfStateController::test_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller" ]
2020-10-26 22:05:52+00:00
4,120
neovasili__tflens-35
diff --git a/README.md b/README.md index 3b089dd..b4b09a0 100644 --- a/README.md +++ b/README.md @@ -78,6 +78,8 @@ optional arguments: Applies a regular expression to the module field in order to filter the resources list to output -n FILTER_NAME, --filter-name FILTER_NAME Applies a regular expression to the name field in order to filter the resources list to output + -t FILTER_TYPE, --filter-type FILTER_TYPE + Applies a regular expression to the type field in order to filter the resources list to output ``` ### Examples diff --git a/tflens/__main__.py b/tflens/__main__.py index 8991726..f7f87a2 100644 --- a/tflens/__main__.py +++ b/tflens/__main__.py @@ -51,6 +51,14 @@ parser.add_argument('-n', '--filter-name', filter the resources list to output", default="") +parser.add_argument('-t', '--filter-type', + type=str, + action="store", + dest="filter_type", + help="Applies a regular expression to the type field in order to \ + filter the resources list to output", + default="") + args = parser.parse_args() ARGS_REMOTE = args.remote @@ -58,6 +66,7 @@ ARGS_FILE_LOCATION = args.file_location ARGS_OUTPUT = args.output ARGS_FILTER_MODULE = args.filter_module ARGS_FILTER_NAME = args.filter_name +ARGS_FILTER_TYPE = args.filter_type if not ARGS_FILE_LOCATION: ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute()) @@ -71,6 +80,7 @@ def main(): tfstate_controller = remote_router[ARGS_REMOTE]( file_location=ARGS_FILE_LOCATION, name_filter_expression=ARGS_FILTER_NAME, + type_filter_expression=ARGS_FILTER_TYPE, module_filter_expression=ARGS_FILTER_MODULE ) diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py index 7f0fec6..478c492 100644 --- a/tflens/controller/tfstate.py +++ b/tflens/controller/tfstate.py @@ -15,6 +15,7 @@ class TfStateController(): self, tfstate_content: dict, name_filter_expression: str=None, + type_filter_expression: str=None, module_filter_expression: str=None ): self.__tfstate = TfState( @@ -22,6 +23,7 @@ class TfStateController(): ) self.__resources = TfStateFilterHelper( name_filter_expression=name_filter_expression, + type_filter_expression=type_filter_expression, module_filter_expression=module_filter_expression, resources=self.__tfstate.get_resources() ).apply_filter() @@ -53,6 +55,7 @@ class LocalTfStateController(TfStateController): self, file_location: str, module_filter_expression: str=None, + type_filter_expression: str=None, name_filter_expression: str=None ): self.__local_tfstate_service = LocalTfStateService( @@ -62,6 +65,7 @@ class LocalTfStateController(TfStateController): super().__init__( tfstate_content=self.__local_tfstate_service.read_content(), name_filter_expression=name_filter_expression, + type_filter_expression=type_filter_expression, module_filter_expression=module_filter_expression ) @@ -71,6 +75,7 @@ class RemoteS3TfStateController(TfStateController): self, file_location: str, module_filter_expression: str=None, + type_filter_expression: str=None, name_filter_expression: str=None ): self.__remote_s3_tfstate_service = RemoteS3TfStateService( @@ -80,5 +85,6 @@ class RemoteS3TfStateController(TfStateController): super().__init__( tfstate_content=self.__remote_s3_tfstate_service.read_content(), name_filter_expression=name_filter_expression, + type_filter_expression=type_filter_expression, module_filter_expression=module_filter_expression ) diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py index d5cd16c..f6ce147 100644 --- a/tflens/helper/filter.py +++ b/tflens/helper/filter.py @@ -27,15 +27,25 @@ class 
NameFilterHelper(FilterHelper): object_attribute_value=resource.get_name() ) +class TypeFilterHelper(FilterHelper): + + def __init__(self, filter_expression: str, resource: TfStateResource): + super().__init__( + filter_expression=filter_expression, + object_attribute_value=resource.get_type() + ) + class TfStateFilterHelper(): def __init__( self, name_filter_expression: str=None, + type_filter_expression: str=None, module_filter_expression: str=None, resources: list=None ): self.__name_filter_expression = name_filter_expression + self.__type_filter_expression = type_filter_expression self.__module_filter_expression = module_filter_expression self.__resources = resources @@ -45,16 +55,21 @@ class TfStateFilterHelper(): for resource in self.__resources or []: pass_name_filter = True pass_module_filter = True + pass_type_filter = True if self.__name_filter_expression: filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource) pass_name_filter = filter_helper.check_filter() + if self.__type_filter_expression: + filter_helper = TypeFilterHelper(filter_expression=self.__type_filter_expression, resource=resource) + pass_type_filter = filter_helper.check_filter() + if self.__module_filter_expression: filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource) pass_module_filter = filter_helper.check_filter() - if pass_module_filter and pass_name_filter: + if pass_module_filter and pass_name_filter and pass_type_filter: filtered_list.append(resource) return filtered_list diff --git a/tflens/model/tfstate_resource.py b/tflens/model/tfstate_resource.py index be6a07a..afd3dbf 100644 --- a/tflens/model/tfstate_resource.py +++ b/tflens/model/tfstate_resource.py @@ -30,6 +30,9 @@ class TfStateResource(): def get_name(self): return self.__name + def get_type(self): + return self.__type + def get_parent_module(self): return self.__module.split('.')[1] if self.__module else '-'
neovasili/tflens
8fed041deb04b1abef816991fe185a7c0821c5ee
diff --git a/tests/controllers_test.py b/tests/controllers_test.py index 61504b4..65f8078 100644 --- a/tests/controllers_test.py +++ b/tests/controllers_test.py @@ -114,6 +114,28 @@ class TestLocalTfStateController(unittest.TestCase): self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_show_resources_matching_type_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + type_filter_expression="aws_caller_identity" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output) + + def test_local_show_resources_not_matching_type_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + type_filter_expression="Aws_caller_identity" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_create_markdown_file(self): local_tfstate_controller = LocalTfStateController(self.existing_file) local_tfstate_controller.create_markdown_file()
[resources] Add filter resources by resource type support

We need to be able to filter resources shown in a table by the resource type field. The filter must be able to use regular expressions as the filter pattern.
0.0
[ "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_type_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_type_filter" ]
[ "tests/controllers_test.py::TestTfStateController::test_create_html_file", "tests/controllers_test.py::TestTfStateController::test_create_markdown_file", "tests/controllers_test.py::TestTfStateController::test_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller" ]
2020-10-28 00:13:57+00:00
4,121
neovasili__tflens-36
diff --git a/README.md b/README.md index b4b09a0..5676c16 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,8 @@ optional arguments: Applies a regular expression to the name field in order to filter the resources list to output -t FILTER_TYPE, --filter-type FILTER_TYPE Applies a regular expression to the type field in order to filter the resources list to output + -p FILTER_PROVIDER, --filter-provider FILTER_PROVIDER + Applies a regular expression to the provider field in order to filter the resources list to output ``` ### Examples diff --git a/tflens/__main__.py b/tflens/__main__.py index f7f87a2..fe1ca3d 100644 --- a/tflens/__main__.py +++ b/tflens/__main__.py @@ -59,6 +59,14 @@ parser.add_argument('-t', '--filter-type', filter the resources list to output", default="") +parser.add_argument('-p', '--filter-provider', + type=str, + action="store", + dest="filter_provider", + help="Applies a regular expression to the provider field in order to \ + filter the resources list to output", + default="") + args = parser.parse_args() ARGS_REMOTE = args.remote @@ -67,6 +75,7 @@ ARGS_OUTPUT = args.output ARGS_FILTER_MODULE = args.filter_module ARGS_FILTER_NAME = args.filter_name ARGS_FILTER_TYPE = args.filter_type +ARGS_FILTER_PROVIDER = args.filter_provider if not ARGS_FILE_LOCATION: ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute()) @@ -81,6 +90,7 @@ def main(): file_location=ARGS_FILE_LOCATION, name_filter_expression=ARGS_FILTER_NAME, type_filter_expression=ARGS_FILTER_TYPE, + provider_filter_expression=ARGS_FILTER_PROVIDER, module_filter_expression=ARGS_FILTER_MODULE ) diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py index 478c492..0f4c7a5 100644 --- a/tflens/controller/tfstate.py +++ b/tflens/controller/tfstate.py @@ -16,6 +16,7 @@ class TfStateController(): tfstate_content: dict, name_filter_expression: str=None, type_filter_expression: str=None, + provider_filter_expression: str=None, module_filter_expression: str=None ): self.__tfstate = TfState( @@ -24,6 +25,7 @@ class TfStateController(): self.__resources = TfStateFilterHelper( name_filter_expression=name_filter_expression, type_filter_expression=type_filter_expression, + provider_filter_expression=provider_filter_expression, module_filter_expression=module_filter_expression, resources=self.__tfstate.get_resources() ).apply_filter() @@ -56,6 +58,7 @@ class LocalTfStateController(TfStateController): file_location: str, module_filter_expression: str=None, type_filter_expression: str=None, + provider_filter_expression: str=None, name_filter_expression: str=None ): self.__local_tfstate_service = LocalTfStateService( @@ -66,6 +69,7 @@ class LocalTfStateController(TfStateController): tfstate_content=self.__local_tfstate_service.read_content(), name_filter_expression=name_filter_expression, type_filter_expression=type_filter_expression, + provider_filter_expression=provider_filter_expression, module_filter_expression=module_filter_expression ) @@ -76,6 +80,7 @@ class RemoteS3TfStateController(TfStateController): file_location: str, module_filter_expression: str=None, type_filter_expression: str=None, + provider_filter_expression: str=None, name_filter_expression: str=None ): self.__remote_s3_tfstate_service = RemoteS3TfStateService( @@ -86,5 +91,6 @@ class RemoteS3TfStateController(TfStateController): tfstate_content=self.__remote_s3_tfstate_service.read_content(), name_filter_expression=name_filter_expression, type_filter_expression=type_filter_expression, + 
provider_filter_expression=provider_filter_expression, module_filter_expression=module_filter_expression ) diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py index f6ce147..ee86ff3 100644 --- a/tflens/helper/filter.py +++ b/tflens/helper/filter.py @@ -35,17 +35,27 @@ class TypeFilterHelper(FilterHelper): object_attribute_value=resource.get_type() ) +class ProviderFilterHelper(FilterHelper): + + def __init__(self, filter_expression: str, resource: TfStateResource): + super().__init__( + filter_expression=filter_expression, + object_attribute_value=resource.get_type() + ) + class TfStateFilterHelper(): def __init__( self, name_filter_expression: str=None, type_filter_expression: str=None, + provider_filter_expression: str=None, module_filter_expression: str=None, resources: list=None ): self.__name_filter_expression = name_filter_expression self.__type_filter_expression = type_filter_expression + self.__provider_filter_expression = provider_filter_expression self.__module_filter_expression = module_filter_expression self.__resources = resources @@ -56,6 +66,7 @@ class TfStateFilterHelper(): pass_name_filter = True pass_module_filter = True pass_type_filter = True + pass_provider_filter = True if self.__name_filter_expression: filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource) @@ -65,11 +76,15 @@ class TfStateFilterHelper(): filter_helper = TypeFilterHelper(filter_expression=self.__type_filter_expression, resource=resource) pass_type_filter = filter_helper.check_filter() + if self.__provider_filter_expression: + filter_helper = ProviderFilterHelper(filter_expression=self.__provider_filter_expression, resource=resource) + pass_provider_filter = filter_helper.check_filter() + if self.__module_filter_expression: filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource) pass_module_filter = filter_helper.check_filter() - if pass_module_filter and pass_name_filter and pass_type_filter: + if pass_module_filter and pass_name_filter and pass_type_filter and pass_provider_filter: filtered_list.append(resource) return filtered_list
neovasili/tflens
ba81fda814f0f1401a79f52365d9ba1d3ca5799d
diff --git a/tests/controllers_test.py b/tests/controllers_test.py index 65f8078..b250a98 100644 --- a/tests/controllers_test.py +++ b/tests/controllers_test.py @@ -136,6 +136,28 @@ class TestLocalTfStateController(unittest.TestCase): self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_show_resources_matching_provider_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + provider_filter_expression="aws" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output) + + def test_local_show_resources_not_matching_provider_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + provider_filter_expression="Aws" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_create_markdown_file(self): local_tfstate_controller = LocalTfStateController(self.existing_file) local_tfstate_controller.create_markdown_file()
[resources] Add filter resources by provider support

We need to be able to filter resources shown in a table by the provider field. The filter must be able to use regular expressions as the filter pattern.
0.0
[ "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_provider_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_provider_filter" ]
[ "tests/controllers_test.py::TestTfStateController::test_create_html_file", "tests/controllers_test.py::TestTfStateController::test_create_markdown_file", "tests/controllers_test.py::TestTfStateController::test_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_type_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_type_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller" ]
2020-10-28 00:29:57+00:00
4,122
neovasili__tflens-37
diff --git a/README.md b/README.md index 5676c16..1c92c04 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,8 @@ optional arguments: Applies a regular expression to the type field in order to filter the resources list to output -p FILTER_PROVIDER, --filter-provider FILTER_PROVIDER Applies a regular expression to the provider field in order to filter the resources list to output + -d FILTER_MODE, --filter-mode FILTER_MODE + Applies a regular expression to the mode field in order to filter the resources list to output ``` ### Examples diff --git a/tflens/__main__.py b/tflens/__main__.py index fe1ca3d..2e696e3 100644 --- a/tflens/__main__.py +++ b/tflens/__main__.py @@ -67,6 +67,14 @@ parser.add_argument('-p', '--filter-provider', filter the resources list to output", default="") +parser.add_argument('-d', '--filter-mode', + type=str, + action="store", + dest="filter_mode", + help="Applies a regular expression to the mode field in order to \ + filter the resources list to output", + default="") + args = parser.parse_args() ARGS_REMOTE = args.remote @@ -76,6 +84,7 @@ ARGS_FILTER_MODULE = args.filter_module ARGS_FILTER_NAME = args.filter_name ARGS_FILTER_TYPE = args.filter_type ARGS_FILTER_PROVIDER = args.filter_provider +ARGS_FILER_MODE = args.filter_mode if not ARGS_FILE_LOCATION: ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute()) @@ -91,7 +100,8 @@ def main(): name_filter_expression=ARGS_FILTER_NAME, type_filter_expression=ARGS_FILTER_TYPE, provider_filter_expression=ARGS_FILTER_PROVIDER, - module_filter_expression=ARGS_FILTER_MODULE + module_filter_expression=ARGS_FILTER_MODULE, + mode_filter_expression=ARGS_FILER_MODE ) output_router = { diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py index 0f4c7a5..bafd4cb 100644 --- a/tflens/controller/tfstate.py +++ b/tflens/controller/tfstate.py @@ -17,7 +17,8 @@ class TfStateController(): name_filter_expression: str=None, type_filter_expression: str=None, provider_filter_expression: str=None, - module_filter_expression: str=None + module_filter_expression: str=None, + mode_filter_expression: str=None ): self.__tfstate = TfState( content=tfstate_content @@ -27,6 +28,7 @@ class TfStateController(): type_filter_expression=type_filter_expression, provider_filter_expression=provider_filter_expression, module_filter_expression=module_filter_expression, + mode_filter_expression=mode_filter_expression, resources=self.__tfstate.get_resources() ).apply_filter() @@ -59,7 +61,8 @@ class LocalTfStateController(TfStateController): module_filter_expression: str=None, type_filter_expression: str=None, provider_filter_expression: str=None, - name_filter_expression: str=None + name_filter_expression: str=None, + mode_filter_expression: str=None ): self.__local_tfstate_service = LocalTfStateService( file_location=file_location @@ -70,7 +73,8 @@ class LocalTfStateController(TfStateController): name_filter_expression=name_filter_expression, type_filter_expression=type_filter_expression, provider_filter_expression=provider_filter_expression, - module_filter_expression=module_filter_expression + module_filter_expression=module_filter_expression, + mode_filter_expression=mode_filter_expression ) class RemoteS3TfStateController(TfStateController): @@ -81,7 +85,8 @@ class RemoteS3TfStateController(TfStateController): module_filter_expression: str=None, type_filter_expression: str=None, provider_filter_expression: str=None, - name_filter_expression: str=None + name_filter_expression: str=None, + mode_filter_expression: str=None ): 
self.__remote_s3_tfstate_service = RemoteS3TfStateService( file_location=file_location @@ -92,5 +97,6 @@ class RemoteS3TfStateController(TfStateController): name_filter_expression=name_filter_expression, type_filter_expression=type_filter_expression, provider_filter_expression=provider_filter_expression, - module_filter_expression=module_filter_expression + module_filter_expression=module_filter_expression, + mode_filter_expression=mode_filter_expression ) diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py index ee86ff3..58bba51 100644 --- a/tflens/helper/filter.py +++ b/tflens/helper/filter.py @@ -9,7 +9,9 @@ class FilterHelper(): self.__object_attribute_value = object_attribute_value def check_filter(self): - return re.match(self.__filter_expression, self.__object_attribute_value) + compiled_pattern = re.compile(self.__filter_expression) + + return compiled_pattern.search(self.__object_attribute_value) class ModuleFilterHelper(FilterHelper): @@ -40,7 +42,15 @@ class ProviderFilterHelper(FilterHelper): def __init__(self, filter_expression: str, resource: TfStateResource): super().__init__( filter_expression=filter_expression, - object_attribute_value=resource.get_type() + object_attribute_value=resource.get_provider() + ) + +class ModeFilterHelper(FilterHelper): + + def __init__(self, filter_expression: str, resource: TfStateResource): + super().__init__( + filter_expression=filter_expression, + object_attribute_value=resource.get_mode() ) class TfStateFilterHelper(): @@ -51,12 +61,14 @@ class TfStateFilterHelper(): type_filter_expression: str=None, provider_filter_expression: str=None, module_filter_expression: str=None, + mode_filter_expression: str=None, resources: list=None ): self.__name_filter_expression = name_filter_expression self.__type_filter_expression = type_filter_expression self.__provider_filter_expression = provider_filter_expression self.__module_filter_expression = module_filter_expression + self.__mode_filter_expression = mode_filter_expression self.__resources = resources def apply_filter(self): @@ -67,6 +79,7 @@ class TfStateFilterHelper(): pass_module_filter = True pass_type_filter = True pass_provider_filter = True + pass_mode_filter = True if self.__name_filter_expression: filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource) @@ -84,7 +97,11 @@ class TfStateFilterHelper(): filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource) pass_module_filter = filter_helper.check_filter() - if pass_module_filter and pass_name_filter and pass_type_filter and pass_provider_filter: + if self.__mode_filter_expression: + filter_helper = ModeFilterHelper(filter_expression=self.__mode_filter_expression, resource=resource) + pass_mode_filter = filter_helper.check_filter() + + if pass_module_filter and pass_name_filter and pass_type_filter and pass_provider_filter and pass_mode_filter: filtered_list.append(resource) return filtered_list diff --git a/tflens/model/tfstate_resource.py b/tflens/model/tfstate_resource.py index afd3dbf..fa14840 100644 --- a/tflens/model/tfstate_resource.py +++ b/tflens/model/tfstate_resource.py @@ -33,6 +33,12 @@ class TfStateResource(): def get_type(self): return self.__type + def get_provider(self): + return self.__provider + + def get_mode(self): + return self.__mode + def get_parent_module(self): return self.__module.split('.')[1] if self.__module else '-'
neovasili/tflens
658445946b11c2776992bec4ae31e12e829a5136
diff --git a/tests/controllers_test.py b/tests/controllers_test.py index b250a98..6560a65 100644 --- a/tests/controllers_test.py +++ b/tests/controllers_test.py @@ -158,6 +158,58 @@ class TestLocalTfStateController(unittest.TestCase): self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_show_resources_matching_mode_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + mode_filter_expression="data" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output) + + def test_local_show_resources_not_matching_mode_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + mode_filter_expression="Data" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + + def test_local_show_resources_matching_mixed_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + module_filter_expression="test", + name_filter_expression="current_user", + type_filter_expression="aws_caller_identity", + provider_filter_expression="aws", + mode_filter_expression="data" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output) + + def test_local_show_resources_not_matching_mixed_filter(self): + local_tfstate_controller = LocalTfStateController( + file_location=self.existing_file, + module_filter_expression="test", + name_filter_expression="current_user", + type_filter_expression="aws_caller_identity", + provider_filter_expression="aws", + mode_filter_expression="Data" + ) + captured_output = io.StringIO() + sys.stdout = captured_output + local_tfstate_controller.show_resources() + + self.assertEqual(captured_output.getvalue().replace('\n', ''), '') + def test_local_create_markdown_file(self): local_tfstate_controller = LocalTfStateController(self.existing_file) local_tfstate_controller.create_markdown_file()
[resources] Add filter resources by mode support

We need to be able to filter the resources shown in a table by the mode field. The filter must be able to use regular expressions as the filter pattern.
0.0
[ "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_mixed_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_mode_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_mixed_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_mode_filter" ]
[ "tests/controllers_test.py::TestTfStateController::test_create_html_file", "tests/controllers_test.py::TestTfStateController::test_create_markdown_file", "tests/controllers_test.py::TestTfStateController::test_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_provider_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_type_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_provider_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_type_filter", "tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller" ]
2020-10-30 19:47:40+00:00
4,123
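Beyond the new `--filter-mode` flag, the patch above also changes `FilterHelper.check_filter` from `re.match` to a compiled pattern's `search`. A minimal standalone sketch (a toy function, not the project's class) of why that matters for filters:

```python
import re

def check_filter(filter_expression: str, value: str):
    # re.search matches anywhere in the string, whereas re.match only
    # matches at the beginning -- the patch swaps re.match for a
    # compiled pattern's search() for exactly this reason.
    return re.compile(filter_expression).search(value)

print(bool(check_filter("data", "data")))                   # True
print(bool(check_filter("caller", "aws_caller_identity")))  # True; re.match would say False
print(bool(check_filter("Data", "data")))                   # False: case-sensitive, as in the tests
```

Using `search` lets a filter expression hit anywhere in the field value, while matching stays case-sensitive unless the pattern says otherwise — which is why the `"Data"` mode filter in the tests matches nothing.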
neovasili__tflens-46
diff --git a/tflens/__main__.py b/tflens/__main__.py index 4417c32..de432d6 100644 --- a/tflens/__main__.py +++ b/tflens/__main__.py @@ -2,11 +2,7 @@ from pathlib import Path import argparse from tflens.helper.config import VERSION -from tflens.controller.tfstate import ( - RemoteS3TfStateController, - RemoteHttpTfStateController, - LocalTfStateController -) +from tflens.helper.remote import RemoteHelper parser = argparse.ArgumentParser( description='Terraform lens is a CLI tool that enables developers have a summarized view of tfstate resources.' @@ -28,15 +24,6 @@ parser.add_argument('-o', '--output', help="Defines output type (markdown|html). If empty outputs in terminal", default="") -parser.add_argument('-r', '--remote', - type=str, - action="store", - dest="remote", - help="Defines if remote (s3|http) or local tfstate file. If empty local is used. \ - When remote is defined, you also need to specify --file-location with the tfstate location \ - according to the following pattern: bucket-name/tfstate-key", - default="") - parser.add_argument('-m', '--filter-module', type=str, action="store", @@ -98,7 +85,6 @@ parser.add_argument('-v', '--version', args = parser.parse_args() -ARGS_REMOTE = args.remote ARGS_FILE_LOCATION = args.file_location ARGS_OUTPUT = args.output ARGS_FILTER_MODULE = args.filter_module @@ -113,13 +99,9 @@ if not ARGS_FILE_LOCATION: ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute()) def main(): - remote_router = { - 's3': RemoteS3TfStateController, - 'http': RemoteHttpTfStateController, - '': LocalTfStateController - } + remote_helper = RemoteHelper(ARGS_FILE_LOCATION) - tfstate_controller = remote_router[ARGS_REMOTE]( + tfstate_controller = remote_helper.invoke_remote_controller( file_location=ARGS_FILE_LOCATION, user=ARGS_HTTP_USER, password=ARGS_HTTP_PASSWORD, diff --git a/tflens/exception/exception.py b/tflens/exception/exception.py index bff9917..95e47b5 100644 --- a/tflens/exception/exception.py +++ b/tflens/exception/exception.py @@ -36,3 +36,13 @@ class ServerUnavailable(CustomException): def __init__(self): super().__init__("The server is unavailable") + +class NotValidS3Location(CustomException): + + def __init__(self): + super().__init__("Invalid S3 location. Must be something like 's3://bucket_name/key'") + +class NotValidHttpLocation(CustomException): + + def __init__(self): + super().__init__("Invalid Http location. 
Must be something like 'http(s)://http_server/'") diff --git a/tflens/helper/location.py b/tflens/helper/location.py new file mode 100644 index 0000000..3d13d5c --- /dev/null +++ b/tflens/helper/location.py @@ -0,0 +1,21 @@ +import re + +class LocationHelper(): + + def __init__(self, file_location: str, validation_pattern: str): + self.__file_location = file_location + self.__validation_pattern = validation_pattern + self.__compiled_pattern = re.compile(self.__validation_pattern) + + def validate(self): + return self.__compiled_pattern.search(self.__file_location) + +class S3LocationHelper(LocationHelper): + + def __init__(self, file_location: str): + super().__init__(file_location=file_location, validation_pattern="s3\:\/\/.+\/.+") + +class HttpLocationHelper(LocationHelper): + + def __init__(self, file_location: str): + super().__init__(file_location=file_location, validation_pattern="http(s)?\:\/\/.+") diff --git a/tflens/helper/remote.py b/tflens/helper/remote.py new file mode 100644 index 0000000..a6bfbd3 --- /dev/null +++ b/tflens/helper/remote.py @@ -0,0 +1,26 @@ +from tflens.controller.tfstate import ( + RemoteS3TfStateController, + RemoteHttpTfStateController, + LocalTfStateController +) + +class RemoteHelper(): + + def __init__(self, file_location: str): + self.__file_location = file_location + self.__remote_router = { + 's3': RemoteS3TfStateController, + 'http': RemoteHttpTfStateController, + 'https': RemoteHttpTfStateController, + 'local': LocalTfStateController + } + self.__remote_type = "local" + + if ":" in self.__file_location: + self.__remote_type = self.__file_location.split(":")[0] + + def get_remote_type(self): + return self.__remote_type + + def invoke_remote_controller(self, **kwargs): + return self.__remote_router[self.__remote_type](**kwargs) diff --git a/tflens/service/tfstate.py b/tflens/service/tfstate.py index e778a05..8972a4f 100644 --- a/tflens/service/tfstate.py +++ b/tflens/service/tfstate.py @@ -6,21 +6,35 @@ import requests from botocore.exceptions import ClientError +from tflens.helper.location import ( + S3LocationHelper, + HttpLocationHelper +) from tflens.exception.exception import ( CannotLoadLocalFile, CannotReadLocalFile, CannotLoadRemoteFile, UnauthorizedAccess, Forbidden, - ServerUnavailable + ServerUnavailable, + NotValidS3Location, + NotValidHttpLocation ) class RemoteS3TfStateService(): def __init__(self, file_location: str): - self.__s3_client = boto3.client('s3') - self.__bucket_name = file_location.split('/')[0] - self.__file_s3_key = "/".join(file_location.split('/')[1:]) + location_helper = S3LocationHelper(file_location=file_location) + + if location_helper.validate(): + location_without_schema = file_location.split(":")[1].replace("//", "") + + self.__s3_client = boto3.client('s3') + self.__bucket_name = location_without_schema.split('/')[0] + self.__file_s3_key = "/".join(location_without_schema.split('/')[1:]) + + else: + raise NotValidS3Location def read_content(self): try: @@ -37,9 +51,15 @@ class RemoteS3TfStateService(): class RemoteHttpTfStateService(): def __init__(self, file_location: str, user: str=None, password: str=None): - self.__file_location = file_location - self.__user = user - self.__password = password + location_helper = HttpLocationHelper(file_location=file_location) + + if location_helper.validate(): + self.__file_location = file_location + self.__user = user + self.__password = password + + else: + raise NotValidHttpLocation def read_content(self): try:
neovasili/tflens
57ccc55027682bd2180c7e24c3463eeb6e52d4b5
diff --git a/tests/helpers_test.py b/tests/helpers_test.py index b4b1447..9814a7f 100644 --- a/tests/helpers_test.py +++ b/tests/helpers_test.py @@ -18,6 +18,11 @@ from tflens.helper.table import ( MarkdownTableHelper, HtmlTableHelper ) +from tflens.helper.remote import RemoteHelper +from tflens.helper.location import ( + S3LocationHelper, + HttpLocationHelper +) class TestTableHelper(unittest.TestCase): @@ -108,3 +113,83 @@ class TestHtmlTableHelper(unittest.TestCase): html_file_content = html_file.read() self.assertEqual(html_file_content.replace('\n', ''), self.file_htmltable_output.replace('\n', '')) + +class TestRemoteHelper(unittest.TestCase): + + def setUp(self): + self.local_file_location = 'local/terraform.tfstate' + self.s3_file_location = 's3://local/terraform.tfstate' + self.http_file_location = 'http://local/terraform.tfstate' + self.https_file_location = 'https://local/terraform.tfstate' + + def test_invoke_local_remote_controller(self): + remote_helper = RemoteHelper(self.local_file_location) + + self.assertEqual( + remote_helper.get_remote_type(), + "local" + ) + + def test_invoke_s3_remote_controller(self): + remote_helper = RemoteHelper(self.s3_file_location) + + self.assertEqual( + remote_helper.get_remote_type(), + "s3" + ) + + def test_invoke_http_remote_controller(self): + remote_helper = RemoteHelper(self.http_file_location) + + self.assertEqual( + remote_helper.get_remote_type(), + "http" + ) + + def test_invoke_https_remote_controller(self): + remote_helper = RemoteHelper(self.https_file_location) + + self.assertEqual( + remote_helper.get_remote_type(), + "https" + ) + +class TestLocationHelper(unittest.TestCase): + + def setUp(self): + self.s3_file_location = 's3://local/terraform.tfstate' + self.non_valid_s3_file_location = 's3:/local/terraform.tfstate' + self.http_file_location = 'http://local/terraform.tfstate' + self.non_valid_http_file_location = 'http:/local/terraform.tfstate' + self.https_file_location = 'https://local/terraform.tfstate' + self.non_valid_https_file_location = 'https:/local/terraform.tfstate' + + def test_valid_s3_remote_location(self): + location_helper = S3LocationHelper(self.s3_file_location) + + self.assertTrue(location_helper.validate()) + + def test_non_valid_s3_remote_location(self): + location_helper = S3LocationHelper(self.non_valid_s3_file_location) + + self.assertFalse(location_helper.validate()) + + def test_valid_http_remote_location(self): + location_helper = HttpLocationHelper(self.http_file_location) + + self.assertTrue(location_helper.validate()) + + def test_non_valid_http_remote_location(self): + location_helper = HttpLocationHelper(self.non_valid_http_file_location) + + self.assertFalse(location_helper.validate()) + + def test_valid_https_remote_location(self): + location_helper = HttpLocationHelper(self.https_file_location) + + self.assertTrue(location_helper.validate()) + + def test_non_valid_https_remote_location(self): + location_helper = HttpLocationHelper(self.non_valid_https_file_location) + + self.assertFalse(location_helper.validate()) diff --git a/tests/services_test.py b/tests/services_test.py index eaee6d2..5028226 100644 --- a/tests/services_test.py +++ b/tests/services_test.py @@ -16,7 +16,9 @@ from tflens.service.tfstate import ( from tflens.exception.exception import ( CannotLoadLocalFile, CannotReadLocalFile, - CannotLoadRemoteFile + CannotLoadRemoteFile, + NotValidS3Location, + NotValidHttpLocation ) @mock_s3 @@ -27,8 +29,9 @@ class TestRemoteS3TfStateService(unittest.TestCase): self.valid_content_key = 
'tflens/terraform.tfstate' self.non_valid_key = 'none' - self.valid_tfstate_file = "{}/{}".format(self.bucket, self.valid_content_key) - self.non_existing_tfstate_file = "{}/{}".format(self.bucket, self.non_valid_key) + self.valid_tfstate_file = "s3://{}/{}".format(self.bucket, self.valid_content_key) + self.non_existing_tfstate_file = "s3://{}/{}".format(self.bucket, self.non_valid_key) + self.non_valid_tfstate_location = "s3:/{}/{}".format(self.bucket, self.valid_content_key) with mock_s3(): s3 = boto3.client("s3") @@ -57,12 +60,20 @@ class TestRemoteS3TfStateService(unittest.TestCase): remote_s3_tfstate_service.read_content ) + def test_non_valid_remote_s3_file_location(self): + self.assertRaises( + NotValidS3Location, + RemoteS3TfStateService, + self.non_valid_tfstate_location + ) + class TestRemoteHttpTfStateService(unittest.TestCase): @requests_mock.mock() def setUp(self, mock): self.valid_tfstate_file = "http://valid_tfstate_file" self.non_existing_tfstate_file = "http://non_existing_tfstate_file" + self.non_valid_tfstate_location = "http:/non_existing_tfstate_file" self.user = "user" self.password = "password" @@ -88,6 +99,13 @@ class TestRemoteHttpTfStateService(unittest.TestCase): remote_http_tfstate_service.read_content ) + def test_non_valid_remote_http_file_location(self): + self.assertRaises( + NotValidHttpLocation, + RemoteHttpTfStateService, + self.non_valid_tfstate_location + ) + class TestLocalTfStateService(unittest.TestCase): def setUp(self):
[arguments] Remove remote argument

It is possible to remove the `remote` argument and infer the location of the file from the `--file-location` argument's schema. For instance, if the file location starts with `s3://`, then the tfstate is remote and located in S3.
0.0
[ "tests/helpers_test.py::TestTableHelper::test_non_valid_table_with_non_valid_row", "tests/helpers_test.py::TestTableHelper::test_print_table", "tests/helpers_test.py::TestTableHelper::test_set_table_non_valid_style", "tests/helpers_test.py::TestTableHelper::test_set_table_valid_style", "tests/helpers_test.py::TestTableHelper::test_valid_table_with_multiple_rows", "tests/helpers_test.py::TestTableHelper::test_valid_table_with_one_row", "tests/helpers_test.py::TestMarkdownTableHelper::test_print_markdowntable", "tests/helpers_test.py::TestMarkdownTableHelper::test_write_markdowntable_file", "tests/helpers_test.py::TestHtmlTableHelper::test_print_htmltable", "tests/helpers_test.py::TestHtmlTableHelper::test_write_html_file", "tests/helpers_test.py::TestRemoteHelper::test_invoke_http_remote_controller", "tests/helpers_test.py::TestRemoteHelper::test_invoke_https_remote_controller", "tests/helpers_test.py::TestRemoteHelper::test_invoke_local_remote_controller", "tests/helpers_test.py::TestRemoteHelper::test_invoke_s3_remote_controller", "tests/helpers_test.py::TestLocationHelper::test_non_valid_http_remote_location", "tests/helpers_test.py::TestLocationHelper::test_non_valid_https_remote_location", "tests/helpers_test.py::TestLocationHelper::test_non_valid_s3_remote_location", "tests/helpers_test.py::TestLocationHelper::test_valid_http_remote_location", "tests/helpers_test.py::TestLocationHelper::test_valid_https_remote_location", "tests/helpers_test.py::TestLocationHelper::test_valid_s3_remote_location", "tests/services_test.py::TestRemoteHttpTfStateService::test_non_valid_remote_http_file_location", "tests/services_test.py::TestRemoteHttpTfStateService::test_read_content_remote_http_file", "tests/services_test.py::TestLocalTfStateService::test_cannot_read_content_local_file", "tests/services_test.py::TestLocalTfStateService::test_fail_open_non_existing_local_file", "tests/services_test.py::TestLocalTfStateService::test_open_existing_local_file", "tests/services_test.py::TestLocalTfStateService::test_read_content_local_file" ]
[]
2020-11-08 17:16:47+00:00
4,124
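The mechanism the patch introduces is scheme sniffing: split the file location on `:` to pick a controller, then validate the location shape per scheme. A condensed sketch with toy stand-ins for the real controller classes (only the two regex patterns are taken from the patch):

```python
import re

# Toy stand-ins for LocalTfStateController / RemoteS3TfStateController /
# RemoteHttpTfStateController.
def local_controller(**kw): return ("local", kw)
def s3_controller(**kw): return ("s3", kw)
def http_controller(**kw): return ("http", kw)

ROUTER = {"local": local_controller, "s3": s3_controller,
          "http": http_controller, "https": http_controller}

def remote_type(file_location: str) -> str:
    # Anything without a scheme separator is treated as a local path.
    return file_location.split(":")[0] if ":" in file_location else "local"

# Location shapes, using the validation patterns from the patch.
assert re.search(r"s3\:\/\/.+\/.+", "s3://bucket/terraform.tfstate")
assert not re.search(r"http(s)?\:\/\/.+", "http:/missing-a-slash")

kind = remote_type("s3://bucket/key")
print(ROUTER[kind](file_location="s3://bucket/key"))  # ('s3', {...})
print(remote_type("/tmp/terraform.tfstate"))          # local
```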
netaddr__netaddr-225
diff --git a/netaddr/eui/__init__.py b/netaddr/eui/__init__.py index 5639676..07bbdc3 100644 --- a/netaddr/eui/__init__.py +++ b/netaddr/eui/__init__.py @@ -100,6 +100,9 @@ class OUI(BaseIdentifier): else: raise NotRegisteredError('OUI %r not registered!' % (oui,)) + def __hash__(self): + return hash(self._value) + def __eq__(self, other): if not isinstance(other, OUI): try:
netaddr/netaddr
606a44b62ea7032f63e359aaaaabc0057e168890
diff --git a/netaddr/tests/eui/test_eui.py b/netaddr/tests/eui/test_eui.py index 645a518..c17d0ce 100644 --- a/netaddr/tests/eui/test_eui.py +++ b/netaddr/tests/eui/test_eui.py @@ -186,6 +186,14 @@ def test_oui_constructor(): assert oui.reg_count == 3 +def test_oui_hash(): + oui0 = OUI(0) + oui1 = OUI(1) + oui_dict = {oui0: None, oui1: None} + + assert list(oui_dict.keys()) == [OUI(0), OUI(1)] + + def test_eui_iab(): mac = EUI('00-50-C2-00-0F-01') assert mac.is_iab()
OUI hashability

# Summary

`OUI` does not have a `__hash__` function. This prevents the use of `OUI` instances as dictionary or set keys.

- Python hash documentation [link](https://docs.python.org/3/reference/datamodel.html#object.__hash__)

# Example - Break

```
from netaddr import OUI

a = OUI("002272")
try:
    {a}
except TypeError:
    print("Hash failed")
```

# Example - Fix

```
from netaddr import OUI

a = OUI("002272")
{a}
```
0.0
[ "netaddr/tests/eui/test_eui.py::test_oui_hash" ]
[ "netaddr/tests/eui/test_eui.py::test_mac_address_properties", "netaddr/tests/eui/test_eui.py::test_mac_address_numerical_operations", "netaddr/tests/eui/test_eui.py::test_eui_oct_format_py3", "netaddr/tests/eui/test_eui.py::test_eui_constructor", "netaddr/tests/eui/test_eui.py::test_eui_dialects", "netaddr/tests/eui/test_eui.py::test_eui_dialect_property_assignment", "netaddr/tests/eui/test_eui.py::test_eui_format", "netaddr/tests/eui/test_eui.py::test_eui_custom_dialect", "netaddr/tests/eui/test_eui.py::test_eui64_dialects", "netaddr/tests/eui/test_eui.py::test_eui64_dialect_property_assignment", "netaddr/tests/eui/test_eui.py::test_eui64_custom_dialect", "netaddr/tests/eui/test_eui.py::test_eui_oui_information", "netaddr/tests/eui/test_eui.py::test_oui_constructor", "netaddr/tests/eui/test_eui.py::test_eui_iab", "netaddr/tests/eui/test_eui.py::test_eui64", "netaddr/tests/eui/test_eui.py::test_mac_to_ipv6_link_local", "netaddr/tests/eui/test_eui.py::test_iab", "netaddr/tests/eui/test_eui.py::test_new_iab", "netaddr/tests/eui/test_eui.py::test_eui48_vs_eui64", "netaddr/tests/eui/test_eui.py::test_eui_sort_order", "netaddr/tests/eui/test_eui.py::test_eui_pickle_support", "netaddr/tests/eui/test_eui.py::test_mac_to_eui64_conversion", "netaddr/tests/eui/test_eui.py::test_mac_to_ipv6", "netaddr/tests/eui/test_eui.py::test_eui64_constructor" ]
2021-02-06 00:03:45+00:00
4,125
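The underlying language rule: in Python 3, a class that defines `__eq__` without `__hash__` has `__hash__` set to `None`, so its instances are unhashable. The fix mirrors `__eq__` by hashing the same `_value`. A minimal illustration, independent of netaddr:

```python
class Oui:
    def __init__(self, value: int):
        self._value = value

    def __eq__(self, other):
        return isinstance(other, Oui) and self._value == other._value

    # Defining __eq__ alone would set __hash__ to None in Python 3.
    # Hashing the same value preserves the contract:
    # a == b  implies  hash(a) == hash(b).
    def __hash__(self):
        return hash(self._value)

d = {Oui(0): "zero", Oui(1): "one"}
print(d[Oui(0)])  # zero
```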
netromdk__vermin-173
diff --git a/vermin/source_visitor.py b/vermin/source_visitor.py index 26f5191..440563a 100644 --- a/vermin/source_visitor.py +++ b/vermin/source_visitor.py @@ -769,16 +769,17 @@ class SourceVisitor(ast.NodeVisitor): def __add_kwargs(self, function, keyword, line=None, col=None): if function in self.__user_defs: # pragma: no cover self.__vvvvprint("Ignoring function '{}' because it's user-defined!".format(function)) - return + return False if self.__config.is_excluded_kwarg(function, keyword): self.__vvprint("Excluding kwarg: {}({})".format(function, keyword)) - return + return False fn_kw = (function, keyword) if fn_kw not in self.__kwargs: self.__kwargs.append(fn_kw) self.__add_line_col(fn_kw, line, col) + return True def __add_user_func_deco(self, ufd, line=None, col=None): if ufd in self.__user_defs: @@ -1268,6 +1269,7 @@ class SourceVisitor(ast.NodeVisitor): self.generic_visit(node) def visit_keyword(self, node): + added = False for func_name in self.__function_name_stack: # kwarg related. exp_name = func_name.split(".") @@ -1275,28 +1277,32 @@ class SourceVisitor(ast.NodeVisitor): # Check if function is imported from module. if func_name in self.__import_mem_mod: mod = self.__import_mem_mod[func_name] - self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line) + added |= self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line) # When having "ElementTree.tostringlist", for instance, and include mapping "{'ElementTree': # 'xml.etree'}" then try piecing them together to form a match. elif exp_name[0] in self.__import_mem_mod: mod = self.__import_mem_mod[exp_name[0]] - self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line) + added |= self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line) # Lookup indirect names via variables. elif exp_name[0] in self.__name_res: res = self.__name_res[exp_name[0]] if res in self.__import_mem_mod: mod = self.__import_mem_mod[res] - self.__add_kwargs(dotted_name([mod, res, exp_name[1:]]), node.arg, self.__line) + added |= self.__add_kwargs(dotted_name([mod, res, exp_name[1:]]), node.arg, self.__line) # Try as FQN. else: - self.__add_kwargs(dotted_name([res, exp_name[1:]]), node.arg, self.__line) + added |= self.__add_kwargs(dotted_name([res, exp_name[1:]]), node.arg, self.__line) # Only add direct function if not found via module/class/member. else: - self.__add_kwargs(func_name, node.arg, self.__line) + added |= self.__add_kwargs(func_name, node.arg, self.__line) + + # If not excluded or ignored then visit keyword values also. + if added: + self.generic_visit(node) def visit_Bytes(self, node): self.__bytesv3 = True
netromdk/vermin
a2229690af9c381768e1a173b7a74ca7d578e456
diff --git a/tests/lang.py b/tests/lang.py index f5adaf8..4243721 100644 --- a/tests/lang.py +++ b/tests/lang.py @@ -2128,3 +2128,13 @@ except OSError as ex: def test_True_constant(self): self.assertOnlyIn(((2, 3), (3, 0)), self.detect("True")) + + def test_issue_168_keyword_values(self): + visitor = self.visit(""" +ret = subparser.add_parser("qemu") +ret.add_argument("--efi", action=argparse.BooleanOptionalAction, help="...") +""") + + # `argparse.BooleanOptionalAction` requires !2, 3.9 but the keyword values weren't visited + # before. + self.assertOnlyIn((3, 9), visitor.minimum_versions())
python 3.9 feature not detected: argparse.BooleanOptionalAction

**Describe the bug**
`argparse.BooleanOptionalAction` is a Python 3.9 feature, but vermin doesn't detect it.

**To Reproduce**
Example code:
```python3
#!/usr/bin/env python3
ret = subparser.add_parser("qemu")
ret.add_argument("--efi", action=argparse.BooleanOptionalAction, help="...")
```
Running `vermin -t=3.7-` on it doesn't report that `argparse.BooleanOptionalAction` is only available in Python >=3.9. See https://docs.python.org/3/library/argparse.html.

**Expected behavior**
Report that `argparse.BooleanOptionalAction` is only available in Python >=3.9.

**Environment (please complete the following information):**
vermin version: 1.5.1
0.0
[ "tests/lang.py::VerminLanguageTests::test_issue_168_keyword_values" ]
[ "tests/lang.py::VerminLanguageTests::test_False_constant", "tests/lang.py::VerminLanguageTests::test_True_constant", "tests/lang.py::VerminLanguageTests::test_ann_assign_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_assign_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_async_comprehension", "tests/lang.py::VerminLanguageTests::test_async_for", "tests/lang.py::VerminLanguageTests::test_async_generator", "tests/lang.py::VerminLanguageTests::test_async_multi_withitem", "tests/lang.py::VerminLanguageTests::test_async_with_parentheses", "tests/lang.py::VerminLanguageTests::test_async_with_statement", "tests/lang.py::VerminLanguageTests::test_aug_assign_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_await_in_comprehension", "tests/lang.py::VerminLanguageTests::test_bare_except_handler", "tests/lang.py::VerminLanguageTests::test_builtin_generic_type_annotation", "tests/lang.py::VerminLanguageTests::test_bytearray_format", "tests/lang.py::VerminLanguageTests::test_bytes_directives", "tests/lang.py::VerminLanguageTests::test_bytes_format", "tests/lang.py::VerminLanguageTests::test_bytes_from_type", "tests/lang.py::VerminLanguageTests::test_bytesv3", "tests/lang.py::VerminLanguageTests::test_class_decorators", "tests/lang.py::VerminLanguageTests::test_class_name_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_comprehension_assign_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_continue_in_finally", "tests/lang.py::VerminLanguageTests::test_coroutines_async", "tests/lang.py::VerminLanguageTests::test_coroutines_await", "tests/lang.py::VerminLanguageTests::test_detect_except_members", "tests/lang.py::VerminLanguageTests::test_detect_raise_members", "tests/lang.py::VerminLanguageTests::test_dict_comprehension", "tests/lang.py::VerminLanguageTests::test_dict_from_type", "tests/lang.py::VerminLanguageTests::test_dict_union", "tests/lang.py::VerminLanguageTests::test_dict_union_merge", "tests/lang.py::VerminLanguageTests::test_ellipsis_in_slices", "tests/lang.py::VerminLanguageTests::test_ellipsis_out_of_slices", "tests/lang.py::VerminLanguageTests::test_except_handler_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_except_star", "tests/lang.py::VerminLanguageTests::test_float_from_type", "tests/lang.py::VerminLanguageTests::test_for_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_format", "tests/lang.py::VerminLanguageTests::test_frozenset_from_type", "tests/lang.py::VerminLanguageTests::test_fstrings", "tests/lang.py::VerminLanguageTests::test_fstrings_named_expr", "tests/lang.py::VerminLanguageTests::test_fstrings_self_doc", "tests/lang.py::VerminLanguageTests::test_func_arg_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_func_kwarg_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_func_kwonlyarg_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_func_name_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_func_posonlyarg_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_func_vararg_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_function_decorators", "tests/lang.py::VerminLanguageTests::test_generalized_unpacking", "tests/lang.py::VerminLanguageTests::test_import_as_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_import_from_as_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_infix_matrix_multiplication", "tests/lang.py::VerminLanguageTests::test_int_from_type", 
"tests/lang.py::VerminLanguageTests::test_issue_66_annotations", "tests/lang.py::VerminLanguageTests::test_kw_only_args", "tests/lang.py::VerminLanguageTests::test_lambda_arg_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_list_from_type", "tests/lang.py::VerminLanguageTests::test_long_from_type", "tests/lang.py::VerminLanguageTests::test_longv2", "tests/lang.py::VerminLanguageTests::test_module_dir_func", "tests/lang.py::VerminLanguageTests::test_module_getattr_func", "tests/lang.py::VerminLanguageTests::test_multi_withitem", "tests/lang.py::VerminLanguageTests::test_named_expr_assign_target_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_named_expressions", "tests/lang.py::VerminLanguageTests::test_nonlocal_stmt", "tests/lang.py::VerminLanguageTests::test_pattern_matching", "tests/lang.py::VerminLanguageTests::test_pos_only_args", "tests/lang.py::VerminLanguageTests::test_print_v2_v3_mixed", "tests/lang.py::VerminLanguageTests::test_printv2", "tests/lang.py::VerminLanguageTests::test_printv3", "tests/lang.py::VerminLanguageTests::test_raise_cause", "tests/lang.py::VerminLanguageTests::test_raise_from_none", "tests/lang.py::VerminLanguageTests::test_relaxed_decorators", "tests/lang.py::VerminLanguageTests::test_set_comprehension", "tests/lang.py::VerminLanguageTests::test_set_from_type", "tests/lang.py::VerminLanguageTests::test_set_literals", "tests/lang.py::VerminLanguageTests::test_str_from_type", "tests/lang.py::VerminLanguageTests::test_super_no_args", "tests/lang.py::VerminLanguageTests::test_unicode_from_type", "tests/lang.py::VerminLanguageTests::test_union_types", "tests/lang.py::VerminLanguageTests::test_unpacking_assignment", "tests/lang.py::VerminLanguageTests::test_with_items_ignore_user_def", "tests/lang.py::VerminLanguageTests::test_with_parentheses", "tests/lang.py::VerminLanguageTests::test_with_parentheses_first_match", "tests/lang.py::VerminLanguageTests::test_with_parentheses_second_match", "tests/lang.py::VerminLanguageTests::test_with_statement", "tests/lang.py::VerminLanguageTests::test_yield_from" ]
2023-04-04 19:07:54+00:00
4,126
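The root cause is an `ast.NodeVisitor` subtlety: once a class overrides `visit_keyword`, traversal into the keyword's value expression stops unless the override calls `generic_visit`, so `argparse.BooleanOptionalAction` in `action=...` was never visited. A minimal sketch of the mechanism (not vermin's visitor):

```python
import ast

code = 'f(x, action=argparse.BooleanOptionalAction)'

class KeywordValueVisitor(ast.NodeVisitor):
    def __init__(self):
        self.dotted = []

    def visit_keyword(self, node):
        # Without this generic_visit() call, the keyword's value
        # expression is never traversed and visit_Attribute below
        # never fires -- which is exactly the reported bug.
        self.generic_visit(node)

    def visit_Attribute(self, node):
        if isinstance(node.value, ast.Name):
            self.dotted.append("{}.{}".format(node.value.id, node.attr))
        self.generic_visit(node)

v = KeywordValueVisitor()
v.visit(ast.parse(code))
print(v.dotted)  # ['argparse.BooleanOptionalAction']
```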
netromdk__vermin-196
diff --git a/vermin/backports.py b/vermin/backports.py index f10c6f8..2d32b07 100644 --- a/vermin/backports.py +++ b/vermin/backports.py @@ -27,6 +27,8 @@ BACKPORTS = ( ("typing_extensions==4.0", ["https://pypi.org/project/typing-extensions/4.0.0/"], (None, (3, 6))), ("typing_extensions==4.3", ["https://pypi.org/project/typing-extensions/4.3.0/"], (None, (3, 7))), ("typing_extensions", ["https://pypi.org/project/typing-extensions/4.3.0/"], (None, (3, 7))), + + ("zoneinfo", ["https://pypi.org/project/backports.zoneinfo/"], (None, (3, 6))), ) class Backports: diff --git a/vermin/rules.py b/vermin/rules.py index f80f006..0d76ee5 100644 --- a/vermin/rules.py +++ b/vermin/rules.py @@ -192,7 +192,7 @@ def MOD_REQS(config): "xmlrpclib": ((2, 2), None), "zipapp": (None, (3, 5)), "zipimport": ((2, 3), (3, 0)), - "zoneinfo": (None, (3, 9)), + "zoneinfo": bpv("zoneinfo", (None, (3, 9)), config), } # Module member requirements: member -> (module, requirements)
netromdk/vermin
5824cc0036fd71946e2cfff1ba65ceddbeae5f56
diff --git a/tests/backports.py b/tests/backports.py index c8061e1..e09c89a 100644 --- a/tests/backports.py +++ b/tests/backports.py @@ -20,6 +20,7 @@ class VerminBackportsTests(VerminTest): "typing_extensions==4.0", "typing_extensions==4.3", "typing_extensions", + "zoneinfo", ), Backports.modules()) def test_is_backport(self): @@ -41,7 +42,8 @@ class VerminBackportsTests(VerminTest): typing - https://pypi.org/project/typing/ (2.7, 3.2) typing_extensions==4.0 - https://pypi.org/project/typing-extensions/4.0.0/ (!2, 3.6) typing_extensions==4.3 - https://pypi.org/project/typing-extensions/4.3.0/ (!2, 3.7) - typing_extensions - https://pypi.org/project/typing-extensions/4.3.0/ (!2, 3.7)""", + typing_extensions - https://pypi.org/project/typing-extensions/4.3.0/ (!2, 3.7) + zoneinfo - https://pypi.org/project/backports.zoneinfo/ (!2, 3.6)""", Backports.str(3)) def test_version_filter(self): @@ -66,6 +68,7 @@ class VerminBackportsTests(VerminTest): "statistics", "typing", "typing_extensions", + "zoneinfo", ], Backports.unversioned_filter(Backports.modules())) def test_expand_versions(self):
False positive when using backported zoneinfo

**Describe the bug**
I'm trying to use `vermin` on a project that uses `zoneinfo`. The project is properly set up to install the backport on Python < 3.9. However, vermin reports this as a violation, and won't take `--backport zoneinfo`.

**To Reproduce**
```sh
git clone https://github.com/WhyNotHugo/django-afip.git
cd django-afip
vermin --no-tips --target=3.7 --violations .
vermin --no-tips --target=3.7 --violations --backport zoneinfo .
```

**Expected behavior**
The latter should work.

**Environment (please complete the following information):**
```con
> vermin --version
1.5.1
> git --version
git version 2.40.1
```

**Additional context**
-
0.0
[ "tests/backports.py::VerminBackportsTests::test_modules", "tests/backports.py::VerminBackportsTests::test_str", "tests/backports.py::VerminBackportsTests::test_unversioned_filter" ]
[ "tests/backports.py::VerminBackportsTests::test_expand_versions", "tests/backports.py::VerminBackportsTests::test_is_backport", "tests/backports.py::VerminBackportsTests::test_version_filter" ]
2023-06-18 15:19:50+00:00
4,127
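Conceptually, the fix routes the `zoneinfo` rule through the same backport lookup (`bpv`) the other backports already use: a declared backport replaces the stock minimum-version requirement. A toy sketch of that idea (not vermin's actual code; only the version pairs come from the patch):

```python
# module -> (python2 minimum, python3 minimum); None means unsupported.
RULES = {"zoneinfo": (None, (3, 9))}

# Known backport packages override the stock requirement when declared.
BACKPORTS = {"zoneinfo": (None, (3, 6))}

def requirement(module, declared_backports):
    if module in declared_backports and module in BACKPORTS:
        return BACKPORTS[module]
    return RULES[module]

print(requirement("zoneinfo", set()))         # (None, (3, 9))
print(requirement("zoneinfo", {"zoneinfo"}))  # (None, (3, 6))
```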
netromdk__vermin-234
diff --git a/vermin/arguments.py b/vermin/arguments.py index 71fc6e4..9fc7189 100644 --- a/vermin/arguments.py +++ b/vermin/arguments.py @@ -63,10 +63,11 @@ class Arguments: print(" --target=V | -t=V\n" " Target version that files must abide by. Can be specified once or twice.\n" " A '-' can be appended to match target version or smaller, like '-t=3.5-'.\n" - " If not met Vermin will exit with code 1. Note that the amount of target\n" - " versions must match the amount of minimum required versions detected.\n" - " However, if used in conjunction with --violations, and no rules are\n" - " triggered, it will exit with code 0.\n") + " If not met Vermin will exit with code 1. Vermin will only compare target\n" + " versions with the same major version, so if you do not care about Python\n" + " 2, you can just specify one target for Python 3. However, if used in\n" + " conjunction with --violations, and no rules are triggered, it will exit\n" + " with code 0.\n") print(" --no-target (default)\n" " Don't expect certain target version(s).\n") print(" --processes=N | -p=N\n" diff --git a/vermin/main.py b/vermin/main.py index be7a883..e0425db 100644 --- a/vermin/main.py +++ b/vermin/main.py @@ -7,7 +7,7 @@ from .printing import nprint, vprint from .detection import detect_paths from .processor import Processor from .arguments import Arguments -from .utility import version_strings, dotted_name +from .utility import version_strings, dotted_name, compare_requirements from .backports import Backports def main(): @@ -144,16 +144,10 @@ def main(): # don't fail wrt. targets. all_inconclusive = config.only_show_violations() and len(reqs) > 0 and \ all(req == (0, 0) for req in reqs) - if not all_inconclusive and\ - not (len(reqs) == len(targets) and - all(((exact and target == req) or (not exact and target >= req)) for - ((exact, target), req) in zip(targets, reqs))): + if not all_inconclusive and not compare_requirements(reqs, targets): if not parsable: vers = ["{}{}".format(dotted_name(t), "-" if not e else "") for (e, t) in targets] nprint("Target versions not met: {}".format(version_strings(vers)), config) - if len(targets) < len(reqs): - nprint("Note: Number of specified targets ({}) doesn't match number of detected minimum " - "versions ({}).".format(len(targets), len(reqs)), config) sys.exit(1) sys.exit(0) diff --git a/vermin/utility.py b/vermin/utility.py index 61bb00a..b38ae41 100644 --- a/vermin/utility.py +++ b/vermin/utility.py @@ -189,3 +189,20 @@ def parse_target(target): return None return (exact, elms) + +def compare_requirements(reqs, targets): + maj_to_req = {ver[0]: ver for ver in reqs} + maj_to_target = {ver[0]: (exact, ver) for (exact, ver) in targets} + common_major_versions = set(maj_to_req.keys()) & set(maj_to_target.keys()) + if not common_major_versions: + return False + if set(maj_to_target.keys()) - common_major_versions: + return False # target major version missing from the requirements + for maj in common_major_versions: + exact, target = maj_to_target[maj] + req = maj_to_req[maj] + if exact and target != req: + return False + if not exact and target < req: + return False + return True
netromdk/vermin
a0647aa2d315125c3c83d8789c8d3f086f23c899
diff --git a/tests/general.py b/tests/general.py index f36ba6b..d540df5 100644 --- a/tests/general.py +++ b/tests/general.py @@ -12,6 +12,7 @@ from vermin import combine_versions, InvalidVersionException, detect_paths,\ remove_whitespace, main, sort_line_column, sort_line_column_parsable, version_strings,\ format_title_descs, DEFAULT_PROCESSES from vermin.formats import ParsableFormat +from vermin.utility import compare_requirements from .testutils import VerminTest, current_version, ScopedTemporaryFile, detect, visit, touch, \ working_dir @@ -1054,3 +1055,79 @@ three - three.one def test_default_processes(self): self.assertEqual(cpu_count(), DEFAULT_PROCESSES) + + def test_compare_requirements_py2_and_py3_compatible(self): + reqs = [(2, 7), (3, 6)] + + # User provides only one target + self.assertFalse(compare_requirements(reqs, [(True, (2, 6))])) + self.assertFalse(compare_requirements(reqs, [(False, (2, 6))])) + self.assertTrue(compare_requirements(reqs, [(True, (2, 7))])) + self.assertTrue(compare_requirements(reqs, [(False, (2, 7))])) + self.assertFalse(compare_requirements(reqs, [(True, (3, 3))])) + self.assertFalse(compare_requirements(reqs, [(False, (3, 3))])) + self.assertTrue(compare_requirements(reqs, [(True, (3, 6))])) + self.assertTrue(compare_requirements(reqs, [(False, (3, 6))])) + self.assertFalse(compare_requirements(reqs, [(True, (3, 7))])) + self.assertTrue(compare_requirements(reqs, [(False, (3, 7))])) + + # Missing and invalid targets + self.assertFalse(compare_requirements(reqs, [])) + self.assertFalse(compare_requirements(reqs, [(True, (4, 1))])) + + # User provides multiple valid requirements, return true when both are + # satisfied. + self.assertTrue(compare_requirements(reqs, [(True, (2, 7)), (False, (3, 7))])) + self.assertFalse(compare_requirements(reqs, [(True, (2, 7)), (True, (3, 7))])) + + # User provides valid along with invalid version: fail because the target + # major version is missing + self.assertFalse(compare_requirements(reqs, [(True, (2, 7)), (False, (4, 7))])) + self.assertFalse(compare_requirements(reqs, [(True, (2, 7)), (True, (4, 7))])) + + def test_compare_requirements_py2_only(self): + reqs = [(2, 7)] + + # Correct major version, compare against minor version + self.assertFalse(compare_requirements(reqs, [(True, (2, 6))])) + self.assertFalse(compare_requirements(reqs, [(False, (2, 6))])) + self.assertTrue(compare_requirements(reqs, [(True, (2, 7))])) + self.assertTrue(compare_requirements(reqs, [(False, (2, 7))])) + + # The user specifies the wrong major version: this will always fail + self.assertFalse(compare_requirements(reqs, [(True, (3, 3))])) + self.assertFalse(compare_requirements(reqs, [(False, (3, 3))])) + self.assertFalse(compare_requirements(reqs, [(True, (3, 6))])) + self.assertFalse(compare_requirements(reqs, [(False, (3, 6))])) + self.assertFalse(compare_requirements(reqs, [(True, (3, 7))])) + self.assertFalse(compare_requirements(reqs, [(False, (3, 7))])) + self.assertFalse(compare_requirements(reqs, [(True, (4, 1))])) + + # Missing target: fail + self.assertFalse(compare_requirements(reqs, [])) + + # Multiple targets: fail because one target major version is missing + self.assertFalse(compare_requirements(reqs, [(False, (2, 7)), (False, (3, 6))])) + + def test_compare_requirements_py3_only(self): + reqs = [(3, 6)] + # The user specifies the wrong major version: this will always fail + self.assertFalse(compare_requirements(reqs, [(True, (2, 6))])) + self.assertFalse(compare_requirements(reqs, [(False, (2, 6))])) + 
self.assertFalse(compare_requirements(reqs, [(True, (2, 7))])) + self.assertFalse(compare_requirements(reqs, [(False, (2, 7))])) + self.assertFalse(compare_requirements(reqs, [(True, (4, 1))])) + + # Correct major version, compare against minor version + self.assertFalse(compare_requirements(reqs, [(True, (3, 3))])) + self.assertFalse(compare_requirements(reqs, [(False, (3, 3))])) + self.assertTrue(compare_requirements(reqs, [(True, (3, 6))])) + self.assertTrue(compare_requirements(reqs, [(False, (3, 6))])) + self.assertFalse(compare_requirements(reqs, [(True, (3, 7))])) + self.assertTrue(compare_requirements(reqs, [(False, (3, 7))])) + + # Missing and invalid requirements + self.assertFalse(compare_requirements(reqs, [])) + + # Multiple targets: fail because one target amjor version is missing + self.assertFalse(compare_requirements(reqs, [(False, (2, 7)), (False, (3, 6))]))
Vermin reports that code is incompatible but it is

**Describe the bug**
Vermin reports that my codebase is not compatible with 3.8, but it is. Oddly, it points out:

    Minimum required versions: 2.3, 3.0

Which sounds like it should be fine to me.

**To Reproduce**
```python
if sys.version_info < (3, 8):  # noqa: UP036 # novermin
    errstr = "khal only supports python version 3.8+. Please Upgrade.\n"
    sys.stderr.write("#" * len(errstr) + '\n')
    sys.stderr.write(errstr)
    sys.stderr.write("#" * len(errstr) + '\n')
    sys.exit(1)
```

Vermin fails, indicating that this code doesn't run for the specified version:

```con
> vermin -t=3.8 --violations setup.py
Detecting python files..
Analyzing using 24 processes..
Minimum required versions: 2.3, 3.0
Target versions not met: 3.8
Note: Number of specified targets (1) doesn't match number of detected minimum versions (2).
```

**Expected behavior**
This should work just fine.

**Environment (please complete the following information):**
- Via pre-commit, v1.5.2.
- From Alpine packages, v1.5.2

**Additional context**
Full file is here: https://github.com/pimutils/khal/blob/f5ca8883f2e3d8a403d7a6d16fa4f552b6b69283/setup.py
0.0
[ "tests/general.py::VerminGeneralTests::test_assign_rvalue_attribute", "tests/general.py::VerminGeneralTests::test_combine_versions", "tests/general.py::VerminGeneralTests::test_combine_versions_assert", "tests/general.py::VerminGeneralTests::test_compare_requirements_py2_and_py3_compatible", "tests/general.py::VerminGeneralTests::test_compare_requirements_py2_only", "tests/general.py::VerminGeneralTests::test_compare_requirements_py3_only", "tests/general.py::VerminGeneralTests::test_default_processes", "tests/general.py::VerminGeneralTests::test_detect_hidden_paths", "tests/general.py::VerminGeneralTests::test_detect_hidden_paths_incrementally", "tests/general.py::VerminGeneralTests::test_detect_min_version", "tests/general.py::VerminGeneralTests::test_detect_min_version_assert", "tests/general.py::VerminGeneralTests::test_detect_nonexistent_paths_incrementally", "tests/general.py::VerminGeneralTests::test_detect_nonexistent_paths_with_dot_incrementally", "tests/general.py::VerminGeneralTests::test_detect_paths", "tests/general.py::VerminGeneralTests::test_detect_paths_incrementally", "tests/general.py::VerminGeneralTests::test_detect_top_level_paths_incrementally", "tests/general.py::VerminGeneralTests::test_detect_vermin_min_versions", "tests/general.py::VerminGeneralTests::test_detect_vermin_min_versions_parsable", "tests/general.py::VerminGeneralTests::test_detect_vermin_paths_all_exts", "tests/general.py::VerminGeneralTests::test_detect_vermin_paths_directly", "tests/general.py::VerminGeneralTests::test_detect_vermin_paths_no_invalid_exts", "tests/general.py::VerminGeneralTests::test_detect_without_config", "tests/general.py::VerminGeneralTests::test_dotted_name", "tests/general.py::VerminGeneralTests::test_dotted_name_assert", "tests/general.py::VerminGeneralTests::test_exclude_directory_regex", "tests/general.py::VerminGeneralTests::test_exclude_pyi_regex", "tests/general.py::VerminGeneralTests::test_exclude_regex_relative", "tests/general.py::VerminGeneralTests::test_format", "tests/general.py::VerminGeneralTests::test_format_title_descs", "tests/general.py::VerminGeneralTests::test_ignore_members_when_user_defined_classes", "tests/general.py::VerminGeneralTests::test_ignore_members_when_user_defined_funcs", "tests/general.py::VerminGeneralTests::test_ignore_modules_when_user_defined_classes", "tests/general.py::VerminGeneralTests::test_ignore_modules_when_user_defined_funcs", "tests/general.py::VerminGeneralTests::test_ignore_non_top_level_imports", "tests/general.py::VerminGeneralTests::test_main_full_usage", "tests/general.py::VerminGeneralTests::test_main_no_args", "tests/general.py::VerminGeneralTests::test_main_no_paths", "tests/general.py::VerminGeneralTests::test_main_no_rules_hit", "tests/general.py::VerminGeneralTests::test_main_no_rules_hit_target_not_met_violations_mode", "tests/general.py::VerminGeneralTests::test_main_parsable_dont_ignore_paths_with_colon_in_drive_part", "tests/general.py::VerminGeneralTests::test_main_parsable_has_last_results_line", "tests/general.py::VerminGeneralTests::test_main_print_version", "tests/general.py::VerminGeneralTests::test_main_print_versions_range", "tests/general.py::VerminGeneralTests::test_main_target_not_met", "tests/general.py::VerminGeneralTests::test_member_class", "tests/general.py::VerminGeneralTests::test_member_constant", "tests/general.py::VerminGeneralTests::test_member_function", "tests/general.py::VerminGeneralTests::test_member_kwargs", "tests/general.py::VerminGeneralTests::test_mod_inverse_pow", 
"tests/general.py::VerminGeneralTests::test_modules", "tests/general.py::VerminGeneralTests::test_pessimistic_syntax_error", "tests/general.py::VerminGeneralTests::test_probably_python_file", "tests/general.py::VerminGeneralTests::test_process_file_not_Found", "tests/general.py::VerminGeneralTests::test_process_file_using_backport", "tests/general.py::VerminGeneralTests::test_process_invalid_versions", "tests/general.py::VerminGeneralTests::test_process_syntax_error", "tests/general.py::VerminGeneralTests::test_processor_argparse_backport_spawn_or_fork", "tests/general.py::VerminGeneralTests::test_processor_incompatible", "tests/general.py::VerminGeneralTests::test_processor_indent_show_output_text", "tests/general.py::VerminGeneralTests::test_processor_maybe_annotations", "tests/general.py::VerminGeneralTests::test_processor_maybe_annotations_default", "tests/general.py::VerminGeneralTests::test_processor_separately_incompatible", "tests/general.py::VerminGeneralTests::test_processor_used_novermin", "tests/general.py::VerminGeneralTests::test_processor_value_error", "tests/general.py::VerminGeneralTests::test_remove_whitespace", "tests/general.py::VerminGeneralTests::test_reverse_range", "tests/general.py::VerminGeneralTests::test_sort_line_column", "tests/general.py::VerminGeneralTests::test_sort_line_column_parsable", "tests/general.py::VerminGeneralTests::test_strftime_directives", "tests/general.py::VerminGeneralTests::test_user_defined", "tests/general.py::VerminGeneralTests::test_version_strings", "tests/general.py::VerminGeneralTests::test_version_strings_assert", "tests/general.py::VerminGeneralTests::test_visit_has_output_text", "tests/general.py::VerminGeneralTests::test_visit_output_text_has_correct_lines", "tests/general.py::VerminGeneralTests::test_visit_output_text_has_correct_lines_parsable", "tests/general.py::VerminGeneralTests::test_visit_without_config" ]
[]
2023-10-10 17:33:45+00:00
4,128
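With `compare_requirements`, each target is compared only against the detected minimum sharing its major version, so a single Python 3 target no longer trips over an additional Python 2 minimum. A short usage example against the patched `vermin.utility`, using the minimums from the khal report above:

```python
from vermin.utility import compare_requirements

# Detected minimums from the khal report: 2.3 (Python 2) and 3.0 (Python 3).
reqs = [(2, 3), (3, 0)]

# (False, ...) is a trailing-dash target like "-t=3.8-": the detected
# minimum may be anything up to the target. Only the major-version-3
# entry is compared; the Python 2 minimum no longer causes a mismatch.
assert compare_requirements(reqs, [(False, (3, 8))])

# (True, ...) is a plain "-t=3.8" target and still demands an exact match.
assert not compare_requirements(reqs, [(True, (3, 8))])
```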
netromdk__vermin-8
diff --git a/.travis.yml b/.travis.yml index ef67b64..2726025 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,8 +16,8 @@ python: install: - pip install -U pip virtualenv script: -- if [[ $TRAVIS_PYTHON_VERSION != 3.2 ]]; then make setup-venv setup-coverage test-coverage; else make test; fi -- if [[ $TRAVIS_PYTHON_VERSION > 3.2 ]]; then make setup-misc; source .venv/bin/activate; make check; fi -- if [[ $TRAVIS_PYTHON_VERSION > 3.2 && $TRAVIS_PYTHON_VERSION < 3.7 ]]; then make setup-bandit; source .venv/bin/activate; make security-check; fi +- if [[ $TRAVIS_PYTHON_VERSION != 3.2 && $TRAVIS_PYTHON_VERSION != 3.3 ]]; then make setup-venv setup-coverage test-coverage; else make test; fi +- if [[ $TRAVIS_PYTHON_VERSION > 3.3 ]]; then make setup-misc; source .venv/bin/activate; make check; fi +- if [[ $TRAVIS_PYTHON_VERSION > 3.3 && $TRAVIS_PYTHON_VERSION < 3.7 ]]; then make setup-bandit; source .venv/bin/activate; make security-check; fi after_success: -- if [[ $TRAVIS_PYTHON_VERSION != 3.2 ]]; then make coveralls; fi +- if [[ $TRAVIS_PYTHON_VERSION != 3.2 && $TRAVIS_PYTHON_VERSION != 3.3 ]]; then make coveralls; fi diff --git a/vermin/rules.py b/vermin/rules.py index 947d5b9..d54363e 100644 --- a/vermin/rules.py +++ b/vermin/rules.py @@ -75,6 +75,7 @@ MOD_REQS = { "numbers": (2.6, 3.0), "optparse": (2.3, 3.0), "ossaudiodev": (2.3, 3.0), + "pathlib": (None, 3.4), "pickletools": (2.3, 3.0), "pkgutil": (2.3, 3.0), "platform": (2.3, 3.0),
netromdk/vermin
653670b33ca9a54c95bd1ae06c56ce7474197a82
diff --git a/tests/module.py b/tests/module.py index b8316fa..9900459 100644 --- a/tests/module.py +++ b/tests/module.py @@ -349,3 +349,6 @@ class VerminModuleTests(VerminTest): def test_venv(self): self.assertOnlyIn(3.3, detect("import venv")) + + def test_pathlib(self): + self.assertOnlyIn(3.4, detect("import pathlib"))
pathlib module missing in checks

**Describe the bug**
The pathlib module has been available since Python 3.4, but it is missing from the checks.

**To Reproduce**
Run Vermin on code that uses the pathlib module; it is not assigned a minimum version of 3.4.

**Expected behavior**
The minimum version should then be Python 3.4, with no Python 2 support.

**Environment (please complete the following information):**
Vermin 0.4.4
0.0
[ "tests/module.py::VerminModuleTests::test_pathlib" ]
[ "tests/module.py::VerminModuleTests::test_ConfigParser", "tests/module.py::VerminModuleTests::test_DocXMLRPCServer", "tests/module.py::VerminModuleTests::test_HTMLParser", "tests/module.py::VerminModuleTests::test_Queue", "tests/module.py::VerminModuleTests::test_SimpleXMLRPCServer", "tests/module.py::VerminModuleTests::test_SocketServer", "tests/module.py::VerminModuleTests::test_Tkinter", "tests/module.py::VerminModuleTests::test___builtin__", "tests/module.py::VerminModuleTests::test___future__", "tests/module.py::VerminModuleTests::test__dummy_thread", "tests/module.py::VerminModuleTests::test__markupbase", "tests/module.py::VerminModuleTests::test__winreg", "tests/module.py::VerminModuleTests::test_abc", "tests/module.py::VerminModuleTests::test_argparse", "tests/module.py::VerminModuleTests::test_ast", "tests/module.py::VerminModuleTests::test_asyncio", "tests/module.py::VerminModuleTests::test_atexit", "tests/module.py::VerminModuleTests::test_builtins", "tests/module.py::VerminModuleTests::test_bz2", "tests/module.py::VerminModuleTests::test_cProfile", "tests/module.py::VerminModuleTests::test_cgitb", "tests/module.py::VerminModuleTests::test_collections", "tests/module.py::VerminModuleTests::test_configparser", "tests/module.py::VerminModuleTests::test_contextlib", "tests/module.py::VerminModuleTests::test_cookielib", "tests/module.py::VerminModuleTests::test_copy_reg", "tests/module.py::VerminModuleTests::test_copyreg", "tests/module.py::VerminModuleTests::test_csv", "tests/module.py::VerminModuleTests::test_ctypes", "tests/module.py::VerminModuleTests::test_datetime", "tests/module.py::VerminModuleTests::test_dbm_io", "tests/module.py::VerminModuleTests::test_dbm_ndbm", "tests/module.py::VerminModuleTests::test_dbm_os", "tests/module.py::VerminModuleTests::test_dbm_struct", "tests/module.py::VerminModuleTests::test_dbm_sys", "tests/module.py::VerminModuleTests::test_dbm_whichdb", "tests/module.py::VerminModuleTests::test_decimal", "tests/module.py::VerminModuleTests::test_difflib", "tests/module.py::VerminModuleTests::test_dummy_thread", "tests/module.py::VerminModuleTests::test_dummy_threading", "tests/module.py::VerminModuleTests::test_email", "tests/module.py::VerminModuleTests::test_email_charset", "tests/module.py::VerminModuleTests::test_email_contentmanager", "tests/module.py::VerminModuleTests::test_email_header", "tests/module.py::VerminModuleTests::test_email_headerregistry", "tests/module.py::VerminModuleTests::test_email_policy", "tests/module.py::VerminModuleTests::test_faulthandler", "tests/module.py::VerminModuleTests::test_fractions", "tests/module.py::VerminModuleTests::test_functools", "tests/module.py::VerminModuleTests::test_future_builtins", "tests/module.py::VerminModuleTests::test_hashlib", "tests/module.py::VerminModuleTests::test_heapq", "tests/module.py::VerminModuleTests::test_hmac", "tests/module.py::VerminModuleTests::test_hotshot", "tests/module.py::VerminModuleTests::test_html", "tests/module.py::VerminModuleTests::test_htmlentitydefs", "tests/module.py::VerminModuleTests::test_http", "tests/module.py::VerminModuleTests::test_http_cookiejar", "tests/module.py::VerminModuleTests::test_importlib", "tests/module.py::VerminModuleTests::test_inspect", "tests/module.py::VerminModuleTests::test_io", "tests/module.py::VerminModuleTests::test_ipaddress", "tests/module.py::VerminModuleTests::test_itertools", "tests/module.py::VerminModuleTests::test_json", "tests/module.py::VerminModuleTests::test_logging", "tests/module.py::VerminModuleTests::test_lzma", 
"tests/module.py::VerminModuleTests::test_markupbase", "tests/module.py::VerminModuleTests::test_md5", "tests/module.py::VerminModuleTests::test_modulefinder", "tests/module.py::VerminModuleTests::test_msilib", "tests/module.py::VerminModuleTests::test_multiprocessing", "tests/module.py::VerminModuleTests::test_new", "tests/module.py::VerminModuleTests::test_numbers", "tests/module.py::VerminModuleTests::test_optparse", "tests/module.py::VerminModuleTests::test_ossaudiodev", "tests/module.py::VerminModuleTests::test_pickletools", "tests/module.py::VerminModuleTests::test_pkgutil", "tests/module.py::VerminModuleTests::test_platform", "tests/module.py::VerminModuleTests::test_pydoc", "tests/module.py::VerminModuleTests::test_queue", "tests/module.py::VerminModuleTests::test_repr", "tests/module.py::VerminModuleTests::test_reprlib", "tests/module.py::VerminModuleTests::test_runpy", "tests/module.py::VerminModuleTests::test_secrets", "tests/module.py::VerminModuleTests::test_sets", "tests/module.py::VerminModuleTests::test_shlex", "tests/module.py::VerminModuleTests::test_socketserver", "tests/module.py::VerminModuleTests::test_spwd", "tests/module.py::VerminModuleTests::test_sqlite3", "tests/module.py::VerminModuleTests::test_ssl", "tests/module.py::VerminModuleTests::test_string_letters", "tests/module.py::VerminModuleTests::test_string_lowercase", "tests/module.py::VerminModuleTests::test_string_uppercase", "tests/module.py::VerminModuleTests::test_stringprep", "tests/module.py::VerminModuleTests::test_subprocess", "tests/module.py::VerminModuleTests::test_sysconfig", "tests/module.py::VerminModuleTests::test_tarfile", "tests/module.py::VerminModuleTests::test_textwrap", "tests/module.py::VerminModuleTests::test_timeit", "tests/module.py::VerminModuleTests::test_tkinter", "tests/module.py::VerminModuleTests::test_tracemalloc", "tests/module.py::VerminModuleTests::test_typing", "tests/module.py::VerminModuleTests::test_unittest", "tests/module.py::VerminModuleTests::test_unittest_mock", "tests/module.py::VerminModuleTests::test_urllib2", "tests/module.py::VerminModuleTests::test_uuid", "tests/module.py::VerminModuleTests::test_venv", "tests/module.py::VerminModuleTests::test_warnings", "tests/module.py::VerminModuleTests::test_weakref", "tests/module.py::VerminModuleTests::test_winreg", "tests/module.py::VerminModuleTests::test_wsgiref", "tests/module.py::VerminModuleTests::test_xmlrpc", "tests/module.py::VerminModuleTests::test_xmlrpc_client", "tests/module.py::VerminModuleTests::test_xmlrpc_server", "tests/module.py::VerminModuleTests::test_xmlrpclib", "tests/module.py::VerminModuleTests::test_zipimport" ]
2018-07-13 09:54:23+00:00
4,129
netsiphd__netrd-264
diff --git a/netrd/utilities/entropy.py b/netrd/utilities/entropy.py index dff68e2..9d6f3d7 100644 --- a/netrd/utilities/entropy.py +++ b/netrd/utilities/entropy.py @@ -15,7 +15,7 @@ from scipy.stats import entropy as sp_entropy def js_divergence(P, Q): - """Jenson-Shannon divergence between `P` and `Q`. + """Jensen-Shannon divergence between `P` and `Q`. Parameters ---------- @@ -35,8 +35,10 @@ def js_divergence(P, Q): return 0.5 * (sp_entropy(P, M, base=2) + sp_entropy(Q, M, base=2)) -def entropy(var): - """Return the Shannon entropy of a variable. +def entropy_from_seq(var): + """Return the Shannon entropy of a variable. This differs from + Scipy's entropy by taking a sequence of observations as input + rather than a histogram or probability distribution. Parameters ---------- @@ -65,7 +67,7 @@ def joint_entropy(data): Returns ------- float - Joint entrpoy of the variables of interests. + Joint entropy of the variables of interests. Notes -----
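For context, a hedged usage sketch of the `js_divergence` helper touched by the patch above. Only the return line is visible in the diff context, so the midpoint `M = (P + Q) / 2` is assumed from the standard Jensen-Shannon definition rather than taken from the source; inputs are assumed to be normalized probability distributions of equal length.

```python
import numpy as np
from scipy.stats import entropy as sp_entropy

def js_divergence(P, Q):
    # Assumed midpoint distribution (standard JS construction; not
    # visible in the diff context lines above).
    M = 0.5 * (P + Q)
    # Matches the visible context line: mean of the two KL divergences
    # against M, in bits (base=2).
    return 0.5 * (sp_entropy(P, M, base=2) + sp_entropy(Q, M, base=2))

P = np.array([0.5, 0.5])
Q = np.array([0.9, 0.1])
print(js_divergence(P, Q))  # small positive value; 0.0 when P == Q
```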
netsiphd/netrd
fa2c163376a88ed72ba15649190b1a2b23b1cb9a
diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 8837207..5b2ab79 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -8,7 +8,7 @@ Test utility functions. import numpy as np from netrd.utilities.entropy import categorized_data -from netrd.utilities.entropy import entropy, joint_entropy, conditional_entropy +from netrd.utilities.entropy import entropy_from_seq, joint_entropy, conditional_entropy from netrd.utilities import threshold @@ -89,7 +89,7 @@ def test_entropies(): """ data = np.array([[1, 0, 0, 1, 1, 0, 1, 0], [0, 1, 0, 1, 1, 0, 1, 0]]).T - H = entropy(data[:, 0]) + H = entropy_from_seq(data[:, 0]) H_joint = joint_entropy(data) H_cond = conditional_entropy(data[:, 1, np.newaxis], data[:, 0, np.newaxis])
Clarify usage of utilities.entropy.entropy? When I was refactoring the Dmeasure code, I looked at using our version of entropy instead of scipy's, but one is not a drop-in replacement for the other because they take different inputs: one takes a histogram and the other takes a sequence of values. It would touch a lot of files, but renaming our `entropy` to something like `entropy_from_sequence` would remove this ambiguity.
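A minimal sketch of the distinction this report describes, following the `entropy_from_seq` name adopted in the patch above. This is an illustrative re-implementation, not the library's exact code: scipy's `entropy` expects a histogram or probability distribution, while the renamed helper takes a raw sequence of observations and builds the distribution itself.

```python
import numpy as np
from collections import Counter
from scipy.stats import entropy as sp_entropy

def entropy_from_seq(var):
    """Shannon entropy of a sequence of observations (illustrative)."""
    # Count occurrences, then normalize into a probability distribution
    # before handing off to scipy's histogram-based entropy.
    counts = np.array(list(Counter(var).values()), dtype=float)
    return sp_entropy(counts / counts.sum(), base=2)

seq = [1, 0, 0, 1, 1, 0, 1, 0]   # raw observations
hist = [0.5, 0.5]                 # the same data as a distribution
print(entropy_from_seq(seq))      # 1.0 bit
print(sp_entropy(hist, base=2))   # 1.0 bit -- same value, different input
```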
0.0
[ "tests/test_utilities.py::test_thresholds", "tests/test_utilities.py::test_categorized_data", "tests/test_utilities.py::test_entropies" ]
[]
2019-10-14 15:14:50+00:00
4,130