Dataset schema (one row per function; column, dtype, observed range):

    body_hash                string    length 64 to 64
    body                     string    length 23 to 109k
    docstring                string    length 1 to 57k
    path                     string    length 4 to 198
    name                     string    length 1 to 115
    repository_name          string    length 7 to 111
    repository_stars         float64   0 to 191k
    lang                     string    1 distinct value (python)
    body_without_docstring   string    length 14 to 108k
    unified                  string    length 45 to 133k
f120e9d3bd406f7bd2c0bf04b4a12fe3e075a45eb907f3288d513bd82f913e5c
def test_does_not_support(self):
    'Test OBJ_DOES_NOT_SUPPORT_RE.'
    msgs = [("'range' object does not support item assignment", ('range', 'item assignment')),
            ("'str' object doesn't support item deletion", ('str', 'item deletion')),
            ("'set' object does not support indexing", ('set', 'indexing'))]
    for (msg, groups) in msgs:
        results = (groups, dict())
        self.re_matches(msg, re.OBJ_DOES_NOT_SUPPORT_RE, results)
Test OBJ_DOES_NOT_SUPPORT_RE.
didyoumean/didyoumean_re_tests.py
test_does_not_support
SylvainDe/DidYouMean-Python
76
python
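The test above exercises OBJ_DOES_NOT_SUPPORT_RE only through the re_matches helper. As a rough, self-contained illustration of what such a pattern has to do (the real pattern lives in didyoumean_re.py and may be written differently), a two-group regex over the same messages could look like this:

import re

# Hypothetical stand-in for re.OBJ_DOES_NOT_SUPPORT_RE; it captures the type
# name and the unsupported operation as two groups.
OBJ_DOES_NOT_SUPPORT = re.compile(
    r"^'(\w+)' object (?:does not|doesn't) support (.+)$")

m = OBJ_DOES_NOT_SUPPORT.match("'range' object does not support item assignment")
assert m is not None
assert m.groups() == ('range', 'item assignment')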
cf9ed6c1852f5082255275f7e42e46c2c63e78ee3babcfa29e09f7226b6658dc
def test_cant_convert(self):
    'Test CANT_CONVERT_RE.'
    msg = "Can't convert 'int' object to str implicitly"
    groups = ('int', 'str')
    results = (groups, dict())
    self.re_matches(msg, re.CANT_CONVERT_RE, results)
Test CANT_CONVERT_RE.
didyoumean/didyoumean_re_tests.py
test_cant_convert
SylvainDe/DidYouMean-Python
76
python
a9254ede12363e59f5c0cb900a5819d76a71cc47bba5cd6c56b86cf35bef45e7
def test_must_be_type1_not_type2(self):
    'Test MUST_BE_TYPE1_NOT_TYPE2_RE.'
    msg = 'must be str, not int'
    groups = ('str', 'int')
    results = (groups, dict())
    self.re_matches(msg, re.MUST_BE_TYPE1_NOT_TYPE2_RE, results)
Test MUST_BE_TYPE1_NOT_TYPE2_RE.
didyoumean/didyoumean_re_tests.py
test_must_be_type1_not_type2
SylvainDe/DidYouMean-Python
76
python
14fbee6a2a0e9abdd9a6754ce5386df95a64ef6b2547630198b922271f82fa33
def test_cannot_concat(self):
    'Test CANNOT_CONCAT_RE.'
    msg = "cannot concatenate 'str' and 'int' objects"
    groups = ('str', 'int')
    results = (groups, dict())
    self.re_matches(msg, re.CANNOT_CONCAT_RE, results)
Test CANNOT_CONCAT_RE.
didyoumean/didyoumean_re_tests.py
test_cannot_concat
SylvainDe/DidYouMean-Python
76
python
4939a0f2467af5973671e24f138528add8507fa6d1269aa02b42bc2d46b9b072
def test_only_concat(self):
    'Test ONLY_CONCAT_RE.'
    msg = 'can only concatenate list (not "set") to list'
    self.re_matches(msg, re.ONLY_CONCAT_RE, NO_GROUP)
Test ONLY_CONCAT_RE.
didyoumean/didyoumean_re_tests.py
test_only_concat
SylvainDe/DidYouMean-Python
76
python
dbc0fdf1ac339f93301ce39335c76c1a151c39a02147294722a73ca574541f2d
def test_unsupported_operand(self):
    'Test UNSUPPORTED_OP_RE.'
    msgs = [("unsupported operand type(s) for +: 'int' and 'str'", '+', 'int', 'str'),
            ("unsupported operand type(s) for -: 'builtin_function' and 'int'", '-', 'builtin_function', 'int')]
    for (msg, op, t1, t2) in msgs:
        groups = (op, t1, t2)
        named_groups = {'op': op, 't1': t1, 't2': t2}
        results = (groups, named_groups)
        self.re_matches(msg, re.UNSUPPORTED_OP_RE, results)
Test UNSUPPORTED_OP_RE.
didyoumean/didyoumean_re_tests.py
test_unsupported_operand
SylvainDe/DidYouMean-Python
76
python
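UNSUPPORTED_OP_RE is checked both positionally (groups) and by name (named_groups). A hedged sketch of how a pattern with named groups yields both views through Python's re module, using an illustrative pattern rather than the library's actual one:

import re

# Illustrative pattern only; the real UNSUPPORTED_OP_RE may differ.
UNSUPPORTED_OP = re.compile(
    r"^unsupported operand type\(s\) for (?P<op>.+): '(?P<t1>\w+)' and '(?P<t2>\w+)'$")

m = UNSUPPORTED_OP.match("unsupported operand type(s) for +: 'int' and 'str'")
assert m.groups() == ('+', 'int', 'str')
assert m.groupdict() == {'op': '+', 't1': 'int', 't2': 'str'}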
a6e1af2e7298febf304f3c78b3b69d12d9c9f595c890859ffbd6fc67d4e0dd92
def test_unsupported_operand_sugg(self):
    'Test UNSUPPORTED_OP_SUGG_RE.'
    msgs = [('unsupported operand type(s) for >>: \'builtin_function_or_method\' and \'int\'. Did you mean "print(<message>, file=<output_stream>)"?',
             '>>', 'builtin_function_or_method', 'int', 'print(<message>, file=<output_stream>)'),
            ('unsupported operand type(s) for -: \'builtin_function\' and \'int\'. Did you mean "print(<-number>)"?',
             '-', 'builtin_function', 'int', 'print(<-number>)')]
    for (msg, op, t1, t2, sugg) in msgs:
        groups = (op, t1, t2, sugg)
        named_groups = {'op': op, 't1': t1, 't2': t2, 'sugg': sugg}
        results = (groups, named_groups)
        self.re_matches(msg, re.UNSUPPORTED_OP_SUGG_RE, results)
Test UNSUPPORTED_OP_SUGG_RE.
didyoumean/didyoumean_re_tests.py
test_unsupported_operand_sugg
SylvainDe/DidYouMean-Python
76
python
ddb796cd1af60f3e9b2e8541a2d1eb771e883736f241c7a66d9b7e347c5c4edc
def test_bad_operand_unary(self):
    'Test BAD_OPERAND_UNARY_RE.'
    msgs = [("bad operand type for unary ~: 'set'", ('~', 'set')),
            ("bad operand type for abs(): 'set'", ('abs()', 'set')),
            ("unsupported operand type for unary neg: 'Foobar'", ('neg', 'Foobar'))]
    for (msg, groups) in msgs:
        results = (groups, dict())
        self.re_matches(msg, re.BAD_OPERAND_UNARY_RE, results)
Test BAD_OPERAND_UNARY_RE.
didyoumean/didyoumean_re_tests.py
test_bad_operand_unary
SylvainDe/DidYouMean-Python
76
python
c273f34de4208883721c635b219d3f49571f2403ea1e3cadad9a20286460b553
def test_not_callable(self):
    'Test NOT_CALLABLE_RE.'
    msg = "'list' object is not callable"
    groups = ('list',)
    results = (groups, dict())
    self.re_matches(msg, re.NOT_CALLABLE_RE, results)
Test NOT_CALLABLE_RE.
didyoumean/didyoumean_re_tests.py
test_not_callable
SylvainDe/DidYouMean-Python
76
python
bdc4f14d448cb0c59aa38dac400e0b7d60ce0dff6eca790be7f74fe3a23894bf
def test_descriptor_requires(self):
    'Test DESCRIPT_REQUIRES_TYPE_RE.'
    msgs = ["descriptor 'add' requires a 'set' object but received a 'int'",
            "descriptor 'add' for 'set' objects doesn't apply to 'int' object",
            "descriptor 'add' for 'set' objects doesn't apply to a 'int' object"]
    for msg in msgs:
        groups = ('add', 'set', 'int')
        results = (groups, dict())
        self.re_matches(msg, re.DESCRIPT_REQUIRES_TYPE_RE, results)
Test DESCRIPT_REQUIRES_TYPE_RE.
didyoumean/didyoumean_re_tests.py
test_descriptor_requires
SylvainDe/DidYouMean-Python
76
python
1ff23fdac0ae0d3999fb4dce8397b35fbd684a2e88b43342e42ef042f070660a
def test_argument_not_iterable(self):
    'Test ARG_NOT_ITERABLE_RE.'
    msgs = ["argument of type 'type' is not iterable",
            "'type' object is not iterable"]
    groups = ('type',)
    results = (groups, dict())
    for msg in msgs:
        self.re_matches(msg, re.ARG_NOT_ITERABLE_RE, results)
Test ARG_NOT_ITERABLE_RE.
didyoumean/didyoumean_re_tests.py
test_argument_not_iterable
SylvainDe/DidYouMean-Python
76
python
28e885f410bcf8369303f050af5245758ba9add35105a0cbcacfd89708410dbd
def test_must_be_called_with_instance(self):
    'Test MUST_BE_CALLED_WITH_INST_RE.'
    msg = 'unbound method add() must be called with set instance as first argument (got int instance instead)'
    groups = ('add', 'set', 'int')
    results = (groups, dict())
    self.re_matches(msg, re.MUST_BE_CALLED_WITH_INST_RE, results)
Test MUST_BE_CALLED_WITH_INST_RE.
didyoumean/didyoumean_re_tests.py
test_must_be_called_with_instance
SylvainDe/DidYouMean-Python
76
python
f913676dae4a96161abb6143b76c35211a84cfffc7d7eefac984f21ccc5db9c1
def test_object_has_no(self):
    'Test OBJECT_HAS_NO_FUNC_RE.'
    msgs = {'len': "object of type 'generator' has no len()",
            'length': "'generator' has no length"}
    for (name, msg) in msgs.items():
        groups = ('generator', name)
        results = (groups, dict())
        self.re_matches(msg, re.OBJECT_HAS_NO_FUNC_RE, results)
Test OBJECT_HAS_NO_FUNC_RE.
didyoumean/didyoumean_re_tests.py
test_object_has_no
SylvainDe/DidYouMean-Python
76
python
7611e00ad5bd6b826c617ac95428d564a0403545836740898b56f04c271c80f3
def test_instance_has_no_meth(self):
    'Test INSTANCE_HAS_NO_METH_RE.'
    msg = 'CustomClass instance has no __call__ method'
    (class_, method) = ('CustomClass', '__call__')
    groups = (class_, method)
    results = (groups, dict())
    self.re_matches(msg, re.INSTANCE_HAS_NO_METH_RE, results)
Test INSTANCE_HAS_NO_METH_RE.
didyoumean/didyoumean_re_tests.py
test_instance_has_no_meth
SylvainDe/DidYouMean-Python
76
python
8806ad2ec20b426e90e86f17d5dbdaaee9d185610ba4687bbc4bd93960744950
def test_nobinding_nonlocal(self):
    'Test NO_BINDING_NONLOCAL_RE.'
    msg = "no binding for nonlocal 'foo' found"
    groups = ('foo',)
    results = (groups, dict())
    self.re_matches(msg, re.NO_BINDING_NONLOCAL_RE, results)
Test NO_BINDING_NONLOCAL_RE.
didyoumean/didyoumean_re_tests.py
test_nobinding_nonlocal
SylvainDe/DidYouMean-Python
76
python
3f3c504fd84ca91e26c323f1710b4cfb9e5dc23e3873f49cf8b737da4e337c06
def test_nonlocal_at_module_level(self):
    'Test NONLOCAL_AT_MODULE_RE.'
    msg = 'nonlocal declaration not allowed at module level'
    self.re_matches(msg, re.NONLOCAL_AT_MODULE_RE, NO_GROUP)
Test NONLOCAL_AT_MODULE_RE.
didyoumean/didyoumean_re_tests.py
test_nonlocal_at_module_level
SylvainDe/DidYouMean-Python
76
python
87d345c181f7c28646dde4059f42fef53179e7db5255b5756aafe1abbc172413
def test_unexpected_eof(self):
    'Test UNEXPECTED_EOF_RE.'
    msg = 'unexpected EOF while parsing'
    self.re_matches(msg, re.UNEXPECTED_EOF_RE, NO_GROUP)
Test UNEXPECTED_EOF_RE.
didyoumean/didyoumean_re_tests.py
test_unexpected_eof
SylvainDe/DidYouMean-Python
76
python
8529b59cd3bf8687fbd7845d25692ca64fb5af4357588bac9084b7d9cabb26ec
def test_nosuchfile(self):
    'Test NO_SUCH_FILE_RE.'
    msg = 'No such file or directory'
    self.re_matches(msg, re.NO_SUCH_FILE_RE, NO_GROUP)
Test NO_SUCH_FILE_RE.
didyoumean/didyoumean_re_tests.py
test_nosuchfile
SylvainDe/DidYouMean-Python
76
python
2049d9c399ec2bb8f8e329042d38b6721a8719f6c1aac8ea348207d05f07b802
def test_timedata_does_not_match_format(self):
    'Test TIME_DATA_DOES_NOT_MATCH_FORMAT_RE.'
    msg = "time data '%d %b %y' does not match format '30 Nov 00'"
    groups = ("'%d %b %y'", "'30 Nov 00'")
    named_groups = {'format': "'30 Nov 00'", 'timedata': "'%d %b %y'"}
    results = (groups, named_groups)
    self.re_matches(msg, re.TIME_DATA_DOES_NOT_MATCH_FORMAT_RE, results)
Test TIME_DATA_DOES_NOT_MATCH_FORMAT_RE.
didyoumean/didyoumean_re_tests.py
test_timedata_does_not_match_format
SylvainDe/DidYouMean-Python
76
python
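The message parsed here is what CPython's strptime raises when the time string and the format string are passed in the wrong order, which is the mistake the suggestion machinery targets. A quick reproduction (exact wording can vary between Python versions):

from datetime import datetime

try:
    datetime.strptime('%d %b %y', '30 Nov 00')   # arguments swapped by mistake
except ValueError as exc:
    print(exc)  # time data '%d %b %y' does not match format '30 Nov 00'

# The intended call:
print(datetime.strptime('30 Nov 00', '%d %b %y'))  # 2000-11-30 00:00:00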
b1192d38670e5117a3199854769c5d455d30eeb3ba51ee5b44b7958fea66cbc0
def test_invalid_token(self):
    'Test INVALID_TOKEN_RE.'
    msg = 'invalid token'
    self.re_matches(msg, re.INVALID_TOKEN_RE, NO_GROUP)
Test INVALID_TOKEN_RE.
didyoumean/didyoumean_re_tests.py
test_invalid_token
SylvainDe/DidYouMean-Python
76
python
80dcfce5d126b6a6d368116ca30ce4e4e6cfca4454da3f4cf42775617c19a8fd
def test_leading_zeros(self):
    'Test LEADING_ZEROS_RE.'
    msg = 'leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal integers'
    self.re_matches(msg, re.LEADING_ZEROS_RE, NO_GROUP)
Test LEADING_ZEROS_RE.
didyoumean/didyoumean_re_tests.py
test_leading_zeros
SylvainDe/DidYouMean-Python
76
python
2636ab935238fbd0384541e85e2ba6ba8f02c66b0ee4f70de5cd1bc251ee41ba
def test_exception_group_parenthesized(self):
    'Test EXC_GROUP_PARENTH_RE.'
    msgs = ['exception group must be parenthesized',
            'multiple exception types must be parenthesized']
    for msg in msgs:
        self.re_matches(msg, re.EXC_GROUP_PARENTH_RE, NO_GROUP)
Test EXC_GROUP_PARENTH_RE.
didyoumean/didyoumean_re_tests.py
test_exception_group_parenthesized
SylvainDe/DidYouMean-Python
76
python
ccc92d107f02e3e6d5a77d2bf47c87dd08c2a3934d52a13fd062e20ff834d1e1
def test_exc_must_derive_from(self):
    'Test EXC_MUST_DERIVE_FROM_RE.'
    msgs = ['exceptions must be old-style classes or derived from BaseException, not NoneType',
            'exceptions must derive from BaseException']
    for msg in msgs:
        self.re_matches(msg, re.EXC_MUST_DERIVE_FROM_RE, NO_GROUP)
Test EXC_MUST_DERIVE_FROM_RE.
didyoumean/didyoumean_re_tests.py
test_exc_must_derive_from
SylvainDe/DidYouMean-Python
76
python
1670239aa41ddf44b207fee72e12b5a5f019e76b4c3b6b96fabf86bcc95d5ceb
def test_unorderable_types(self):
    'Test UNORDERABLE_TYPES_RE.'
    msgs = ['unorderable types: str() > int()',
            'unorderable types: FoobarClass() <= int()',
            'unorderable types: FoobarClass > FoobarClass']
    for msg in msgs:
        self.re_matches(msg, re.UNORDERABLE_TYPES_RE, NO_GROUP)
Test UNORDERABLE_TYPES_RE.
didyoumean/didyoumean_re_tests.py
test_unorderable_types
SylvainDe/DidYouMean-Python
76
python
d16fd15b94e2b8a5ea3e7dc8ed9eca57b177105ca6619d081bbc77be48ae304e
def test_op_not_supported_between_instances(self):
    'Test OP_NOT_SUPP_BETWEEN_INSTANCES_RE.'
    msgs = ["'<' not supported between instances of 'int' and 'NoneType'",
            "'>' not supported between instances of 'Foo' and 'Foo'"]
    for msg in msgs:
        self.re_matches(msg, re.OP_NOT_SUPP_BETWEEN_INSTANCES_RE, NO_GROUP)
Test OP_NOT_SUPP_BETWEEN_INSTANCES_RE.
didyoumean/didyoumean_re_tests.py
test_op_not_supported_between_instances
SylvainDe/DidYouMean-Python
76
python
c6b8fcd552975b4a3d3941b89303800fcb668935b679cc1a229c375c14107b7c
def test_max_recursion_depth(self):
    'Test MAX_RECURSION_DEPTH_RE.'
    msg = 'maximum recursion depth exceeded'
    self.re_matches(msg, re.MAX_RECURSION_DEPTH_RE, NO_GROUP)
Test MAX_RECURSION_DEPTH_RE.
didyoumean/didyoumean_re_tests.py
test_max_recursion_depth
SylvainDe/DidYouMean-Python
76
python
2d08ef40edf9bed601e3e2e4ecc28a74bc10df871ff095f67e9149974fbb7c16
def test_size_changed_during_iter(self):
    'Test SIZE_CHANGED_DURING_ITER_RE.'
    msgs = {'Set': 'Set changed size during iteration',
            'dictionary': 'dictionary changed size during iteration'}
    for (name, msg) in msgs.items():
        groups = (name,)
        results = (groups, dict())
        self.re_matches(msg, re.SIZE_CHANGED_DURING_ITER_RE, results)
Test SIZE_CHANGED_DURING_ITER_RE.
didyoumean/didyoumean_re_tests.py
test_size_changed_during_iter
SylvainDe/DidYouMean-Python
76
python
c2acb1c5a4d05978799f5e33816ab9f48a57241fb647ce82578bd3008da3f70b
def __init__(self, mode, sw_specification):
    'Load the ShapeWorld JSON file and build config file'
    if isinstance(sw_specification, str):
        with open(sw_specification, 'r') as fh:
            self.src_config = json.load(fh)
    else:
        self.src_config = sw_specification
    for (k, v) in self.src_config.items():
        setattr(self, k, v)
    assert (mode in ['train', 'validation', 'test']), 'Mode not recognised'
    self.mode = mode
    self.instances_per_shard = 10000
    self.num_shards = (10 if (self.mode == 'train') else 1)
    self.num_epochs = (500 if (self.mode == 'train') else 1)
    self.batch_size = (128 if (self.mode == 'train') else 1)
    self.num_steps_per_epoch = int(((np.floor(self.instances_per_shard) * self.num_shards) / self.batch_size))
    self.num_total_steps = int(np.floor((self.num_epochs * self.num_steps_per_epoch)))
    self.pixel_noise_stddev = 0.1
    self.noise_axis = 3
    self.train_cnn = True
    self.train_embeddings = True
    self.cnn_checkpoint = None
    self.initializer = tf.contrib.layers.xavier_initializer
    self.embedding_initializer = tf.random_uniform_initializer
    self.initializer_scale = 0.1
    self.embedding_size = 50
    self.img_embedding_size = 128
    self.joint_embedding_size = (128 + 50)
    self.num_lstm_units = self.joint_embedding_size
    self.optimizer = 'Adam'
    self.initial_learning_rate = 0.001
    self.clip_gradients = 5.0
    self.max_checkpoints_to_keep = 10
    self.save_every_n_steps = 100
    self.decode_type = 'greedy'
    self.softmax_temperature = 1.5
    self.beam_width = 3
    self.max_decoding_seq_len = 20
Load the ShapeWorld JSON file and build config file
shape2seq/config.py
__init__
trsherborne/shape2seq
1
python
e1212cac60d452cc99af16c7e6fd4938f0f5575e3f8a1e686aa116e557d4cc48
def to_dict(input_ordered_dict):
    '''to_dict converts an input ordered dict into a standard dict
    :param input_ordered_dict: the ordered dict
    '''
    return json.loads(json.dumps(input_ordered_dict))
to_dict converts an input ordered dict into a standard dict :param input_ordered_dict: the ordered dict
expdj/apps/result/utils.py
to_dict
vsoch/expfactory-docker-dev
0
python
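A small usage sketch of the json round-trip trick above; it only works when every value is JSON-serializable, and nested OrderedDicts come back as plain dicts:

import json
from collections import OrderedDict

def to_dict(input_ordered_dict):
    # same round-trip as in the function above
    return json.loads(json.dumps(input_ordered_dict))

nested = OrderedDict([('a', 1), ('b', OrderedDict([('c', 2)]))])
plain = to_dict(nested)
print(plain)        # {'a': 1, 'b': {'c': 2}}
print(type(plain))  # <class 'dict'>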
8c78405802082b87d7af085fa148f115b67ace77f41b97739f0279cf88046aec
def get_worker_experiments(worker, battery, completed=False):
    '''get_worker_experiments returns a list of experiment objects that
    a worker has/has not completed for a particular battery
    :param completed: boolean, default False to return uncompleted experiments
    '''
    battery_tags = Experiment.objects.filter(battery=battery)
    worker_completed = worker.experiments_completed.all()
    if (completed == False):
        return [e for e in battery_tags if (e not in worker_completed)]
    else:
        return [e for e in worker_completed if (e in battery_tags)]
get_worker_experiments returns a list of experiment objects that a worker has/has not completed for a particular battery :param completed: boolean, default False to return uncompleted experiments
expdj/apps/result/utils.py
get_worker_experiments
vsoch/expfactory-docker-dev
0
python
a3323fd8f5cd51a5cc71233b80fe34c91e01c57f33ce6845f445b51e7ff52ba3
def get_time_difference(d1, d2, format='%Y-%m-%d %H:%M:%S'):
    'calculate difference between two time strings, t1 and t2, returns minutes'
    if isinstance(d1, str):
        d1 = datetime.datetime.strptime(d1, format)
    if isinstance(d2, str):
        d2 = datetime.datetime.strptime(d2, format)
    return ((d2 - d1).total_seconds() / 60)
calculate difference between two time strings, t1 and t2, returns minutes
expdj/apps/result/utils.py
get_time_difference
vsoch/expfactory-docker-dev
0
python
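For completeness, a self-contained run of the helper above (the definition is repeated so the snippet executes on its own); strings and datetime objects can be mixed because both arguments are normalised before subtraction:

import datetime

def get_time_difference(d1, d2, format='%Y-%m-%d %H:%M:%S'):
    if isinstance(d1, str):
        d1 = datetime.datetime.strptime(d1, format)
    if isinstance(d2, str):
        d2 = datetime.datetime.strptime(d2, format)
    return (d2 - d1).total_seconds() / 60

print(get_time_difference('2023-05-01 10:00:00',
                          datetime.datetime(2023, 5, 1, 10, 45, 30)))  # 45.5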
a1b1bc263d00c389f2c11981cdd4f917eed2b32ab8e564dbee555c170062058a
def generate_email(battery, experiment, worker, data):
    '''generate_email will use sendgrid API to send a result object to a user as an email attachment.
    :param battery: The expdj.apps.experiment.Battery
    :param data: the data result (json) to send
    '''
    if (not isinstance(data, dict)):
        data = {'result': data}
    data = json.dumps(to_dict(data))
    to_email = battery.email
    subject = ('[EXPFACTORY][RESULT][%s][%s]' % (battery.name, experiment.name))
    body = ('Experiment Factory Result Data\n%s\n%s\n%s' % (battery.flat(), experiment.flat(), worker.flat()))
    body = ('%s\n<pre>%s</pre>' % (body, data))
    return {'personalizations': [{'to': [{'email': to_email}], 'subject': subject}],
            'from': {'email': REPLY_TO},
            'content': [{'type': 'text/plain', 'value': body}]}
generate_email will use sendgrid API to send a result object to a user as an email attachment. :param battery: The expdj.apps.experiment.Battery :param data: the data result (json) to send
expdj/apps/result/utils.py
generate_email
vsoch/expfactory-docker-dev
0
python
351f5656ebf8c2d5a176ce2889c2e16b3ca2fc89a38c5705a4fe91facaa824fa
def complete_survey_result(experiment, taskdata):
    '''complete_survey_result parses the form names (question ids) and matches to a lookup table generated by expfactory-python survey module that has complete question / option information.
    :param experiment: the experiment object
    :param taskdata: the taskdata from the server, typically an ordered dict
    '''
    taskdata = dict(taskdata)
    experiment_folder = experiment.get_install_dir()
    experiment = [{'exp_id': experiment.exp_id}]
    question_lookup = export_questions(experiment, experiment_folder)
    final_data = {}
    for (queskey, quesval) in taskdata.iteritems():
        if (queskey in question_lookup):
            complete_question = question_lookup[queskey]
            complete_question['response'] = quesval[0]
        else:
            complete_question = {'response': quesval[0]}
        final_data[queskey] = complete_question
    return final_data
complete_survey_result parses the form names (question ids) and matches to a lookup table generated by expfactory-python survey module that has complete question / option information. :param experiment: the experiment object :param taskdata: the taskdata from the server, typically an ordered dict
expdj/apps/result/utils.py
complete_survey_result
vsoch/expfactory-docker-dev
0
python
0ce1f5b21b2adc0b2c1f859f10fa332e79318451fb64539573ab14ce9b5f56fb
def zip_up(experiment):
    '''zip_up will zip up an experiment folder into a package (.zip)
    for the user to download. It is returned as a StringIO object
    :param experiment_folder: the experiment folder to zip up
    '''
    experiment_folder = experiment.get_install_dir()
    s = StringIO.StringIO()
    zf = zipfile.ZipFile(s, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
    for (root, dirs, files) in os.walk(experiment_folder):
        for filey in files:
            subdirectory = root.split(experiment.exp_id)[(- 1)]
            archive_folder = ('%s/%s' % (subdirectory, filey))
            zf.write(os.path.join(root, filey), archive_folder)
    zf.close()
    return s
zip_up will zip up an experiment folder into a package (.zip) for the user to download. It is returned as a StringIO object :param experiment_folder: the experiment folder to zip up
expdj/apps/result/utils.py
zip_up
vsoch/expfactory-docker-dev
0
python
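zip_up as stored targets Python 2 (StringIO). A minimal Python 3 sketch of the same idea, assuming the same folder layout; zip_folder and its parameters are hypothetical names, with io.BytesIO standing in for StringIO:

import io
import os
import zipfile

def zip_folder(folder, exp_id):
    # Stream a folder into an in-memory zip, mirroring zip_up above.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
        for root, _dirs, files in os.walk(folder):
            for name in files:
                subdirectory = root.split(exp_id)[-1]
                zf.write(os.path.join(root, name), '%s/%s' % (subdirectory, name))
    buf.seek(0)
    return buf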
0d8da88133d19c45da672d4d89d16bba31273fee1a5d3eb1cd29188e02b31074
def extractFlowerBridgeToo(item):
    '\n\t# FlowerBridgeToo\n\n\t'
    return None
# FlowerBridgeToo
WebMirror/management/rss_parser_funcs/feed_parse_extractFlowerBridgeToo.py
extractFlowerBridgeToo
fake-name/ReadableWebProxy
193
python
90384d4c1eeb4da27f8eba6f652b841b3dfc470ea7ea0491a09a3fce2a2d8636
def get_partitions(self, current_time: Optional[datetime]=None) -> List[Partition[T]]:
    '''Return the set of known partitions.

    Arguments:
        current_time (Optional[datetime]): The evaluation time for the partition function, which
            is passed through to the ``partition_fn`` (if it accepts a parameter). Defaults to
            the current time in UTC.
    '''
    return self._partitions_def.get_partitions(current_time)
Return the set of known partitions. Arguments: current_time (Optional[datetime]): The evaluation time for the partition function, which is passed through to the ``partition_fn`` (if it accepts a parameter). Defaults to the current time in UTC.
python_modules/dagster/dagster/core/definitions/partition.py
get_partitions
theonlypoi/dagster
1
python
f395063964289d150bee92d2819c9fe257b658286c441bddb73cd522f4ff6524
def create_schedule_definition(self, schedule_name, cron_schedule, partition_selector,
                               should_execute=None, environment_vars=None, execution_timezone=None,
                               description=None, decorated_fn=None, job=None):
    '''Create a ScheduleDefinition from a PartitionSetDefinition.

    Arguments:
        schedule_name (str): The name of the schedule.
        cron_schedule (str): A valid cron string for the schedule
        partition_selector (Callable[ScheduleEvaluationContext, PartitionSetDefinition], Union[Partition, List[Partition]]):
            Function that determines the partition to use at a given execution time. Can return
            either a single Partition or a list of Partitions. For time-based partition sets,
            will likely be either `identity_partition_selector` or a selector returned by
            `create_offset_partition_selector`.
        should_execute (Optional[function]): Function that runs at schedule execution time that
            determines whether a schedule should execute. Defaults to a function that always returns
            ``True``.
        environment_vars (Optional[dict]): The environment variables to set for the schedule.
        execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
            with DagsterDaemonScheduler, and must be set when using that scheduler.
        description (Optional[str]): A human-readable description of the schedule.

    Returns:
        PartitionScheduleDefinition: The generated PartitionScheduleDefinition for the partition
        selector
    '''
    check.str_param(schedule_name, 'schedule_name')
    check.str_param(cron_schedule, 'cron_schedule')
    check.opt_callable_param(should_execute, 'should_execute')
    check.opt_dict_param(environment_vars, 'environment_vars', key_type=str, value_type=str)
    check.callable_param(partition_selector, 'partition_selector')
    check.opt_str_param(execution_timezone, 'execution_timezone')
    check.opt_str_param(description, 'description')

    def _execution_fn(context):
        check.inst_param(context, 'context', ScheduleEvaluationContext)
        with user_code_error_boundary(
            ScheduleExecutionError,
            lambda: f'Error occurred during the execution of partition_selector for schedule {schedule_name}',
        ):
            selector_result = partition_selector(context, self)
        if isinstance(selector_result, SkipReason):
            (yield selector_result)
            return
        selected_partitions = (selector_result
                               if isinstance(selector_result, (frozenlist, list))
                               else [selector_result])
        check.is_list(selected_partitions, of_type=Partition)
        if (not selected_partitions):
            (yield SkipReason('Partition selector returned an empty list of partitions.'))
            return
        missing_partition_names = [
            partition.name for partition in selected_partitions
            if (partition.name not in self.get_partition_names(context.scheduled_execution_time))
        ]
        if missing_partition_names:
            (yield SkipReason((('Partition selector returned partition' +
                                ('s' if (len(missing_partition_names) > 1) else '')) +
                               f" not in the partition set: {', '.join(missing_partition_names)}.")))
            return
        with user_code_error_boundary(
            ScheduleExecutionError,
            lambda: f'Error occurred during the execution of should_execute for schedule {schedule_name}',
        ):
            if (should_execute and (not should_execute(context))):
                (yield SkipReason('should_execute function for {schedule_name} returned false.'.format(schedule_name=schedule_name)))
                return
        for selected_partition in selected_partitions:
            with user_code_error_boundary(
                ScheduleExecutionError,
                lambda: f'Error occurred during the execution of run_config_fn for schedule {schedule_name}',
            ):
                run_config = self.run_config_for_partition(selected_partition)
            with user_code_error_boundary(
                ScheduleExecutionError,
                lambda: f'Error occurred during the execution of tags_fn for schedule {schedule_name}',
            ):
                tags = self.tags_for_partition(selected_partition)
            (yield RunRequest(run_key=(selected_partition.name if (len(selected_partitions) > 0) else None),
                              run_config=run_config, tags=tags))

    return PartitionScheduleDefinition(name=schedule_name, cron_schedule=cron_schedule,
                                       pipeline_name=self._pipeline_name, tags_fn=None,
                                       solid_selection=self._solid_selection, mode=self._mode,
                                       should_execute=None, environment_vars=environment_vars,
                                       partition_set=self, execution_timezone=execution_timezone,
                                       execution_fn=_execution_fn, description=description,
                                       decorated_fn=decorated_fn, job=job)
Create a ScheduleDefinition from a PartitionSetDefinition. Arguments: schedule_name (str): The name of the schedule. cron_schedule (str): A valid cron string for the schedule partition_selector (Callable[ScheduleEvaluationContext, PartitionSetDefinition], Union[Partition, List[Partition]]): Function that determines the partition to use at a given execution time. Can return either a single Partition or a list of Partitions. For time-based partition sets, will likely be either `identity_partition_selector` or a selector returned by `create_offset_partition_selector`. should_execute (Optional[function]): Function that runs at schedule execution time that determines whether a schedule should execute. Defaults to a function that always returns ``True``. environment_vars (Optional[dict]): The environment variables to set for the schedule. execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works with DagsterDaemonScheduler, and must be set when using that scheduler. description (Optional[str]): A human-readable description of the schedule. Returns: PartitionScheduleDefinition: The generated PartitionScheduleDefinition for the partition selector
python_modules/dagster/dagster/core/definitions/partition.py
create_schedule_definition
theonlypoi/dagster
1
python
def create_schedule_definition(self, schedule_name, cron_schedule, partition_selector, should_execute=None, environment_vars=None, execution_timezone=None, description=None, decorated_fn=None, job=None): 'Create a ScheduleDefinition from a PartitionSetDefinition.\n\n Arguments:\n schedule_name (str): The name of the schedule.\n cron_schedule (str): A valid cron string for the schedule\n partition_selector (Callable[ScheduleEvaluationContext, PartitionSetDefinition], Union[Partition, List[Partition]]):\n Function that determines the partition to use at a given execution time. Can return\n either a single Partition or a list of Partitions. For time-based partition sets,\n will likely be either `identity_partition_selector` or a selector returned by\n `create_offset_partition_selector`.\n should_execute (Optional[function]): Function that runs at schedule execution time that\n determines whether a schedule should execute. Defaults to a function that always returns\n ``True``.\n environment_vars (Optional[dict]): The environment variables to set for the schedule.\n execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works\n with DagsterDaemonScheduler, and must be set when using that scheduler.\n description (Optional[str]): A human-readable description of the schedule.\n\n Returns:\n PartitionScheduleDefinition: The generated PartitionScheduleDefinition for the partition\n selector\n ' check.str_param(schedule_name, 'schedule_name') check.str_param(cron_schedule, 'cron_schedule') check.opt_callable_param(should_execute, 'should_execute') check.opt_dict_param(environment_vars, 'environment_vars', key_type=str, value_type=str) check.callable_param(partition_selector, 'partition_selector') check.opt_str_param(execution_timezone, 'execution_timezone') check.opt_str_param(description, 'description') def _execution_fn(context): check.inst_param(context, 'context', ScheduleEvaluationContext) with user_code_error_boundary(ScheduleExecutionError, (lambda : f'Error occurred during the execution of partition_selector for schedule {schedule_name}')): selector_result = partition_selector(context, self) if isinstance(selector_result, SkipReason): (yield selector_result) return selected_partitions = (selector_result if isinstance(selector_result, (frozenlist, list)) else [selector_result]) check.is_list(selected_partitions, of_type=Partition) if (not selected_partitions): (yield SkipReason('Partition selector returned an empty list of partitions.')) return missing_partition_names = [partition.name for partition in selected_partitions if (partition.name not in self.get_partition_names(context.scheduled_execution_time))] if missing_partition_names: (yield SkipReason((('Partition selector returned partition' + ('s' if (len(missing_partition_names) > 1) else )) + f" not in the partition set: {', '.join(missing_partition_names)}."))) return with user_code_error_boundary(ScheduleExecutionError, (lambda : f'Error occurred during the execution of should_execute for schedule {schedule_name}')): if (should_execute and (not should_execute(context))): (yield SkipReason('should_execute function for {schedule_name} returned false.'.format(schedule_name=schedule_name))) return for selected_partition in selected_partitions: with user_code_error_boundary(ScheduleExecutionError, (lambda : f'Error occurred during the execution of run_config_fn for schedule {schedule_name}')): run_config = self.run_config_for_partition(selected_partition) with user_code_error_boundary(ScheduleExecutionError, 
(lambda : f'Error occurred during the execution of tags_fn for schedule {schedule_name}')): tags = self.tags_for_partition(selected_partition) (yield RunRequest(run_key=(selected_partition.name if (len(selected_partitions) > 0) else None), run_config=run_config, tags=tags)) return PartitionScheduleDefinition(name=schedule_name, cron_schedule=cron_schedule, pipeline_name=self._pipeline_name, tags_fn=None, solid_selection=self._solid_selection, mode=self._mode, should_execute=None, environment_vars=environment_vars, partition_set=self, execution_timezone=execution_timezone, execution_fn=_execution_fn, description=description, decorated_fn=decorated_fn, job=job)
def create_schedule_definition(self, schedule_name, cron_schedule, partition_selector, should_execute=None, environment_vars=None, execution_timezone=None, description=None, decorated_fn=None, job=None): 'Create a ScheduleDefinition from a PartitionSetDefinition.\n\n Arguments:\n schedule_name (str): The name of the schedule.\n cron_schedule (str): A valid cron string for the schedule\n partition_selector (Callable[ScheduleEvaluationContext, PartitionSetDefinition], Union[Partition, List[Partition]]):\n Function that determines the partition to use at a given execution time. Can return\n either a single Partition or a list of Partitions. For time-based partition sets,\n will likely be either `identity_partition_selector` or a selector returned by\n `create_offset_partition_selector`.\n should_execute (Optional[function]): Function that runs at schedule execution time that\n determines whether a schedule should execute. Defaults to a function that always returns\n ``True``.\n environment_vars (Optional[dict]): The environment variables to set for the schedule.\n execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works\n with DagsterDaemonScheduler, and must be set when using that scheduler.\n description (Optional[str]): A human-readable description of the schedule.\n\n Returns:\n PartitionScheduleDefinition: The generated PartitionScheduleDefinition for the partition\n selector\n ' check.str_param(schedule_name, 'schedule_name') check.str_param(cron_schedule, 'cron_schedule') check.opt_callable_param(should_execute, 'should_execute') check.opt_dict_param(environment_vars, 'environment_vars', key_type=str, value_type=str) check.callable_param(partition_selector, 'partition_selector') check.opt_str_param(execution_timezone, 'execution_timezone') check.opt_str_param(description, 'description') def _execution_fn(context): check.inst_param(context, 'context', ScheduleEvaluationContext) with user_code_error_boundary(ScheduleExecutionError, (lambda : f'Error occurred during the execution of partition_selector for schedule {schedule_name}')): selector_result = partition_selector(context, self) if isinstance(selector_result, SkipReason): (yield selector_result) return selected_partitions = (selector_result if isinstance(selector_result, (frozenlist, list)) else [selector_result]) check.is_list(selected_partitions, of_type=Partition) if (not selected_partitions): (yield SkipReason('Partition selector returned an empty list of partitions.')) return missing_partition_names = [partition.name for partition in selected_partitions if (partition.name not in self.get_partition_names(context.scheduled_execution_time))] if missing_partition_names: (yield SkipReason((('Partition selector returned partition' + ('s' if (len(missing_partition_names) > 1) else )) + f" not in the partition set: {', '.join(missing_partition_names)}."))) return with user_code_error_boundary(ScheduleExecutionError, (lambda : f'Error occurred during the execution of should_execute for schedule {schedule_name}')): if (should_execute and (not should_execute(context))): (yield SkipReason('should_execute function for {schedule_name} returned false.'.format(schedule_name=schedule_name))) return for selected_partition in selected_partitions: with user_code_error_boundary(ScheduleExecutionError, (lambda : f'Error occurred during the execution of run_config_fn for schedule {schedule_name}')): run_config = self.run_config_for_partition(selected_partition) with user_code_error_boundary(ScheduleExecutionError, 
(lambda : f'Error occurred during the execution of tags_fn for schedule {schedule_name}')): tags = self.tags_for_partition(selected_partition) (yield RunRequest(run_key=(selected_partition.name if (len(selected_partitions) > 0) else None), run_config=run_config, tags=tags)) return PartitionScheduleDefinition(name=schedule_name, cron_schedule=cron_schedule, pipeline_name=self._pipeline_name, tags_fn=None, solid_selection=self._solid_selection, mode=self._mode, should_execute=None, environment_vars=environment_vars, partition_set=self, execution_timezone=execution_timezone, execution_fn=_execution_fn, description=description, decorated_fn=decorated_fn, job=job)<|docstring|>Create a ScheduleDefinition from a PartitionSetDefinition. Arguments: schedule_name (str): The name of the schedule. cron_schedule (str): A valid cron string for the schedule partition_selector (Callable[ScheduleEvaluationContext, PartitionSetDefinition], Union[Partition, List[Partition]]): Function that determines the partition to use at a given execution time. Can return either a single Partition or a list of Partitions. For time-based partition sets, will likely be either `identity_partition_selector` or a selector returned by `create_offset_partition_selector`. should_execute (Optional[function]): Function that runs at schedule execution time that determines whether a schedule should execute. Defaults to a function that always returns ``True``. environment_vars (Optional[dict]): The environment variables to set for the schedule. execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works with DagsterDaemonScheduler, and must be set when using that scheduler. description (Optional[str]): A human-readable description of the schedule. Returns: PartitionScheduleDefinition: The generated PartitionScheduleDefinition for the partition selector<|endoftext|>
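A minimal sketch of invoking the create_schedule_definition method documented in the record above; the partition set instance, the cron string, and the import path for identity_partition_selector are assumptions for illustration, not taken from this excerpt.

# Hypothetical usage of PartitionSetDefinition.create_schedule_definition;
# `my_partition_set` and the import path below are assumed.
from dagster.core.definitions.partition import identity_partition_selector

nightly_schedule = my_partition_set.create_schedule_definition(
    schedule_name="nightly_partition_schedule",
    cron_schedule="0 2 * * *",
    partition_selector=identity_partition_selector,
    execution_timezone="UTC",
    description="Runs the latest partition every night at 02:00 UTC.",
)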
4cd3a2898a72bf922f521d27fcfc6a96c16667f9ba7cbd2645decb0a85b8fa54
def _drawPair_random(self, MTracker, kA=None, randstate=np.random): '\n Returns\n --------\n kA\n kB\n ' candidatesA = MTracker.getAvailableComps() nA = len(candidatesA) pA = (np.ones(nA) / nA) if (kA is None): kA = choice(candidatesA, ps=pA, randstate=randstate) else: assert (kA in candidatesA) candidatesB = MTracker.getAvailablePartnersForComp(kA) assert (len(candidatesB) > 0) nB = len(candidatesB) pB = (np.ones(nB) / nB) kB = choice(candidatesB, ps=pB, randstate=randstate) return (kA, kB)
Returns -------- kA kB
refinery/bnpy/bnpy-dev/bnpy/learnalg/MergePairSelector.py
_drawPair_random
csa0001/Refinery
103
python
def _drawPair_random(self, MTracker, kA=None, randstate=np.random): '\n Returns\n --------\n kA\n kB\n ' candidatesA = MTracker.getAvailableComps() nA = len(candidatesA) pA = (np.ones(nA) / nA) if (kA is None): kA = choice(candidatesA, ps=pA, randstate=randstate) else: assert (kA in candidatesA) candidatesB = MTracker.getAvailablePartnersForComp(kA) assert (len(candidatesB) > 0) nB = len(candidatesB) pB = (np.ones(nB) / nB) kB = choice(candidatesB, ps=pB, randstate=randstate) return (kA, kB)
def _drawPair_random(self, MTracker, kA=None, randstate=np.random): '\n Returns\n --------\n kA\n kB\n ' candidatesA = MTracker.getAvailableComps() nA = len(candidatesA) pA = (np.ones(nA) / nA) if (kA is None): kA = choice(candidatesA, ps=pA, randstate=randstate) else: assert (kA in candidatesA) candidatesB = MTracker.getAvailablePartnersForComp(kA) assert (len(candidatesB) > 0) nB = len(candidatesB) pB = (np.ones(nB) / nB) kB = choice(candidatesB, ps=pB, randstate=randstate) return (kA, kB)<|docstring|>Returns -------- kA kB<|endoftext|>
a83a1fb44bc90953984af544525796352bf624ade198e3170fa9156afd850029
def _drawPair_marglik(self, hmodel, SS, MTracker, kA=None, randstate=np.random): '\n Returns\n --------\n kA\n kB\n ' candidatesA = MTracker.getAvailableComps() if (kA is None): nA = len(candidatesA) pA = (np.ones(nA) / nA) kA = choice(candidatesA, ps=pA, randstate=randstate) else: assert (kA in candidatesA) candidatesB = MTracker.getAvailablePartnersForComp(kA) assert (len(candidatesB) > 0) ps = self._calcMargLikProbVector(hmodel, SS, kA, candidatesB) kB = choice(candidatesB, ps=ps, randstate=randstate) return (kA, kB)
Returns -------- kA kB
refinery/bnpy/bnpy-dev/bnpy/learnalg/MergePairSelector.py
_drawPair_marglik
csa0001/Refinery
103
python
def _drawPair_marglik(self, hmodel, SS, MTracker, kA=None, randstate=np.random): '\n Returns\n --------\n kA\n kB\n ' candidatesA = MTracker.getAvailableComps() if (kA is None): nA = len(candidatesA) pA = (np.ones(nA) / nA) kA = choice(candidatesA, ps=pA, randstate=randstate) else: assert (kA in candidatesA) candidatesB = MTracker.getAvailablePartnersForComp(kA) assert (len(candidatesB) > 0) ps = self._calcMargLikProbVector(hmodel, SS, kA, candidatesB) kB = choice(candidatesB, ps=ps, randstate=randstate) return (kA, kB)
def _drawPair_marglik(self, hmodel, SS, MTracker, kA=None, randstate=np.random): '\n Returns\n --------\n kA\n kB\n ' candidatesA = MTracker.getAvailableComps() if (kA is None): nA = len(candidatesA) pA = (np.ones(nA) / nA) kA = choice(candidatesA, ps=pA, randstate=randstate) else: assert (kA in candidatesA) candidatesB = MTracker.getAvailablePartnersForComp(kA) assert (len(candidatesB) > 0) ps = self._calcMargLikProbVector(hmodel, SS, kA, candidatesB) kB = choice(candidatesB, ps=ps, randstate=randstate) return (kA, kB)<|docstring|>Returns -------- kA kB<|endoftext|>
b17109652008b5fc18f30fa1b1cd2a344ca64e846f9f441f4adff78cded6ae43
@property def is_resolved(self) -> bool: '\n If true, all dependent placeholders are resolved\n ' if any(((isinstance(operand, Placeholder) and (not Placeholder.check_resolved(operand))) for operand in self.operands)): return False return True
If true, all dependent placeholders are resolved
src/graph_transpiler/webdnn/graph/placeholder.py
is_resolved
you74674/webdnn
1
python
@property def is_resolved(self) -> bool: '\n \n ' if any(((isinstance(operand, Placeholder) and (not Placeholder.check_resolved(operand))) for operand in self.operands)): return False return True
@property def is_resolved(self) -> bool: '\n \n ' if any(((isinstance(operand, Placeholder) and (not Placeholder.check_resolved(operand))) for operand in self.operands)): return False return True<|docstring|>If true, all dependent placeholders are resolved<|endoftext|>
b8280d204e12f1c69b42ee91f40d4a68fe7b98baeaf2f0fabc4c854a32f2970e
@staticmethod def to_int(x: Union[(int, 'Placeholder')]): 'to_int(x)\n\n Convert the placeholder into concrete integer value.\n Args:\n x: the placeholder\n\n Returns:\n (int or Placeholder): If `x` is resolved, an integer is returned. Otherwise, `x` itself is returned.\n ' return (int(x) if (not isinstance(x, Placeholder)) else (int(x.value) if Placeholder.check_resolved(x) else x))
to_int(x) Convert the placeholder into concrete integer value. Args: x: the placeholder Returns: (int or Placeholder): If `x` is resolved, an integer is returned. Otherwise, `x` itself is returned.
src/graph_transpiler/webdnn/graph/placeholder.py
to_int
you74674/webdnn
1
python
@staticmethod def to_int(x: Union[(int, 'Placeholder')]): 'to_int(x)\n\n Convert the placeholder into concrete integer value.\n Args:\n x: the placeholder\n\n Returns:\n (int or Placeholder): If `x` is resolved, an integer is returned. Otherwise, `x` itself is returned.\n ' return (int(x) if (not isinstance(x, Placeholder)) else (int(x.value) if Placeholder.check_resolved(x) else x))
@staticmethod def to_int(x: Union[(int, 'Placeholder')]): 'to_int(x)\n\n Convert the placeholder into concrete integer value.\n Args:\n x: the placeholder\n\n Returns:\n (int or Placeholder): If `x` is resolved, an integer is returned. Otherwise, `x` itself is returned.\n ' return (int(x) if (not isinstance(x, Placeholder)) else (int(x.value) if Placeholder.check_resolved(x) else x))<|docstring|>to_int(x) Convert the placeholder into concrete integer value. Args: x: the placeholder Returns: (int or Placeholder): If `x` is resolved, an integer is returned. Otherwise, `x` itself is returned.<|endoftext|>
df904481a44ac97d4ab0a4697589ddcd44a877fea9b85b9bcab8aca987a736a7
@staticmethod def force_int(x: Union[(int, 'Placeholder')]): 'force_int(x)\n\n Convert the placeholder into concrete integer value. If `x` is not resolved, an error is raised.\n Args:\n x: the placeholder\n\n Returns:\n (int): an integer\n ' if (not isinstance(x, Placeholder)): return int(x) elif Placeholder.check_resolved(x): return x.value raise ValueError(f'{x} is not resolved.')
force_int(x) Convert the placeholder into concrete integer value. If `x` is not resolved, an error is raised. Args: x: the placeholder Returns: (int): an integer
src/graph_transpiler/webdnn/graph/placeholder.py
force_int
you74674/webdnn
1
python
@staticmethod def force_int(x: Union[(int, 'Placeholder')]): 'force_int(x)\n\n Convert the placeholder into concrete integer value. If `x` is not resolved, an error is raised.\n Args:\n x: the placeholder\n\n Returns:\n (int): an integer\n ' if (not isinstance(x, Placeholder)): return int(x) elif Placeholder.check_resolved(x): return x.value raise ValueError(f'{x} is not resolved.')
@staticmethod def force_int(x: Union[(int, 'Placeholder')]): 'force_int(x)\n\n Convert the placeholder into concrete integer value. If `x` is not resolved, an error is raised.\n Args:\n x: the placeholder\n\n Returns:\n (int): an integer\n ' if (not isinstance(x, Placeholder)): return int(x) elif Placeholder.check_resolved(x): return x.value raise ValueError(f'{x} is not resolved.')<|docstring|>force_int(x) Convert the placeholder into concrete integer value. If `x` is not resolved, an error is raised. Args: x: the placeholder Returns: (int): an integer<|endoftext|>
a96093353dae62dac59d4f5a7d68b8f70898d9a7224310a3aa6595216c9b43ee
@staticmethod def check_resolved(x: Union[(int, 'Placeholder')]): "check_resolved(x)\n\n Check whether specified placeholder is resolved or not.\n Args:\n x: the placeholder\n\n Returns:\n (bool): If `True`, the placeholder is resolved. Otherwise, it's not resolved.\n " if (not isinstance(x, Placeholder)): return True if (x._value is not None): return True if x.dependency: return x.dependency.is_resolved return False
check_resolved(x) Check whether specified placeholder is resolved or not. Args: x: the placeholder Returns: (bool): If `True`, the placeholder is resolved. Otherwise, it's not resolved.
src/graph_transpiler/webdnn/graph/placeholder.py
check_resolved
you74674/webdnn
1
python
@staticmethod def check_resolved(x: Union[(int, 'Placeholder')]): "check_resolved(x)\n\n Check whether specified placeholder is resolved or not.\n Args:\n x: the placeholder\n\n Returns:\n (bool): If `True`, the placeholder is resolved. Otherwise, it's not resolved.\n " if (not isinstance(x, Placeholder)): return True if (x._value is not None): return True if x.dependency: return x.dependency.is_resolved return False
@staticmethod def check_resolved(x: Union[(int, 'Placeholder')]): "check_resolved(x)\n\n Check whether specified placeholder is resolved or not.\n Args:\n x: the placeholder\n\n Returns:\n (bool): If `True`, the placeholder is resolved. Otherwise, it's not resolved.\n " if (not isinstance(x, Placeholder)): return True if (x._value is not None): return True if x.dependency: return x.dependency.is_resolved return False<|docstring|>check_resolved(x) Check whether specified placeholder is resolved or not. Args: x: the placeholder Returns: (bool): If `True`, the placeholder is resolved. Otherwise, it's not resolved.<|endoftext|>
debc7b023b591adaf5e0e17c7f1532c310e709602e30bf3ebc8f441c54c8dc5a
@property def value(self) -> Union[(int, 'Placeholder')]: "value\n\n The placeholder's value. If it's not resolved, the placeholder itself is returned.\n\n If the placeholder is already resolved, new value cannot be set, and it causes an error.\n " if Placeholder.check_resolved(self): if self.dependency: if (self._value is None): self._value = self.dependency.value return self._value else: return self
value The placeholder's value. If it's not resolved, the placeholder itself is returned. If the placeholder is already resolved, new value cannot be set, and it causes an error.
src/graph_transpiler/webdnn/graph/placeholder.py
value
you74674/webdnn
1
python
@property def value(self) -> Union[(int, 'Placeholder')]: "value\n\n The placeholder's value. If it's not resolved, the placeholder itself is returned.\n\n If the placeholder is already resolved, new value cannot be set, and it causes an error.\n " if Placeholder.check_resolved(self): if self.dependency: if (self._value is None): self._value = self.dependency.value return self._value else: return self
@property def value(self) -> Union[(int, 'Placeholder')]: "value\n\n The placeholder's value. If it's not resolved, the placeholder itself is returned.\n\n If the placeholder is already resolved, new value cannot be set, and it causes an error.\n " if Placeholder.check_resolved(self): if self.dependency: if (self._value is None): self._value = self.dependency.value return self._value else: return self<|docstring|>value The placeholder's value. If it's not resolved, the placeholder itself is returned. If the placeholder is already resolved, new value cannot be set, and it causes an error.<|endoftext|>
cd65d4bf57f88faa43ba9aab01c2de1a24f4e4e4749c840c9d68f4355d942112
def get_depend_placeholders(self): 'get_depend_placeholders\n\n List up all dependent placeholders\n\n Returns:\n (list of Placeholder): list of all dependent placeholders\n ' if Placeholder.check_resolved(self): return set() if self.dependency: res = set() for v in self.dependency.operands: if (not Placeholder.check_resolved(v)): res.update(v.get_depend_placeholders()) return res else: return {self}
get_depend_placeholders List up all dependent placeholders Returns: (list of Placeholder): list of all dependent placeholders
src/graph_transpiler/webdnn/graph/placeholder.py
get_depend_placeholders
you74674/webdnn
1
python
def get_depend_placeholders(self): 'get_depend_placeholders\n\n List up all dependent placeholders\n\n Returns:\n (list of Placeholder): list of all dependent placeholders\n ' if Placeholder.check_resolved(self): return set() if self.dependency: res = set() for v in self.dependency.operands: if (not Placeholder.check_resolved(v)): res.update(v.get_depend_placeholders()) return res else: return {self}
def get_depend_placeholders(self): 'get_depend_placeholders\n\n List up all dependent placeholders\n\n Returns:\n (list of Placeholder): list of all dependent placeholders\n ' if Placeholder.check_resolved(self): return set() if self.dependency: res = set() for v in self.dependency.operands: if (not Placeholder.check_resolved(v)): res.update(v.get_depend_placeholders()) return res else: return {self}<|docstring|>get_depend_placeholders List up all dependent placeholders Returns: (list of Placeholder): list of all dependent placeholders<|endoftext|>
d6e03a85adb2c437a74d97c7394221482f36bdd81596d2cd4c63c39542b9127f
def generate_js_function(self, flag_semicolon=True): "generate_js_function\n\n Generate javascript code to resolve this placeholder's value at runtime.\n\n Args:\n flag_semicolon(bool): If True, semicolon is appended into generated code.\n\n Returns:\n (str): generated code\n " if Placeholder.check_resolved(self): return (f'{self.value}' + (';' if flag_semicolon else '')) elif self.dependency: return self.dependency.generate_js_function() else: return (f"placeholders['{self.label}']" + (';' if flag_semicolon else ''))
generate_js_function Generate javascript code to resolve this placeholder's value at runtime. Args: flag_semicolon(bool): If True, semicolon is appended into generated code. Returns: (str): generated code
src/graph_transpiler/webdnn/graph/placeholder.py
generate_js_function
you74674/webdnn
1
python
def generate_js_function(self, flag_semicolon=True): "generate_js_function\n\n Generate javascript code to resolve this placeholder's value at runtime.\n\n Args:\n flag_semicolon(bool): If True, semicolon is appended into generated code.\n\n Returns:\n (str): generated code\n " if Placeholder.check_resolved(self): return (f'{self.value}' + (';' if flag_semicolon else )) elif self.dependency: return self.dependency.generate_js_function() else: return (f"placeholders['{self.label}']" + (';' if flag_semicolon else ))
def generate_js_function(self, flag_semicolon=True): "generate_js_function\n\n Generate javascript code to resolve this placeholder's value at runtime.\n\n Args:\n flag_semicolon(bool): If True, semicolon is appended into generated code.\n\n Returns:\n (str): generated code\n " if Placeholder.check_resolved(self): return (f'{self.value}' + (';' if flag_semicolon else )) elif self.dependency: return self.dependency.generate_js_function() else: return (f"placeholders['{self.label}']" + (';' if flag_semicolon else ))<|docstring|>generate_js_function Generate javascript code to resolve this placeholder's value at runtime. Args: flag_semicolon(bool): If True, semicolon is appended into generated code. Returns: (str): generated code<|endoftext|>
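A small usage sketch for the Placeholder helpers shown in the preceding records; the constructor keyword and the ability to assign .value to an unresolved placeholder are assumptions inferred from the docstrings, while to_int, force_int, and check_resolved are the static methods documented above.

# Illustrative only: Placeholder(label=...) construction and the value setter are assumed.
from webdnn.graph.placeholder import Placeholder

n = Placeholder(label='N')                  # assumed constructor signature
print(Placeholder.check_resolved(n))        # False while no value is bound
print(Placeholder.to_int(n) is n)           # unresolved placeholders come back unchanged
n.value = 8                                 # assumed setter; per the docstring, resetting a resolved value errors
print(Placeholder.force_int(n))             # 8 after resolution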
935c96ffa17bfe0e5b35e36180175401f67004fc77153b78994e3ae70db1ee8b
def GetParser(): 'Creates the argparse parser.' parser = commandline.ArgumentParser(description=__doc__) parser.add_argument('--repo-path', type='path', default='.', help='Path to the repo to snapshot.') parser.add_argument('--snapshot-ref', help='Remote ref to create for projects whose HEAD is not reachable from its current upstream branch. Projects with multiple checkouts may have a unique suffix appended to this ref.') parser.add_argument('--output-file', type='path', help='Path to write the manifest snapshot XML to.') parser.add_argument('--dry-run', action='store_true', help='Do not actually push to remotes.') parser.add_argument('--jobs', type=int, default=16, help='The number of parallel processes to run for git push operations.') return parser
Creates the argparse parser.
scripts/create_manifest_snapshot.py
GetParser
khromiumos/chromiumos-chromite
0
python
def GetParser(): parser = commandline.ArgumentParser(description=__doc__) parser.add_argument('--repo-path', type='path', default='.', help='Path to the repo to snapshot.') parser.add_argument('--snapshot-ref', help='Remote ref to create for projects whose HEAD is not reachable from its current upstream branch. Projects with multiple checkouts may have a unique suffix appended to this ref.') parser.add_argument('--output-file', type='path', help='Path to write the manifest snapshot XML to.') parser.add_argument('--dry-run', action='store_true', help='Do not actually push to remotes.') parser.add_argument('--jobs', type=int, default=16, help='The number of parallel processes to run for git push operations.') return parser
def GetParser(): parser = commandline.ArgumentParser(description=__doc__) parser.add_argument('--repo-path', type='path', default='.', help='Path to the repo to snapshot.') parser.add_argument('--snapshot-ref', help='Remote ref to create for projects whose HEAD is not reachable from its current upstream branch. Projects with multiple checkouts may have a unique suffix appended to this ref.') parser.add_argument('--output-file', type='path', help='Path to write the manifest snapshot XML to.') parser.add_argument('--dry-run', action='store_true', help='Do not actually push to remotes.') parser.add_argument('--jobs', type=int, default=16, help='The number of parallel processes to run for git push operations.') return parser<|docstring|>Creates the argparse parser.<|endoftext|>
82c8dec0e479965e323226f28fb09f2d0ba898cdf43755e50ac2ad5dd659147f
def _GetUpstreamBranch(project): "Return a best guess at the project's upstream branch name." branch = project.upstream if (branch and branch.startswith(BRANCH_REF_PREFIX)): branch = branch[len(BRANCH_REF_PREFIX):] return branch
Return a best guess at the project's upstream branch name.
scripts/create_manifest_snapshot.py
_GetUpstreamBranch
khromiumos/chromiumos-chromite
0
python
def _GetUpstreamBranch(project): branch = project.upstream if (branch and branch.startswith(BRANCH_REF_PREFIX)): branch = branch[len(BRANCH_REF_PREFIX):] return branch
def _GetUpstreamBranch(project): branch = project.upstream if (branch and branch.startswith(BRANCH_REF_PREFIX)): branch = branch[len(BRANCH_REF_PREFIX):] return branch<|docstring|>Return a best guess at the project's upstream branch name.<|endoftext|>
81f619ac2d9df122a5ed6a9d33dd5dbfe7cbc26ab60dd106028296031632aa06
def _NeedsSnapshot(repo_root, project): "Test if project's revision is reachable from its upstream ref." branch = (_GetUpstreamBranch(project) or 'master') upstream_ref = ('refs/remotes/%s/%s' % (project.Remote().GitName(), branch)) project_path = os.path.join(repo_root, project.Path()) try: if git.IsReachable(project_path, project.revision, upstream_ref): return False except cros_build_lib.RunCommandError as e: logging.debug('Reachability check failed: %s', e) logging.info('Project %s revision %s not reachable from upstream %r.', project.name, project.revision, upstream_ref) return True
Test if project's revision is reachable from its upstream ref.
scripts/create_manifest_snapshot.py
_NeedsSnapshot
khromiumos/chromiumos-chromite
0
python
def _NeedsSnapshot(repo_root, project): branch = (_GetUpstreamBranch(project) or 'master') upstream_ref = ('refs/remotes/%s/%s' % (project.Remote().GitName(), branch)) project_path = os.path.join(repo_root, project.Path()) try: if git.IsReachable(project_path, project.revision, upstream_ref): return False except cros_build_lib.RunCommandError as e: logging.debug('Reachability check failed: %s', e) logging.info('Project %s revision %s not reachable from upstream %r.', project.name, project.revision, upstream_ref) return True
def _NeedsSnapshot(repo_root, project): branch = (_GetUpstreamBranch(project) or 'master') upstream_ref = ('refs/remotes/%s/%s' % (project.Remote().GitName(), branch)) project_path = os.path.join(repo_root, project.Path()) try: if git.IsReachable(project_path, project.revision, upstream_ref): return False except cros_build_lib.RunCommandError as e: logging.debug('Reachability check failed: %s', e) logging.info('Project %s revision %s not reachable from upstream %r.', project.name, project.revision, upstream_ref) return True<|docstring|>Test if project's revision is reachable from its upstream ref.<|endoftext|>
d5261846c49aaf1ff8d94ba295e79f8b639d3d9258c60fadb4f160251eb5e495
def _MakeUniqueRef(project, base_ref, used_refs): "Return a git ref for project that isn't in used_refs.\n\n Args:\n project: The Project object to create a ref for.\n base_ref: A base ref name; this may be appended to to generate a unique ref.\n used_refs: A set of ref names to uniquify against. It is updated with the\n newly generated ref.\n " ref = base_ref branch = _GetUpstreamBranch(project) if (branch and (branch != 'master')): ref = ('%s/%s' % (ref, branch)) if (ref in used_refs): for i in range(1, (len(used_refs) + 2)): numbered = ('%s/%d' % (ref, i)) if (numbered not in used_refs): ref = numbered break else: raise AssertionError(('failed to make unique ref (ref=%s used_refs=%r)' % (ref, used_refs))) used_refs.add(ref) return ref
Return a git ref for project that isn't in used_refs. Args: project: The Project object to create a ref for. base_ref: A base ref name; this may be appended to to generate a unique ref. used_refs: A set of ref names to uniquify against. It is updated with the newly generated ref.
scripts/create_manifest_snapshot.py
_MakeUniqueRef
khromiumos/chromiumos-chromite
0
python
def _MakeUniqueRef(project, base_ref, used_refs): "Return a git ref for project that isn't in used_refs.\n\n Args:\n project: The Project object to create a ref for.\n base_ref: A base ref name; this may be appended to to generate a unique ref.\n used_refs: A set of ref names to uniquify against. It is updated with the\n newly generated ref.\n " ref = base_ref branch = _GetUpstreamBranch(project) if (branch and (branch != 'master')): ref = ('%s/%s' % (ref, branch)) if (ref in used_refs): for i in range(1, (len(used_refs) + 2)): numbered = ('%s/%d' % (ref, i)) if (numbered not in used_refs): ref = numbered break else: raise AssertionError(('failed to make unique ref (ref=%s used_refs=%r)' % (ref, used_refs))) used_refs.add(ref) return ref
def _MakeUniqueRef(project, base_ref, used_refs): "Return a git ref for project that isn't in used_refs.\n\n Args:\n project: The Project object to create a ref for.\n base_ref: A base ref name; this may be appended to to generate a unique ref.\n used_refs: A set of ref names to uniquify against. It is updated with the\n newly generated ref.\n " ref = base_ref branch = _GetUpstreamBranch(project) if (branch and (branch != 'master')): ref = ('%s/%s' % (ref, branch)) if (ref in used_refs): for i in range(1, (len(used_refs) + 2)): numbered = ('%s/%d' % (ref, i)) if (numbered not in used_refs): ref = numbered break else: raise AssertionError(('failed to make unique ref (ref=%s used_refs=%r)' % (ref, used_refs))) used_refs.add(ref) return ref<|docstring|>Return a git ref for project that isn't in used_refs. Args: project: The Project object to create a ref for. base_ref: A base ref name; this may be appended to to generate a unique ref. used_refs: A set of ref names to uniquify against. It is updated with the newly generated ref.<|endoftext|>
7467f41c73a1bddc82fd6ac3e2a7e57b21154a5eed6c008f3ad25fc492da7cf6
def _GitPushProjectUpstream(repo_root, project, dry_run): 'Push the project revision to its remote upstream.' git.GitPush(os.path.join(repo_root, project.Path()), project.revision, git.RemoteRef(project.Remote().GitName(), project.upstream), dry_run=dry_run)
Push the project revision to its remote upstream.
scripts/create_manifest_snapshot.py
_GitPushProjectUpstream
khromiumos/chromiumos-chromite
0
python
def _GitPushProjectUpstream(repo_root, project, dry_run): git.GitPush(os.path.join(repo_root, project.Path()), project.revision, git.RemoteRef(project.Remote().GitName(), project.upstream), dry_run=dry_run)
def _GitPushProjectUpstream(repo_root, project, dry_run): git.GitPush(os.path.join(repo_root, project.Path()), project.revision, git.RemoteRef(project.Remote().GitName(), project.upstream), dry_run=dry_run)<|docstring|>Push the project revision to its remote upstream.<|endoftext|>
a2fe3cd4668726f0a42cc64f172e2ad5a771c5a977ef70ad607abe66ce2f06f3
@classmethod def _load(cls, name): '\n Attemps to load the .so for a tcti given its name. Returns a subclass of\n TCTI.\n ' return type('{}TCTI'.format(name.upper()), (cls,), {'NAME': name, 'CONTEXT': type('{}TCTIContext'.format(name.upper()), (cls.CONTEXT,), {'NAME': name})})
Attempts to load the .so for a tcti given its name. Returns a subclass of TCTI.
tpm2_pytss/tcti.py
_load
pdxjohnny/tpm2-pytss
0
python
@classmethod def _load(cls, name): '\n Attemps to load the .so for a tcti given its name. Returns a subclass of\n TCTI.\n ' return type('{}TCTI'.format(name.upper()), (cls,), {'NAME': name, 'CONTEXT': type('{}TCTIContext'.format(name.upper()), (cls.CONTEXT,), {'NAME': name})})
@classmethod def _load(cls, name): '\n Attemps to load the .so for a tcti given its name. Returns a subclass of\n TCTI.\n ' return type('{}TCTI'.format(name.upper()), (cls,), {'NAME': name, 'CONTEXT': type('{}TCTIContext'.format(name.upper()), (cls.CONTEXT,), {'NAME': name})})<|docstring|>Attemps to load the .so for a tcti given its name. Returns a subclass of TCTI.<|endoftext|>
7d25bd782f32911d234db00d7bab6918ba690731294dde1e043d0bcbba20345d
@classmethod def load(cls, name): '\n Attemps to load the .so for a tcti given its name. Returns the\n instantiation of a subclass of TCTI.\n ' return cls._load(name)()
Attempts to load the .so for a tcti given its name. Returns the instantiation of a subclass of TCTI.
tpm2_pytss/tcti.py
load
pdxjohnny/tpm2-pytss
0
python
@classmethod def load(cls, name): '\n Attemps to load the .so for a tcti given its name. Returns the\n instantiation of a subclass of TCTI.\n ' return cls._load(name)()
@classmethod def load(cls, name): '\n Attemps to load the .so for a tcti given its name. Returns the\n instantiation of a subclass of TCTI.\n ' return cls._load(name)()<|docstring|>Attemps to load the .so for a tcti given its name. Returns the instantiation of a subclass of TCTI.<|endoftext|>
9faa5cd6f0fa32164ee9478e09b770c0c4f023b383061eb92ca00298bcd3f89e
def find_cleanup_targets(self): "Get configured and orphaned commit templates.\n\n :return: Dictionary with keys for each branch that map to a dictionary\n with keys 'config' and 'template' (the respective branch's config\n file and configured template). If there are orphaned commit\n templates, there will also be a key 'orphans' that's mapped to a\n list of orphaned commit templates.\n " config_expr = 'includeif\\.onbranch:(.*)\\.path' config_matches = self.configs.call_config_command(config_expr, file=self.configs.CONFIG_PATH, get_regexp=True) if (config_matches is not None): config_matches = config_matches.split('\n') else: config_matches = [] expr = 'includeif\\.onbranch:(.*)\\.path (.*)' targets = {} configured_commit_templates = [] for line in config_matches: match = re.match(expr, line) if match: branch_name = match.group(1) branch_config_file = match.group(2) branch_commit_template = self.configs.get_config('commit.template', file=os.path.join(self.repo.git_dir, branch_config_file)) targets[branch_name] = {'config': branch_config_file, 'template': branch_commit_template} configured_commit_templates.append(branch_commit_template) repo_root_dir = os.path.dirname(self.repo.git_dir) all_commit_templates = [os.path.basename(path) for path in glob.glob(os.path.join(repo_root_dir, '.gitmessage_local*'))] orphan_commit_templates = [template for template in all_commit_templates if (template not in configured_commit_templates)] if orphan_commit_templates: targets['orphans'] = orphan_commit_templates return targets
Get configured and orphaned commit templates. :return: Dictionary with keys for each branch that map to a dictionary with keys 'config' and 'template' (the respective branch's config file and configured template). If there are orphaned commit templates, there will also be a key 'orphans' that's mapped to a list of orphaned commit templates.
git_workflow/workflow/cleanup.py
find_cleanup_targets
connordelacruz/git-workflow
5
python
def find_cleanup_targets(self): "Get configured and orphaned commit templates.\n\n :return: Dictionary with keys for each branch that map to a dictionary\n with keys 'config' and 'template' (the respective branch's config\n file and configured template). If there are orphaned commit\n templates, there will also be a key 'orphans' that's mapped to a\n list of orphaned commit templates.\n " config_expr = 'includeif\\.onbranch:(.*)\\.path' config_matches = self.configs.call_config_command(config_expr, file=self.configs.CONFIG_PATH, get_regexp=True) if (config_matches is not None): config_matches = config_matches.split('\n') else: config_matches = [] expr = 'includeif\\.onbranch:(.*)\\.path (.*)' targets = {} configured_commit_templates = [] for line in config_matches: match = re.match(expr, line) if match: branch_name = match.group(1) branch_config_file = match.group(2) branch_commit_template = self.configs.get_config('commit.template', file=os.path.join(self.repo.git_dir, branch_config_file)) targets[branch_name] = {'config': branch_config_file, 'template': branch_commit_template} configured_commit_templates.append(branch_commit_template) repo_root_dir = os.path.dirname(self.repo.git_dir) all_commit_templates = [os.path.basename(path) for path in glob.glob(os.path.join(repo_root_dir, '.gitmessage_local*'))] orphan_commit_templates = [template for template in all_commit_templates if (template not in configured_commit_templates)] if orphan_commit_templates: targets['orphans'] = orphan_commit_templates return targets
def find_cleanup_targets(self): "Get configured and orphaned commit templates.\n\n :return: Dictionary with keys for each branch that map to a dictionary\n with keys 'config' and 'template' (the respective branch's config\n file and configured template). If there are orphaned commit\n templates, there will also be a key 'orphans' that's mapped to a\n list of orphaned commit templates.\n " config_expr = 'includeif\\.onbranch:(.*)\\.path' config_matches = self.configs.call_config_command(config_expr, file=self.configs.CONFIG_PATH, get_regexp=True) if (config_matches is not None): config_matches = config_matches.split('\n') else: config_matches = [] expr = 'includeif\\.onbranch:(.*)\\.path (.*)' targets = {} configured_commit_templates = [] for line in config_matches: match = re.match(expr, line) if match: branch_name = match.group(1) branch_config_file = match.group(2) branch_commit_template = self.configs.get_config('commit.template', file=os.path.join(self.repo.git_dir, branch_config_file)) targets[branch_name] = {'config': branch_config_file, 'template': branch_commit_template} configured_commit_templates.append(branch_commit_template) repo_root_dir = os.path.dirname(self.repo.git_dir) all_commit_templates = [os.path.basename(path) for path in glob.glob(os.path.join(repo_root_dir, '.gitmessage_local*'))] orphan_commit_templates = [template for template in all_commit_templates if (template not in configured_commit_templates)] if orphan_commit_templates: targets['orphans'] = orphan_commit_templates return targets<|docstring|>Get configured and orphaned commit templates. :return: Dictionary with keys for each branch that map to a dictionary with keys 'config' and 'template' (the respective branch's config file and configured template). If there are orphaned commit templates, there will also be a key 'orphans' that's mapped to a list of orphaned commit templates.<|endoftext|>
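A short sketch of consuming the dictionary described by the find_cleanup_targets docstring above; the `workflow` instance is assumed, and the key names follow that description.

# Illustrative only: iterate branch -> {'config', 'template'} entries plus any orphans.
targets = workflow.find_cleanup_targets()     # `workflow` is an assumed Cleanup instance
orphans = targets.pop('orphans', [])
for branch, info in targets.items():
    print(f"{branch}: config={info['config']} template={info['template']}")
for template in orphans:
    print(f"orphaned template: {template}")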
c805c0f8b78e35f185b2329dc935c1ab406a459a0ff4ac5523c8637fbf47b80a
def generator(sweep: 'ParameterSweep', func: callable, **kwargs) -> np.ndarray: 'Method for computing custom data as a function of the external parameter,\n calculated via the function `func`.\n\n Parameters\n ----------\n sweep:\n ParameterSweep object containing HilbertSpace and spectral information\n func:\n signature: `func(parametersweep, [paramindex_tuple, paramvals_tuple,\n **kwargs])`, specifies how to calculate the data for a single choice of\n parameter(s)\n **kwargs:\n keyword arguments to be included in func\n\n Returns\n -------\n array of custom data\n ' reduced_parameters = sweep._parameters.create_sliced(sweep._current_param_indices, remove_fixed=False) total_count = np.prod(reduced_parameters.counts) def func_effective(paramindex_tuple: Tuple[int], params, **kw) -> Any: paramvals_tuple = params[paramindex_tuple] return func(sweep, paramindex_tuple=paramindex_tuple, paramvals_tuple=paramvals_tuple, **kw) if hasattr(func, '__name__'): func_name = func.__name__ else: func_name = '' data_array = list(tqdm(map(functools.partial(func_effective, params=reduced_parameters, **kwargs), itertools.product(*reduced_parameters.ranges)), total=total_count, desc=('sweeping ' + func_name), leave=False, disable=settings.PROGRESSBAR_DISABLED)) data_array = np.asarray(data_array) return NamedSlotsNdarray(data_array.reshape(reduced_parameters.counts), reduced_parameters.paramvals_by_name)
Method for computing custom data as a function of the external parameter, calculated via the function `func`. Parameters ---------- sweep: ParameterSweep object containing HilbertSpace and spectral information func: signature: `func(parametersweep, [paramindex_tuple, paramvals_tuple, **kwargs])`, specifies how to calculate the data for a single choice of parameter(s) **kwargs: keyword arguments to be included in func Returns ------- array of custom data
scqubits/core/sweeps.py
generator
dmtvanzanten/scqubits
0
python
def generator(sweep: 'ParameterSweep', func: callable, **kwargs) -> np.ndarray: 'Method for computing custom data as a function of the external parameter,\n calculated via the function `func`.\n\n Parameters\n ----------\n sweep:\n ParameterSweep object containing HilbertSpace and spectral information\n func:\n signature: `func(parametersweep, [paramindex_tuple, paramvals_tuple,\n **kwargs])`, specifies how to calculate the data for a single choice of\n parameter(s)\n **kwargs:\n keyword arguments to be included in func\n\n Returns\n -------\n array of custom data\n ' reduced_parameters = sweep._parameters.create_sliced(sweep._current_param_indices, remove_fixed=False) total_count = np.prod(reduced_parameters.counts) def func_effective(paramindex_tuple: Tuple[int], params, **kw) -> Any: paramvals_tuple = params[paramindex_tuple] return func(sweep, paramindex_tuple=paramindex_tuple, paramvals_tuple=paramvals_tuple, **kw) if hasattr(func, '__name__'): func_name = func.__name__ else: func_name = data_array = list(tqdm(map(functools.partial(func_effective, params=reduced_parameters, **kwargs), itertools.product(*reduced_parameters.ranges)), total=total_count, desc=('sweeping ' + func_name), leave=False, disable=settings.PROGRESSBAR_DISABLED)) data_array = np.asarray(data_array) return NamedSlotsNdarray(data_array.reshape(reduced_parameters.counts), reduced_parameters.paramvals_by_name)
def generator(sweep: 'ParameterSweep', func: callable, **kwargs) -> np.ndarray: 'Method for computing custom data as a function of the external parameter,\n calculated via the function `func`.\n\n Parameters\n ----------\n sweep:\n ParameterSweep object containing HilbertSpace and spectral information\n func:\n signature: `func(parametersweep, [paramindex_tuple, paramvals_tuple,\n **kwargs])`, specifies how to calculate the data for a single choice of\n parameter(s)\n **kwargs:\n keyword arguments to be included in func\n\n Returns\n -------\n array of custom data\n ' reduced_parameters = sweep._parameters.create_sliced(sweep._current_param_indices, remove_fixed=False) total_count = np.prod(reduced_parameters.counts) def func_effective(paramindex_tuple: Tuple[int], params, **kw) -> Any: paramvals_tuple = params[paramindex_tuple] return func(sweep, paramindex_tuple=paramindex_tuple, paramvals_tuple=paramvals_tuple, **kw) if hasattr(func, '__name__'): func_name = func.__name__ else: func_name = data_array = list(tqdm(map(functools.partial(func_effective, params=reduced_parameters, **kwargs), itertools.product(*reduced_parameters.ranges)), total=total_count, desc=('sweeping ' + func_name), leave=False, disable=settings.PROGRESSBAR_DISABLED)) data_array = np.asarray(data_array) return NamedSlotsNdarray(data_array.reshape(reduced_parameters.counts), reduced_parameters.paramvals_by_name)<|docstring|>Method for computing custom data as a function of the external parameter, calculated via the function `func`. Parameters ---------- sweep: ParameterSweep object containing HilbertSpace and spectral information func: signature: `func(parametersweep, [paramindex_tuple, paramvals_tuple, **kwargs])`, specifies how to calculate the data for a single choice of parameter(s) **kwargs: keyword arguments to be included in func Returns ------- array of custom data<|endoftext|>
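A hedged sketch of a callback matching the signature documented for generator() above; how eigenvalues are read off the sweep (the "evals" key) is an assumption about scqubits that this excerpt does not confirm.

# Illustrative callback: func(parametersweep, paramindex_tuple=..., paramvals_tuple=..., **kwargs).
import numpy as np

def lowest_gap(sweep, paramindex_tuple, paramvals_tuple, **kwargs):
    # Assumes precomputed eigenvalues are available under sweep["evals"]; the key name is assumed.
    evals = np.asarray(sweep["evals"][paramindex_tuple])
    return evals[1] - evals[0]

# gap_array = generator(sweep, lowest_gap)   # returns a NamedSlotsNdarray per the docstring above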
3012aeec5d58a16a5507fb061ee43b57a6b13dc10d77897b147dec75aa10aef0
def all_neighbour_nodes(chain_state: ChainState, light_client_address: Address=None) -> Set[Address]: ' Return the identifiers for all nodes accross all payment networks which\n have a channel open with this one.\n ' addresses = set() address_to_search_neighbour = None if (light_client_address is not None): address_to_search_neighbour = to_canonical_address(light_client_address) else: address_to_search_neighbour = chain_state.our_address for payment_network in chain_state.identifiers_to_paymentnetworks.values(): for token_network in payment_network.tokenidentifiers_to_tokennetworks.values(): if (address_to_search_neighbour in token_network.channelidentifiers_to_channels): channel_states = token_network.channelidentifiers_to_channels[address_to_search_neighbour].values() for channel_state in channel_states: addresses.add(channel_state.partner_state.address) return addresses
Return the identifiers for all nodes across all payment networks which have a channel open with this one.
raiden/transfer/views.py
all_neighbour_nodes
marcosmartinez7/lumino
8
python
def all_neighbour_nodes(chain_state: ChainState, light_client_address: Address=None) -> Set[Address]: ' Return the identifiers for all nodes accross all payment networks which\n have a channel open with this one.\n ' addresses = set() address_to_search_neighbour = None if (light_client_address is not None): address_to_search_neighbour = to_canonical_address(light_client_address) else: address_to_search_neighbour = chain_state.our_address for payment_network in chain_state.identifiers_to_paymentnetworks.values(): for token_network in payment_network.tokenidentifiers_to_tokennetworks.values(): if (address_to_search_neighbour in token_network.channelidentifiers_to_channels): channel_states = token_network.channelidentifiers_to_channels[address_to_search_neighbour].values() for channel_state in channel_states: addresses.add(channel_state.partner_state.address) return addresses
def all_neighbour_nodes(chain_state: ChainState, light_client_address: Address=None) -> Set[Address]: ' Return the identifiers for all nodes accross all payment networks which\n have a channel open with this one.\n ' addresses = set() address_to_search_neighbour = None if (light_client_address is not None): address_to_search_neighbour = to_canonical_address(light_client_address) else: address_to_search_neighbour = chain_state.our_address for payment_network in chain_state.identifiers_to_paymentnetworks.values(): for token_network in payment_network.tokenidentifiers_to_tokennetworks.values(): if (address_to_search_neighbour in token_network.channelidentifiers_to_channels): channel_states = token_network.channelidentifiers_to_channels[address_to_search_neighbour].values() for channel_state in channel_states: addresses.add(channel_state.partner_state.address) return addresses<|docstring|>Return the identifiers for all nodes accross all payment networks which have a channel open with this one.<|endoftext|>
8faf20f65b2589bbb823874a8e494c1ea3a43977c0460c0293b6ec4fd9f6c319
def get_token_network_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenNetworkID]: ' Return the list of token networks registered with the given payment network. ' payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id) if (payment_network is not None): return [token_network.address for token_network in payment_network.tokenidentifiers_to_tokennetworks.values()] return list()
Return the list of token networks registered with the given payment network.
raiden/transfer/views.py
get_token_network_identifiers
marcosmartinez7/lumino
8
python
def get_token_network_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenNetworkID]: ' ' payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id) if (payment_network is not None): return [token_network.address for token_network in payment_network.tokenidentifiers_to_tokennetworks.values()] return list()
def get_token_network_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenNetworkID]: ' ' payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id) if (payment_network is not None): return [token_network.address for token_network in payment_network.tokenidentifiers_to_tokennetworks.values()] return list()<|docstring|>Return the list of token networks registered with the given payment network.<|endoftext|>
574177e5f114c1cb7156eed1869754b30655a001e37062f3322f0851944efd3a
def get_token_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenAddress]: ' Return the list of tokens registered with the given payment network. ' payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id) if (payment_network is not None): return [token_address for token_address in payment_network.tokenaddresses_to_tokenidentifiers.keys()] return list()
Return the list of tokens registered with the given payment network.
raiden/transfer/views.py
get_token_identifiers
marcosmartinez7/lumino
8
python
def get_token_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenAddress]: ' ' payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id) if (payment_network is not None): return [token_address for token_address in payment_network.tokenaddresses_to_tokenidentifiers.keys()] return list()
def get_token_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenAddress]: ' ' payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id) if (payment_network is not None): return [token_address for token_address in payment_network.tokenaddresses_to_tokenidentifiers.keys()] return list()<|docstring|>Return the list of tokens registered with the given payment network.<|endoftext|>
d9e9bbadc12a32d6be6f314f9e69385aef5371199d26345d40cd448fa374362d
def get_channelstate_for(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, creator_address: Address, partner_address: Address) -> Optional[NettingChannelState]: ' Return the NettingChannelState if it exists, None otherwise. ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) channel_state = None address_to_get_channel_state = creator_address channel = None if ((token_network and (creator_address in token_network.channelidentifiers_to_channels)) or (token_network and (partner_address in token_network.channelidentifiers_to_channels))): channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (creator_address in token_network.channelidentifiers_to_channels): channel = token_network.channelidentifiers_to_channels[creator_address].get(channel_id) if ((channel is None) and (partner_address in token_network.channelidentifiers_to_channels)): channel = token_network.channelidentifiers_to_channels[partner_address].get(channel_id) address_to_get_channel_state = partner_address if (channel is not None): if ((channel.close_transaction is None) or (channel.close_transaction.result != 'success')): channels.append(token_network.channelidentifiers_to_channels[address_to_get_channel_state][channel_id]) channel = None states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state
Return the NettingChannelState if it exists, None otherwise.
raiden/transfer/views.py
get_channelstate_for
marcosmartinez7/lumino
8
python
def get_channelstate_for(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, creator_address: Address, partner_address: Address) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) channel_state = None address_to_get_channel_state = creator_address channel = None if ((token_network and (creator_address in token_network.channelidentifiers_to_channels)) or (token_network and (partner_address in token_network.channelidentifiers_to_channels))): channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (creator_address in token_network.channelidentifiers_to_channels): channel = token_network.channelidentifiers_to_channels[creator_address].get(channel_id) if ((channel is None) and (partner_address in token_network.channelidentifiers_to_channels)): channel = token_network.channelidentifiers_to_channels[partner_address].get(channel_id) address_to_get_channel_state = partner_address if (channel is not None): if ((channel.close_transaction is None) or (channel.close_transaction.result != 'success')): channels.append(token_network.channelidentifiers_to_channels[address_to_get_channel_state][channel_id]) channel = None states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state
def get_channelstate_for(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, creator_address: Address, partner_address: Address) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) channel_state = None address_to_get_channel_state = creator_address channel = None if ((token_network and (creator_address in token_network.channelidentifiers_to_channels)) or (token_network and (partner_address in token_network.channelidentifiers_to_channels))): channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (creator_address in token_network.channelidentifiers_to_channels): channel = token_network.channelidentifiers_to_channels[creator_address].get(channel_id) if ((channel is None) and (partner_address in token_network.channelidentifiers_to_channels)): channel = token_network.channelidentifiers_to_channels[partner_address].get(channel_id) address_to_get_channel_state = partner_address if (channel is not None): if ((channel.close_transaction is None) or (channel.close_transaction.result != 'success')): channels.append(token_network.channelidentifiers_to_channels[address_to_get_channel_state][channel_id]) channel = None states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state<|docstring|>Return the NettingChannelState if it exists, None otherwise.<|endoftext|>
98f83656a0d1dab8d6560b75dc44a4f9aaa4fea1396a4d630fcbebd37fb5c27c
def get_channelstate_for_close_channel(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, creator_address: Address, partner_address: Address, channel_id_to_check: ChannelID=None) -> Optional[NettingChannelState]: ' Return the NettingChannelState if it exists, None otherwise. ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) channel_state = None address_to_get_channel_state = creator_address if (token_network and (creator_address in token_network.channelidentifiers_to_channels)): channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (channel_id == channel_id_to_check): channel = token_network.channelidentifiers_to_channels[creator_address].get(channel_id) if (channel is None): channel = token_network.channelidentifiers_to_channels[partner_address].get(channel_id) address_to_get_channel_state = partner_address if (channel is not None): if ((channel.close_transaction is not None) and (channel.close_transaction.result == 'success')): channels.append(token_network.channelidentifiers_to_channels[address_to_get_channel_state][channel_id]) states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state
Return the NettingChannelState if it exists, None otherwise.
raiden/transfer/views.py
get_channelstate_for_close_channel
marcosmartinez7/lumino
8
python
def get_channelstate_for_close_channel(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, creator_address: Address, partner_address: Address, channel_id_to_check: ChannelID=None) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) channel_state = None address_to_get_channel_state = creator_address if (token_network and (creator_address in token_network.channelidentifiers_to_channels)): channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (channel_id == channel_id_to_check): channel = token_network.channelidentifiers_to_channels[creator_address].get(channel_id) if (channel is None): channel = token_network.channelidentifiers_to_channels[partner_address].get(channel_id) address_to_get_channel_state = partner_address if (channel is not None): if ((channel.close_transaction is not None) and (channel.close_transaction.result == 'success')): channels.append(token_network.channelidentifiers_to_channels[address_to_get_channel_state][channel_id]) states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state
def get_channelstate_for_close_channel(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, creator_address: Address, partner_address: Address, channel_id_to_check: ChannelID=None) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) channel_state = None address_to_get_channel_state = creator_address if (token_network and (creator_address in token_network.channelidentifiers_to_channels)): channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (channel_id == channel_id_to_check): channel = token_network.channelidentifiers_to_channels[creator_address].get(channel_id) if (channel is None): channel = token_network.channelidentifiers_to_channels[partner_address].get(channel_id) address_to_get_channel_state = partner_address if (channel is not None): if ((channel.close_transaction is not None) and (channel.close_transaction.result == 'success')): channels.append(token_network.channelidentifiers_to_channels[address_to_get_channel_state][channel_id]) states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state<|docstring|>Return the NettingChannelState if it exists, None otherwise.<|endoftext|>
7aa53593c5ef4d02ec52b9b7333fb9f19d4226c0b4fcc6d83137e7ecd1e3b1c8
def get_channelstate_by_token_network_and_partner(chain_state: ChainState, token_network_id: TokenNetworkID, creator_address: Address, partner_address: Address) -> Optional[NettingChannelState]: ' Return the NettingChannelState if it exists, None otherwise. ' token_network = get_token_network_by_identifier(chain_state, token_network_id) channel_state = None if token_network: channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (token_network.channelidentifiers_to_channels[creator_address].get(channel_id) is not None): channels.append(token_network.channelidentifiers_to_channels[creator_address][channel_id]) states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state
Return the NettingChannelState if it exists, None otherwise.
raiden/transfer/views.py
get_channelstate_by_token_network_and_partner
marcosmartinez7/lumino
8
python
def get_channelstate_by_token_network_and_partner(chain_state: ChainState, token_network_id: TokenNetworkID, creator_address: Address, partner_address: Address) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_identifier(chain_state, token_network_id) channel_state = None if token_network: channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (token_network.channelidentifiers_to_channels[creator_address].get(channel_id) is not None): channels.append(token_network.channelidentifiers_to_channels[creator_address][channel_id]) states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state
def get_channelstate_by_token_network_and_partner(chain_state: ChainState, token_network_id: TokenNetworkID, creator_address: Address, partner_address: Address) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_identifier(chain_state, token_network_id) channel_state = None if token_network: channels = [] for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]: if (token_network.channelidentifiers_to_channels[creator_address].get(channel_id) is not None): channels.append(token_network.channelidentifiers_to_channels[creator_address][channel_id]) states = filter_channels_by_status(channels, [CHANNEL_STATE_UNUSABLE]) if states: channel_state = states[(- 1)] return channel_state<|docstring|>Return the NettingChannelState if it exists, None otherwise.<|endoftext|>
53315e10997006b120ad817e50a9786f603ec6beebf3a65d14dab1f0118201c5
def get_channelstate_by_canonical_identifier_and_address(chain_state: ChainState, canonical_identifier: CanonicalIdentifier, address: AddressHex) -> Optional[NettingChannelState]: ' Return the NettingChannelState if it exists, None otherwise. ' token_network = get_token_network_by_identifier(chain_state, TokenNetworkID(canonical_identifier.token_network_address)) channel_state = None if (token_network and (address in token_network.channelidentifiers_to_channels)): channel_state = token_network.channelidentifiers_to_channels[address].get(canonical_identifier.channel_identifier) else: lc_address = get_lc_address_by_channel_id_and_partner(token_network, address, canonical_identifier) if (lc_address in token_network.channelidentifiers_to_channels): channel_state = token_network.channelidentifiers_to_channels[lc_address].get(canonical_identifier.channel_identifier) return channel_state
Return the NettingChannelState if it exists, None otherwise.
raiden/transfer/views.py
get_channelstate_by_canonical_identifier_and_address
marcosmartinez7/lumino
8
python
def get_channelstate_by_canonical_identifier_and_address(chain_state: ChainState, canonical_identifier: CanonicalIdentifier, address: AddressHex) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_identifier(chain_state, TokenNetworkID(canonical_identifier.token_network_address)) channel_state = None if (token_network and (address in token_network.channelidentifiers_to_channels)): channel_state = token_network.channelidentifiers_to_channels[address].get(canonical_identifier.channel_identifier) else: lc_address = get_lc_address_by_channel_id_and_partner(token_network, address, canonical_identifier) if (lc_address in token_network.channelidentifiers_to_channels): channel_state = token_network.channelidentifiers_to_channels[lc_address].get(canonical_identifier.channel_identifier) return channel_state
def get_channelstate_by_canonical_identifier_and_address(chain_state: ChainState, canonical_identifier: CanonicalIdentifier, address: AddressHex) -> Optional[NettingChannelState]: ' ' token_network = get_token_network_by_identifier(chain_state, TokenNetworkID(canonical_identifier.token_network_address)) channel_state = None if (token_network and (address in token_network.channelidentifiers_to_channels)): channel_state = token_network.channelidentifiers_to_channels[address].get(canonical_identifier.channel_identifier) else: lc_address = get_lc_address_by_channel_id_and_partner(token_network, address, canonical_identifier) if (lc_address in token_network.channelidentifiers_to_channels): channel_state = token_network.channelidentifiers_to_channels[lc_address].get(canonical_identifier.channel_identifier) return channel_state<|docstring|>Return the NettingChannelState if it exists, None otherwise.<|endoftext|>
c8dc386aab50900f2e1fb8c29a201ee0c292583e84763a442e9a69667740ea97
def get_channelstate_filter(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, filter_fn: Callable) -> List[NettingChannelState]: ' Return the state of channels that match the condition in `filter_fn` ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) result: List[NettingChannelState] = [] if (not token_network): return result if (chain_state.our_address in token_network.channelidentifiers_to_channels): for channel_state in token_network.channelidentifiers_to_channels[chain_state.our_address].values(): if filter_fn(channel_state): result.append(channel_state) return result
Return the state of channels that match the condition in `filter_fn`
raiden/transfer/views.py
get_channelstate_filter
marcosmartinez7/lumino
8
python
def get_channelstate_filter(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, filter_fn: Callable) -> List[NettingChannelState]: ' ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) result: List[NettingChannelState] = [] if (not token_network): return result if (chain_state.our_address in token_network.channelidentifiers_to_channels): for channel_state in token_network.channelidentifiers_to_channels[chain_state.our_address].values(): if filter_fn(channel_state): result.append(channel_state) return result
def get_channelstate_filter(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, filter_fn: Callable) -> List[NettingChannelState]: ' ' token_network = get_token_network_by_token_address(chain_state, payment_network_id, token_address) result: List[NettingChannelState] = [] if (not token_network): return result if (chain_state.our_address in token_network.channelidentifiers_to_channels): for channel_state in token_network.channelidentifiers_to_channels[chain_state.our_address].values(): if filter_fn(channel_state): result.append(channel_state) return result<|docstring|>Return the state of channels that match the condition in `filter_fn`<|endoftext|>
8341ec5c718dd6f1453ecb10483c0397833db0db9e652d4716e5e942528e6e7d
def get_channelstate_open(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: 'Return the state of open channels in a token network.' return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_OPENED)))
Return the state of open channels in a token network.
raiden/transfer/views.py
get_channelstate_open
marcosmartinez7/lumino
8
python
def get_channelstate_open(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_OPENED)))
def get_channelstate_open(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_OPENED)))<|docstring|>Return the state of open channels in a token network.<|endoftext|>
c2767cd39a40533821134f65ed2a14befdba4d604759d890565e9943a1eee728
def get_channelstate_closing(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: 'Return the state of closing channels in a token network.' return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_CLOSING)))
Return the state of closing channels in a token network.
raiden/transfer/views.py
get_channelstate_closing
marcosmartinez7/lumino
8
python
def get_channelstate_closing(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_CLOSING)))
def get_channelstate_closing(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_CLOSING)))<|docstring|>Return the state of closing channels in a token network.<|endoftext|>
b4d2508a3f55a76cdc307f917b29753bd2b73dcab16b23eab8fc4b97ed094dbe
def get_channelstate_closed(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: 'Return the state of closed channels in a token network.' return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_CLOSED)))
Return the state of closed channels in a token network.
raiden/transfer/views.py
get_channelstate_closed
marcosmartinez7/lumino
8
python
def get_channelstate_closed(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_CLOSED)))
def get_channelstate_closed(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_CLOSED)))<|docstring|>Return the state of closed channels in a token network.<|endoftext|>
378bab465297f5b7a05a846bd4fc2896cc47ef3bd8d8dbd5e6ff93607494a5d5
def get_channelstate_settling(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: 'Return the state of settling channels in a token network.' return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_SETTLING)))
Return the state of settling channels in a token network.
raiden/transfer/views.py
get_channelstate_settling
marcosmartinez7/lumino
8
python
def get_channelstate_settling(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_SETTLING)))
def get_channelstate_settling(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_SETTLING)))<|docstring|>Return the state of settling channels in a token network.<|endoftext|>
9b05db16adab9d3ca6ac7ef1fed6b4d45ba97472fadc96a4fdcea388fbb165b1
def get_channelstate_settled(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: 'Return the state of settled channels in a token network.' return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_SETTLED)))
Return the state of settled channels in a token network.
raiden/transfer/views.py
get_channelstate_settled
marcosmartinez7/lumino
8
python
def get_channelstate_settled(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_SETTLED)))
def get_channelstate_settled(chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress) -> List[NettingChannelState]: return get_channelstate_filter(chain_state, payment_network_id, token_address, (lambda channel_state: (channel.get_status(channel_state) == CHANNEL_STATE_SETTLED)))<|docstring|>Return the state of settled channels in a token network.<|endoftext|>
3526f61b5c4c6f07816c9933a4384873eabddade2d504f87540ced6ade2b6103
def role_from_transfer_task(transfer_task: TransferTask) -> str: 'Return the role and type for the transfer. Throws an exception on error' if isinstance(transfer_task, InitiatorTask): return 'initiator' if isinstance(transfer_task, MediatorTask): return 'mediator' if isinstance(transfer_task, TargetTask): return 'target' raise ValueError('Argument to role_from_transfer_task is not a TransferTask')
Return the role and type for the transfer. Throws an exception on error
raiden/transfer/views.py
role_from_transfer_task
marcosmartinez7/lumino
8
python
def role_from_transfer_task(transfer_task: TransferTask) -> str: if isinstance(transfer_task, InitiatorTask): return 'initiator' if isinstance(transfer_task, MediatorTask): return 'mediator' if isinstance(transfer_task, TargetTask): return 'target' raise ValueError('Argument to role_from_transfer_task is not a TransferTask')
def role_from_transfer_task(transfer_task: TransferTask) -> str: if isinstance(transfer_task, InitiatorTask): return 'initiator' if isinstance(transfer_task, MediatorTask): return 'mediator' if isinstance(transfer_task, TargetTask): return 'target' raise ValueError('Argument to role_from_transfer_task is not a TransferTask')<|docstring|>Return the role and type for the transfer. Throws an exception on error<|endoftext|>
8749109366d65fdd7a4f8db85fc0868357bf1f30350937cc2d9ce9e2aee5c0a2
def secret_from_transfer_task(transfer_task: Optional[TransferTask], secrethash: SecretHash) -> Optional[Secret]: 'Return the secret for the transfer, None on EMPTY_SECRET.' assert isinstance(transfer_task, InitiatorTask) transfer_state = transfer_task.manager_state.initiator_transfers[secrethash] if (transfer_state is None): return None return transfer_state.transfer_description.secret
Return the secret for the transfer, None on EMPTY_SECRET.
raiden/transfer/views.py
secret_from_transfer_task
marcosmartinez7/lumino
8
python
def secret_from_transfer_task(transfer_task: Optional[TransferTask], secrethash: SecretHash) -> Optional[Secret]: assert isinstance(transfer_task, InitiatorTask) transfer_state = transfer_task.manager_state.initiator_transfers[secrethash] if (transfer_state is None): return None return transfer_state.transfer_description.secret
def secret_from_transfer_task(transfer_task: Optional[TransferTask], secrethash: SecretHash) -> Optional[Secret]: assert isinstance(transfer_task, InitiatorTask) transfer_state = transfer_task.manager_state.initiator_transfers[secrethash] if (transfer_state is None): return None return transfer_state.transfer_description.secret<|docstring|>Return the secret for the transfer, None on EMPTY_SECRET.<|endoftext|>
5ddc2968c659922a1df09c69415e00255bcf8a80467c24e71f85dabed8e85a43
def get_transfer_role(chain_state: ChainState, secrethash: SecretHash) -> Optional[str]: "\n Returns 'initiator', 'mediator' or 'target' to signify the role the node has\n in a transfer. If a transfer task is not found for the secrethash then the\n function returns None\n " task = chain_state.payment_mapping.secrethashes_to_task.get(secrethash) if (not task): return None return role_from_transfer_task(task)
Returns 'initiator', 'mediator' or 'target' to signify the role the node has in a transfer. If a transfer task is not found for the secrethash then the function returns None
raiden/transfer/views.py
get_transfer_role
marcosmartinez7/lumino
8
python
def get_transfer_role(chain_state: ChainState, secrethash: SecretHash) -> Optional[str]: "\n Returns 'initiator', 'mediator' or 'target' to signify the role the node has\n in a transfer. If a transfer task is not found for the secrethash then the\n function returns None\n " task = chain_state.payment_mapping.secrethashes_to_task.get(secrethash) if (not task): return None return role_from_transfer_task(task)
def get_transfer_role(chain_state: ChainState, secrethash: SecretHash) -> Optional[str]: "\n Returns 'initiator', 'mediator' or 'target' to signify the role the node has\n in a transfer. If a transfer task is not found for the secrethash then the\n function returns None\n " task = chain_state.payment_mapping.secrethashes_to_task.get(secrethash) if (not task): return None return role_from_transfer_task(task)<|docstring|>Returns 'initiator', 'mediator' or 'target' to signify the role the node has in a transfer. If a transfer task is not found for the secrethash then the function returns None<|endoftext|>
6257ee8ee37c57610f39d1f4de9718f7d7dcceb53bd59dbad05642a9608b44ab
def filter_channels_by_status(channel_states: List[NettingChannelState], exclude_states: Optional[List[str]]=None) -> List[NettingChannelState]: ' Filter the list of channels by excluding ones\n for which the state exists in `exclude_states`. ' if (exclude_states is None): exclude_states = [] states = [] for channel_state in channel_states: if (channel.get_status(channel_state) not in exclude_states): states.append(channel_state) return states
Filter the list of channels by excluding ones for which the state exists in `exclude_states`.
raiden/transfer/views.py
filter_channels_by_status
marcosmartinez7/lumino
8
python
def filter_channels_by_status(channel_states: List[NettingChannelState], exclude_states: Optional[List[str]]=None) -> List[NettingChannelState]: ' Filter the list of channels by excluding ones\n for which the state exists in `exclude_states`. ' if (exclude_states is None): exclude_states = [] states = [] for channel_state in channel_states: if (channel.get_status(channel_state) not in exclude_states): states.append(channel_state) return states
def filter_channels_by_status(channel_states: List[NettingChannelState], exclude_states: Optional[List[str]]=None) -> List[NettingChannelState]: ' Filter the list of channels by excluding ones\n for which the state exists in `exclude_states`. ' if (exclude_states is None): exclude_states = [] states = [] for channel_state in channel_states: if (channel.get_status(channel_state) not in exclude_states): states.append(channel_state) return states<|docstring|>Filter the list of channels by excluding ones for which the state exists in `exclude_states`.<|endoftext|>
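The exclude-list filtering in filter_channels_by_status above is easier to see in isolation. The following is a minimal, self-contained sketch of the same pattern that uses plain strings in place of NettingChannelState objects and channel.get_status(); the helper name and the sample statuses are illustrative only, not part of the raiden API.

# Stand-in for the exclude-list pattern: keep every state whose status is
# not listed in exclude_states (plain strings replace channel objects here).
def filter_by_status(states, exclude_states=None):
    exclude_states = exclude_states or []
    return [state for state in states if state not in exclude_states]

channels = ['opened', 'closing', 'closed', 'settled']
print(filter_by_status(channels, exclude_states=['closed', 'settled']))
# -> ['opened', 'closing']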
02037668f75bb51636b48856221867cf60679f9950e3ffcc6a4670d7a3845925
def detect_balance_proof_change(old_state: ChainState, current_state: ChainState) -> Iterator[Union[(BalanceProofSignedState, BalanceProofUnsignedState)]]: ' Compare two states for any received balance_proofs that are not in `old_state`. ' if (old_state == current_state): return for payment_network_identifier in current_state.identifiers_to_paymentnetworks: try: old_payment_network = old_state.identifiers_to_paymentnetworks.get(payment_network_identifier) except AttributeError: old_payment_network = None current_payment_network = current_state.identifiers_to_paymentnetworks[payment_network_identifier] if (old_payment_network == current_payment_network): continue for token_network_identifier in current_payment_network.tokenidentifiers_to_tokennetworks: if old_payment_network: old_token_network = old_payment_network.tokenidentifiers_to_tokennetworks.get(token_network_identifier) else: old_token_network = None current_token_network = current_payment_network.tokenidentifiers_to_tokennetworks[token_network_identifier] if (old_token_network == current_token_network): continue for channel_identifier in current_token_network.channelidentifiers_to_channels[current_state.our_address]: if old_token_network: old_channel = old_token_network.channelidentifiers_to_channels[current_state.our_address].get(channel_identifier) else: old_channel = None current_channel = current_token_network.channelidentifiers_to_channels[current_state.our_address][channel_identifier] if (current_channel == old_channel): continue else: partner_state_updated = ((current_channel.partner_state.balance_proof is not None) and ((old_channel is None) or (old_channel.partner_state.balance_proof != current_channel.partner_state.balance_proof))) if partner_state_updated: assert current_channel.partner_state.balance_proof, MYPY_ANNOTATION (yield current_channel.partner_state.balance_proof) our_state_updated = ((current_channel.our_state.balance_proof is not None) and ((old_channel is None) or (old_channel.our_state.balance_proof != current_channel.our_state.balance_proof))) if our_state_updated: assert current_channel.our_state.balance_proof, MYPY_ANNOTATION (yield current_channel.our_state.balance_proof)
Compare two states for any received balance_proofs that are not in `old_state`.
raiden/transfer/views.py
detect_balance_proof_change
marcosmartinez7/lumino
8
python
def detect_balance_proof_change(old_state: ChainState, current_state: ChainState) -> Iterator[Union[(BalanceProofSignedState, BalanceProofUnsignedState)]]: ' ' if (old_state == current_state): return for payment_network_identifier in current_state.identifiers_to_paymentnetworks: try: old_payment_network = old_state.identifiers_to_paymentnetworks.get(payment_network_identifier) except AttributeError: old_payment_network = None current_payment_network = current_state.identifiers_to_paymentnetworks[payment_network_identifier] if (old_payment_network == current_payment_network): continue for token_network_identifier in current_payment_network.tokenidentifiers_to_tokennetworks: if old_payment_network: old_token_network = old_payment_network.tokenidentifiers_to_tokennetworks.get(token_network_identifier) else: old_token_network = None current_token_network = current_payment_network.tokenidentifiers_to_tokennetworks[token_network_identifier] if (old_token_network == current_token_network): continue for channel_identifier in current_token_network.channelidentifiers_to_channels[current_state.our_address]: if old_token_network: old_channel = old_token_network.channelidentifiers_to_channels[current_state.our_address].get(channel_identifier) else: old_channel = None current_channel = current_token_network.channelidentifiers_to_channels[current_state.our_address][channel_identifier] if (current_channel == old_channel): continue else: partner_state_updated = ((current_channel.partner_state.balance_proof is not None) and ((old_channel is None) or (old_channel.partner_state.balance_proof != current_channel.partner_state.balance_proof))) if partner_state_updated: assert current_channel.partner_state.balance_proof, MYPY_ANNOTATION (yield current_channel.partner_state.balance_proof) our_state_updated = ((current_channel.our_state.balance_proof is not None) and ((old_channel is None) or (old_channel.our_state.balance_proof != current_channel.our_state.balance_proof))) if our_state_updated: assert current_channel.our_state.balance_proof, MYPY_ANNOTATION (yield current_channel.our_state.balance_proof)
def detect_balance_proof_change(old_state: ChainState, current_state: ChainState) -> Iterator[Union[(BalanceProofSignedState, BalanceProofUnsignedState)]]: ' ' if (old_state == current_state): return for payment_network_identifier in current_state.identifiers_to_paymentnetworks: try: old_payment_network = old_state.identifiers_to_paymentnetworks.get(payment_network_identifier) except AttributeError: old_payment_network = None current_payment_network = current_state.identifiers_to_paymentnetworks[payment_network_identifier] if (old_payment_network == current_payment_network): continue for token_network_identifier in current_payment_network.tokenidentifiers_to_tokennetworks: if old_payment_network: old_token_network = old_payment_network.tokenidentifiers_to_tokennetworks.get(token_network_identifier) else: old_token_network = None current_token_network = current_payment_network.tokenidentifiers_to_tokennetworks[token_network_identifier] if (old_token_network == current_token_network): continue for channel_identifier in current_token_network.channelidentifiers_to_channels[current_state.our_address]: if old_token_network: old_channel = old_token_network.channelidentifiers_to_channels[current_state.our_address].get(channel_identifier) else: old_channel = None current_channel = current_token_network.channelidentifiers_to_channels[current_state.our_address][channel_identifier] if (current_channel == old_channel): continue else: partner_state_updated = ((current_channel.partner_state.balance_proof is not None) and ((old_channel is None) or (old_channel.partner_state.balance_proof != current_channel.partner_state.balance_proof))) if partner_state_updated: assert current_channel.partner_state.balance_proof, MYPY_ANNOTATION (yield current_channel.partner_state.balance_proof) our_state_updated = ((current_channel.our_state.balance_proof is not None) and ((old_channel is None) or (old_channel.our_state.balance_proof != current_channel.our_state.balance_proof))) if our_state_updated: assert current_channel.our_state.balance_proof, MYPY_ANNOTATION (yield current_channel.our_state.balance_proof)<|docstring|>Compare two states for any received balance_proofs that are not in `old_state`.<|endoftext|>
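detect_balance_proof_change above is essentially a diff over nested mappings: it walks payment networks, token networks and channels, skips branches that compare equal, and yields only the balance proofs that changed. A rough self-contained sketch of that idea with plain dicts standing in for the Raiden state objects (all names below are hypothetical and only illustrate the traversal):

# Yield leaf values in `new` that differ from the corresponding value in `old`,
# recursing into nested dicts and skipping unchanged branches early.
def changed_leaves(old, new):
    for key, new_val in new.items():
        old_val = (old or {}).get(key)
        if new_val == old_val:
            continue
        if isinstance(new_val, dict):
            yield from changed_leaves(old_val, new_val)
        else:
            yield new_val

old = {'net': {'chan1': 'bp-1', 'chan2': 'bp-2'}}
new = {'net': {'chan1': 'bp-1', 'chan2': 'bp-3'}, 'net2': {'chan3': 'bp-4'}}
print(list(changed_leaves(old, new)))  # -> ['bp-3', 'bp-4']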
1a1dc2c25f2fb6e846bd80d8f7e218ea6a7cb0e13e04e3a822c55086e5bc4cae
@autojit def rotx(points, beta, mode='deg'): '\n\tRotate points about the global X axis by the angles beta (roll). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) if (np.size(points) == 3): rot_x = np.dot(points, x_rot_mat) else: rot_x = np.dot(points, x_rot_mat) return rot_x
Rotate points about the global X axis by the angles beta (roll). Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html
rotate_lib.py
rotx
Nate28/traj_calc
10
python
@autojit def rotx(points, beta, mode='deg'): '\n\tRotate points about the global X axis by the angles beta (roll). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) if (np.size(points) == 3): rot_x = np.dot(points, x_rot_mat) else: rot_x = np.dot(points, x_rot_mat) return rot_x
@autojit def rotx(points, beta, mode='deg'): '\n\tRotate points about the global X axis by the angles beta (roll). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) if (np.size(points) == 3): rot_x = np.dot(points, x_rot_mat) else: rot_x = np.dot(points, x_rot_mat) return rot_x<|docstring|>Rotate points about the global X axis by the angles beta (roll). Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html<|endoftext|>
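rotx above multiplies the points as row vectors on the left of the matrix (np.dot(points, x_rot_mat)), so the sign convention is easiest to confirm numerically. The snippet below is a standalone numpy check that repeats the same matrix rather than importing rotate_lib, since the @autojit decorator comes from an old numba API and importability of the module is not assumed.

import numpy as np

# Same matrix as in rotx, applied to a row vector: a 90 degree roll about X
# should send the +Y axis to +Z under the default (anticlockwise) convention.
beta = np.deg2rad(90.0)
x_rot_mat = np.array([[1, 0, 0],
                      [0, np.cos(beta), np.sin(beta)],
                      [0, -np.sin(beta), np.cos(beta)]])
point = np.array([0.0, 1.0, 0.0])              # unit vector along +Y
print(np.round(np.dot(point, x_rot_mat), 6))   # -> [0. 0. 1.]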
c81933a42ab583fe8d45b4f942062c4d2856a30833e094bb1c460a9bb6ff715a
@autojit def roty(points, theta, mode='deg'): '\n\tRotate points about the global Y axis by the angles theta (yaw). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): theta = np.deg2rad(theta) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) if (np.size(points) == 3): rot_y = np.dot(points, y_rot_mat) else: rot_y = np.dot(points, y_rot_mat) return rot_y
Rotate points about the global Y axis by the angles theta (yaw). Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html
rotate_lib.py
roty
Nate28/traj_calc
10
python
@autojit def roty(points, theta, mode='deg'): '\n\tRotate points about the global Y axis by the angles theta (yaw). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): theta = np.deg2rad(theta) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) if (np.size(points) == 3): rot_y = np.dot(points, y_rot_mat) else: rot_y = np.dot(points, y_rot_mat) return rot_y
@autojit def roty(points, theta, mode='deg'): '\n\tRotate points about the global Y axis by the angles theta (yaw). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): theta = np.deg2rad(theta) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) if (np.size(points) == 3): rot_y = np.dot(points, y_rot_mat) else: rot_y = np.dot(points, y_rot_mat) return rot_y<|docstring|>Rotate points about the global Y axis by the angles theta (yaw). Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html<|endoftext|>
6e900f8466417bc288c9fc0d562ed6f7c9eb3b3224a5de49cfc2bce0bf6103c4
@autojit def rotz(points, alpha, mode='deg'): '\n\tRotate points about the global Z axis by the angles theta (pitch). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_z = np.dot(points, z_rot_mat) else: rot_z = np.dot(points, z_rot_mat) return rot_z
Rotate points about the global Z axis by the angles theta (pitch). Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html
rotate_lib.py
rotz
Nate28/traj_calc
10
python
@autojit def rotz(points, alpha, mode='deg'): '\n\tRotate points about the global Z axis by the angles theta (pitch). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_z = np.dot(points, z_rot_mat) else: rot_z = np.dot(points, z_rot_mat) return rot_z
@autojit def rotz(points, alpha, mode='deg'): '\n\tRotate points about the global Z axis by the angles theta (pitch). Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\t' if (mode == 'rad'): pass elif (mode == 'deg'): alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_z = np.dot(points, z_rot_mat) else: rot_z = np.dot(points, z_rot_mat) return rot_z<|docstring|>Rotate points about the global Z axis by the angles theta (pitch). Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html<|endoftext|>
2b07aca2d1feeaa6cba2a53986c6e9f9db0ed44050505e63d09d215abe641e72
@autojit def rotxyz(points, beta, theta, alpha, mode='deg', clockwise=False): "\n\t=== triPy.rotxyz ===\n\tRotates points about the global X, Y and Z axes by the angles beta, theta,\n\tand alpha (roll, yaw, and pitch, respectively) in that order. Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\n\t=== Inputs ===\n\t'points'\tXYZ coordinates of the points to be rotated\n\t'beta'\t\tAngle of rotation about the X axis (roll)\n\t'theta'\t\tAngle of rotation about the Y axis (yaw)\n\t'alpha'\t\tAngle of rotation about the Z axis (pitch)\n\t'mode'\t\tDefines whether angles are expressed in radians or degrees\n\t\t\t\t(degrees are the default)\n\t'clockwise' Defines direction of rotation about axes\n\t\t\t\t(may be either True or False - the default is False: anticlockwise)\n\n\t=== Usage ===\n\timport triPy\n\trot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)\n\t" if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) theta = np.deg2rad(theta) alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') if (clockwise == False): x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) else: x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), (- np.sin(beta))], [0, np.sin(beta), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [(- np.sin(theta)), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), (- np.sin(alpha)), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_x = np.dot(points, x_rot_mat) rot_xy = np.dot(rot_x, y_rot_mat) rot_xyz = np.dot(rot_xy, z_rot_mat) else: rot_x = np.dot(points, x_rot_mat) rot_xy = np.dot(rot_x, y_rot_mat) rot_xyz = np.dot(rot_xy, z_rot_mat) return rot_xyz
=== triPy.rotxyz === Rotates points about the global X, Y and Z axes by the angles beta, theta, and alpha (roll, yaw, and pitch, respectively) in that order. Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html === Inputs === 'points' XYZ coordinates of the points to be rotated 'beta' Angle of rotation about the X axis (roll) 'theta' Angle of rotation about the Y axis (yaw) 'alpha' Angle of rotation about the Z axis (pitch) 'mode' Defines whether angles are expressed in radians or degrees (degrees are the default) 'clockwise' Defines direction of rotation about axes (may be either True or False - the default is False: anticlockwise) === Usage === import triPy rot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)
rotate_lib.py
rotxyz
Nate28/traj_calc
10
python
@autojit def rotxyz(points, beta, theta, alpha, mode='deg', clockwise=False): "\n\t=== triPy.rotxyz ===\n\tRotates points about the global X, Y and Z axes by the angles beta, theta,\n\tand alpha (roll, yaw, and pitch, respectively) in that order. Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\n\t=== Inputs ===\n\t'points'\tXYZ coordinates of the points to be rotated\n\t'beta'\t\tAngle of rotation about the X axis (roll)\n\t'theta'\t\tAngle of rotation about the Y axis (yaw)\n\t'alpha'\t\tAngle of rotation about the Z axis (pitch)\n\t'mode'\t\tDefines whether angles are expressed in radians or degrees\n\t\t\t\t(degrees are the default)\n\t'clockwise' Defines direction of rotation about axes\n\t\t\t\t(may be either True or False - the default is False: anticlockwise)\n\n\t=== Usage ===\n\timport triPy\n\trot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)\n\t" if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) theta = np.deg2rad(theta) alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') if (clockwise == False): x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) else: x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), (- np.sin(beta))], [0, np.sin(beta), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [(- np.sin(theta)), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), (- np.sin(alpha)), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_x = np.dot(points, x_rot_mat) rot_xy = np.dot(rot_x, y_rot_mat) rot_xyz = np.dot(rot_xy, z_rot_mat) else: rot_x = np.dot(points, x_rot_mat) rot_xy = np.dot(rot_x, y_rot_mat) rot_xyz = np.dot(rot_xy, z_rot_mat) return rot_xyz
@autojit def rotxyz(points, beta, theta, alpha, mode='deg', clockwise=False): "\n\t=== triPy.rotxyz ===\n\tRotates points about the global X, Y and Z axes by the angles beta, theta,\n\tand alpha (roll, yaw, and pitch, respectively) in that order. Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\n\t=== Inputs ===\n\t'points'\tXYZ coordinates of the points to be rotated\n\t'beta'\t\tAngle of rotation about the X axis (roll)\n\t'theta'\t\tAngle of rotation about the Y axis (yaw)\n\t'alpha'\t\tAngle of rotation about the Z axis (pitch)\n\t'mode'\t\tDefines whether angles are expressed in radians or degrees\n\t\t\t\t(degrees are the default)\n\t'clockwise' Defines direction of rotation about axes\n\t\t\t\t(may be either True or False - the default is False: anticlockwise)\n\n\t=== Usage ===\n\timport triPy\n\trot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)\n\t" if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) theta = np.deg2rad(theta) alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') if (clockwise == False): x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) else: x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), (- np.sin(beta))], [0, np.sin(beta), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [(- np.sin(theta)), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), (- np.sin(alpha)), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_x = np.dot(points, x_rot_mat) rot_xy = np.dot(rot_x, y_rot_mat) rot_xyz = np.dot(rot_xy, z_rot_mat) else: rot_x = np.dot(points, x_rot_mat) rot_xy = np.dot(rot_x, y_rot_mat) rot_xyz = np.dot(rot_xy, z_rot_mat) return rot_xyz<|docstring|>=== triPy.rotxyz === Rotates points about the global X, Y and Z axes by the angles beta, theta, and alpha (roll, yaw, and pitch, respectively) in that order. Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html === Inputs === 'points' XYZ coordinates of the points to be rotated 'beta' Angle of rotation about the X axis (roll) 'theta' Angle of rotation about the Y axis (yaw) 'alpha' Angle of rotation about the Z axis (pitch) 'mode' Defines whether angles are expressed in radians or degrees (degrees are the default) 'clockwise' Defines direction of rotation about axes (may be either True or False - the default is False: anticlockwise) === Usage === import triPy rot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)<|endoftext|>
b347ab66932413e26c3d29edb151b5e2e3d12248fb8ce70ee1979601e442cb55
@autojit def rotzyx(points, beta, theta, alpha, mode='deg', clockwise=False): "\n\t=== triPy.rotxyz ===\n\tRotates points about the global X, Y and Z axes by the angles beta, theta,\n\tand alpha (roll, yaw, and pitch, respectively) in reverse order. Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\n\t=== Inputs ===\n\t'points'\tXYZ coordinates of the points to be rotated\n\t'beta'\t\tAngle of rotation about the X axis (roll)\n\t'theta'\t\tAngle of rotation about the Y axis (pitch)\n\t'alpha'\t\tAngle of rotation about the Z axis (yaw)\n\t'mode'\t\tDefines whether angles are expressed in radians or degrees\n\t\t\t\t(degrees are the default)\n\t'clockwise' Defines direction of rotation about axes\n\t\t\t\t(may be either True or False - the default is False: anticlockwise)\n\n\t=== Usage ===\n\timport triPy\n\trot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)\n\t" if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) theta = np.deg2rad(theta) alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') if (clockwise == False): x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) else: x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), (- np.sin(beta))], [0, np.sin(beta), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [(- np.sin(theta)), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), (- np.sin(alpha)), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_z = np.dot(points, z_rot_mat) rot_zy = np.dot(rot_z, y_rot_mat) rot_zyx = np.dot(rot_zy, x_rot_mat) else: rot_z = np.dot(points, z_rot_mat) rot_zy = np.dot(rot_z, y_rot_mat) rot_zyx = np.dot(rot_zy, x_rot_mat) return rot_zyx
=== triPy.rotxyz === Rotates points about the global X, Y and Z axes by the angles beta, theta, and alpha (roll, yaw, and pitch, respectively) in reverse order. Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html === Inputs === 'points' XYZ coordinates of the points to be rotated 'beta' Angle of rotation about the X axis (roll) 'theta' Angle of rotation about the Y axis (pitch) 'alpha' Angle of rotation about the Z axis (yaw) 'mode' Defines whether angles are expressed in radians or degrees (degrees are the default) 'clockwise' Defines direction of rotation about axes (may be either True or False - the default is False: anticlockwise) === Usage === import triPy rot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)
rotate_lib.py
rotzyx
Nate28/traj_calc
10
python
@autojit def rotzyx(points, beta, theta, alpha, mode='deg', clockwise=False): "\n\t=== triPy.rotxyz ===\n\tRotates points about the global X, Y and Z axes by the angles beta, theta,\n\tand alpha (roll, yaw, and pitch, respectively) in reverse order. Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\n\t=== Inputs ===\n\t'points'\tXYZ coordinates of the points to be rotated\n\t'beta'\t\tAngle of rotation about the X axis (roll)\n\t'theta'\t\tAngle of rotation about the Y axis (pitch)\n\t'alpha'\t\tAngle of rotation about the Z axis (yaw)\n\t'mode'\t\tDefines whether angles are expressed in radians or degrees\n\t\t\t\t(degrees are the default)\n\t'clockwise' Defines direction of rotation about axes\n\t\t\t\t(may be either True or False - the default is False: anticlockwise)\n\n\t=== Usage ===\n\timport triPy\n\trot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)\n\t" if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) theta = np.deg2rad(theta) alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') if (clockwise == False): x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) else: x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), (- np.sin(beta))], [0, np.sin(beta), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [(- np.sin(theta)), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), (- np.sin(alpha)), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_z = np.dot(points, z_rot_mat) rot_zy = np.dot(rot_z, y_rot_mat) rot_zyx = np.dot(rot_zy, x_rot_mat) else: rot_z = np.dot(points, z_rot_mat) rot_zy = np.dot(rot_z, y_rot_mat) rot_zyx = np.dot(rot_zy, x_rot_mat) return rot_zyx
@autojit def rotzyx(points, beta, theta, alpha, mode='deg', clockwise=False): "\n\t=== triPy.rotxyz ===\n\tRotates points about the global X, Y and Z axes by the angles beta, theta,\n\tand alpha (roll, yaw, and pitch, respectively) in reverse order. Rotation\n\tmatrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html\n\n\t=== Inputs ===\n\t'points'\tXYZ coordinates of the points to be rotated\n\t'beta'\t\tAngle of rotation about the X axis (roll)\n\t'theta'\t\tAngle of rotation about the Y axis (pitch)\n\t'alpha'\t\tAngle of rotation about the Z axis (yaw)\n\t'mode'\t\tDefines whether angles are expressed in radians or degrees\n\t\t\t\t(degrees are the default)\n\t'clockwise' Defines direction of rotation about axes\n\t\t\t\t(may be either True or False - the default is False: anticlockwise)\n\n\t=== Usage ===\n\timport triPy\n\trot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)\n\t" if (mode == 'rad'): pass elif (mode == 'deg'): beta = np.deg2rad(beta) theta = np.deg2rad(theta) alpha = np.deg2rad(alpha) else: print('ERROR: Incorrect angle type specified. Assuming degrees.') if (clockwise == False): x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), np.sin(beta)], [0, (- np.sin(beta)), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, (- np.sin(theta))], [0, 1, 0], [np.sin(theta), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), np.sin(alpha), 0], [(- np.sin(alpha)), np.cos(alpha), 0], [0, 0, 1]]) else: x_rot_mat = np.array([[1, 0, 0], [0, np.cos(beta), (- np.sin(beta))], [0, np.sin(beta), np.cos(beta)]]) y_rot_mat = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [(- np.sin(theta)), 0, np.cos(theta)]]) z_rot_mat = np.array([[np.cos(alpha), (- np.sin(alpha)), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) if (np.size(points) == 3): rot_z = np.dot(points, z_rot_mat) rot_zy = np.dot(rot_z, y_rot_mat) rot_zyx = np.dot(rot_zy, x_rot_mat) else: rot_z = np.dot(points, z_rot_mat) rot_zy = np.dot(rot_z, y_rot_mat) rot_zyx = np.dot(rot_zy, x_rot_mat) return rot_zyx<|docstring|>=== triPy.rotxyz === Rotates points about the global X, Y and Z axes by the angles beta, theta, and alpha (roll, yaw, and pitch, respectively) in reverse order. Rotation matrices are sourced from http://mathworld.wolfram.com/RotationMatrix.html === Inputs === 'points' XYZ coordinates of the points to be rotated 'beta' Angle of rotation about the X axis (roll) 'theta' Angle of rotation about the Y axis (pitch) 'alpha' Angle of rotation about the Z axis (yaw) 'mode' Defines whether angles are expressed in radians or degrees (degrees are the default) 'clockwise' Defines direction of rotation about axes (may be either True or False - the default is False: anticlockwise) === Usage === import triPy rot_x, rot_y, rot_z = rotMatrix(x, y, z, beta, theta, alpha)<|endoftext|>
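rotxyz and rotzyx above apply the same three rotation matrices in opposite orders, and because 3-D rotations do not commute the two orders generally give different results for the same angles. A standalone numpy sketch of that, rebuilding the anticlockwise matrices from the function bodies instead of calling the decorated functions:

import numpy as np

# Roll, yaw, pitch angles (X, Y, Z rotations) in radians.
b, t, a = np.deg2rad([30.0, 45.0, 60.0])
Rx = np.array([[1, 0, 0], [0, np.cos(b), np.sin(b)], [0, -np.sin(b), np.cos(b)]])
Ry = np.array([[np.cos(t), 0, -np.sin(t)], [0, 1, 0], [np.sin(t), 0, np.cos(t)]])
Rz = np.array([[np.cos(a), np.sin(a), 0], [-np.sin(a), np.cos(a), 0], [0, 0, 1]])
p = np.array([1.0, 0.0, 0.0])
print(np.round(p @ Rx @ Ry @ Rz, 4))   # X-then-Y-then-Z order, as in rotxyz
print(np.round(p @ Rz @ Ry @ Rx, 4))   # Z-then-Y-then-X order, as in rotzyx; differs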
c9c99df79d2cdc3a389fb5eb757b648c73550cf44635f3941d2e113319b17492
@autojit def eulerToVector(alpha, theta): '\n\tConvert Euler angles (pitch and yaw; alpha and theta) to vector components\n\tin XYZ reference frame.\n\t' x = (np.sin(alpha) * np.cos(theta)) y = (np.sin(alpha) * np.sin(theta)) z = np.cos(alpha) return [x, y, z]
Convert Euler angles (pitch and yaw; alpha and theta) to vector components in XYZ reference frame.
rotate_lib.py
eulerToVector
Nate28/traj_calc
10
python
@autojit def eulerToVector(alpha, theta): '\n\tConvert Euler angles (pitch and yaw; alpha and theta) to vector components\n\tin XYZ reference frame.\n\t' x = (np.sin(alpha) * np.cos(theta)) y = (np.sin(alpha) * np.sin(theta)) z = np.cos(alpha) return [x, y, z]
@autojit def eulerToVector(alpha, theta): '\n\tConvert Euler angles (pitch and yaw; alpha and theta) to vector components\n\tin XYZ reference frame.\n\t' x = (np.sin(alpha) * np.cos(theta)) y = (np.sin(alpha) * np.sin(theta)) z = np.cos(alpha) return [x, y, z]<|docstring|>Convert Euler angles (pitch and yaw; alpha and theta) to vector components in XYZ reference frame.<|endoftext|>
6d0173ee9c7918c6f2e9e6cbc4815ef2e7317480fe70859470610f38c988c635
@autojit def vectorToEuler(x, y, z): '\n\tConvert XYZ vector components to Euler angles (pitch and yaw; alpha and\n\ttheta)\n\t' r = np.linalg.norm([x, y, z]) theta = np.arctan((y / x)) alpha = np.arccos((z / r)) if ((x > 0) & (y > 0)): pass elif ((x < 0) & (y > 0)): theta = (np.pi - np.abs(theta)) elif ((x < 0) & (y < 0)): theta = (np.pi + np.abs(theta)) elif ((x > 0) & (y < 0)): theta = ((2 * np.pi) - np.abs(theta)) else: pass if ((x > 0) & (z > 0)): pass elif ((x < 0) & (z > 0)): alpha = (np.pi - np.abs(alpha)) elif ((x < 0) & (z < 0)): alpha = (np.pi + np.abs(alpha)) elif ((x > 0) & (z < 0)): alpha = ((2 * np.pi) - np.abs(alpha)) else: pass return [alpha, theta]
Convert XYZ vector components to Euler angles (pitch and yaw; alpha and theta)
rotate_lib.py
vectorToEuler
Nate28/traj_calc
10
python
@autojit def vectorToEuler(x, y, z): '\n\tConvert XYZ vector components to Euler angles (pitch and yaw; alpha and\n\ttheta)\n\t' r = np.linalg.norm([x, y, z]) theta = np.arctan((y / x)) alpha = np.arccos((z / r)) if ((x > 0) & (y > 0)): pass elif ((x < 0) & (y > 0)): theta = (np.pi - np.abs(theta)) elif ((x < 0) & (y < 0)): theta = (np.pi + np.abs(theta)) elif ((x > 0) & (y < 0)): theta = ((2 * np.pi) - np.abs(theta)) else: pass if ((x > 0) & (z > 0)): pass elif ((x < 0) & (z > 0)): alpha = (np.pi - np.abs(alpha)) elif ((x < 0) & (z < 0)): alpha = (np.pi + np.abs(alpha)) elif ((x > 0) & (z < 0)): alpha = ((2 * np.pi) - np.abs(alpha)) else: pass return [alpha, theta]
@autojit def vectorToEuler(x, y, z): '\n\tConvert XYZ vector components to Euler angles (pitch and yaw; alpha and\n\ttheta)\n\t' r = np.linalg.norm([x, y, z]) theta = np.arctan((y / x)) alpha = np.arccos((z / r)) if ((x > 0) & (y > 0)): pass elif ((x < 0) & (y > 0)): theta = (np.pi - np.abs(theta)) elif ((x < 0) & (y < 0)): theta = (np.pi + np.abs(theta)) elif ((x > 0) & (y < 0)): theta = ((2 * np.pi) - np.abs(theta)) else: pass if ((x > 0) & (z > 0)): pass elif ((x < 0) & (z > 0)): alpha = (np.pi - np.abs(alpha)) elif ((x < 0) & (z < 0)): alpha = (np.pi + np.abs(alpha)) elif ((x > 0) & (z < 0)): alpha = ((2 * np.pi) - np.abs(alpha)) else: pass return [alpha, theta]<|docstring|>Convert XYZ vector components to Euler angles (pitch and yaw; alpha and theta)<|endoftext|>
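eulerToVector and vectorToEuler above are, in effect, a spherical-to-Cartesian conversion and its inverse, with alpha measured from the +Z axis and theta as the azimuth in the XY plane. A small plain-numpy round trip for a first-quadrant case (written inline rather than importing rotate_lib, for the same importability caveat as above):

import numpy as np

# Angles -> unit-vector components -> angles again, mirroring the formulas above.
alpha, theta = np.deg2rad(60.0), np.deg2rad(30.0)
x = np.sin(alpha) * np.cos(theta)
y = np.sin(alpha) * np.sin(theta)
z = np.cos(alpha)
r = np.linalg.norm([x, y, z])                                # 1.0 by construction
print(np.rad2deg(np.arctan(y / x)), np.rad2deg(np.arccos(z / r)))  # ~30.0 ~60.0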
48a63f778d900bea393f74226ea3e66ef76a6f3ecfb9bfffafac11b4458d90cf
def localPitchRoll(pitch, yaw, roll, clockwise=False): '\n\tCalculate pitch and roll of an object in its local vetical plane\n\tNB: pitch, yaw, and roll denote rotations about the Y, Z, and X axes,\n\trespectively.\n\t' vecX = np.array([1, 0, 0]) vecZ = np.array([0, 0, 1]) vecX1 = rotxyz(vecX, roll, pitch, yaw, mode='rad', clockwise=clockwise) vecZ1 = rotxyz(vecZ, roll, pitch, yaw, mode='rad', clockwise=clockwise) vecX1_proj = np.array([0, vecX1[1], vecX1[2]]) vecZ_proj = np.array([0, 0, np.linalg.norm(vecX1_proj)]) planeRoll = vectorAngle(vecX1_proj, vecZ_proj) vecZ2 = rotxyz(vecZ1, planeRoll, 0, 0, mode='rad', clockwise=clockwise) localPitch = vectorAngle(vecX1, vecX) vecZ3 = rotxyz(vecZ2, 0, (- localPitch), 0, mode='rad', clockwise=clockwise) localRoll = vectorAngle(vecZ3, vecZ) return (localPitch, localRoll, planeRoll)
Calculate pitch and roll of an object in its local vetical plane NB: pitch, yaw, and roll denote rotations about the Y, Z, and X axes, respectively.
rotate_lib.py
localPitchRoll
Nate28/traj_calc
10
python
def localPitchRoll(pitch, yaw, roll, clockwise=False): '\n\tCalculate pitch and roll of an object in its local vetical plane\n\tNB: pitch, yaw, and roll denote rotations about the Y, Z, and X axes,\n\trespectively.\n\t' vecX = np.array([1, 0, 0]) vecZ = np.array([0, 0, 1]) vecX1 = rotxyz(vecX, roll, pitch, yaw, mode='rad', clockwise=clockwise) vecZ1 = rotxyz(vecZ, roll, pitch, yaw, mode='rad', clockwise=clockwise) vecX1_proj = np.array([0, vecX1[1], vecX1[2]]) vecZ_proj = np.array([0, 0, np.linalg.norm(vecX1_proj)]) planeRoll = vectorAngle(vecX1_proj, vecZ_proj) vecZ2 = rotxyz(vecZ1, planeRoll, 0, 0, mode='rad', clockwise=clockwise) localPitch = vectorAngle(vecX1, vecX) vecZ3 = rotxyz(vecZ2, 0, (- localPitch), 0, mode='rad', clockwise=clockwise) localRoll = vectorAngle(vecZ3, vecZ) return (localPitch, localRoll, planeRoll)
def localPitchRoll(pitch, yaw, roll, clockwise=False): '\n\tCalculate pitch and roll of an object in its local vetical plane\n\tNB: pitch, yaw, and roll denote rotations about the Y, Z, and X axes,\n\trespectively.\n\t' vecX = np.array([1, 0, 0]) vecZ = np.array([0, 0, 1]) vecX1 = rotxyz(vecX, roll, pitch, yaw, mode='rad', clockwise=clockwise) vecZ1 = rotxyz(vecZ, roll, pitch, yaw, mode='rad', clockwise=clockwise) vecX1_proj = np.array([0, vecX1[1], vecX1[2]]) vecZ_proj = np.array([0, 0, np.linalg.norm(vecX1_proj)]) planeRoll = vectorAngle(vecX1_proj, vecZ_proj) vecZ2 = rotxyz(vecZ1, planeRoll, 0, 0, mode='rad', clockwise=clockwise) localPitch = vectorAngle(vecX1, vecX) vecZ3 = rotxyz(vecZ2, 0, (- localPitch), 0, mode='rad', clockwise=clockwise) localRoll = vectorAngle(vecZ3, vecZ) return (localPitch, localRoll, planeRoll)<|docstring|>Calculate pitch and roll of an object in its local vetical plane NB: pitch, yaw, and roll denote rotations about the Y, Z, and X axes, respectively.<|endoftext|>
72d4b722f7faea02f8362b2a0cad632e45a06a570c9175bd799965ab2421c8eb
@autojit def vectorAngle(a, b): '\n\tReturn angle between two vectors\n\t' return np.arccos((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))
Return angle between two vectors
rotate_lib.py
vectorAngle
Nate28/traj_calc
10
python
@autojit def vectorAngle(a, b): '\n\t\n\t' return np.arccos((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))
@autojit def vectorAngle(a, b): '\n\t\n\t' return np.arccos((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))<|docstring|>Return angle between two vectors<|endoftext|>
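vectorAngle above is the usual arccos of the normalized dot product. A one-off worked check with plain numpy, independent of rotate_lib:

import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([1.0, 1.0, 0.0])
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
print(np.rad2deg(angle))  # -> 45.0 degrees between the two vectors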
38bafaae85216d96d6bdee147eeb53241494e68a190a56e18e274bb4312b6f67
def SpiroplasmaCulicicola(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Spiroplasma culicicola graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Spiroplasma culicicola graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 23:38:26.963703\n\t\n\tThe undirected graph Spiroplasma culicicola has 1070 nodes and 71886 weighted\n\tedges, of which none are self-loops. The graph is quite dense as it has\n\ta density of 0.12569 and has 5 connected components, where the component\n\twith most nodes has 1059 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 114, the mean node degree is 134.37,\n\tand the node degree mode is 9. The top 5 most central nodes are 1276246.SCULI_v1c08760\n\t(degree 508), 1276246.SCULI_v1c03710 (degree 486), 1276246.SCULI_v1c07920\n\t(degree 482), 1276246.SCULI_v1c08580 (degree 465) and 1276246.SCULI_v1c07270\n\t(degree 438).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import SpiroplasmaCulicicola\n\t\n\t # Then load the graph\n\t graph = SpiroplasmaCulicicola()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='SpiroplasmaCulicicola', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
Return new instance of the Spiroplasma culicicola graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Spiroplasma culicicola graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 23:38:26.963703 The undirected graph Spiroplasma culicicola has 1070 nodes and 71886 weighted edges, of which none are self-loops. The graph is quite dense as it has a density of 0.12569 and has 5 connected components, where the component with most nodes has 1059 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 114, the mean node degree is 134.37, and the node degree mode is 9. The top 5 most central nodes are 1276246.SCULI_v1c08760 (degree 508), 1276246.SCULI_v1c03710 (degree 486), 1276246.SCULI_v1c07920 (degree 482), 1276246.SCULI_v1c08580 (degree 465) and 1276246.SCULI_v1c07270 (degree 438). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import SpiroplasmaCulicicola # Then load the graph graph = SpiroplasmaCulicicola() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.
bindings/python/ensmallen_graph/datasets/string/spiroplasmaculicicola.py
SpiroplasmaCulicicola
caufieldjh/ensmallen_graph
0
python
def SpiroplasmaCulicicola(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Spiroplasma culicicola graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Spiroplasma culicicola graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 23:38:26.963703\n\t\n\tThe undirected graph Spiroplasma culicicola has 1070 nodes and 71886 weighted\n\tedges, of which none are self-loops. The graph is quite dense as it has\n\ta density of 0.12569 and has 5 connected components, where the component\n\twith most nodes has 1059 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 114, the mean node degree is 134.37,\n\tand the node degree mode is 9. The top 5 most central nodes are 1276246.SCULI_v1c08760\n\t(degree 508), 1276246.SCULI_v1c03710 (degree 486), 1276246.SCULI_v1c07920\n\t(degree 482), 1276246.SCULI_v1c08580 (degree 465) and 1276246.SCULI_v1c07270\n\t(degree 438).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import SpiroplasmaCulicicola\n\t\n\t # Then load the graph\n\t graph = SpiroplasmaCulicicola()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='SpiroplasmaCulicicola', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
def SpiroplasmaCulicicola(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Spiroplasma culicicola graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Spiroplasma culicicola graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 23:38:26.963703\n\t\n\tThe undirected graph Spiroplasma culicicola has 1070 nodes and 71886 weighted\n\tedges, of which none are self-loops. The graph is quite dense as it has\n\ta density of 0.12569 and has 5 connected components, where the component\n\twith most nodes has 1059 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 114, the mean node degree is 134.37,\n\tand the node degree mode is 9. The top 5 most central nodes are 1276246.SCULI_v1c08760\n\t(degree 508), 1276246.SCULI_v1c03710 (degree 486), 1276246.SCULI_v1c07920\n\t(degree 482), 1276246.SCULI_v1c08580 (degree 465) and 1276246.SCULI_v1c07270\n\t(degree 438).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import SpiroplasmaCulicicola\n\t\n\t # Then load the graph\n\t graph = SpiroplasmaCulicicola()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='SpiroplasmaCulicicola', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()<|docstring|>Return new instance of the Spiroplasma culicicola graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Spiroplasma culicicola graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 23:38:26.963703 The undirected graph Spiroplasma culicicola has 1070 nodes and 71886 weighted edges, of which none are self-loops. The graph is quite dense as it has a density of 0.12569 and has 5 connected components, where the component with most nodes has 1059 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 114, the mean node degree is 134.37, and the node degree mode is 9. The top 5 most central nodes are 1276246.SCULI_v1c08760 (degree 508), 1276246.SCULI_v1c03710 (degree 486), 1276246.SCULI_v1c07920 (degree 482), 1276246.SCULI_v1c08580 (degree 465) and 1276246.SCULI_v1c07270 (degree 438). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. 
code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import SpiroplasmaCulicicola # Then load the graph graph = SpiroplasmaCulicicola() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.<|endoftext|>
d19895a6a716e09b1cad231cf61fbde0903e34c00a342a010fdd56694726e54c
def fill_wrfinput_ncdfpy(rootgrp_in, rootgrp_out, laimo=8): '\n This function will populate the arrays in the WRFINPUT file based on array and\n attribute values in the input GEOGRID file.\n ' soilT = rootgrp_in.variables['SOILTEMP'][:] hgt = rootgrp_in.variables['HGT_M'][:] use = rootgrp_in.variables['LU_INDEX'][:] soil_top_cat = rootgrp_in.variables['SOILCTOP'][:] veg = (rootgrp_in.variables['GREENFRAC'][:] * 100.0) ncatts = {key: val for (key, val) in rootgrp_out.__dict__.items()} iswater = ncatts.get('ISWATER') isoilwater = ncatts.get('ISOILWATER') if fix_zero_over_water: soilT_mask = (soilT < 100) soilT_mask_Mean = soilT[(~ soilT_mask)].mean() soilT[soilT_mask] = soilT_mask_Mean if (soilT_mask.sum() > 0): print(' Replaced {0} values in TMN with mean SOILTEMPT value ({1}).'.format(soilT_mask.sum(), soilT_mask_Mean)) print(' Performing topographic soil temperature adjustment.') tmn = (soilT - (0.0065 * hgt)) del soilT, hgt, soilT_mask, soilT_mask_Mean msk = use.copy() msk[numpy.logical_and((msk >= 0), (msk != iswater))] = 1 msk[(msk == iswater)] = 2 a = numpy.ma.array(soil_top_cat[0], mask=False) a.mask[(isoilwater - 1)] = True dominant_value = numpy.amax(a, axis=0).data dominant_index = (numpy.argmax(a, axis=0) + 1) dominant_index[numpy.logical_and((dominant_value < 0.01), (msk[0] == 1))] = dom_lc_fill if (numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because no dominant land class could be determined'.format(numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum(), dom_lc_fill)) dominant_index[(msk[0] == 2)] = isoilwater del a, dominant_value, soil_top_cat soi = dominant_index[numpy.newaxis] soi[(use == iswater)] = isoilwater soi[numpy.logical_and((use != iswater), (soi == isoilwater))] = fillsoiltyp if (numpy.logical_and((use != iswater), (soi == isoilwater)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because of a land landcover type and water soil class'.format(fillsoiltyp, numpy.logical_and((use != iswater), (soi == isoilwater)).sum())) del dominant_index, use smoisArr = numpy.array([0.2, 0.21, 0.25, 0.27]) smois = (smoisArr[(:, None, None)] * numpy.ones(msk.shape)) tslbArr = numpy.array([285.0, 283.0, 279.0, 277.0]) tslb = (tslbArr[(:, None, None)] * numpy.ones(msk.shape)) zs = [((item / 2) + sum(dzs[:num])) for (num, item) in enumerate(dzs)] rootgrp_out.variables['TMN'][:] = tmn rootgrp_out.variables['XLAND'][:] = msk rootgrp_out.variables['SEAICE'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['ISLTYP'][:] = soi rootgrp_out.variables['SHDMAX'][:] = veg.max(axis=1) rootgrp_out.variables['SHDMIN'][:] = veg.min(axis=1) rootgrp_out.variables['LAI'][:] = rootgrp_in.variables['LAI12M'][(:, (laimo - 1))] rootgrp_out.variables['CANWAT'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['SNOW'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['TSK'][:] = (numpy.zeros(msk.shape) + 290.0) rootgrp_out.variables['SMOIS'][:] = smois[numpy.newaxis] rootgrp_out.variables['TSLB'][:] = tslb[numpy.newaxis] rootgrp_out.variables['ZS'][:] = numpy.array(zs)[numpy.newaxis] rootgrp_out.variables['DZS'][:] = numpy.array(dzs)[numpy.newaxis] del msk, veg, ncatts, iswater, isoilwater, soi, smois, smoisArr, tslb, tslbArr, tmn, zs return (rootgrp_in, rootgrp_out)
This function will populate the arrays in the WRFINPUT file based on array and attribute values in the input GEOGRID file.
wrfhydro_gis/Create_wrfinput_from_Geogrid.py
fill_wrfinput_ncdfpy
andrewsoong/wrf_hydro_gis_preprocessor
8
python
def fill_wrfinput_ncdfpy(rootgrp_in, rootgrp_out, laimo=8): '\n This function will populate the arrays in the WRFINPUT file based on array and\n attribute values in the input GEOGRID file.\n ' soilT = rootgrp_in.variables['SOILTEMP'][:] hgt = rootgrp_in.variables['HGT_M'][:] use = rootgrp_in.variables['LU_INDEX'][:] soil_top_cat = rootgrp_in.variables['SOILCTOP'][:] veg = (rootgrp_in.variables['GREENFRAC'][:] * 100.0) ncatts = {key: val for (key, val) in rootgrp_out.__dict__.items()} iswater = ncatts.get('ISWATER') isoilwater = ncatts.get('ISOILWATER') if fix_zero_over_water: soilT_mask = (soilT < 100) soilT_mask_Mean = soilT[(~ soilT_mask)].mean() soilT[soilT_mask] = soilT_mask_Mean if (soilT_mask.sum() > 0): print(' Replaced {0} values in TMN with mean SOILTEMPT value ({1}).'.format(soilT_mask.sum(), soilT_mask_Mean)) print(' Performing topographic soil temperature adjustment.') tmn = (soilT - (0.0065 * hgt)) del soilT, hgt, soilT_mask, soilT_mask_Mean msk = use.copy() msk[numpy.logical_and((msk >= 0), (msk != iswater))] = 1 msk[(msk == iswater)] = 2 a = numpy.ma.array(soil_top_cat[0], mask=False) a.mask[(isoilwater - 1)] = True dominant_value = numpy.amax(a, axis=0).data dominant_index = (numpy.argmax(a, axis=0) + 1) dominant_index[numpy.logical_and((dominant_value < 0.01), (msk[0] == 1))] = dom_lc_fill if (numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because no dominant land class could be determined'.format(numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum(), dom_lc_fill)) dominant_index[(msk[0] == 2)] = isoilwater del a, dominant_value, soil_top_cat soi = dominant_index[numpy.newaxis] soi[(use == iswater)] = isoilwater soi[numpy.logical_and((use != iswater), (soi == isoilwater))] = fillsoiltyp if (numpy.logical_and((use != iswater), (soi == isoilwater)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because of a land landcover type and water soil class'.format(fillsoiltyp, numpy.logical_and((use != iswater), (soi == isoilwater)).sum())) del dominant_index, use smoisArr = numpy.array([0.2, 0.21, 0.25, 0.27]) smois = (smoisArr[(:, None, None)] * numpy.ones(msk.shape)) tslbArr = numpy.array([285.0, 283.0, 279.0, 277.0]) tslb = (tslbArr[(:, None, None)] * numpy.ones(msk.shape)) zs = [((item / 2) + sum(dzs[:num])) for (num, item) in enumerate(dzs)] rootgrp_out.variables['TMN'][:] = tmn rootgrp_out.variables['XLAND'][:] = msk rootgrp_out.variables['SEAICE'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['ISLTYP'][:] = soi rootgrp_out.variables['SHDMAX'][:] = veg.max(axis=1) rootgrp_out.variables['SHDMIN'][:] = veg.min(axis=1) rootgrp_out.variables['LAI'][:] = rootgrp_in.variables['LAI12M'][(:, (laimo - 1))] rootgrp_out.variables['CANWAT'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['SNOW'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['TSK'][:] = (numpy.zeros(msk.shape) + 290.0) rootgrp_out.variables['SMOIS'][:] = smois[numpy.newaxis] rootgrp_out.variables['TSLB'][:] = tslb[numpy.newaxis] rootgrp_out.variables['ZS'][:] = numpy.array(zs)[numpy.newaxis] rootgrp_out.variables['DZS'][:] = numpy.array(dzs)[numpy.newaxis] del msk, veg, ncatts, iswater, isoilwater, soi, smois, smoisArr, tslb, tslbArr, tmn, zs return (rootgrp_in, rootgrp_out)
def fill_wrfinput_ncdfpy(rootgrp_in, rootgrp_out, laimo=8): '\n This function will populate the arrays in the WRFINPUT file based on array and\n attribute values in the input GEOGRID file.\n ' soilT = rootgrp_in.variables['SOILTEMP'][:] hgt = rootgrp_in.variables['HGT_M'][:] use = rootgrp_in.variables['LU_INDEX'][:] soil_top_cat = rootgrp_in.variables['SOILCTOP'][:] veg = (rootgrp_in.variables['GREENFRAC'][:] * 100.0) ncatts = {key: val for (key, val) in rootgrp_out.__dict__.items()} iswater = ncatts.get('ISWATER') isoilwater = ncatts.get('ISOILWATER') if fix_zero_over_water: soilT_mask = (soilT < 100) soilT_mask_Mean = soilT[(~ soilT_mask)].mean() soilT[soilT_mask] = soilT_mask_Mean if (soilT_mask.sum() > 0): print(' Replaced {0} values in TMN with mean SOILTEMPT value ({1}).'.format(soilT_mask.sum(), soilT_mask_Mean)) print(' Performing topographic soil temperature adjustment.') tmn = (soilT - (0.0065 * hgt)) del soilT, hgt, soilT_mask, soilT_mask_Mean msk = use.copy() msk[numpy.logical_and((msk >= 0), (msk != iswater))] = 1 msk[(msk == iswater)] = 2 a = numpy.ma.array(soil_top_cat[0], mask=False) a.mask[(isoilwater - 1)] = True dominant_value = numpy.amax(a, axis=0).data dominant_index = (numpy.argmax(a, axis=0) + 1) dominant_index[numpy.logical_and((dominant_value < 0.01), (msk[0] == 1))] = dom_lc_fill if (numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because no dominant land class could be determined'.format(numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum(), dom_lc_fill)) dominant_index[(msk[0] == 2)] = isoilwater del a, dominant_value, soil_top_cat soi = dominant_index[numpy.newaxis] soi[(use == iswater)] = isoilwater soi[numpy.logical_and((use != iswater), (soi == isoilwater))] = fillsoiltyp if (numpy.logical_and((use != iswater), (soi == isoilwater)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because of a land landcover type and water soil class'.format(fillsoiltyp, numpy.logical_and((use != iswater), (soi == isoilwater)).sum())) del dominant_index, use smoisArr = numpy.array([0.2, 0.21, 0.25, 0.27]) smois = (smoisArr[(:, None, None)] * numpy.ones(msk.shape)) tslbArr = numpy.array([285.0, 283.0, 279.0, 277.0]) tslb = (tslbArr[(:, None, None)] * numpy.ones(msk.shape)) zs = [((item / 2) + sum(dzs[:num])) for (num, item) in enumerate(dzs)] rootgrp_out.variables['TMN'][:] = tmn rootgrp_out.variables['XLAND'][:] = msk rootgrp_out.variables['SEAICE'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['ISLTYP'][:] = soi rootgrp_out.variables['SHDMAX'][:] = veg.max(axis=1) rootgrp_out.variables['SHDMIN'][:] = veg.min(axis=1) rootgrp_out.variables['LAI'][:] = rootgrp_in.variables['LAI12M'][(:, (laimo - 1))] rootgrp_out.variables['CANWAT'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['SNOW'][:] = numpy.zeros(msk.shape) rootgrp_out.variables['TSK'][:] = (numpy.zeros(msk.shape) + 290.0) rootgrp_out.variables['SMOIS'][:] = smois[numpy.newaxis] rootgrp_out.variables['TSLB'][:] = tslb[numpy.newaxis] rootgrp_out.variables['ZS'][:] = numpy.array(zs)[numpy.newaxis] rootgrp_out.variables['DZS'][:] = numpy.array(dzs)[numpy.newaxis] del msk, veg, ncatts, iswater, isoilwater, soi, smois, smoisArr, tslb, tslbArr, tmn, zs return (rootgrp_in, rootgrp_out)<|docstring|>This function will populate the arrays in the WRFINPUT file based on array and attribute values in the input GEOGRID file.<|endoftext|>
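The fill_wrfinput_ncdfpy function above derives the deep soil temperature field TMN by applying a constant lapse rate of 0.0065 K per metre to the GEOGRID SOILTEMP values. The following is a minimal standalone sketch of that adjustment with made-up array values; it is an illustration only, not the WRF-Hydro code itself.

import numpy

# Hypothetical 1 x 2 x 3 stand-ins for the GEOGRID SOILTEMP and HGT_M fields.
soilT = numpy.full((1, 2, 3), 285.0)              # annual-mean soil temperature [K]
hgt = numpy.array([[[0.0, 500.0, 1500.0],
                    [200.0, 800.0, 2000.0]]])     # terrain height [m]

# Topographic adjustment used above: subtract 6.5 K for every 1000 m of elevation.
tmn = soilT - 0.0065 * hgt
print(tmn[0])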
3583c04ea44f1c436e994f760f4003b47742fc13e8fb42d77eeccb22da3e7730
def main_wrfinput_ncdfpy(geoFile, wrfinFile, lai=8, outNCType='NETCDF4'): '\n This function is designed to build the wrfinput file using only the\n netCDF4-python library.\n ' tic1 = time.time() print(' Creating wrfinput file from geogrid file.') print(' Input geogrid file: {0}'.format(geoFile)) print(' Output wrfinput file: {0}'.format(wrfinFile)) print(' Month selected (1=Januaray, 12=December): {0}'.format(lai)) rootgrp_in = netCDF4.Dataset(geoFile, 'r') rootgrp_out = netCDF4.Dataset(wrfinFile, 'w', format=outNCType) for (dimname, dim) in rootgrp_in.dimensions.items(): if (dimname in keepDims): rootgrp_out.createDimension(dimname, len(dim)) soildimension = rootgrp_out.createDimension(soildim, nsoil) for (varname, ncvar) in rootgrp_in.variables.items(): if (varname in keepVars): varAtts = {key: val for (key, val) in ncvar.__dict__.items()} varDims = tuple((varDim for varDim in ncvar.dimensions)) if (varname in mapVars): varname = mapVars[varname] var = rootgrp_out.createVariable(varname, ncvar.dtype, varDims) var.setncatts(varAtts) for (varname, units, varDims, missing_value, dtype) in addVars: var = rootgrp_out.createVariable(varname, dtype, varDims) var.setncatts({'units': units, 'missing_value': missing_value}) ncatts = {key: val for (key, val) in rootgrp_in.__dict__.items()} ncatts['Source_Software'] = 'WRF-Hydro {0} script (Python).'.format(sys.argv[0]) ncatts['creation_time'] = 'Created {0}'.format(time.ctime()) rootgrp_out.setncatts(ncatts) for (varname, ncvar) in rootgrp_in.variables.items(): if (varname in keepVars): if (varname in mapVars): varname = mapVars[varname] var = rootgrp_out.variables[varname] var[:] = ncvar[:] (rootgrp_in, rootgrp_out) = fill_wrfinput_ncdfpy(rootgrp_in, rootgrp_out, laimo=lai) rootgrp_in.close() rootgrp_out.close() return
This function is designed to build the wrfinput file using only the netCDF4-python library.
wrfhydro_gis/Create_wrfinput_from_Geogrid.py
main_wrfinput_ncdfpy
andrewsoong/wrf_hydro_gis_preprocessor
8
python
def main_wrfinput_ncdfpy(geoFile, wrfinFile, lai=8, outNCType='NETCDF4'): '\n This function is designed to build the wrfinput file using only the\n netCDF4-python library.\n ' tic1 = time.time() print(' Creating wrfinput file from geogrid file.') print(' Input geogrid file: {0}'.format(geoFile)) print(' Output wrfinput file: {0}'.format(wrfinFile)) print(' Month selected (1=Januaray, 12=December): {0}'.format(lai)) rootgrp_in = netCDF4.Dataset(geoFile, 'r') rootgrp_out = netCDF4.Dataset(wrfinFile, 'w', format=outNCType) for (dimname, dim) in rootgrp_in.dimensions.items(): if (dimname in keepDims): rootgrp_out.createDimension(dimname, len(dim)) soildimension = rootgrp_out.createDimension(soildim, nsoil) for (varname, ncvar) in rootgrp_in.variables.items(): if (varname in keepVars): varAtts = {key: val for (key, val) in ncvar.__dict__.items()} varDims = tuple((varDim for varDim in ncvar.dimensions)) if (varname in mapVars): varname = mapVars[varname] var = rootgrp_out.createVariable(varname, ncvar.dtype, varDims) var.setncatts(varAtts) for (varname, units, varDims, missing_value, dtype) in addVars: var = rootgrp_out.createVariable(varname, dtype, varDims) var.setncatts({'units': units, 'missing_value': missing_value}) ncatts = {key: val for (key, val) in rootgrp_in.__dict__.items()} ncatts['Source_Software'] = 'WRF-Hydro {0} script (Python).'.format(sys.argv[0]) ncatts['creation_time'] = 'Created {0}'.format(time.ctime()) rootgrp_out.setncatts(ncatts) for (varname, ncvar) in rootgrp_in.variables.items(): if (varname in keepVars): if (varname in mapVars): varname = mapVars[varname] var = rootgrp_out.variables[varname] var[:] = ncvar[:] (rootgrp_in, rootgrp_out) = fill_wrfinput_ncdfpy(rootgrp_in, rootgrp_out, laimo=lai) rootgrp_in.close() rootgrp_out.close() return
def main_wrfinput_ncdfpy(geoFile, wrfinFile, lai=8, outNCType='NETCDF4'): '\n This function is designed to build the wrfinput file using only the\n netCDF4-python library.\n ' tic1 = time.time() print(' Creating wrfinput file from geogrid file.') print(' Input geogrid file: {0}'.format(geoFile)) print(' Output wrfinput file: {0}'.format(wrfinFile)) print(' Month selected (1=Januaray, 12=December): {0}'.format(lai)) rootgrp_in = netCDF4.Dataset(geoFile, 'r') rootgrp_out = netCDF4.Dataset(wrfinFile, 'w', format=outNCType) for (dimname, dim) in rootgrp_in.dimensions.items(): if (dimname in keepDims): rootgrp_out.createDimension(dimname, len(dim)) soildimension = rootgrp_out.createDimension(soildim, nsoil) for (varname, ncvar) in rootgrp_in.variables.items(): if (varname in keepVars): varAtts = {key: val for (key, val) in ncvar.__dict__.items()} varDims = tuple((varDim for varDim in ncvar.dimensions)) if (varname in mapVars): varname = mapVars[varname] var = rootgrp_out.createVariable(varname, ncvar.dtype, varDims) var.setncatts(varAtts) for (varname, units, varDims, missing_value, dtype) in addVars: var = rootgrp_out.createVariable(varname, dtype, varDims) var.setncatts({'units': units, 'missing_value': missing_value}) ncatts = {key: val for (key, val) in rootgrp_in.__dict__.items()} ncatts['Source_Software'] = 'WRF-Hydro {0} script (Python).'.format(sys.argv[0]) ncatts['creation_time'] = 'Created {0}'.format(time.ctime()) rootgrp_out.setncatts(ncatts) for (varname, ncvar) in rootgrp_in.variables.items(): if (varname in keepVars): if (varname in mapVars): varname = mapVars[varname] var = rootgrp_out.variables[varname] var[:] = ncvar[:] (rootgrp_in, rootgrp_out) = fill_wrfinput_ncdfpy(rootgrp_in, rootgrp_out, laimo=lai) rootgrp_in.close() rootgrp_out.close() return<|docstring|>This function is designed to build the wrfinput file using only the netCDF4-python library.<|endoftext|>
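Assuming the script's module-level constants (keepDims, keepVars, addVars, mapVars, soildim, nsoil) are defined as in the original file, calling the driver documented above would presumably look like the sketch below; the file names are hypothetical.

# Hypothetical WPS geogrid input and wrfinput output paths.
geo_file = 'geo_em.d01.nc'
wrfinput_file = 'wrfinput_d01.nc'

# lai selects the month (1 = January ... 12 = December) used to fill the LAI variable.
main_wrfinput_ncdfpy(geo_file, wrfinput_file, lai=8, outNCType='NETCDF4')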
c055e106ccf5cd89d22d09d8be0d4946690b1100b96d6bbf0bf5563b2fd4d308
def fill_wrfinput_xarray(ds_in, laimo=8): '\n This function will populate the arrays in the WRFINPUT file based on array and\n attribute values in the input GEOGRID file.\n ' iswater = ds_in.attrs.get('ISWATER') isoilwater = ds_in.attrs.get('ISOILWATER') hgt = ds_in['HGT'].data if fix_zero_over_water: soilT = ds_in['SOILTEMP'].data.copy() soilT_mask = (soilT < 100) soilT_mask_Mean = soilT[(~ soilT_mask)].mean() soilT[soilT_mask] = soilT_mask_Mean if (soilT_mask.sum() > 0): print(' Replaced {0} values in TMN with mean SOILTEMPT value ({1}).'.format(soilT_mask.sum(), soilT_mask_Mean)) print(' Performing topographic soil temperature adjustment.') tmn = (soilT - (0.0065 * hgt)) del soilT, hgt, soilT_mask, soilT_mask_Mean use = ds_in['IVGTYP'].data msk = use.copy() msk[numpy.logical_and((msk >= 0), (msk != iswater))] = 1 msk[(msk == iswater)] = 2 soil_top_cat = ds_in['SOILCTOP'].data a = numpy.ma.array(soil_top_cat[0], mask=False) a.mask[(isoilwater - 1)] = True dominant_value = numpy.amax(a, axis=0).data dominant_index = (numpy.argmax(a, axis=0) + 1) dominant_index[numpy.logical_and((dominant_value < 0.01), (msk[0] == 1))] = dom_lc_fill if (numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because no dominant land class could be determined'.format(numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum(), dom_lc_fill)) dominant_index[(msk[0] == 2)] = isoilwater del a, dominant_value, soil_top_cat soi = dominant_index[numpy.newaxis] soi[(use == iswater)] = isoilwater soi[numpy.logical_and((use != iswater), (soi == isoilwater))] = fillsoiltyp if (numpy.logical_and((use != iswater), (soi == isoilwater)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because of a land landcover type and water soil class'.format(fillsoiltyp, numpy.logical_and((use != iswater), (soi == isoilwater)).sum())) del dominant_index, use smoisArr = numpy.array([0.2, 0.21, 0.25, 0.27]) smois = (smoisArr[(:, None, None)] * numpy.ones(msk.shape)) tslbArr = numpy.array([285.0, 283.0, 279.0, 277.0]) tslb = (tslbArr[(:, None, None)] * numpy.ones(msk.shape)) zs = [((item / 2) + sum(dzs[:num])) for (num, item) in enumerate(dzs)] veg = (ds_in['GREENFRAC'].data * 100.0) ds_in.variables['TMN'][:] = tmn ds_in.variables['XLAND'][:] = msk ds_in.variables['SEAICE'][:] = numpy.zeros(msk.shape) ds_in.variables['ISLTYP'][:] = soi ds_in.variables['SHDMAX'][:] = veg.max(axis=1) ds_in.variables['SHDMIN'][:] = veg.min(axis=1) ds_in.variables['LAI'][:] = ds_in.variables['LAI12M'][(:, (laimo - 1))] ds_in.variables['CANWAT'][:] = numpy.zeros(msk.shape) ds_in.variables['SNOW'][:] = numpy.zeros(msk.shape) ds_in.variables['TSK'][:] = (numpy.zeros(msk.shape) + 290.0) ds_in.variables['SMOIS'][:] = smois[numpy.newaxis] ds_in.variables['TSLB'][:] = tslb[numpy.newaxis] ds_in.variables['ZS'][:] = numpy.array(zs)[numpy.newaxis] ds_in.variables['DZS'][:] = numpy.array(dzs)[numpy.newaxis] del msk, veg, iswater, isoilwater, soi, smois, smoisArr, tslb, tslbArr, tmn, zs return ds_in
This function will populate the arrays in the WRFINPUT file based on array and attribute values in the input GEOGRID file.
wrfhydro_gis/Create_wrfinput_from_Geogrid.py
fill_wrfinput_xarray
andrewsoong/wrf_hydro_gis_preprocessor
8
python
def fill_wrfinput_xarray(ds_in, laimo=8): '\n This function will populate the arrays in the WRFINPUT file based on array and\n attribute values in the input GEOGRID file.\n ' iswater = ds_in.attrs.get('ISWATER') isoilwater = ds_in.attrs.get('ISOILWATER') hgt = ds_in['HGT'].data if fix_zero_over_water: soilT = ds_in['SOILTEMP'].data.copy() soilT_mask = (soilT < 100) soilT_mask_Mean = soilT[(~ soilT_mask)].mean() soilT[soilT_mask] = soilT_mask_Mean if (soilT_mask.sum() > 0): print(' Replaced {0} values in TMN with mean SOILTEMPT value ({1}).'.format(soilT_mask.sum(), soilT_mask_Mean)) print(' Performing topographic soil temperature adjustment.') tmn = (soilT - (0.0065 * hgt)) del soilT, hgt, soilT_mask, soilT_mask_Mean use = ds_in['IVGTYP'].data msk = use.copy() msk[numpy.logical_and((msk >= 0), (msk != iswater))] = 1 msk[(msk == iswater)] = 2 soil_top_cat = ds_in['SOILCTOP'].data a = numpy.ma.array(soil_top_cat[0], mask=False) a.mask[(isoilwater - 1)] = True dominant_value = numpy.amax(a, axis=0).data dominant_index = (numpy.argmax(a, axis=0) + 1) dominant_index[numpy.logical_and((dominant_value < 0.01), (msk[0] == 1))] = dom_lc_fill if (numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because no dominant land class could be determined'.format(numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum(), dom_lc_fill)) dominant_index[(msk[0] == 2)] = isoilwater del a, dominant_value, soil_top_cat soi = dominant_index[numpy.newaxis] soi[(use == iswater)] = isoilwater soi[numpy.logical_and((use != iswater), (soi == isoilwater))] = fillsoiltyp if (numpy.logical_and((use != iswater), (soi == isoilwater)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because of a land landcover type and water soil class'.format(fillsoiltyp, numpy.logical_and((use != iswater), (soi == isoilwater)).sum())) del dominant_index, use smoisArr = numpy.array([0.2, 0.21, 0.25, 0.27]) smois = (smoisArr[(:, None, None)] * numpy.ones(msk.shape)) tslbArr = numpy.array([285.0, 283.0, 279.0, 277.0]) tslb = (tslbArr[(:, None, None)] * numpy.ones(msk.shape)) zs = [((item / 2) + sum(dzs[:num])) for (num, item) in enumerate(dzs)] veg = (ds_in['GREENFRAC'].data * 100.0) ds_in.variables['TMN'][:] = tmn ds_in.variables['XLAND'][:] = msk ds_in.variables['SEAICE'][:] = numpy.zeros(msk.shape) ds_in.variables['ISLTYP'][:] = soi ds_in.variables['SHDMAX'][:] = veg.max(axis=1) ds_in.variables['SHDMIN'][:] = veg.min(axis=1) ds_in.variables['LAI'][:] = ds_in.variables['LAI12M'][(:, (laimo - 1))] ds_in.variables['CANWAT'][:] = numpy.zeros(msk.shape) ds_in.variables['SNOW'][:] = numpy.zeros(msk.shape) ds_in.variables['TSK'][:] = (numpy.zeros(msk.shape) + 290.0) ds_in.variables['SMOIS'][:] = smois[numpy.newaxis] ds_in.variables['TSLB'][:] = tslb[numpy.newaxis] ds_in.variables['ZS'][:] = numpy.array(zs)[numpy.newaxis] ds_in.variables['DZS'][:] = numpy.array(dzs)[numpy.newaxis] del msk, veg, iswater, isoilwater, soi, smois, smoisArr, tslb, tslbArr, tmn, zs return ds_in
def fill_wrfinput_xarray(ds_in, laimo=8): '\n This function will populate the arrays in the WRFINPUT file based on array and\n attribute values in the input GEOGRID file.\n ' iswater = ds_in.attrs.get('ISWATER') isoilwater = ds_in.attrs.get('ISOILWATER') hgt = ds_in['HGT'].data if fix_zero_over_water: soilT = ds_in['SOILTEMP'].data.copy() soilT_mask = (soilT < 100) soilT_mask_Mean = soilT[(~ soilT_mask)].mean() soilT[soilT_mask] = soilT_mask_Mean if (soilT_mask.sum() > 0): print(' Replaced {0} values in TMN with mean SOILTEMPT value ({1}).'.format(soilT_mask.sum(), soilT_mask_Mean)) print(' Performing topographic soil temperature adjustment.') tmn = (soilT - (0.0065 * hgt)) del soilT, hgt, soilT_mask, soilT_mask_Mean use = ds_in['IVGTYP'].data msk = use.copy() msk[numpy.logical_and((msk >= 0), (msk != iswater))] = 1 msk[(msk == iswater)] = 2 soil_top_cat = ds_in['SOILCTOP'].data a = numpy.ma.array(soil_top_cat[0], mask=False) a.mask[(isoilwater - 1)] = True dominant_value = numpy.amax(a, axis=0).data dominant_index = (numpy.argmax(a, axis=0) + 1) dominant_index[numpy.logical_and((dominant_value < 0.01), (msk[0] == 1))] = dom_lc_fill if (numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because no dominant land class could be determined'.format(numpy.logical_and((dominant_value < 0.01), (msk[0] == 1)).sum(), dom_lc_fill)) dominant_index[(msk[0] == 2)] = isoilwater del a, dominant_value, soil_top_cat soi = dominant_index[numpy.newaxis] soi[(use == iswater)] = isoilwater soi[numpy.logical_and((use != iswater), (soi == isoilwater))] = fillsoiltyp if (numpy.logical_and((use != iswater), (soi == isoilwater)).sum() > 0): print(' Replaced {0} values in ISLTYP with {1} because of a land landcover type and water soil class'.format(fillsoiltyp, numpy.logical_and((use != iswater), (soi == isoilwater)).sum())) del dominant_index, use smoisArr = numpy.array([0.2, 0.21, 0.25, 0.27]) smois = (smoisArr[(:, None, None)] * numpy.ones(msk.shape)) tslbArr = numpy.array([285.0, 283.0, 279.0, 277.0]) tslb = (tslbArr[(:, None, None)] * numpy.ones(msk.shape)) zs = [((item / 2) + sum(dzs[:num])) for (num, item) in enumerate(dzs)] veg = (ds_in['GREENFRAC'].data * 100.0) ds_in.variables['TMN'][:] = tmn ds_in.variables['XLAND'][:] = msk ds_in.variables['SEAICE'][:] = numpy.zeros(msk.shape) ds_in.variables['ISLTYP'][:] = soi ds_in.variables['SHDMAX'][:] = veg.max(axis=1) ds_in.variables['SHDMIN'][:] = veg.min(axis=1) ds_in.variables['LAI'][:] = ds_in.variables['LAI12M'][(:, (laimo - 1))] ds_in.variables['CANWAT'][:] = numpy.zeros(msk.shape) ds_in.variables['SNOW'][:] = numpy.zeros(msk.shape) ds_in.variables['TSK'][:] = (numpy.zeros(msk.shape) + 290.0) ds_in.variables['SMOIS'][:] = smois[numpy.newaxis] ds_in.variables['TSLB'][:] = tslb[numpy.newaxis] ds_in.variables['ZS'][:] = numpy.array(zs)[numpy.newaxis] ds_in.variables['DZS'][:] = numpy.array(dzs)[numpy.newaxis] del msk, veg, iswater, isoilwater, soi, smois, smoisArr, tslb, tslbArr, tmn, zs return ds_in<|docstring|>This function will populate the arrays in the WRFINPUT file based on array and attribute values in the input GEOGRID file.<|endoftext|>
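Both fill_wrfinput_* variants choose the dominant top-layer soil class by taking an argmax over the category axis of SOILCTOP while masking out the water soil class. The sketch below reproduces that idea on a tiny made-up array (three categories on a 2 x 2 grid); the water-class index is an assumption, and this is only an illustration, not the original routine.

import numpy

isoilwater = 3                                   # pretend category 3 is the water soil class
soil_top_cat = numpy.array([[[0.7, 0.1],
                             [0.0, 0.2]],
                            [[0.3, 0.9],
                             [0.0, 0.8]],
                            [[0.0, 0.0],
                             [1.0, 0.0]]])       # shape (category, y, x), fraction per cell

# Mask the water category so it can never win the argmax, then pick the
# dominant (1-based) soil class in each grid cell.
mask = numpy.zeros(soil_top_cat.shape, dtype=bool)
mask[isoilwater - 1] = True
a = numpy.ma.array(soil_top_cat, mask=mask)
dominant_index = numpy.argmax(a, axis=0) + 1
print(dominant_index)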
5158fb283cd280384760e794b4d633aefffe46f9fe0fed556e95689c752b021f
def main_wrfinput_xarray(geoFile, wrfinFile, lai=8, outNCType='NETCDF4'): '\n This function is designed to build the wrfinput file using the xarray library.\n ' tic1 = time.time() print(' Creating wrfinput file from geogrid file.') print(' Input geogrid file: {0}'.format(geoFile)) print(' Output wrfinput file: {0}'.format(wrfinFile)) print(' Month selected (1=Januaray, 12=December): {0}'.format(lai)) ncDS = xr.open_dataset(geoFile) ncDS = ncDS.rename(mapVars) dims = dict(ncDS.dims) dims.update({soildim: nsoil}) newVars = [] for (varname, units, varDims, missing_value, dtype) in addVars: da = xr.DataArray(data=numpy.empty(tuple([dims[dim] for dim in varDims]), dtype=dtype), dims=varDims, attrs={'units': units, 'missing_value': missing_value}) ncDS[varname] = da newVars.append(varname) ncDS = fill_wrfinput_xarray(ncDS, laimo=lai) dropDims = [item for item in ncDS.dims if (item not in keepDims)] ncDS = ncDS.drop_dims(dropDims) keepVars2 = ([mapVars.get(item, item) for item in keepVars] + newVars) dropVars = [item for item in ncDS.variables if (item not in keepVars2)] ncDS = ncDS.drop(dropVars) ncDS.attrs['Source_Software'] = 'WRF-Hydro {0} script (Python).'.format(sys.argv[0]) ncDS.attrs['creation_time'] = 'Created {0}'.format(time.ctime()) encoding = {varname: {'_FillValue': None} for varname in list(ncDS.variables.keys())} ncDS.to_netcdf(wrfinFile, mode='w', format=outNCType, encoding=encoding) ncDS.close() del encoding, ncDS return
This function is designed to build the wrfinput file using the xarray library.
wrfhydro_gis/Create_wrfinput_from_Geogrid.py
main_wrfinput_xarray
andrewsoong/wrf_hydro_gis_preprocessor
8
python
def main_wrfinput_xarray(geoFile, wrfinFile, lai=8, outNCType='NETCDF4'): '\n \n ' tic1 = time.time() print(' Creating wrfinput file from geogrid file.') print(' Input geogrid file: {0}'.format(geoFile)) print(' Output wrfinput file: {0}'.format(wrfinFile)) print(' Month selected (1=Januaray, 12=December): {0}'.format(lai)) ncDS = xr.open_dataset(geoFile) ncDS = ncDS.rename(mapVars) dims = dict(ncDS.dims) dims.update({soildim: nsoil}) newVars = [] for (varname, units, varDims, missing_value, dtype) in addVars: da = xr.DataArray(data=numpy.empty(tuple([dims[dim] for dim in varDims]), dtype=dtype), dims=varDims, attrs={'units': units, 'missing_value': missing_value}) ncDS[varname] = da newVars.append(varname) ncDS = fill_wrfinput_xarray(ncDS, laimo=lai) dropDims = [item for item in ncDS.dims if (item not in keepDims)] ncDS = ncDS.drop_dims(dropDims) keepVars2 = ([mapVars.get(item, item) for item in keepVars] + newVars) dropVars = [item for item in ncDS.variables if (item not in keepVars2)] ncDS = ncDS.drop(dropVars) ncDS.attrs['Source_Software'] = 'WRF-Hydro {0} script (Python).'.format(sys.argv[0]) ncDS.attrs['creation_time'] = 'Created {0}'.format(time.ctime()) encoding = {varname: {'_FillValue': None} for varname in list(ncDS.variables.keys())} ncDS.to_netcdf(wrfinFile, mode='w', format=outNCType, encoding=encoding) ncDS.close() del encoding, ncDS return
def main_wrfinput_xarray(geoFile, wrfinFile, lai=8, outNCType='NETCDF4'): '\n \n ' tic1 = time.time() print(' Creating wrfinput file from geogrid file.') print(' Input geogrid file: {0}'.format(geoFile)) print(' Output wrfinput file: {0}'.format(wrfinFile)) print(' Month selected (1=Januaray, 12=December): {0}'.format(lai)) ncDS = xr.open_dataset(geoFile) ncDS = ncDS.rename(mapVars) dims = dict(ncDS.dims) dims.update({soildim: nsoil}) newVars = [] for (varname, units, varDims, missing_value, dtype) in addVars: da = xr.DataArray(data=numpy.empty(tuple([dims[dim] for dim in varDims]), dtype=dtype), dims=varDims, attrs={'units': units, 'missing_value': missing_value}) ncDS[varname] = da newVars.append(varname) ncDS = fill_wrfinput_xarray(ncDS, laimo=lai) dropDims = [item for item in ncDS.dims if (item not in keepDims)] ncDS = ncDS.drop_dims(dropDims) keepVars2 = ([mapVars.get(item, item) for item in keepVars] + newVars) dropVars = [item for item in ncDS.variables if (item not in keepVars2)] ncDS = ncDS.drop(dropVars) ncDS.attrs['Source_Software'] = 'WRF-Hydro {0} script (Python).'.format(sys.argv[0]) ncDS.attrs['creation_time'] = 'Created {0}'.format(time.ctime()) encoding = {varname: {'_FillValue': None} for varname in list(ncDS.variables.keys())} ncDS.to_netcdf(wrfinFile, mode='w', format=outNCType, encoding=encoding) ncDS.close() del encoding, ncDS return<|docstring|>This function is designed to build the wrfinput file using the xarray library.<|endoftext|>
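The xarray-based driver above ends by dropping everything not on its keep lists and writing with an encoding that sets _FillValue to None for every variable, so xarray does not inject automatic fill values into the wrfinput file. Below is a small hedged sketch of that pattern on a toy dataset; the variable names and output path are invented, and drop_vars is used here in place of the older drop call in the record.

import numpy
import xarray as xr

# Toy dataset; the names here are hypothetical, not real geogrid variables.
ds = xr.Dataset({'KEEP_ME': (('y', 'x'), numpy.ones((2, 2), dtype='float32')),
                 'DROP_ME': (('y', 'x'), numpy.zeros((2, 2), dtype='float32'))})

# Drop every variable not on the keep list, as done with keepVars2 above.
keep = ['KEEP_ME']
ds = ds.drop_vars([v for v in ds.data_vars if v not in keep])

# Suppress automatic _FillValue attributes when writing, mirroring the encoding
# dictionary built in main_wrfinput_xarray.
encoding = {name: {'_FillValue': None} for name in ds.variables}
ds.to_netcdf('example_out.nc', mode='w', format='NETCDF4', encoding=encoding)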
e2b327f761f829fe0c11a619939693fa26d382557573d4d493ff1fa07997d008
def process_info(Process, stat_time, cache, name='work'): '\n\n :param Process: psutil.Process\n :param stat_time:\n :param cache:\n :param name:\n :return:\n ' try: name = '{cmd}-{pid}'.format(name=name, pid=Process.pid, cmd=Process.name()) cache.setdefault(name, {}) cache[name][stat_time] = Process.memory_info().rss except Exception: name = 'tmp' cache.setdefault(name, {}) cache[name][stat_time] = 0 return cache
:param Process: psutil.Process :param stat_time: :param cache: :param name: :return:
animalcourier/profile/psopen.py
process_info
btrspg/Animal-Courier
0
python
def process_info(Process, stat_time, cache, name='work'): '\n\n :param Process: psutil.Process\n :param stat_time:\n :param cache:\n :param name:\n :return:\n ' try: name = '{cmd}-{pid}'.format(name=name, pid=Process.pid, cmd=Process.name()) cache.setdefault(name, {}) cache[name][stat_time] = Process.memory_info().rss except Exception: name = 'tmp' cache.setdefault(name, {}) cache[name][stat_time] = 0 return cache
def process_info(Process, stat_time, cache, name='work'): '\n\n :param Process: psutil.Process\n :param stat_time:\n :param cache:\n :param name:\n :return:\n ' try: name = '{cmd}-{pid}'.format(name=name, pid=Process.pid, cmd=Process.name()) cache.setdefault(name, {}) cache[name][stat_time] = Process.memory_info().rss except Exception: name = 'tmp' cache.setdefault(name, {}) cache[name][stat_time] = 0 return cache<|docstring|>:param Process: psutil.Process :param stat_time: :param cache: :param name: :return:<|endoftext|>
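process_info records a process's resident set size under a '<cmd>-<pid>' key for the supplied timestamp, and falls back to a 'tmp' entry of 0 if the process can no longer be queried. A hedged usage sketch with psutil follows; the sampling loop and the choice of monitoring the current process are illustrative assumptions.

import time
import psutil

cache = {}
proc = psutil.Process()              # monitor the current process, just for illustration

for _ in range(3):
    stat_time = time.strftime('%H:%M:%S')
    cache = process_info(proc, stat_time, cache, name='work')   # function from the record above
    time.sleep(1)

# cache now maps '<cmd>-<pid>' to {timestamp: rss_in_bytes, ...}
print(cache)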
6f7fbae7ffe04287eec138c4948acc7364f7cf319a87c664c2d39bcd90fbd14e
def selection_sort(collection): 'Pure implementation of the selection sort algorithm in Python\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n\n Examples:\n >>> selection_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n\n >>> selection_sort([])\n []\n\n >>> selection_sort([-2, -5, -45])\n [-45, -5, -2]\n ' length = len(collection) for i in range((length - 1)): least = i for k in range((i + 1), length): if (collection[k] < collection[least]): least = k if (least != i): (collection[least], collection[i]) = (collection[i], collection[least]) return collection
Pure implementation of the selection sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> selection_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> selection_sort([]) [] >>> selection_sort([-2, -5, -45]) [-45, -5, -2]
sorts/selection_sort.py
selection_sort
kc8055/Python
145614
python
def selection_sort(collection): 'Pure implementation of the selection sort algorithm in Python\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n\n Examples:\n >>> selection_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n\n >>> selection_sort([])\n []\n\n >>> selection_sort([-2, -5, -45])\n [-45, -5, -2]\n ' length = len(collection) for i in range((length - 1)): least = i for k in range((i + 1), length): if (collection[k] < collection[least]): least = k if (least != i): (collection[least], collection[i]) = (collection[i], collection[least]) return collection
def selection_sort(collection): 'Pure implementation of the selection sort algorithm in Python\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n\n Examples:\n >>> selection_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n\n >>> selection_sort([])\n []\n\n >>> selection_sort([-2, -5, -45])\n [-45, -5, -2]\n ' length = len(collection) for i in range((length - 1)): least = i for k in range((i + 1), length): if (collection[k] < collection[least]): least = k if (least != i): (collection[least], collection[i]) = (collection[i], collection[least]) return collection<|docstring|>Pure implementation of the selection sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> selection_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> selection_sort([]) [] >>> selection_sort([-2, -5, -45]) [-45, -5, -2]<|endoftext|>
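The selection_sort record already ships doctest examples; the snippet below merely adds a quick sanity check against Python's built-in sorted on random data, a common way to exercise such a routine (the data and sizes are arbitrary).

import random

data = [random.randint(-50, 50) for _ in range(20)]
assert selection_sort(list(data)) == sorted(data)   # selection_sort from the record above
print('selection_sort agrees with sorted() on', len(data), 'random values')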
0a403c07c949cdb4cea8a6431691ee27e11bc19652185621c613a98ffc71dfa6
def FrankiaSymbiont(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Frankia symbiont graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Frankia symbiont graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 21:26:45.741765\n\t\n\tThe undirected graph Frankia symbiont has 4114 nodes and 341016 weighted\n\tedges, of which none are self-loops. The graph is dense as it has a density\n\tof 0.04031 and has 26 connected components, where the component with most\n\tnodes has 4056 nodes and the component with the least nodes has 2 nodes.\n\tThe graph median node degree is 133, the mean node degree is 165.78, and\n\tthe node degree mode is 2. The top 5 most central nodes are 656024.FsymDg_2708\n\t(degree 1562), 656024.FsymDg_0527 (degree 1546), 656024.FsymDg_2896 (degree\n\t1252), 656024.FsymDg_2135 (degree 1238) and 656024.FsymDg_3235 (degree\n\t1152).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import FrankiaSymbiont\n\t\n\t # Then load the graph\n\t graph = FrankiaSymbiont()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='FrankiaSymbiont', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
Return new instance of the Frankia symbiont graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Frankia symbiont graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:26:45.741765 The undirected graph Frankia symbiont has 4114 nodes and 341016 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.04031 and has 26 connected components, where the component with most nodes has 4056 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 133, the mean node degree is 165.78, and the node degree mode is 2. The top 5 most central nodes are 656024.FsymDg_2708 (degree 1562), 656024.FsymDg_0527 (degree 1546), 656024.FsymDg_2896 (degree 1252), 656024.FsymDg_2135 (degree 1238) and 656024.FsymDg_3235 (degree 1152). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import FrankiaSymbiont # Then load the graph graph = FrankiaSymbiont() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.
bindings/python/ensmallen_graph/datasets/string/frankiasymbiont.py
FrankiaSymbiont
caufieldjh/ensmallen_graph
0
python
def FrankiaSymbiont(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Frankia symbiont graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Frankia symbiont graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 21:26:45.741765\n\t\n\tThe undirected graph Frankia symbiont has 4114 nodes and 341016 weighted\n\tedges, of which none are self-loops. The graph is dense as it has a density\n\tof 0.04031 and has 26 connected components, where the component with most\n\tnodes has 4056 nodes and the component with the least nodes has 2 nodes.\n\tThe graph median node degree is 133, the mean node degree is 165.78, and\n\tthe node degree mode is 2. The top 5 most central nodes are 656024.FsymDg_2708\n\t(degree 1562), 656024.FsymDg_0527 (degree 1546), 656024.FsymDg_2896 (degree\n\t1252), 656024.FsymDg_2135 (degree 1238) and 656024.FsymDg_3235 (degree\n\t1152).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import FrankiaSymbiont\n\t\n\t # Then load the graph\n\t graph = FrankiaSymbiont()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='FrankiaSymbiont', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
def FrankiaSymbiont(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Frankia symbiont graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Frankia symbiont graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 21:26:45.741765\n\t\n\tThe undirected graph Frankia symbiont has 4114 nodes and 341016 weighted\n\tedges, of which none are self-loops. The graph is dense as it has a density\n\tof 0.04031 and has 26 connected components, where the component with most\n\tnodes has 4056 nodes and the component with the least nodes has 2 nodes.\n\tThe graph median node degree is 133, the mean node degree is 165.78, and\n\tthe node degree mode is 2. The top 5 most central nodes are 656024.FsymDg_2708\n\t(degree 1562), 656024.FsymDg_0527 (degree 1546), 656024.FsymDg_2896 (degree\n\t1252), 656024.FsymDg_2135 (degree 1238) and 656024.FsymDg_3235 (degree\n\t1152).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import FrankiaSymbiont\n\t\n\t # Then load the graph\n\t graph = FrankiaSymbiont()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='FrankiaSymbiont', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()<|docstring|>Return new instance of the Frankia symbiont graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Frankia symbiont graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:26:45.741765 The undirected graph Frankia symbiont has 4114 nodes and 341016 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.04031 and has 26 connected components, where the component with most nodes has 4056 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 133, the mean node degree is 165.78, and the node degree mode is 2. The top 5 most central nodes are 656024.FsymDg_2708 (degree 1562), 656024.FsymDg_0527 (degree 1546), 656024.FsymDg_2896 (degree 1252), 656024.FsymDg_2135 (degree 1238) and 656024.FsymDg_3235 (degree 1152). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. 
code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import FrankiaSymbiont # Then load the graph graph = FrankiaSymbiont() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.<|endoftext|>
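A minimal sketch of calling the loader above with its documented parameters spelled out (the usage example embedded in the record only shows the zero-argument call). It assumes the ensmallen_graph package is installed and that the signature matches the record; the values shown are the defaults from that signature.

from ensmallen_graph.datasets.string import FrankiaSymbiont

# Documented parameters passed explicitly (values are the defaults from the record).
graph = FrankiaSymbiont(
    directed=False,               # load as undirected
    verbose=2,                    # show loading bars during retrieval/building
    cache_path="graphs/string",   # where downloaded files are cached
)
print(graph)  # textual report: nodes, edges, density, components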
1a4bd454b1017518289741bc81694c1deb541505b050490c5b4682e6911c6562
def check(self): '\n Check whether the vulnerability exists\n\n :param:\n\n :return bool True or False: whether the vulnerability exists\n ' try: check_req = request.get((self.url + '/seeyon/htmlofficeservlet'), headers=self.headers) if ((check_req.status_code == 200) and ('DBSTEP V3.0 0 21 0 htmoffice operate err' in check_req.text)): print('seeyou vulnerability present') return True else: print('seeyou vulnerability not present') return False except Exception as e: print(e) return False finally: pass
Check whether the vulnerability exists :param: :return bool True or False: whether the vulnerability exists
python/app/plugins/http/See You/See_Yon.py
check
taomujian/linbing
351
python
def check(self): '\n Check whether the vulnerability exists\n\n :param:\n\n :return bool True or False: whether the vulnerability exists\n ' try: check_req = request.get((self.url + '/seeyon/htmlofficeservlet'), headers=self.headers) if ((check_req.status_code == 200) and ('DBSTEP V3.0 0 21 0 htmoffice operate err' in check_req.text)): print('seeyou vulnerability present') return True else: print('seeyou vulnerability not present') return False except Exception as e: print(e) return False finally: pass
def check(self): '\n Check whether the vulnerability exists\n\n :param:\n\n :return bool True or False: whether the vulnerability exists\n ' try: check_req = request.get((self.url + '/seeyon/htmlofficeservlet'), headers=self.headers) if ((check_req.status_code == 200) and ('DBSTEP V3.0 0 21 0 htmoffice operate err' in check_req.text)): print('seeyou vulnerability present') return True else: print('seeyou vulnerability not present') return False except Exception as e: print(e) return False finally: pass<|docstring|>Check whether the vulnerability exists :param: :return bool True or False: whether the vulnerability exists<|endoftext|>
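The check method above probes /seeyon/htmlofficeservlet and looks for a fixed fingerprint string in the response. Below is a standalone sketch of the same probe using the requests library directly; the function name, timeout, and example host are illustrative, while the URL path and fingerprint are taken from the record.

import requests

def is_seeyon_htmlofficeservlet_exposed(base_url: str, timeout: float = 10.0) -> bool:
    # Same fingerprint check as the plugin's check() method above.
    try:
        resp = requests.get(base_url.rstrip("/") + "/seeyon/htmlofficeservlet", timeout=timeout)
        return resp.status_code == 200 and "DBSTEP V3.0 0 21 0 htmoffice operate err" in resp.text
    except requests.RequestException:
        return False

# Hypothetical usage:
# print(is_seeyon_htmlofficeservlet_exposed("http://target.example"))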
b2a0bd5ab215d49d783abbe3f24e1a1587b57e1461a6447791af830a74d6d6cc
def arthritis(path): 'Arthritis Treatment Data\n\n Data from Koch \\& Edwards (1988) from a double-blind clinical trial\n investigating a new treatment for rheumatoid arthritis.\n\n A data frame with 84 observations and 5 variables.\n\n ID\n patient ID.\n\n Treatment\n factor indicating treatment (Placebo, Treated).\n\n Sex\n factor indicating sex (Female, Male).\n\n Age\n age of patient.\n\n Improved\n ordered factor indicating treatment outcome (None, Some, Marked).\n\n Michael Friendly (2000), Visualizing Categorical Data:\n http://euclid.psych.yorku.ca/ftp/sas/vcd/catdata/arthrit.sas\n\n Args:\n\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there.\n Filename is `arthritis.csv`.\n\n Returns:\n\n Tuple of np.ndarray `x_train` with 84 rows and 5 columns and\n dictionary `metadata` of column headers (feature names).\n ' import pandas as pd path = os.path.expanduser(path) filename = 'arthritis.csv' if (not os.path.exists(os.path.join(path, filename))): url = 'http://dustintran.com/data/r/vcd/Arthritis.csv' maybe_download_and_extract(path, url, save_file_name='arthritis.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return (x_train, metadata)
Arthritis Treatment Data Data from Koch \& Edwards (1988) from a double-blind clinical trial investigating a new treatment for rheumatoid arthritis. A data frame with 84 observations and 5 variables. ID patient ID. Treatment factor indicating treatment (Placebo, Treated). Sex factor indicating sex (Female, Male). Age age of patient. Improved ordered factor indicating treatment outcome (None, Some, Marked). Michael Friendly (2000), Visualizing Categorical Data: http://euclid.psych.yorku.ca/ftp/sas/vcd/catdata/arthrit.sas Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `arthritis.csv`. Returns: Tuple of np.ndarray `x_train` with 84 rows and 5 columns and dictionary `metadata` of column headers (feature names).
observations/r/arthritis.py
arthritis
hajime9652/observations
199
python
def arthritis(path): 'Arthritis Treatment Data\n\n Data from Koch \\& Edwards (1988) from a double-blind clinical trial\n investigating a new treatment for rheumatoid arthritis.\n\n A data frame with 84 observations and 5 variables.\n\n ID\n patient ID.\n\n Treatment\n factor indicating treatment (Placebo, Treated).\n\n Sex\n factor indicating sex (Female, Male).\n\n Age\n age of patient.\n\n Improved\n ordered factor indicating treatment outcome (None, Some, Marked).\n\n Michael Friendly (2000), Visualizing Categorical Data:\n http://euclid.psych.yorku.ca/ftp/sas/vcd/catdata/arthrit.sas\n\n Args:\n\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there.\n Filename is `arthritis.csv`.\n\n Returns:\n\n Tuple of np.ndarray `x_train` with 84 rows and 5 columns and\n dictionary `metadata` of column headers (feature names).\n ' import pandas as pd path = os.path.expanduser(path) filename = 'arthritis.csv' if (not os.path.exists(os.path.join(path, filename))): url = 'http://dustintran.com/data/r/vcd/Arthritis.csv' maybe_download_and_extract(path, url, save_file_name='arthritis.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return (x_train, metadata)
def arthritis(path): 'Arthritis Treatment Data\n\n Data from Koch \\& Edwards (1988) from a double-blind clinical trial\n investigating a new treatment for rheumatoid arthritis.\n\n A data frame with 84 observations and 5 variables.\n\n ID\n patient ID.\n\n Treatment\n factor indicating treatment (Placebo, Treated).\n\n Sex\n factor indicating sex (Female, Male).\n\n Age\n age of patient.\n\n Improved\n ordered factor indicating treatment outcome (None, Some, Marked).\n\n Michael Friendly (2000), Visualizing Categorical Data:\n http://euclid.psych.yorku.ca/ftp/sas/vcd/catdata/arthrit.sas\n\n Args:\n\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there.\n Filename is `arthritis.csv`.\n\n Returns:\n\n Tuple of np.ndarray `x_train` with 84 rows and 5 columns and\n dictionary `metadata` of column headers (feature names).\n ' import pandas as pd path = os.path.expanduser(path) filename = 'arthritis.csv' if (not os.path.exists(os.path.join(path, filename))): url = 'http://dustintran.com/data/r/vcd/Arthritis.csv' maybe_download_and_extract(path, url, save_file_name='arthritis.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return (x_train, metadata)<|docstring|>Arthritis Treatment Data Data from Koch \& Edwards (1988) from a double-blind clinical trial investigating a new treatment for rheumatoid arthritis. A data frame with 84 observations and 5 variables. ID patient ID. Treatment factor indicating treatment (Placebo, Treated). Sex factor indicating sex (Female, Male). Age age of patient. Improved ordered factor indicating treatment outcome (None, Some, Marked). Michael Friendly (2000), Visualizing Categorical Data: http://euclid.psych.yorku.ca/ftp/sas/vcd/catdata/arthrit.sas Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `arthritis.csv`. Returns: Tuple of np.ndarray `x_train` with 84 rows and 5 columns and dictionary `metadata` of column headers (feature names).<|endoftext|>
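A short usage sketch for the loader above, assuming the observations package layout implied by the record's path and a reachable download URL.

from observations.r.arthritis import arthritis

x_train, metadata = arthritis("~/data")
print(x_train.shape)               # expected (84, 5) per the docstring
print(list(metadata["columns"]))   # ID, Treatment, Sex, Age, Improved (per the docstring)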
f6aab136c832a0e95c666b441630b94aa2a9db3904719a5bf2cddffe22d4e929
def train(model, dataloader, optimizer, criterion, device): '\n train Runs one epoch of training.\n\n @param model Model to train.\n @param dataloader Images to train with.\n @param optimizer Optimizer to update weights.\n @param criterion Loss criterion.\n @param device Use of GPU.\n ' model.to(device) model.train() metrics = Metrics() for (_, inputs, labels) in Bar(dataloader): optimizer.zero_grad() (inputs, labels) = (inputs.to(device), labels.to(device)) outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() (_, predicted) = torch.max(outputs.data, 1) metrics.batch(labels=labels, preds=predicted, loss=loss.item()) metrics.print_one_liner() return metrics.summary()
train Runs one epoch of training. @param model Model to train. @param dataloader Images to train with. @param optimizer Optimizer to update weights. @param criterion Loss criterion. @param device Use of GPU.
code/training.py
train
parinitaedke/CNN-Binary-Classification
4
python
def train(model, dataloader, optimizer, criterion, device): '\n train Runs one epoch of training.\n\n @param model Model to train.\n @param dataloader Images to train with.\n @param optimizer Optimizer to update weights.\n @param criterion Loss criterion.\n @param device Use of GPU.\n ' model.to(device) model.train() metrics = Metrics() for (_, inputs, labels) in Bar(dataloader): optimizer.zero_grad() (inputs, labels) = (inputs.to(device), labels.to(device)) outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() (_, predicted) = torch.max(outputs.data, 1) metrics.batch(labels=labels, preds=predicted, loss=loss.item()) metrics.print_one_liner() return metrics.summary()
def train(model, dataloader, optimizer, criterion, device): '\n train Runs one epoch of training.\n\n @param model Model to train.\n @param dataloader Images to train with.\n @param optimizer Optimizer to update weights.\n @param criterion Loss criterion.\n @param device Use of GPU.\n ' model.to(device) model.train() metrics = Metrics() for (_, inputs, labels) in Bar(dataloader): optimizer.zero_grad() (inputs, labels) = (inputs.to(device), labels.to(device)) outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() (_, predicted) = torch.max(outputs.data, 1) metrics.batch(labels=labels, preds=predicted, loss=loss.item()) metrics.print_one_liner() return metrics.summary()<|docstring|>train Runs one epoch of training. @param model Model to train. @param dataloader Images to train with. @param optimizer Optimizer to update weights. @param criterion Loss criterion. @param device Use of GPU.<|endoftext|>
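A hedged sketch of wiring the epoch helper above to a toy setup. The project's dataloader yields (identifier, inputs, labels) triples, as implied by the for (_, inputs, labels) in Bar(dataloader) loop; the model, optimizer, and loss below are illustrative placeholders, and Metrics/Bar remain project-internal.

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Sequential(nn.Flatten(), nn.Linear(64 * 64, 2))  # toy binary classifier
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.CrossEntropyLoss()

# train_loader is assumed to yield (id, image_tensor, label) batches:
# summary = train(model, train_loader, optimizer, criterion, device)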
350624c62626172016404514873323231bc996b1f20aa6d2d8c3bcd330b252d6
def validate(model, dataloader, criterion, device): '\n validate Runs one epoch of validation.\n\n @param model Model to train.\n @param dataloader Images to train with.\n @param criterion Loss criterion.\n @param device Use of GPU.\n ' model.to(device) model.eval() metrics = Metrics() with torch.no_grad(): for (_, inputs, labels) in Bar(dataloader): (inputs, labels) = (inputs.to(device), labels.to(device)) outputs = model(inputs) loss = criterion(outputs, labels) (_, predicted) = torch.max(outputs.data, 1) metrics.batch(labels=labels, preds=predicted, loss=loss.item()) metrics.print_one_liner(phase='Val') return metrics.summary()
validate Runs one epoch of validation. @param model Model to train. @param dataloader Images to train with. @param criterion Loss criterion. @param device Use of GPU.
code/training.py
validate
parinitaedke/CNN-Binary-Classification
4
python
def validate(model, dataloader, criterion, device): '\n validate Runs one epoch of validation.\n\n @param model Model to train.\n @param dataloader Images to train with.\n @param criterion Loss criterion.\n @param device Use of GPU.\n ' model.to(device) model.eval() metrics = Metrics() with torch.no_grad(): for (_, inputs, labels) in Bar(dataloader): (inputs, labels) = (inputs.to(device), labels.to(device)) outputs = model(inputs) loss = criterion(outputs, labels) (_, predicted) = torch.max(outputs.data, 1) metrics.batch(labels=labels, preds=predicted, loss=loss.item()) metrics.print_one_liner(phase='Val') return metrics.summary()
def validate(model, dataloader, criterion, device): '\n validate Runs one epoch of validation.\n\n @param model Model to train.\n @param dataloader Images to train with.\n @param criterion Loss criterion.\n @param device Use of GPU.\n ' model.to(device) model.eval() metrics = Metrics() with torch.no_grad(): for (_, inputs, labels) in Bar(dataloader): (inputs, labels) = (inputs.to(device), labels.to(device)) outputs = model(inputs) loss = criterion(outputs, labels) (_, predicted) = torch.max(outputs.data, 1) metrics.batch(labels=labels, preds=predicted, loss=loss.item()) metrics.print_one_liner(phase='Val') return metrics.summary()<|docstring|>validate Runs one epoch of validation. @param model Model to train. @param dataloader Images to train with. @param criterion Loss criterion. @param device Use of GPU.<|endoftext|>
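Both epoch helpers convert raw model outputs to class predictions with torch.max(outputs.data, 1). A tiny self-contained example of that step and the batch accuracy it implies:

import torch

outputs = torch.tensor([[0.2, 1.5], [2.0, -1.0], [0.1, 0.3]])  # 3 samples, 2 classes
_, predicted = torch.max(outputs, 1)
print(predicted)                                            # tensor([1, 0, 1])
labels = torch.tensor([1, 0, 0])
print((predicted == labels).sum().item() / labels.size(0))  # 0.666... batch accuracy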
eda035efaae879eea452f52f43f97cbcd52c5526d75fed0bc2d217b3e7f319c4
def train_validate(model, train_loader, val_loader, optimizer, criterion, device, epochs, save_criteria, weights_path, save_name): ' \n train_validate Trains and validates a model.\n\n @param model Model to train on.\n @param train_loader Images to train with.\n @param val_loader Images to use for validation.\n @param optimizer Optimizer to update weights.\n @param criterion Loss criterion.\n @param device Use of GPU.\n @param epochs Amount of epochs to train.\n @param save_criteria What metric to use to save best weights.\n @param weights_path Path to the folder to save best weights.\n @param save_name Filename of the best weights.\n ' best_criteria = 0 best_model = {} for epoch in range(1, (epochs + 1)): print(f'Epoch {epoch}') metrics = train(model, train_loader, optimizer, criterion, device) if val_loader: metrics = validate(model, val_loader, criterion, device) if (save_criteria == 'Loss'): metrics['Model Loss'][0] *= (- 1) if ((epoch == 1) or (metrics[('Model ' + save_criteria)][0] >= best_criteria)): best_criteria = metrics[('Model ' + save_criteria)][0] best_model = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'accuracy': metrics['Model Accuracy'][0], 'loss': metrics['Model Loss'][0], 'sensitivity': metrics['Model Sensitivity'][0], 'specificity': metrics['Model Specificity'][0]} save_path = '{}{}_{}_{:.6}.pth'.format(weights_path, save_name, save_criteria, str(best_criteria).replace('.', '_')) torch.save(best_model, save_path) return save_path
train_validate Trains and validates a model. @param model Model to train on. @param train_loader Images to train with. @param val_loader Images to use for validation. @param optimizer Optimizer to update weights. @param criterion Loss criterion. @param device Use of GPU. @param epochs Amount of epochs to train. @param save_criteria What metric to use to save best weights. @param weights_path Path to the folder to save best weights. @param save_name Filename of the best weights.
code/training.py
train_validate
parinitaedke/CNN-Binary-Classification
4
python
def train_validate(model, train_loader, val_loader, optimizer, criterion, device, epochs, save_criteria, weights_path, save_name): ' \n train_validate Trains and validates a model.\n\n @param model Model to train on.\n @param train_loader Images to train with.\n @param val_loader Images to use for validation.\n @param optimizer Optimizer to update weights.\n @param criterion Loss criterion.\n @param device Use of GPU.\n @param epochs Amount of epochs to train.\n @param save_criteria What metric to use to save best weights.\n @param weights_path Path to the folder to save best weights.\n @param save_name Filename of the best weights.\n ' best_criteria = 0 best_model = {} for epoch in range(1, (epochs + 1)): print(f'Epoch {epoch}') metrics = train(model, train_loader, optimizer, criterion, device) if val_loader: metrics = validate(model, val_loader, criterion, device) if (save_criteria == 'Loss'): metrics['Model Loss'][0] *= (- 1) if ((epoch == 1) or (metrics[('Model ' + save_criteria)][0] >= best_criteria)): best_criteria = metrics[('Model ' + save_criteria)][0] best_model = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'accuracy': metrics['Model Accuracy'][0], 'loss': metrics['Model Loss'][0], 'sensitivity': metrics['Model Sensitivity'][0], 'specificity': metrics['Model Specificity'][0]} save_path = '{}{}_{}_{:.6}.pth'.format(weights_path, save_name, save_criteria, str(best_criteria).replace('.', '_')) torch.save(best_model, save_path) return save_path
def train_validate(model, train_loader, val_loader, optimizer, criterion, device, epochs, save_criteria, weights_path, save_name): ' \n train_validate Trains and validates a model.\n\n @param model Model to train on.\n @param train_loader Images to train with.\n @param val_loader Images to use for validation.\n @param optimizer Optimizer to update weights.\n @param criterion Loss criterion.\n @param device Use of GPU.\n @param epochs Amount of epochs to train.\n @param save_criteria What metric to use to save best weights.\n @param weights_path Path to the folder to save best weights.\n @param save_name Filename of the best weights.\n ' best_criteria = 0 best_model = {} for epoch in range(1, (epochs + 1)): print(f'Epoch {epoch}') metrics = train(model, train_loader, optimizer, criterion, device) if val_loader: metrics = validate(model, val_loader, criterion, device) if (save_criteria == 'Loss'): metrics['Model Loss'][0] *= (- 1) if ((epoch == 1) or (metrics[('Model ' + save_criteria)][0] >= best_criteria)): best_criteria = metrics[('Model ' + save_criteria)][0] best_model = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'accuracy': metrics['Model Accuracy'][0], 'loss': metrics['Model Loss'][0], 'sensitivity': metrics['Model Sensitivity'][0], 'specificity': metrics['Model Specificity'][0]} save_path = '{}{}_{}_{:.6}.pth'.format(weights_path, save_name, save_criteria, str(best_criteria).replace('.', '_')) torch.save(best_model, save_path) return save_path<|docstring|>train_validate Trains and validates a model. @param model Model to train on. @param train_loader Images to train with. @param val_loader Images to use for validation. @param optimizer Optimizer to update weights. @param criterion Loss criterion. @param device Use of GPU. @param epochs Amount of epochs to train. @param save_criteria What metric to use to save best weights. @param weights_path Path to the folder to save best weights. @param save_name Filename of the best weights.<|endoftext|>
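train_validate above saves the best checkpoint as a dictionary of state_dicts plus metrics. A hedged sketch of reloading such a checkpoint follows; the file name is illustrative (the real name is built from the save criteria and its value), and model/optimizer are assumed to be constructed beforehand, e.g. as in the earlier toy sketch.

import torch

checkpoint = torch.load("weights/model_Accuracy_0_95.pth", map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
print(checkpoint["epoch"], checkpoint["accuracy"], checkpoint["sensitivity"])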
0288ed44f630ce68d06fdb47df3ce5abc27f2b495d286654e2ad4ede1a30f471
@manager.registry.add_binding(Keys.F2) def opt_help(event): '\n When F2 has been pressed, fill in the "help" command.\n ' event.cli.current_buffer.insert_text('help')
When F2 has been pressed, fill in the "help" command.
crutch/core/repl/keys.py
opt_help
m4yers/crutch
1
python
@manager.registry.add_binding(Keys.F2) def opt_help(event): '\n \n ' event.cli.current_buffer.insert_text('help')
@manager.registry.add_binding(Keys.F2) def opt_help(event): '\n \n ' event.cli.current_buffer.insert_text('help')<|docstring|>When F2 has been pressed, fill in the "help" command.<|endoftext|>
cb0fc1a5bef7df70aa104d667a355acfa4be78b24aa0817ca5eb45d54a057147
@manager.registry.add_binding(Keys.F3) def opt_set_options_length(_): '\n Enable/Disable long option name suggestion.\n ' set_long_options((not get_long_options()))
Enable/Disable long option name suggestion.
crutch/core/repl/keys.py
opt_set_options_length
m4yers/crutch
1
python
@manager.registry.add_binding(Keys.F3) def opt_set_options_length(_): '\n \n ' set_long_options((not get_long_options()))
@manager.registry.add_binding(Keys.F3) def opt_set_options_length(_): '\n \n ' set_long_options((not get_long_options()))<|docstring|>Enable/Disable long option name suggestion.<|endoftext|>
236a216d7a540a3c0828e9b817974ea64fb339ed5ce819e210be0237cfb56104
@manager.registry.add_binding(Keys.F10) def opt_exit(_): '\n When F10 has been pressed, quit.\n ' raise EOFError
When F10 has been pressed, quit.
crutch/core/repl/keys.py
opt_exit
m4yers/crutch
1
python
@manager.registry.add_binding(Keys.F10) def opt_exit(_): '\n \n ' raise EOFError
@manager.registry.add_binding(Keys.F10) def opt_exit(_): '\n \n ' raise EOFError<|docstring|>When F10 has been pressed, quit.<|endoftext|>
aa28055978769fed6f914087ec70d8c457409767637297a58c3b52464b56b924
@manager.registry.add_binding(Keys.ControlSpace) def opt_auto_complete(event): '\n Initialize autocompletion at cursor. If the autocompletion menu is not\n showing, display it with the appropriate completions for the context. If\n the menu is showing, select the next completion.\n ' buf = event.cli.current_buffer if buf.complete_state: buf.complete_next() else: event.cli.start_completion(select_first=False)
Initialize autocompletion at cursor. If the autocompletion menu is not showing, display it with the appropriate completions for the context. If the menu is showing, select the next completion.
crutch/core/repl/keys.py
opt_auto_complete
m4yers/crutch
1
python
@manager.registry.add_binding(Keys.ControlSpace) def opt_auto_complete(event): '\n Initialize autocompletion at cursor. If the autocompletion menu is not\n showing, display it with the appropriate completions for the context. If\n the menu is showing, select the next completion.\n ' buf = event.cli.current_buffer if buf.complete_state: buf.complete_next() else: event.cli.start_completion(select_first=False)
@manager.registry.add_binding(Keys.ControlSpace) def opt_auto_complete(event): '\n Initialize autocompletion at cursor. If the autocompletion menu is not\n showing, display it with the appropriate completions for the context. If\n the menu is showing, select the next completion.\n ' buf = event.cli.current_buffer if buf.complete_state: buf.complete_next() else: event.cli.start_completion(select_first=False)<|docstring|>Initialize autocompletion at cursor. If the autocompletion menu is not showing, display it with the appropriate completions for the context. If the menu is showing, select the next completion.<|endoftext|>
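The four records above register REPL keys through a shared prompt_toolkit registry (the legacy manager.registry.add_binding style, where handlers receive event.cli). A roughly equivalent sketch with the newer KeyBindings API (prompt_toolkit >= 2.0) is shown below; treat the API mapping as an assumption rather than a drop-in replacement for the crutch code.

from prompt_toolkit.key_binding import KeyBindings

bindings = KeyBindings()

@bindings.add("f2")
def _(event):
    # Fill in the "help" command, as opt_help does above.
    event.app.current_buffer.insert_text("help")

@bindings.add("c-space")
def _(event):
    # Start or cycle completions, as opt_auto_complete does above.
    buf = event.app.current_buffer
    if buf.complete_state:
        buf.complete_next()
    else:
        buf.start_completion(select_first=False)

# The bindings object would then be passed to e.g. PromptSession(key_bindings=bindings).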
946ef790615897329fc1f8568ce14aebe6ad3c8e4bad5dd604e119c8475d472b
@click.command('view-files', short_help='filter search files and file ID for files user has access to') @click.option('--name', is_flag=bool, help='provide username in whose repos are to be listed.') @click.option('--types', is_flag=bool, help='provide username in whose repos are to be listed.') @click.option('--pid', is_flag=bool, help='provide parent file ID or sharing link and list its child file/folders.') def view_file(name, types, pid): '\n view-files: Filter based list of the names and ids of the first 10 files the user has access to\n ' token = os.path.join(dirpath, 'token.json') store = file.Storage(token) creds = store.get() service = build('drive', 'v3', http=creds.authorize(Http())) page_token = None query = '' if name: q_name = click.prompt('enter the search value') query = (("name contains '" + q_name) + "' ") if types: mimeTypes = {'xls': 'application/vnd.ms-excel', 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'xml': 'text/xml', 'ods': 'application/vnd.oasis.opendocument.spreadsheet', 'csv': 'text/plain', 'tmpl': 'text/plain', 'pdf': 'application/pdf', 'php': 'application/x-httpd-php', 'jpg': 'image/jpeg', 'png': 'image/png', 'gif': 'image/gif', 'bmp': 'image/bmp', 'txt': 'text/plain', 'doc': 'application/msword', 'js': 'text/js', 'swf': 'application/x-shockwave-flash', 'mp3': 'audio/mpeg', 'zip': 'application/zip', 'rar': 'application/rar', 'tar': 'application/tar', 'arj': 'application/arj', 'cab': 'application/cab', 'html': 'text/html', 'htm': 'text/html', 'default': 'application/octet-stream', 'audio': 'application/vnd.google-apps.audio', 'Google Docs': 'application/vnd.google-apps.document', 'Google Drawing': 'application/vnd.google-apps.drawing', 'Google Drive file': 'application/vnd.google-apps.file', 'Google Forms': 'application/vnd.google-apps.form', 'Google Fusion Tables': 'application/vnd.google-apps.fusiontable', 'Google My Maps': 'application/vnd.google-apps.map', 'Google Photos': 'application/vnd.google-apps.photo', 'Google Slides': 'application/vnd.google-apps.presentation', 'Google Apps Scripts': 'application/vnd.google-apps.script', 'Google Sites': 'application/vnd.google-apps.site', 'Google Sheets': 'application/vnd.google-apps.spreadsheet', '3rd party shortcut': 'application/vnd.google-apps.drive-sdk', 'folder': 'application/vnd.google-apps.folder'} promptMessage = 'Choose a media type to filter \n(press SPACE to mark, ENTER to continue, s to stop):' title = promptMessage options = [x for x in mimeTypes.keys()] picker = Picker(options, title, multi_select=True, min_selection_count=1) picker.register_custom_handler(ord('s'), go_back) selected = picker.start() if isinstance(selected, list): query += 'and (' for types in selected: query += (("mimeType='" + mimeTypes[types[0]]) + "' or ") query = query[:(- 3)] query += ')' if ((not name) and types): query = query[4:] if pid: parent = click.prompt('enter the fid of parent or sharing link') fid = utils.get_fid(parent) if ((name != False) or (types != False)): query += ' and ' query += (("'" + fid) + "' in parents") i = 1 while True: response = service.files().list(q=query, spaces='drive', fields='nextPageToken, files(id, name,mimeType,modifiedTime)', pageToken=page_token).execute() templist = [response.get('files', [])[j:(j + 25)] for j in range(0, len(response.get('files', [])), 25)] for item in templist: t = PrettyTable(['Sr.', 'Name', 'ID', 'Type', 'Modified Time']) for fils in item: t.add_row([i, fils.get('name')[:25], fils.get('id'), 
fils.get('mimeType').replace('application/', '')[:25], fils.get('modifiedTime')]) i += 1 print(t) click.confirm('Do you want to continue?', abort=True) click.clear() page_token = response.get('nextPageToken', None) if (page_token is None): break
view-files: Filter based list of the names and ids of the first 10 files the user has access to
drive_cli/actions.py
view_file
ahegde3/drive-cli
0
python
@click.command('view-files', short_help='filter search files and file ID for files user has access to') @click.option('--name', is_flag=bool, help='provide username in whose repos are to be listed.') @click.option('--types', is_flag=bool, help='provide username in whose repos are to be listed.') @click.option('--pid', is_flag=bool, help='provide parent file ID or sharing link and list its child file/folders.') def view_file(name, types, pid): '\n \n ' token = os.path.join(dirpath, 'token.json') store = file.Storage(token) creds = store.get() service = build('drive', 'v3', http=creds.authorize(Http())) page_token = None query = if name: q_name = click.prompt('enter the search value') query = (("name contains '" + q_name) + "' ") if types: mimeTypes = {'xls': 'application/vnd.ms-excel', 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'xml': 'text/xml', 'ods': 'application/vnd.oasis.opendocument.spreadsheet', 'csv': 'text/plain', 'tmpl': 'text/plain', 'pdf': 'application/pdf', 'php': 'application/x-httpd-php', 'jpg': 'image/jpeg', 'png': 'image/png', 'gif': 'image/gif', 'bmp': 'image/bmp', 'txt': 'text/plain', 'doc': 'application/msword', 'js': 'text/js', 'swf': 'application/x-shockwave-flash', 'mp3': 'audio/mpeg', 'zip': 'application/zip', 'rar': 'application/rar', 'tar': 'application/tar', 'arj': 'application/arj', 'cab': 'application/cab', 'html': 'text/html', 'htm': 'text/html', 'default': 'application/octet-stream', 'audio': 'application/vnd.google-apps.audio', 'Google Docs': 'application/vnd.google-apps.document', 'Google Drawing': 'application/vnd.google-apps.drawing', 'Google Drive file': 'application/vnd.google-apps.file', 'Google Forms': 'application/vnd.google-apps.form', 'Google Fusion Tables': 'application/vnd.google-apps.fusiontable', 'Google My Maps': 'application/vnd.google-apps.map', 'Google Photos': 'application/vnd.google-apps.photo', 'Google Slides': 'application/vnd.google-apps.presentation', 'Google Apps Scripts': 'application/vnd.google-apps.script', 'Google Sites': 'application/vnd.google-apps.site', 'Google Sheets': 'application/vnd.google-apps.spreadsheet', '3rd party shortcut': 'application/vnd.google-apps.drive-sdk', 'folder': 'application/vnd.google-apps.folder'} promptMessage = 'Choose a media type to filter \n(press SPACE to mark, ENTER to continue, s to stop):' title = promptMessage options = [x for x in mimeTypes.keys()] picker = Picker(options, title, multi_select=True, min_selection_count=1) picker.register_custom_handler(ord('s'), go_back) selected = picker.start() if isinstance(selected, list): query += 'and (' for types in selected: query += (("mimeType='" + mimeTypes[types[0]]) + "' or ") query = query[:(- 3)] query += ')' if ((not name) and types): query = query[4:] if pid: parent = click.prompt('enter the fid of parent or sharing link') fid = utils.get_fid(parent) if ((name != False) or (types != False)): query += ' and ' query += (("'" + fid) + "' in parents") i = 1 while True: response = service.files().list(q=query, spaces='drive', fields='nextPageToken, files(id, name,mimeType,modifiedTime)', pageToken=page_token).execute() templist = [response.get('files', [])[j:(j + 25)] for j in range(0, len(response.get('files', [])), 25)] for item in templist: t = PrettyTable(['Sr.', 'Name', 'ID', 'Type', 'Modified Time']) for fils in item: t.add_row([i, fils.get('name')[:25], fils.get('id'), fils.get('mimeType').replace('application/', )[:25], fils.get('modifiedTime')]) i += 1 print(t) click.confirm('Do you want to continue?', 
abort=True) click.clear() page_token = response.get('nextPageToken', None) if (page_token is None): break
@click.command('view-files', short_help='filter search files and file ID for files user has access to') @click.option('--name', is_flag=bool, help='provide username in whose repos are to be listed.') @click.option('--types', is_flag=bool, help='provide username in whose repos are to be listed.') @click.option('--pid', is_flag=bool, help='provide parent file ID or sharing link and list its child file/folders.') def view_file(name, types, pid): '\n \n ' token = os.path.join(dirpath, 'token.json') store = file.Storage(token) creds = store.get() service = build('drive', 'v3', http=creds.authorize(Http())) page_token = None query = if name: q_name = click.prompt('enter the search value') query = (("name contains '" + q_name) + "' ") if types: mimeTypes = {'xls': 'application/vnd.ms-excel', 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'xml': 'text/xml', 'ods': 'application/vnd.oasis.opendocument.spreadsheet', 'csv': 'text/plain', 'tmpl': 'text/plain', 'pdf': 'application/pdf', 'php': 'application/x-httpd-php', 'jpg': 'image/jpeg', 'png': 'image/png', 'gif': 'image/gif', 'bmp': 'image/bmp', 'txt': 'text/plain', 'doc': 'application/msword', 'js': 'text/js', 'swf': 'application/x-shockwave-flash', 'mp3': 'audio/mpeg', 'zip': 'application/zip', 'rar': 'application/rar', 'tar': 'application/tar', 'arj': 'application/arj', 'cab': 'application/cab', 'html': 'text/html', 'htm': 'text/html', 'default': 'application/octet-stream', 'audio': 'application/vnd.google-apps.audio', 'Google Docs': 'application/vnd.google-apps.document', 'Google Drawing': 'application/vnd.google-apps.drawing', 'Google Drive file': 'application/vnd.google-apps.file', 'Google Forms': 'application/vnd.google-apps.form', 'Google Fusion Tables': 'application/vnd.google-apps.fusiontable', 'Google My Maps': 'application/vnd.google-apps.map', 'Google Photos': 'application/vnd.google-apps.photo', 'Google Slides': 'application/vnd.google-apps.presentation', 'Google Apps Scripts': 'application/vnd.google-apps.script', 'Google Sites': 'application/vnd.google-apps.site', 'Google Sheets': 'application/vnd.google-apps.spreadsheet', '3rd party shortcut': 'application/vnd.google-apps.drive-sdk', 'folder': 'application/vnd.google-apps.folder'} promptMessage = 'Choose a media type to filter \n(press SPACE to mark, ENTER to continue, s to stop):' title = promptMessage options = [x for x in mimeTypes.keys()] picker = Picker(options, title, multi_select=True, min_selection_count=1) picker.register_custom_handler(ord('s'), go_back) selected = picker.start() if isinstance(selected, list): query += 'and (' for types in selected: query += (("mimeType='" + mimeTypes[types[0]]) + "' or ") query = query[:(- 3)] query += ')' if ((not name) and types): query = query[4:] if pid: parent = click.prompt('enter the fid of parent or sharing link') fid = utils.get_fid(parent) if ((name != False) or (types != False)): query += ' and ' query += (("'" + fid) + "' in parents") i = 1 while True: response = service.files().list(q=query, spaces='drive', fields='nextPageToken, files(id, name,mimeType,modifiedTime)', pageToken=page_token).execute() templist = [response.get('files', [])[j:(j + 25)] for j in range(0, len(response.get('files', [])), 25)] for item in templist: t = PrettyTable(['Sr.', 'Name', 'ID', 'Type', 'Modified Time']) for fils in item: t.add_row([i, fils.get('name')[:25], fils.get('id'), fils.get('mimeType').replace('application/', )[:25], fils.get('modifiedTime')]) i += 1 print(t) click.confirm('Do you want to continue?', 
abort=True) click.clear() page_token = response.get('nextPageToken', None) if (page_token is None): break<|docstring|>view-files: Filter based list of the names and ids of the first 10 files the user has access to<|endoftext|>
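view_file above assembles a Drive API v3 query string from optional name, MIME-type, and parent-folder clauses. Below is a small sketch of just that query-building logic, isolated from the Drive client; the helper name and sample values are illustrative, while the query syntax (name contains, mimeType=, 'id' in parents) comes from the record.

def build_drive_query(name=None, mime_types=None, parent_id=None):
    # Combine the optional filters with " and ", as the command above does.
    clauses = []
    if name:
        clauses.append(f"name contains '{name}'")
    if mime_types:
        clauses.append("(" + " or ".join(f"mimeType='{m}'" for m in mime_types) + ")")
    if parent_id:
        clauses.append(f"'{parent_id}' in parents")
    return " and ".join(clauses)

print(build_drive_query(name="report", mime_types=["application/pdf"], parent_id="abc123"))
# name contains 'report' and (mimeType='application/pdf') and 'abc123' in parents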