repo | path | url | code | language | partition
---|---|---|---|---|---
pydron/utwist | utwist/_utwist.py | https://github.com/pydron/utwist/blob/31670bdd7630874e2d24e663dbfce8b863b1f02e/utwist/_utwist.py#L33-L98 | def with_reactor(*dec_args, **dec_kwargs):
"""
Decorator for test functions that require a running reactor.
Can be used like this::
@with_reactor
def test_connect_to_server(self):
...
Or like this::
@with_reactor(timeout=10)
def test_connect_to_server(self):
...
If the test function returns a deferred then the test will
be successful if the deferred resolves to a value or unsuccessful
if the deferred errbacks.
The test must not leave any connections or the like open. This will
otherwise result in a reactor-unclean failure of the test.
If there is a function called `twisted_setup()` in the same class
as the test function is defined, then this function will be invoked
before the test, but already in the context of the reactor. Note that
the regular setup function provided by the testing framework will
be executed too, but not in the reactor context.
Accordingly, if there is a `twisted_teardown()` it executes after the
test function, even if the test failed.
If the test, including `twisted_setup` and `twisted_teardown`, has
not completed within the timeout, the test fails. The timeout defaults
to two minutes. A timeout duration of zero disables the timeout.
"""
# This method takes care of the decorator protocol, it
# distinguishes between using the decorator with brackets
# and without brackets. It then calls `_twisted_test_sync()`.
if len(dec_args) == 1 and callable(dec_args[0]) and not dec_kwargs:
# decorator used without brackets:
# @twisted_test
# def test_xxx():
# ....
callee = dec_args[0]
dec_args = ()
dec_kwargs = {}
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs)
return wrapper
else:
# decorator used with brackets:
# @twisted_test(*dec_args, **dec_args)
# def test_xxx():
# ....
def decorator(callee):
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs, *dec_args, **dec_kwargs)
return wrapper
return decorator | python | train |
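A quick sketch of the bracket/no-bracket decorator protocol the function above implements; `flexible` and the decorated functions are hypothetical stand-ins, since `_twisted_test_sync` and a running reactor are not available here.

```python
import functools

def flexible(*dec_args, **dec_kwargs):
    """Decorator usable both as @flexible and as @flexible(option=...)."""
    if len(dec_args) == 1 and callable(dec_args[0]) and not dec_kwargs:
        # @flexible without brackets: the lone positional arg is the function.
        func = dec_args[0]
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    # @flexible(...) with brackets: return a decorator closing over the options.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator

@flexible
def plain():
    return "no brackets"

@flexible(timeout=10)
def configured():
    return "with brackets"

assert plain() == "no brackets" and configured() == "with brackets"
```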
osrg/ryu | ryu/services/protocols/bgp/net_ctrl.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/net_ctrl.py#L370-L377 | def _start_rpc_session(self, sock):
"""Starts a new RPC session with given connection.
"""
session_name = RpcSession.NAME_FMT % str(sock.getpeername())
self._stop_child_activities(session_name)
rpc_session = RpcSession(sock, self)
self._spawn_activity(rpc_session) | python | train |
obriencj/python-javatools | javatools/report.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L627-L639 | def quick_report(report_type, change, options):
"""
writes a change report via report_type to options.output or
sys.stdout
"""
report = report_type(None, options)
if options.output:
with open(options.output, "w") as out:
report.run(change, None, out)
else:
report.run(change, None, sys.stdout) | python | train |
mardix/Juice | juice/decorators.py | https://github.com/mardix/Juice/blob/7afa8d4238868235dfcdae82272bd77958dd416a/juice/decorators.py#L59-L110 | def route(rule=None, **kwargs):
"""
This decorator defines custom route for both class and methods in the view.
It behaves the same way as Flask's @app.route
on class:
It takes the following args
- rule: the root route of the endpoint
- decorators: a list of decorators to run on each method
on methods:
along with the rule, it takes kwargs
- endpoint
- defaults
- ...
:param rule:
:param kwargs:
:return:
"""
_restricted_keys = ["extends", "route", "decorators"]
def decorator(f):
if inspect.isclass(f):
extends = kwargs.pop("extends", None)
if extends and hasattr(extends, self.view_key):
for k, v in getattr(extends, self.view_key).items():
kwargs.setdefault(k, v)
kwargs.setdefault("route", rule)
kwargs["decorators"] = kwargs.get("decorators", []) + f.decorators
setattr(f, "_route_extends__", kwargs)
setattr(f, "base_route", kwargs.get("route"))
setattr(f, "decorators", kwargs.get("decorators", []))
else:
if not rule:
raise ValueError("'rule' is missing in @route ")
for k in _restricted_keys:
if k in kwargs:
del kwargs[k]
# Put the rule cache on the method itself instead of globally
if not hasattr(f, '_rule_cache') or f._rule_cache is None:
f._rule_cache = {f.__name__: [(rule, kwargs)]}
elif not f.__name__ in f._rule_cache:
f._rule_cache[f.__name__] = [(rule, kwargs)]
else:
f._rule_cache[f.__name__].append((rule, kwargs))
return f
return decorator | python | train |
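The `_rule_cache` bookkeeping above is the interesting part: rules accumulate on the function object itself, so stacked decorators do not clobber each other. A minimal, self-contained sketch of just that mechanism (the `remember_rule` and `greet` names are illustrative, not part of Juice's API):

```python
def remember_rule(rule, **kwargs):
    def decorator(f):
        # Store (rule, kwargs) pairs on the function, keyed by its name.
        if not hasattr(f, '_rule_cache') or f._rule_cache is None:
            f._rule_cache = {f.__name__: [(rule, kwargs)]}
        elif f.__name__ not in f._rule_cache:
            f._rule_cache[f.__name__] = [(rule, kwargs)]
        else:
            f._rule_cache[f.__name__].append((rule, kwargs))
        return f
    return decorator

@remember_rule("/greet", endpoint="greet")
@remember_rule("/hello", endpoint="hello")
def greet():
    return "hi"

# Both rules survive stacking because they accumulate on the function itself.
assert len(greet._rule_cache["greet"]) == 2
```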
datacamp/sqlwhat | sqlwhat/State.py | https://github.com/datacamp/sqlwhat/blob/9ae798c63124f994607a0e2c120b24ebbb2bdbe9/sqlwhat/State.py#L16-L27 | def lower_case(f):
"""Decorator specifically for turning mssql AST into lowercase"""
# if it has already been wrapped, we return original
if hasattr(f, "lower_cased"):
return f
@wraps(f)
def wrapper(*args, **kwargs):
f.lower_cased = True
return f(*args, **kwargs).lower()
return wrapper | python | train |
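A hedged usage sketch of the decorator above; `fake_ast_dump` is a stand-in for whatever mssql AST dump function sqlwhat actually wraps. Note the `lower_cased` marker lands on the wrapped function only once it has been called.

```python
from functools import wraps

def lower_case(f):
    if hasattr(f, "lower_cased"):   # already marked? return unchanged
        return f
    @wraps(f)
    def wrapper(*args, **kwargs):
        f.lower_cased = True
        return f(*args, **kwargs).lower()
    return wrapper

def fake_ast_dump():
    return "SELECT Name FROM Users"

dump = lower_case(fake_ast_dump)
assert dump() == "select name from users"
assert fake_ast_dump.lower_cased is True   # flag set on first call
```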
michael-lazar/rtv | rtv/packages/praw/helpers.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/helpers.py#L390-L424 | def convert_numeric_id_to_id36(numeric_id):
"""Convert an integer into its base36 string representation.
This method has been cleaned up slightly to improve readability. For more
info see:
https://github.com/reddit/reddit/blob/master/r2/r2/lib/utils/_utils.pyx
https://www.reddit.com/r/redditdev/comments/n624n/submission_ids_question/
https://en.wikipedia.org/wiki/Base36
"""
# base36 allows negative numbers, but reddit does not
if not isinstance(numeric_id, six.integer_types) or numeric_id < 0:
raise ValueError("must supply a positive int/long")
# Alphabet used for base 36 conversion
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
alphabet_len = len(alphabet)
# Temp assign
current_number = numeric_id
base36 = []
# Current_number must be greater than alphabet length to while/divmod
if 0 <= current_number < alphabet_len:
return alphabet[current_number]
# Break up into chunks
while current_number != 0:
current_number, rem = divmod(current_number, alphabet_len)
base36.append(alphabet[rem])
# String is built in reverse order
return ''.join(reversed(base36)) | python | train |
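A worked example of the divmod loop above, plus a check that Python's built-in `int(s, 36)` inverts it. The helper below is a condensed restatement for illustration, not an import from rtv.

```python
ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'

def to_base36(n):
    if n < 0:
        raise ValueError("must supply a positive int/long")
    if n < 36:
        return ALPHABET[n]
    digits = []
    while n:
        n, rem = divmod(n, 36)        # peel off the least-significant digit
        digits.append(ALPHABET[rem])
    return ''.join(reversed(digits))  # digits come out backwards

assert to_base36(125) == '3h'                # 3*36 + 17
assert int(to_base36(123456), 36) == 123456  # int() is the inverse
```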
ejeschke/ginga | ginga/util/contour.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/contour.py#L36-L45 | def calc_contours(data, num_contours):
"""Get sets of contour points for numpy array `data`.
`num_contours` specifies the number (int) of contours to make.
Returns a list of numpy arrays of points--each array makes a polygon
if plotted as such.
"""
mn = np.nanmean(data)
top = np.nanmax(data)
levels = np.linspace(mn, top, num_contours)
return get_contours(data, levels) | python | train |
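How the level computation behaves on concrete data; `get_contours` itself (the contour-tracing step) is left out since it lives elsewhere in ginga.

```python
import numpy as np

data = np.random.rand(64, 64)
num_contours = 5
levels = np.linspace(np.nanmean(data), np.nanmax(data), num_contours)
# Levels run from the mean to the max inclusive, so the lowest contour
# sits around the bulk of the data and the highest at the brightest pixel.
print(levels)   # e.g. [0.50 0.62 0.75 0.87 1.00] (values vary per run)
```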
hivetech/dna | python/dna/time_utils.py | https://github.com/hivetech/dna/blob/50ad00031be29765b2576fa407d35a36e0608de9/python/dna/time_utils.py#L42-L49 | def _detect_timezone():
'''
Get timezone as set by the system
'''
default_timezone = 'America/New_York'
locale_code = locale.getdefaultlocale()
return default_timezone if not locale_code[0] else \
str(pytz.country_timezones[locale_code[0][-2:]][0]) | python | test |
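The lookup chain above, step by step. This assumes `pytz` is installed; note that `locale.getdefaultlocale()` is deprecated in recent Python versions, so treat this as a sketch of the original logic rather than current best practice.

```python
import locale
import pytz

locale_code = locale.getdefaultlocale()      # e.g. ('en_US', 'UTF-8')
if locale_code[0]:
    country = locale_code[0][-2:]            # 'US' -- last two chars of 'en_US'
    tz = pytz.country_timezones[country][0]  # first zone listed for the country
else:
    tz = 'America/New_York'                  # fallback when the locale is unset
print(tz)
```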
quintusdias/glymur | glymur/lib/openjpeg.py | https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/lib/openjpeg.py#L422-L438 | def cio_open(cinfo, src=None):
"""Wrapper for openjpeg library function opj_cio_open."""
argtypes = [ctypes.POINTER(CommonStructType), ctypes.c_char_p,
ctypes.c_int]
OPENJPEG.opj_cio_open.argtypes = argtypes
OPENJPEG.opj_cio_open.restype = ctypes.POINTER(CioType)
if src is None:
length = 0
else:
length = len(src)
cio = OPENJPEG.opj_cio_open(ctypes.cast(cinfo,
ctypes.POINTER(CommonStructType)),
src,
length)
return cio | python | train |
JoseAntFer/pyny3d | pyny3d/geoms.py | https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L395-L403 | def get_path(self):
"""
:returns: matplotlib.path.Path object for the z=0 projection of
this polygon.
"""
if self.path is None:
from matplotlib import path
return path.Path(self.points[:, :2]) # z=0 projection!
return self.path | python | train |
wal-e/wal-e | wal_e/tar_partition.py | https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/tar_partition.py#L351-L444 | def _segmentation_guts(root, file_paths, max_partition_size):
"""Segment a series of file paths into TarPartition values
These TarPartitions are disjoint and roughly below the prescribed
size.
"""
# Canonicalize root to include the trailing slash, since root is
# intended to be a directory anyway.
if not root.endswith(os.path.sep):
root += os.path.sep
# Ensure that the root path is a directory before continuing.
if not os.path.isdir(root):
raise TarBadRootError(root=root)
bogus_tar = None
try:
# Create a bogus TarFile as a contrivance to be able to run
# gettarinfo and produce such instances. Some of the settings
# on the TarFile are important, like whether to de-reference
# symlinks.
bogus_tar = tarfile.TarFile(os.devnull, 'w', dereference=False)
# Bookkeeping for segmentation of tar members into partitions.
partition_number = 0
partition_bytes = 0
partition_members = 0
partition = TarPartition(partition_number)
for file_path in file_paths:
# Ensure tar members exist within a shared root before
# continuing.
if not file_path.startswith(root):
raise TarBadPathError(root=root, offensive_path=file_path)
# Create an ExtendedTarInfo to represent the tarfile.
try:
et_info = ExtendedTarInfo(
tarinfo=bogus_tar.gettarinfo(
file_path, arcname=file_path[len(root):]),
submitted_path=file_path)
except EnvironmentError as e:
if (e.errno == errno.ENOENT and
e.filename == file_path):
# log a NOTICE/INFO that the file was unlinked.
# Ostensibly harmless (such unlinks should be replayed
# in the WAL) but good to know.
logger.debug(
msg='tar member additions skipping an unlinked file',
detail='Skipping {0}.'.format(file_path))
continue
else:
raise
# Ensure tar members are within an expected size before
# continuing.
if et_info.tarinfo.size > max_partition_size:
raise TarMemberTooBigError(
et_info.tarinfo.name, max_partition_size,
et_info.tarinfo.size)
if (partition_bytes + et_info.tarinfo.size >= max_partition_size
or partition_members >= PARTITION_MAX_MEMBERS):
# Partition is full and cannot accept another member,
# so yield the complete one to the caller.
yield partition
# Prepare a fresh partition to accrue additional file
# paths into.
partition_number += 1
partition_bytes = et_info.tarinfo.size
partition_members = 1
partition = TarPartition(
partition_number, [et_info])
else:
# Partition is able to accept this member, so just add
# it and increment the size counters.
partition_bytes += et_info.tarinfo.size
partition_members += 1
partition.append(et_info)
# Partition size overflow must not be possible
# here.
assert partition_bytes < max_partition_size
finally:
if bogus_tar is not None:
bogus_tar.close()
# Flush out the final partition should it be non-empty.
if partition:
yield partition | python | train |
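The greedy policy above, stripped of all tarfile handling so the partition bookkeeping is visible on its own; the sizes and limits below are made up for illustration.

```python
def segment(sizes, max_size, max_members):
    part, part_bytes = [], 0
    for size in sizes:
        if size > max_size:
            raise ValueError("member bigger than a whole partition")
        if part and (part_bytes + size >= max_size or len(part) >= max_members):
            yield part                     # current partition is full
            part, part_bytes = [], 0
        part.append(size)
        part_bytes += size
    if part:                               # flush the final, non-empty partition
        yield part

print(list(segment([4, 4, 3, 9, 1], max_size=10, max_members=3)))
# [[4, 4], [3], [9], [1]] -- the >= check keeps each partition strictly under max_size
```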
Clinical-Genomics/scout | scout/load/all.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/load/all.py#L90-L105 | def load_scout(adapter, config, ped=None, update=False):
"""Load a new case from a Scout config.
Args:
adapter(MongoAdapter)
config(dict): loading info
ped(Iterable(str)): Pedigree information
update(bool): If existing case should be updated
"""
log.info("Check that the panels exists")
if not check_panels(adapter, config.get('gene_panels', []),
config.get('default_gene_panels')):
raise ConfigError("Some panel(s) does not exist in the database")
case_obj = adapter.load_case(config, update=update)
return case_obj | python | test |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/generators.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/generators.py#L909-L940 | def try_one_generator (project, name, generator, target_type, properties, sources):
""" Checks if generator invocation can be pruned, because it's guaranteed
to fail. If so, quickly returns empty list. Otherwise, calls
try_one_generator_really.
"""
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(generator, Generator)
assert isinstance(target_type, basestring)
assert isinstance(properties, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
source_types = []
for s in sources:
source_types.append (s.type ())
viable_source_types = viable_source_types_for_generator (generator)
if source_types and viable_source_types != ['*'] and\
not set_.intersection (source_types, viable_source_types):
if project.manager ().logger ().on ():
id = generator.id ()
project.manager ().logger ().log (__name__, "generator '%s' pruned" % id)
project.manager ().logger ().log (__name__, "source_types" '%s' % source_types)
project.manager ().logger ().log (__name__, "viable_source_types '%s'" % viable_source_types)
return []
else:
return try_one_generator_really (project, name, generator, target_type, properties, sources) | python | train |
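The pruning test reduces to a set intersection. A stand-in with plain Python sets (Boost.Build's `set_.intersection` plays the same role here):

```python
def can_prune(source_types, viable_source_types):
    # A generator is skipped when sources exist, the generator is not a
    # wildcard ('*'), and no source type overlaps what it can consume.
    return (bool(source_types)
            and viable_source_types != ['*']
            and not set(source_types) & set(viable_source_types))

assert can_prune(['CPP', 'H'], ['ASM', 'C'])   # nothing in common: prune
assert not can_prune(['CPP'], ['*'])           # wildcard generator: never prune
assert not can_prune([], ['ASM'])              # no sources: fall through to the real attempt
```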
Gandi/gandi.cli | gandi/cli/modules/cert.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/cert.py#L377-L399 | def process_csr(cls, common_name, csr=None, private_key=None,
country=None, state=None, city=None, organisation=None,
branch=None):
""" Create a PK and a CSR if needed."""
if csr:
if branch or organisation or city or state or country:
cls.echo('Following options are only used to generate'
' the CSR.')
else:
params = (('CN', common_name),
('OU', branch),
('O', organisation),
('L', city),
('ST', state),
('C', country))
params = [(key, val) for key, val in params if val]
csr = cls.create_csr(common_name, private_key, params)
if csr and os.path.exists(csr):
with open(csr) as fcsr:
csr = fcsr.read()
return csr | python | train |
materialsproject/pymatgen | pymatgen/symmetry/analyzer.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1206-L1226 | def is_valid_op(self, symmop):
"""
Check if a particular symmetry operation is a valid symmetry operation
for a molecule, i.e., the operation maps all atoms to another
equivalent atom.
Args:
symmop (SymmOp): Symmetry operation to test.
Returns:
(bool): Whether SymmOp is valid for Molecule.
"""
coords = self.centered_mol.cart_coords
for site in self.centered_mol:
coord = symmop.operate(site.coords)
ind = find_in_coord_list(coords, coord, self.tol)
if not (len(ind) == 1
and self.centered_mol[ind[0]].species
== site.species):
return False
return True | python | train |
guaix-ucm/numina | numina/core/pipelineload.py | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L43-L50 | def drp_load_data(package, data, confclass=None):
"""Load the DRPS from data."""
drpdict = yaml.safe_load(data)
ins = load_instrument(package, drpdict, confclass=confclass)
if ins.version == 'undefined':
pkg = importlib.import_module(package)
ins.version = getattr(pkg, '__version__', 'undefined')
return ins | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/utils/text.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/text.py#L447-L467 | def marquee(txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'.
:Examples:
In [16]: marquee('A test',40)
Out[16]: '**************** A test ****************'
In [17]: marquee('A test',40,'-')
Out[17]: '---------------- A test ----------------'
In [18]: marquee('A test',40,' ')
Out[18]: ' A test '
"""
if not txt:
return (mark*width)[:width]
nmark = (width-len(txt)-2)//len(mark)//2
if nmark < 0: nmark = 0
marks = mark*nmark
return '%s %s %s' % (marks,txt,marks) | python | test |
ga4gh/ga4gh-server | ga4gh/server/cli/repomanager.py | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/cli/repomanager.py#L480-L489 | def addBiosample(self):
"""
Adds a new biosample into this repo
"""
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosample = bio_metadata.Biosample(
dataset, self._args.biosampleName)
biosample.populateFromJson(self._args.biosample)
self._updateRepo(self._repo.insertBiosample, biosample) | python | train |
mushkevych/scheduler | synergy/scheduler/timetable.py | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/timetable.py#L252-L259 | def get_next_job_record(self, process_name):
""" :returns: the next job record to work on for the given process"""
tree = self.get_tree(process_name)
node = tree.get_next_node(process_name)
if node.job_record is None:
self.assign_job_record(node)
return node.job_record | python | train |
briandilley/ebs-deploy | ebs_deploy/commands/list_environments_command.py | https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/commands/list_environments_command.py#L4-L16 | def execute(helper, config, args):
"""
Lists environments
"""
envs = config.get('app', {}).get('environments', [])
out("Parsed environments:")
for name, conf in list(envs.items()):
out('\t'+name)
envs = helper.get_environments()
out("Deployed environments:")
for env in envs:
if env['Status'] != 'Terminated':
out('\t'+str(env['EnvironmentName'])+' ('+str(env['Status'])+', '+str(env['CNAME'])+')') | python | valid |
apache/incubator-mxnet | python/mxnet/kvstore_server.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore_server.py#L64-L73 | def run(self):
"""Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
"""
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None)) | python | train |
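The `CFUNCTYPE` dance above, which hands a Python callable into C, is easiest to see with a self-contained example that needs no MXNet: libc's `qsort` calling back into Python. This is Unix-flavoured; `find_library("c")` may behave differently on Windows.

```python
import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))
CMPFUNC = ctypes.CFUNCTYPE(ctypes.c_int,
                           ctypes.POINTER(ctypes.c_int),
                           ctypes.POINTER(ctypes.c_int))

def py_cmp(a, b):
    return a[0] - b[0]

arr = (ctypes.c_int * 4)(3, 1, 4, 1)
# Keep a reference to the CFUNCTYPE object while C may still call it --
# the same reason _ctrl_proto is constructed before MXKVStoreRunServer.
cmp_cb = CMPFUNC(py_cmp)
libc.qsort(arr, len(arr), ctypes.sizeof(ctypes.c_int), cmp_cb)
print(list(arr))   # [1, 1, 3, 4]
```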
shoebot/shoebot | lib/web/google.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/google.py#L220-L226 | def search_images(q, start=0, size="", wait=10, asynchronous=False, cached=False):
""" Returns a Google images query formatted as a GoogleSearch list object.
"""
service = GOOGLE_IMAGES
return GoogleSearch(q, start, service, size, wait, asynchronous, cached) | python | valid |
manahl/arctic | arctic/multi_index.py | https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/multi_index.py#L106-L118 | def multi_index_insert_row(df, index_row, values_row):
""" Return a new dataframe with a row inserted for a multi-index dataframe.
This will sort the rows according to the ordered multi-index levels.
"""
row_index = pd.MultiIndex(levels=[[i] for i in index_row],
labels=[[0] for i in index_row])
row = pd.DataFrame(values_row, index=row_index, columns=df.columns)
df = pd.concat((df, row))
if df.index.lexsort_depth == len(index_row) and df.index[-2] < df.index[-1]:
# We've just appended a row to an already-sorted dataframe
return df
# The df wasn't sorted or the row has to be put in the middle somewhere
return df.sort_index() | python | train |
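A usage sketch of the insert-then-maybe-sort idea above. Note the `labels=` keyword in the original is pre-1.0 pandas (it became `codes=` and was later removed), so this sketch builds the row index with `from_tuples` instead:

```python
import pandas as pd

df = pd.DataFrame({'v': [1.0, 3.0]},
                  index=pd.MultiIndex.from_tuples([('a', 1), ('a', 3)],
                                                  names=['sym', 'day']))
row = pd.DataFrame({'v': [2.0]},
                   index=pd.MultiIndex.from_tuples([('a', 2)],
                                                   names=['sym', 'day']))
out = pd.concat((df, row)).sort_index()
print(out)   # ('a', 2) lands between ('a', 1) and ('a', 3)
```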
michael-lazar/rtv | rtv/packages/praw/objects.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L1050-L1053 | def get_blocked(self):
"""Return a UserList of Redditors with whom the user has blocked."""
url = self.reddit_session.config['blocked']
return self.reddit_session.request_json(url) | python | train |
iotile/coretools | iotilecore/iotile/core/utilities/schema_verify/bytes_verify.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/schema_verify/bytes_verify.py#L26-L56 | def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Returns:
bytes or bytearray: The decoded byte buffer
Raises:
ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if self.encoding == 'none' and not isinstance(obj, (bytes, bytearray)):
raise ValidationError('Byte object was not either bytes or a bytearray', type=obj.__class__.__name__)
elif self.encoding == 'base64':
try:
data = base64.b64decode(obj)
return data
except TypeError:
raise ValidationError("Could not decode base64 encoded bytes", obj=obj)
elif self.encoding == 'hex':
try:
data = binascii.unhexlify(obj)
return data
except TypeError:
raise ValidationError("Could not decode hex encoded bytes", obj=obj)
return obj | python | train |
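What the two decode branches above do, and how decode failures surface. One caveat: on Python 3 a bad hex string raises `binascii.Error` rather than the `TypeError` the original catches, so the except clauses above reflect Python 2 semantics.

```python
import base64
import binascii

assert base64.b64decode("aGVsbG8=") == b"hello"
assert binascii.unhexlify("68656c6c6f") == b"hello"

try:
    binascii.unhexlify("zz")           # not valid hex
except binascii.Error as exc:          # Python 3; Python 2 raised TypeError
    print("rejected:", exc)
```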
rshipp/python-dshield | dshield.py | https://github.com/rshipp/python-dshield/blob/1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0/dshield.py#L113-L126 | def topports(sort_by='records', limit=10, date=None, return_format=None):
"""Information about top ports for a particular date with return limit.
:param sort_by: one of 'records', 'targets', 'sources'
:param limit: number of records to be returned
:param date: an optional string in 'Y-M-D' format or datetime.date() object
"""
uri = '/'.join(['topports', sort_by, str(limit)])
if date:
try:
uri = '/'.join([uri, date.strftime("%Y-%m-%d")])
except AttributeError:
uri = '/'.join([uri, date])
return _get(uri, return_format) | python | train |
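The URI assembly above, isolated so the string/date duck-typing is visible (`_get` and the DShield endpoint are omitted):

```python
import datetime

def topports_uri(sort_by='records', limit=10, date=None):
    uri = '/'.join(['topports', sort_by, str(limit)])
    if date:
        try:
            uri = '/'.join([uri, date.strftime("%Y-%m-%d")])
        except AttributeError:         # plain 'Y-M-D' string: no strftime
            uri = '/'.join([uri, date])
    return uri

assert topports_uri() == 'topports/records/10'
assert (topports_uri('targets', 5, datetime.date(2024, 1, 2))
        == 'topports/targets/5/2024-01-02')
assert topports_uri(date='2024-01-02') == 'topports/records/10/2024-01-02'
```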
bitesofcode/projexui | projexui/widgets/xlineedit.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlineedit.py#L152-L182 | def adjustButtons( self ):
"""
Adjusts the placement of the buttons for this line edit.
"""
y = 1
for btn in self.buttons():
btn.setIconSize(self.iconSize())
btn.setFixedSize(QSize(self.height() - 2, self.height() - 2))
# adjust the location for the left buttons
left_buttons = self._buttons.get(Qt.AlignLeft, [])
x = (self.cornerRadius() / 2.0) + 2
for btn in left_buttons:
btn.move(x, y)
x += btn.width()
# adjust the location for the right buttons
right_buttons = self._buttons.get(Qt.AlignRight, [])
w = self.width()
bwidth = sum([btn.width() for btn in right_buttons])
bwidth += (self.cornerRadius() / 2.0) + 1
for btn in right_buttons:
btn.move(w - bwidth, y)
bwidth -= btn.width()
self._buttonWidth = sum([btn.width() for btn in self.buttons()])
self.adjustTextMargins() | python | train |
Bystroushaak/pyDHTMLParser | src/dhtmlparser/htmlelement/html_query.py | https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/htmlelement/html_query.py#L176-L238 | def wfind(self, tag_name, params=None, fn=None, case_sensitive=False):
"""
This method works the same as :meth:`find`, but only in one level of the
:attr:`childs`.
This allows to chain :meth:`wfind` calls::
>>> dom = dhtmlparser.parseString('''
... <root>
... <some>
... <something>
... <xe id="wanted xe" />
... </something>
... <something>
... asd
... </something>
... <xe id="another xe" />
... </some>
... <some>
... else
... <xe id="yet another xe" />
... </some>
... </root>
... ''')
>>> xe = dom.wfind("root").wfind("some").wfind("something").find("xe")
>>> xe
[<dhtmlparser.htmlelement.HTMLElement object at 0x8a979ac>]
>>> str(xe[0])
'<xe id="wanted xe" />'
Args:
tag_name (str): Name of the tag you are looking for. Set to "" if
you wish to use only `fn` parameter.
params (dict, default None): Parameters which have to be present
in tag to be considered matching.
fn (function, default None): Use this function to match tags.
Function expects one parameter which is HTMLElement instance.
case_sensitive (bool, default False): Use case sensitive search.
Returns:
obj: Blank HTMLElement with all matches in :attr:`childs` property.
Note:
The returned element also has its :attr:`_container` property set to True.
"""
childs = self.childs
if self._container: # container object
childs = map(
lambda x: x.childs,
filter(lambda x: x.childs, self.childs)
)
childs = sum(childs, []) # flatten the list
el = self.__class__() # HTMLElement()
el._container = True
for child in childs:
if child.isEndTag():
continue
if child.isAlmostEqual(tag_name, params, fn, case_sensitive):
el.childs.append(child)
return el | [
"def",
"wfind",
"(",
"self",
",",
"tag_name",
",",
"params",
"=",
"None",
",",
"fn",
"=",
"None",
",",
"case_sensitive",
"=",
"False",
")",
":",
"childs",
"=",
"self",
".",
"childs",
"if",
"self",
".",
"_container",
":",
"# container object",
"childs",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"childs",
",",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"childs",
",",
"self",
".",
"childs",
")",
")",
"childs",
"=",
"sum",
"(",
"childs",
",",
"[",
"]",
")",
"# flattern the list",
"el",
"=",
"self",
".",
"__class__",
"(",
")",
"# HTMLElement()",
"el",
".",
"_container",
"=",
"True",
"for",
"child",
"in",
"childs",
":",
"if",
"child",
".",
"isEndTag",
"(",
")",
":",
"continue",
"if",
"child",
".",
"isAlmostEqual",
"(",
"tag_name",
",",
"params",
",",
"fn",
",",
"case_sensitive",
")",
":",
"el",
".",
"childs",
".",
"append",
"(",
"child",
")",
"return",
"el"
]
| This methods works same as :meth:`find`, but only in one level of the
:attr:`childs`.
This allows to chain :meth:`wfind` calls::
>>> dom = dhtmlparser.parseString('''
... <root>
... <some>
... <something>
... <xe id="wanted xe" />
... </something>
... <something>
... asd
... </something>
... <xe id="another xe" />
... </some>
... <some>
... else
... <xe id="yet another xe" />
... </some>
... </root>
... ''')
>>> xe = dom.wfind("root").wfind("some").wfind("something").find("xe")
>>> xe
[<dhtmlparser.htmlelement.HTMLElement object at 0x8a979ac>]
>>> str(xe[0])
'<xe id="wanted xe" />'
Args:
tag_name (str): Name of the tag you are looking for. Set to "" if
you wish to use only `fn` parameter.
params (dict, default None): Parameters which have to be present
in tag to be considered matching.
fn (function, default None): Use this function to match tags.
Function expects one parameter which is HTMLElement instance.
case_sensitive (bool, default False): Use case sensitive search.
Returns:
obj: Blank HTMLElement with all matches in :attr:`childs` property.
Note:
The returned element also has its :attr:`_container` property set to True. | [
"This",
"methods",
"works",
"same",
"as",
":",
"meth",
":",
"find",
"but",
"only",
"in",
"one",
"level",
"of",
"the",
":",
"attr",
":",
"childs",
"."
]
| python | train |
Kane610/axis | axis/vapix.py | https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/vapix.py#L49-L65 | def request(self, method, path, **kwargs):
"""Prepare HTTP request."""
if method == 'get':
session_method = self.config.session.get
elif method == 'post':
session_method = self.config.session.post
else:
raise AxisException
url = self.config.url + path
result = session_request(session_method, url, **kwargs)
_LOGGER.debug("Response: %s from %s", result, self.config.host)
return result | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"==",
"'get'",
":",
"session_method",
"=",
"self",
".",
"config",
".",
"session",
".",
"get",
"elif",
"method",
"==",
"'post'",
":",
"session_method",
"=",
"self",
".",
"config",
".",
"session",
".",
"post",
"else",
":",
"raise",
"AxisException",
"url",
"=",
"self",
".",
"config",
".",
"url",
"+",
"path",
"result",
"=",
"session_request",
"(",
"session_method",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Response: %s from %s\"",
",",
"result",
",",
"self",
".",
"config",
".",
"host",
")",
"return",
"result"
]
| Prepare HTTP request. | [
"Prepare",
"HTTP",
"request",
"."
]
| python | train |
mozilla/Marketplace.Python | marketplace/connection.py | https://github.com/mozilla/Marketplace.Python/blob/88176b12201f766b6b96bccc1e4c3e82f0676283/marketplace/connection.py#L81-L84 | def fetch_json(self, method, url, data=None, expected_status_code=None):
"""Return json decoded data from fetch
"""
return self.fetch(method, url, data, expected_status_code).json() | [
"def",
"fetch_json",
"(",
"self",
",",
"method",
",",
"url",
",",
"data",
"=",
"None",
",",
"expected_status_code",
"=",
"None",
")",
":",
"return",
"self",
".",
"fetch",
"(",
"method",
",",
"url",
",",
"data",
",",
"expected_status_code",
")",
".",
"json",
"(",
")"
]
| Return json decoded data from fetch | [
"Return",
"json",
"decoded",
"data",
"from",
"fetch"
]
| python | train |
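A minimal usage sketch for the record above, assuming a configured Connection instance `conn` from this module; the endpoint URL and expected status code are hypothetical:
>>> # hypothetical endpoint; fetch() is assumed to return a requests-style response
>>> apps = conn.fetch_json('get', 'https://marketplace.example/api/apps/',
...                        expected_status_code=200)
>>> isinstance(apps, (dict, list))
True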
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L679-L695 | def _fw_rule_update(self, drvr_name, data):
"""Firewall Rule update routine.
Function to decode the updated rules and call routines that
in turn call the device routines to update rules.
"""
LOG.debug("FW Update %s", data)
tenant_id = data.get('firewall_rule').get('tenant_id')
fw_rule = data.get('firewall_rule')
rule = self._fw_rule_decode_store(data)
rule_id = fw_rule.get('id')
if tenant_id not in self.fwid_attr or not (
self.fwid_attr[tenant_id].is_rule_present(rule_id)):
LOG.error("Incorrect update info for tenant %s", tenant_id)
return
self.fwid_attr[tenant_id].rule_update(rule_id, rule)
self._check_update_fw(tenant_id, drvr_name) | [
"def",
"_fw_rule_update",
"(",
"self",
",",
"drvr_name",
",",
"data",
")",
":",
"LOG",
".",
"debug",
"(",
"\"FW Update %s\"",
",",
"data",
")",
"tenant_id",
"=",
"data",
".",
"get",
"(",
"'firewall_rule'",
")",
".",
"get",
"(",
"'tenant_id'",
")",
"fw_rule",
"=",
"data",
".",
"get",
"(",
"'firewall_rule'",
")",
"rule",
"=",
"self",
".",
"_fw_rule_decode_store",
"(",
"data",
")",
"rule_id",
"=",
"fw_rule",
".",
"get",
"(",
"'id'",
")",
"if",
"tenant_id",
"not",
"in",
"self",
".",
"fwid_attr",
"or",
"not",
"(",
"self",
".",
"fwid_attr",
"[",
"tenant_id",
"]",
".",
"is_rule_present",
"(",
"rule_id",
")",
")",
":",
"LOG",
".",
"error",
"(",
"\"Incorrect update info for tenant %s\"",
",",
"tenant_id",
")",
"return",
"self",
".",
"fwid_attr",
"[",
"tenant_id",
"]",
".",
"rule_update",
"(",
"rule_id",
",",
"rule",
")",
"self",
".",
"_check_update_fw",
"(",
"tenant_id",
",",
"drvr_name",
")"
]
| Firewall Rule update routine.
Function to decode the updated rules and call routines that
in turn call the device routines to update rules. | [
"Firewall",
"Rule",
"update",
"routine",
"."
]
| python | train |
note35/sinon | sinon/lib/mock.py | https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/mock.py#L124-L136 | def verify(self):
"""
Running all conditions in the instance variable valid_list
Return:
True: pass all conditions
False: fail at least one condition
"""
if self not in self._queue:
return False
valid = True
for check in self.valid_list:
valid = valid & check()
return valid | [
"def",
"verify",
"(",
"self",
")",
":",
"if",
"self",
"not",
"in",
"self",
".",
"_queue",
":",
"return",
"False",
"valid",
"=",
"True",
"for",
"check",
"in",
"self",
".",
"valid_list",
":",
"valid",
"=",
"valid",
"&",
"check",
"(",
")",
"return",
"valid"
]
| Running all conditions in the instance variable valid_list
Return:
True: pass all conditions
False: fail at least one condition | [
"Running",
"all",
"conditions",
"in",
"the",
"instance",
"variable",
"valid_list",
"Return",
":",
"True",
":",
"pass",
"all",
"conditions",
"False",
":",
"fail",
"at",
"more",
"than",
"one",
"condition"
]
| python | train |
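The `valid = valid & check()` loop above uses bitwise `&` on booleans, so every registered condition runs even after one fails (no short-circuiting, unlike `and`). A dependency-free sketch of that accumulation:
>>> checks = [lambda: True, lambda: False, lambda: True]
>>> valid = True
>>> for check in checks:
...     valid = valid & check()  # all three callables execute
>>> valid
False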
mfitzp/padua | padua/utils.py | https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/utils.py#L191-L205 | def format_label(sl, fmt=None):
"""
Combine a list of strings to a single str, joined by a space.
Passes through single strings.
:param sl:
:return:
"""
if isinstance(sl, str):
# Already is a string.
return sl
if fmt:
return fmt.format(*sl)
return ' '.join(str(s) for s in sl) | [
"def",
"format_label",
"(",
"sl",
",",
"fmt",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"sl",
",",
"str",
")",
":",
"# Already is a string.",
"return",
"sl",
"if",
"fmt",
":",
"return",
"fmt",
".",
"format",
"(",
"*",
"sl",
")",
"return",
"' '",
".",
"join",
"(",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"sl",
")"
]
| Combine a list of strings to a single str, joined by a space.
Passes through single strings.
:param sl:
:return: | [
"Combine",
"a",
"list",
"of",
"strings",
"to",
"a",
"single",
"str",
"joined",
"by",
"sep",
".",
"Passes",
"through",
"single",
"strings",
".",
":",
"param",
"sl",
":",
":",
"return",
":"
]
| python | train |
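A usage sketch, assuming format_label is imported from padua.utils (per this record's path):
>>> format_label('already a string')
'already a string'
>>> format_label(['Sample', 1])
'Sample 1'
>>> format_label(('Sample', 1), fmt='{}-{:02d}')
'Sample-01'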
Qiskit/qiskit-terra | qiskit/quantum_info/operators/channel/superop.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/superop.py#L122-L127 | def transpose(self):
"""Return the transpose of the QuantumChannel."""
return SuperOp(
np.transpose(self._data),
input_dims=self.output_dims(),
output_dims=self.input_dims()) | [
"def",
"transpose",
"(",
"self",
")",
":",
"return",
"SuperOp",
"(",
"np",
".",
"transpose",
"(",
"self",
".",
"_data",
")",
",",
"input_dims",
"=",
"self",
".",
"output_dims",
"(",
")",
",",
"output_dims",
"=",
"self",
".",
"input_dims",
"(",
")",
")"
]
| Return the transpose of the QuantumChannel. | [
"Return",
"the",
"transpose",
"of",
"the",
"QuantumChannel",
"."
]
| python | test |
rigetti/pyquil | pyquil/api/_quantum_computer.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_quantum_computer.py#L441-L461 | def _get_9q_square_qvm(name: str, noisy: bool,
connection: ForestConnection = None,
qvm_type: str = 'qvm') -> QuantumComputer:
"""
A nine-qubit 3x3 square lattice.
This uses a "generic" lattice not tied to any specific device. 9 qubits is large enough
to do vaguely interesting algorithms and small enough to simulate quickly.
:param name: The name of this QVM
:param connection: The connection to use to talk to external services
:param noisy: Whether to construct a noisy quantum computer
:param qvm_type: The type of QVM. Either 'qvm' or 'pyqvm'.
:return: A pre-configured QuantumComputer
"""
topology = nx.convert_node_labels_to_integers(nx.grid_2d_graph(3, 3))
return _get_qvm_with_topology(name=name, connection=connection,
topology=topology,
noisy=noisy,
requires_executable=True,
qvm_type=qvm_type) | [
"def",
"_get_9q_square_qvm",
"(",
"name",
":",
"str",
",",
"noisy",
":",
"bool",
",",
"connection",
":",
"ForestConnection",
"=",
"None",
",",
"qvm_type",
":",
"str",
"=",
"'qvm'",
")",
"->",
"QuantumComputer",
":",
"topology",
"=",
"nx",
".",
"convert_node_labels_to_integers",
"(",
"nx",
".",
"grid_2d_graph",
"(",
"3",
",",
"3",
")",
")",
"return",
"_get_qvm_with_topology",
"(",
"name",
"=",
"name",
",",
"connection",
"=",
"connection",
",",
"topology",
"=",
"topology",
",",
"noisy",
"=",
"noisy",
",",
"requires_executable",
"=",
"True",
",",
"qvm_type",
"=",
"qvm_type",
")"
]
| A nine-qubit 3x3 square lattice.
This uses a "generic" lattice not tied to any specific device. 9 qubits is large enough
to do vaguely interesting algorithms and small enough to simulate quickly.
:param name: The name of this QVM
:param connection: The connection to use to talk to external services
:param noisy: Whether to construct a noisy quantum computer
:param qvm_type: The type of QVM. Either 'qvm' or 'pyqvm'.
:return: A pre-configured QuantumComputer | [
"A",
"nine",
"-",
"qubit",
"3x3",
"square",
"lattice",
"."
]
| python | train |
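The 3x3 lattice this helper passes to `_get_qvm_with_topology` can be reproduced with networkx alone; a minimal sketch of just the topology construction:
>>> import networkx as nx
>>> topology = nx.convert_node_labels_to_integers(nx.grid_2d_graph(3, 3))
>>> sorted(topology.nodes())
[0, 1, 2, 3, 4, 5, 6, 7, 8]
>>> topology.number_of_edges()  # 12 couplings in a 3x3 grid
12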
claudep/translitcodec | translitcodec/__init__.py | https://github.com/claudep/translitcodec/blob/63b4a928afcb1e7721b33936e4d56254f415ddaf/translitcodec/__init__.py#L25-L36 | def long_encode(input, errors='strict'):
"""Transliterate to 8 bit using as many letters as needed.
For example, \u00e4 LATIN SMALL LETTER A WITH DIAERESIS ``ä`` will
be replaced with ``ae``.
"""
if not isinstance(input, text_type):
input = text_type(input, sys.getdefaultencoding(), errors)
length = len(input)
input = unicodedata.normalize('NFKC', input)
return input.translate(long_table), length | [
"def",
"long_encode",
"(",
"input",
",",
"errors",
"=",
"'strict'",
")",
":",
"if",
"not",
"isinstance",
"(",
"input",
",",
"text_type",
")",
":",
"input",
"=",
"text_type",
"(",
"input",
",",
"sys",
".",
"getdefaultencoding",
"(",
")",
",",
"errors",
")",
"length",
"=",
"len",
"(",
"input",
")",
"input",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFKC'",
",",
"input",
")",
"return",
"input",
".",
"translate",
"(",
"long_table",
")",
",",
"length"
]
| Transliterate to 8 bit using as many letters as needed.
For example, \u00e4 LATIN SMALL LETTER A WITH DIAERESIS ``ä`` will
be replaced with ``ae``. | [
"Transliterate",
"to",
"8",
"bit",
"using",
"as",
"many",
"letters",
"as",
"needed",
"."
]
| python | train |
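A usage sketch, assuming translitcodec is installed and its long_table maps the example character as the docstring states; per the codec convention, the return value pairs the transliterated text with the consumed input length:
>>> from translitcodec import long_encode
>>> long_encode(u'\u00e4')  # LATIN SMALL LETTER A WITH DIAERESIS
('ae', 1)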
Contraz/demosys-py | demosys/scene/camera.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/scene/camera.py#L65-L76 | def _update_yaw_and_pitch(self):
"""
Updates the camera vectors based on the current yaw and pitch
"""
front = Vector3([0.0, 0.0, 0.0])
front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))
front.y = sin(radians(self.pitch))
front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))
self.dir = vector.normalise(front)
self.right = vector.normalise(vector3.cross(self.dir, self._up))
self.up = vector.normalise(vector3.cross(self.right, self.dir)) | [
"def",
"_update_yaw_and_pitch",
"(",
"self",
")",
":",
"front",
"=",
"Vector3",
"(",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
"front",
".",
"x",
"=",
"cos",
"(",
"radians",
"(",
"self",
".",
"yaw",
")",
")",
"*",
"cos",
"(",
"radians",
"(",
"self",
".",
"pitch",
")",
")",
"front",
".",
"y",
"=",
"sin",
"(",
"radians",
"(",
"self",
".",
"pitch",
")",
")",
"front",
".",
"z",
"=",
"sin",
"(",
"radians",
"(",
"self",
".",
"yaw",
")",
")",
"*",
"cos",
"(",
"radians",
"(",
"self",
".",
"pitch",
")",
")",
"self",
".",
"dir",
"=",
"vector",
".",
"normalise",
"(",
"front",
")",
"self",
".",
"right",
"=",
"vector",
".",
"normalise",
"(",
"vector3",
".",
"cross",
"(",
"self",
".",
"dir",
",",
"self",
".",
"_up",
")",
")",
"self",
".",
"up",
"=",
"vector",
".",
"normalise",
"(",
"vector3",
".",
"cross",
"(",
"self",
".",
"right",
",",
"self",
".",
"dir",
")",
")"
]
| Updates the camera vectors based on the current yaw and pitch | [
"Updates",
"the",
"camera",
"vectors",
"based",
"on",
"the",
"current",
"yaw",
"and",
"pitch"
]
| python | valid |
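The front-vector computation above is a standard spherical-to-Cartesian conversion; a dependency-free sketch with a hypothetical yaw/pitch pair (a yaw of -90 degrees looks down -z in this convention):
>>> from math import cos, sin, radians
>>> yaw, pitch = -90.0, 0.0
>>> front = (cos(radians(yaw)) * cos(radians(pitch)),
...          sin(radians(pitch)),
...          sin(radians(yaw)) * cos(radians(pitch)))
>>> [round(c, 6) for c in front]
[0.0, 0.0, -1.0]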
jkokorian/pyqt2waybinding | pyqt2waybinding/__init__.py | https://github.com/jkokorian/pyqt2waybinding/blob/fb1fb84f55608cfbf99c6486650100ba81743117/pyqt2waybinding/__init__.py#L142-L166 | def bindToProperty(self,instance,propertyName,useGetter=False):
"""
2-way binds to an instance property.
Parameters:
- instance -- the object instance
- propertyName -- the name of the property to bind to
- useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False)
Notes:
2-way binds to an instance property according to one of the following naming conventions:
@property, propertyName.setter and pyqtSignal
- getter: propertyName
- setter: propertyName
- changedSignal: propertyNameChanged
getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox)
- getter: propertyName()
- setter: setPropertyName()
- changedSignal: propertyNameChanged
"""
endpoint = BindingEndpoint.forProperty(instance,propertyName,useGetter = useGetter)
self.bindToEndPoint(endpoint) | [
"def",
"bindToProperty",
"(",
"self",
",",
"instance",
",",
"propertyName",
",",
"useGetter",
"=",
"False",
")",
":",
"endpoint",
"=",
"BindingEndpoint",
".",
"forProperty",
"(",
"instance",
",",
"propertyName",
",",
"useGetter",
"=",
"useGetter",
")",
"self",
".",
"bindToEndPoint",
"(",
"endpoint",
")"
]
| 2-way binds to an instance property.
Parameters:
- instance -- the object instance
- propertyName -- the name of the property to bind to
- useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False)
Notes:
2-way binds to an instance property according to one of the following naming conventions:
@property, propertyName.setter and pyqtSignal
- getter: propertyName
- setter: propertyName
- changedSignal: propertyNameChanged
getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox)
- getter: propertyName()
- setter: setPropertyName()
- changedSignal: propertyNameChanged | [
"2",
"-",
"way",
"binds",
"to",
"an",
"instance",
"property",
"."
]
| python | train |
gwastro/pycbc | pycbc/inference/models/gaussian_noise.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/gaussian_noise.py#L380-L399 | def det_lognl(self, det):
"""Returns the log likelihood of the noise in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The log likelihood of the noise in the requested detector.
"""
try:
return self.__det_lognls[det]
except AttributeError:
# hasn't been calculated yet, call lognl to calculate & store
self._lognl()
# now try returning
return self.__det_lognls[det] | [
"def",
"det_lognl",
"(",
"self",
",",
"det",
")",
":",
"try",
":",
"return",
"self",
".",
"__det_lognls",
"[",
"det",
"]",
"except",
"AttributeError",
":",
"# hasn't been calculated yet, call lognl to calculate & store",
"self",
".",
"_lognl",
"(",
")",
"# now try returning",
"return",
"self",
".",
"__det_lognls",
"[",
"det",
"]"
]
| Returns the log likelihood of the noise in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The log likelihood of the noise in the requested detector. | [
"Returns",
"the",
"log",
"likelihood",
"of",
"the",
"noise",
"in",
"the",
"given",
"detector",
"."
]
| python | train |
bcbio/bcbio-nextgen | bcbio/heterogeneity/phylowgs.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L114-L125 | def _read_cnv_ssms(in_file):
"""Map CNVs to associated SSMs
"""
out = {}
with open(in_file) as in_handle:
in_handle.readline() # header
for line in in_handle:
parts = line.strip().split()
if len(parts) > 3:
cnvid, _, _, ssms = parts
out[cnvid] = [x.split(",")[0] for x in ssms.split(";")]
return out | [
"def",
"_read_cnv_ssms",
"(",
"in_file",
")",
":",
"out",
"=",
"{",
"}",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"in_handle",
".",
"readline",
"(",
")",
"# header",
"for",
"line",
"in",
"in_handle",
":",
"parts",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parts",
")",
">",
"3",
":",
"cnvid",
",",
"_",
",",
"_",
",",
"ssms",
"=",
"parts",
"out",
"[",
"cnvid",
"]",
"=",
"[",
"x",
".",
"split",
"(",
"\",\"",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"ssms",
".",
"split",
"(",
"\";\"",
")",
"]",
"return",
"out"
]
| Map CNVs to associated SSMs | [
"Map",
"CNVs",
"to",
"associated",
"SSMs"
]
| python | train |
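A sketch of the row format the parser above expects, using a hypothetical PhyloWGS-style line (id, two middle columns, then `ssm,weight` pairs separated by semicolons):
>>> line = "c0\t0.5\t2\ts0,0.2;s1,0.8"
>>> cnvid, _, _, ssms = line.strip().split()
>>> {cnvid: [x.split(",")[0] for x in ssms.split(";")]}
{'c0': ['s0', 's1']}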
gpoulter/python-ngram | ngram.py | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L170-L179 | def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N] | [
"def",
"_split",
"(",
"self",
",",
"string",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
"-",
"self",
".",
"N",
"+",
"1",
")",
":",
"yield",
"string",
"[",
"i",
":",
"i",
"+",
"self",
".",
"N",
"]"
]
| Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg'] | [
"Iterates",
"over",
"the",
"ngrams",
"of",
"a",
"string",
"(",
"no",
"padding",
")",
"."
]
| python | train |
ssato/python-anyconfig | src/anyconfig/backend/yaml/pyyaml.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/yaml/pyyaml.py#L170-L190 | def yml_dump(data, stream, yml_fnc=yml_fnc, **options):
"""An wrapper of yaml.safe_dump and yaml.dump.
:param data: Some data to dump
:param stream: a file or file-like object to dump YAML data
"""
_is_dict = anyconfig.utils.is_dict_like(data)
if options.get("ac_safe", False):
options = {}
elif not options.get("Dumper", False) and _is_dict:
# TODO: Any other way to get its constructor?
maybe_container = options.get("ac_dict", type(data))
options["Dumper"] = _customized_dumper(maybe_container)
if _is_dict:
# Type information and the order of items are lost on dump currently.
data = anyconfig.dicts.convert_to(data, ac_dict=dict)
options = common.filter_from_options("ac_dict", options)
return yml_fnc("dump", data, stream, **options) | [
"def",
"yml_dump",
"(",
"data",
",",
"stream",
",",
"yml_fnc",
"=",
"yml_fnc",
",",
"*",
"*",
"options",
")",
":",
"_is_dict",
"=",
"anyconfig",
".",
"utils",
".",
"is_dict_like",
"(",
"data",
")",
"if",
"options",
".",
"get",
"(",
"\"ac_safe\"",
",",
"False",
")",
":",
"options",
"=",
"{",
"}",
"elif",
"not",
"options",
".",
"get",
"(",
"\"Dumper\"",
",",
"False",
")",
"and",
"_is_dict",
":",
"# TODO: Any other way to get its constructor?",
"maybe_container",
"=",
"options",
".",
"get",
"(",
"\"ac_dict\"",
",",
"type",
"(",
"data",
")",
")",
"options",
"[",
"\"Dumper\"",
"]",
"=",
"_customized_dumper",
"(",
"maybe_container",
")",
"if",
"_is_dict",
":",
"# Type information and the order of items are lost on dump currently.",
"data",
"=",
"anyconfig",
".",
"dicts",
".",
"convert_to",
"(",
"data",
",",
"ac_dict",
"=",
"dict",
")",
"options",
"=",
"common",
".",
"filter_from_options",
"(",
"\"ac_dict\"",
",",
"options",
")",
"return",
"yml_fnc",
"(",
"\"dump\"",
",",
"data",
",",
"stream",
",",
"*",
"*",
"options",
")"
]
| A wrapper of yaml.safe_dump and yaml.dump.
:param data: Some data to dump
:param stream: a file or file-like object to dump YAML data | [
"An",
"wrapper",
"of",
"yaml",
".",
"safe_dump",
"and",
"yaml",
".",
"dump",
"."
]
| python | train |
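A usage sketch, assuming anyconfig with its PyYAML backend is installed; `ac_safe=True` clears the custom options so the dump goes through the plain yml_fnc path and block-style YAML is written to the stream:
>>> import sys
>>> from anyconfig.backend.yaml.pyyaml import yml_dump  # import path per this record
>>> yml_dump({"a": 1}, sys.stdout, ac_safe=True)
a: 1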
inveniosoftware-attic/invenio-comments | invenio_comments/api.py | https://github.com/inveniosoftware-attic/invenio-comments/blob/62bb6e07c146baf75bf8de80b5896ab2a01a8423/invenio_comments/api.py#L1196-L1214 | def unsubscribe_user_from_discussion(recID, uid):
"""
Unsubscribe users from a discussion.
:param recID: record ID corresponding to the discussion we want to
unsubscribe the user from
:param uid: user id
:return: 1 if successful, 0 if not
"""
query = """DELETE FROM "cmtSUBSCRIPTION"
WHERE id_bibrec=%s AND id_user=%s"""
params = (recID, uid)
try:
res = run_sql(query, params)
except:
return 0
if res > 0:
return 1
return 0 | [
"def",
"unsubscribe_user_from_discussion",
"(",
"recID",
",",
"uid",
")",
":",
"query",
"=",
"\"\"\"DELETE FROM \"cmtSUBSCRIPTION\"\n WHERE id_bibrec=%s AND id_user=%s\"\"\"",
"params",
"=",
"(",
"recID",
",",
"uid",
")",
"try",
":",
"res",
"=",
"run_sql",
"(",
"query",
",",
"params",
")",
"except",
":",
"return",
"0",
"if",
"res",
">",
"0",
":",
"return",
"1",
"return",
"0"
]
| Unsubscribe users from a discussion.
:param recID: record ID corresponding to the discussion we want to
unsubscribe the user from
:param uid: user id
:return: 1 if successful, 0 if not | [
"Unsubscribe",
"users",
"from",
"a",
"discussion",
"."
]
| python | train |
pudo/dataset | dataset/table.py | https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L390-L399 | def drop(self):
"""Drop the table from the database.
Deletes both the schema and all the contents within it.
"""
with self.db.lock:
if self.exists:
self._threading_warn()
self.table.drop(self.db.executable, checkfirst=True)
self._table = None | [
"def",
"drop",
"(",
"self",
")",
":",
"with",
"self",
".",
"db",
".",
"lock",
":",
"if",
"self",
".",
"exists",
":",
"self",
".",
"_threading_warn",
"(",
")",
"self",
".",
"table",
".",
"drop",
"(",
"self",
".",
"db",
".",
"executable",
",",
"checkfirst",
"=",
"True",
")",
"self",
".",
"_table",
"=",
"None"
]
| Drop the table from the database.
Deletes both the schema and all the contents within it. | [
"Drop",
"the",
"table",
"from",
"the",
"database",
"."
]
| python | train |
creare-com/pydem | pydem/reader/my_types.py | https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/reader/my_types.py#L244-L251 | def _get_x_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# 0,0 is top/left of the top/left pixel. Actual x/y coords of that pixel are (.5,.5).
x_centers = np.linspace(.5, self.x_size - .5, self.x_size)
y_centers = x_centers * 0
return (self.geotransform[0]
+ self.geotransform[1] * x_centers
+ self.geotransform[2] * y_centers) | [
"def",
"_get_x_axis",
"(",
"self",
")",
":",
"# 0,0 is top/left top top/left pixel. Actual x/y coord of that pixel are (.5,.5).",
"x_centers",
"=",
"np",
".",
"linspace",
"(",
".5",
",",
"self",
".",
"x_size",
"-",
".5",
",",
"self",
".",
"x_size",
")",
"y_centers",
"=",
"x_centers",
"*",
"0",
"return",
"(",
"self",
".",
"geotransform",
"[",
"0",
"]",
"+",
"self",
".",
"geotransform",
"[",
"1",
"]",
"*",
"x_centers",
"+",
"self",
".",
"geotransform",
"[",
"2",
"]",
"*",
"y_centers",
")"
]
| See http://www.gdal.org/gdal_datamodel.html for details. | [
"See",
"http",
":",
"//",
"www",
".",
"gdal",
".",
"org",
"/",
"gdal_datamodel",
".",
"html",
"for",
"details",
"."
]
| python | train |
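The affine math above, worked with a hypothetical GDAL geotransform (origin 440720, 60 m pixels, no rotation); pixel centers land half a pixel in from the origin:
>>> import numpy as np
>>> gt = (440720.0, 60.0, 0.0, 3751320.0, 0.0, -60.0)
>>> x_centers = np.linspace(.5, 4 - .5, 4)
>>> gt[0] + gt[1] * x_centers + gt[2] * (x_centers * 0)
array([440750., 440810., 440870., 440930.])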
wonambi-python/wonambi | wonambi/widgets/overview.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/overview.py#L388-L419 | def mark_quality(self, start_time, length, qual_name):
"""Mark signal quality, only add the new ones.
Parameters
----------
start_time : int
start time in s of the epoch being scored.
length : int
duration in s of the epoch being scored.
qual_name : str
one of the stages defined in global stages.
"""
y_pos = BARS['quality']['pos0']
height = 10
# the -1 is really important, otherwise we stay on the edge of the rect
old_score = self.scene.itemAt(start_time + length / 2,
y_pos + height - 1,
self.transform())
# check we are not removing the black border
if old_score is not None and old_score.pen() == NoPen:
lg.debug('Removing old score at {}'.format(start_time))
self.scene.removeItem(old_score)
self.idx_annot.remove(old_score)
if qual_name == 'Poor':
rect = QGraphicsRectItem(start_time, y_pos, length, height)
rect.setPen(NoPen)
rect.setBrush(Qt.black)
self.scene.addItem(rect)
self.idx_annot.append(rect) | [
"def",
"mark_quality",
"(",
"self",
",",
"start_time",
",",
"length",
",",
"qual_name",
")",
":",
"y_pos",
"=",
"BARS",
"[",
"'quality'",
"]",
"[",
"'pos0'",
"]",
"height",
"=",
"10",
"# the -1 is really important, otherwise we stay on the edge of the rect",
"old_score",
"=",
"self",
".",
"scene",
".",
"itemAt",
"(",
"start_time",
"+",
"length",
"/",
"2",
",",
"y_pos",
"+",
"height",
"-",
"1",
",",
"self",
".",
"transform",
"(",
")",
")",
"# check we are not removing the black border",
"if",
"old_score",
"is",
"not",
"None",
"and",
"old_score",
".",
"pen",
"(",
")",
"==",
"NoPen",
":",
"lg",
".",
"debug",
"(",
"'Removing old score at {}'",
".",
"format",
"(",
"start_time",
")",
")",
"self",
".",
"scene",
".",
"removeItem",
"(",
"old_score",
")",
"self",
".",
"idx_annot",
".",
"remove",
"(",
"old_score",
")",
"if",
"qual_name",
"==",
"'Poor'",
":",
"rect",
"=",
"QGraphicsRectItem",
"(",
"start_time",
",",
"y_pos",
",",
"length",
",",
"height",
")",
"rect",
".",
"setPen",
"(",
"NoPen",
")",
"rect",
".",
"setBrush",
"(",
"Qt",
".",
"black",
")",
"self",
".",
"scene",
".",
"addItem",
"(",
"rect",
")",
"self",
".",
"idx_annot",
".",
"append",
"(",
"rect",
")"
]
| Mark signal quality, only add the new ones.
Parameters
----------
start_time : int
start time in s of the epoch being scored.
length : int
duration in s of the epoch being scored.
qual_name : str
one of the stages defined in global stages. | [
"Mark",
"signal",
"quality",
"only",
"add",
"the",
"new",
"ones",
"."
]
| python | train |
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_misseditor/mission_editor.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_misseditor/mission_editor.py#L224-L263 | def process_mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
mtype = m.get_type()
# if you add processing for an mtype here, remember to add it
# to mavlink_packet, above
if mtype in ['WAYPOINT_COUNT','MISSION_COUNT']:
if (self.num_wps_expected == 0):
#I haven't asked for WPs, or these messages are duplicates
#of msgs I've already received.
self.console.error("No waypoint load started (from Editor).")
#I only clear the mission in the Editor if this was a read event
elif (self.num_wps_expected == -1):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_CLEAR_MISS_TABLE))
self.num_wps_expected = m.count
self.wps_received = {}
if (m.count > 0):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_ADD_MISS_TABLE_ROWS,num_rows=m.count-1))
#write has been sent by the mission editor:
elif (self.num_wps_expected > 1):
if (m.count != self.num_wps_expected):
self.console.error("Unepxected waypoint count from APM after write (Editor)")
#since this is a write operation from the Editor there
#should be no need to update number of table rows
elif mtype in ['WAYPOINT', 'MISSION_ITEM']:
#still expecting wps?
if (len(self.wps_received) < self.num_wps_expected):
#if we haven't already received this wp, write it to the GUI:
if (m.seq not in self.wps_received.keys()):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_MISS_ITEM,
num=m.seq,command=m.command,param1=m.param1,
param2=m.param2,param3=m.param3,param4=m.param4,
lat=m.x,lon=m.y,alt=m.z,frame=m.frame))
self.wps_received[m.seq] = True | [
"def",
"process_mavlink_packet",
"(",
"self",
",",
"m",
")",
":",
"mtype",
"=",
"m",
".",
"get_type",
"(",
")",
"# if you add processing for an mtype here, remember to add it",
"# to mavlink_packet, above",
"if",
"mtype",
"in",
"[",
"'WAYPOINT_COUNT'",
",",
"'MISSION_COUNT'",
"]",
":",
"if",
"(",
"self",
".",
"num_wps_expected",
"==",
"0",
")",
":",
"#I haven't asked for WPs, or these messages are duplicates",
"#of msgs I've already received.",
"self",
".",
"console",
".",
"error",
"(",
"\"No waypoint load started (from Editor).\"",
")",
"#I only clear the mission in the Editor if this was a read event",
"elif",
"(",
"self",
".",
"num_wps_expected",
"==",
"-",
"1",
")",
":",
"self",
".",
"gui_event_queue",
".",
"put",
"(",
"MissionEditorEvent",
"(",
"me_event",
".",
"MEGE_CLEAR_MISS_TABLE",
")",
")",
"self",
".",
"num_wps_expected",
"=",
"m",
".",
"count",
"self",
".",
"wps_received",
"=",
"{",
"}",
"if",
"(",
"m",
".",
"count",
">",
"0",
")",
":",
"self",
".",
"gui_event_queue",
".",
"put",
"(",
"MissionEditorEvent",
"(",
"me_event",
".",
"MEGE_ADD_MISS_TABLE_ROWS",
",",
"num_rows",
"=",
"m",
".",
"count",
"-",
"1",
")",
")",
"#write has been sent by the mission editor:",
"elif",
"(",
"self",
".",
"num_wps_expected",
">",
"1",
")",
":",
"if",
"(",
"m",
".",
"count",
"!=",
"self",
".",
"num_wps_expected",
")",
":",
"self",
".",
"console",
".",
"error",
"(",
"\"Unepxected waypoint count from APM after write (Editor)\"",
")",
"#since this is a write operation from the Editor there",
"#should be no need to update number of table rows",
"elif",
"mtype",
"in",
"[",
"'WAYPOINT'",
",",
"'MISSION_ITEM'",
"]",
":",
"#still expecting wps?",
"if",
"(",
"len",
"(",
"self",
".",
"wps_received",
")",
"<",
"self",
".",
"num_wps_expected",
")",
":",
"#if we haven't already received this wp, write it to the GUI:",
"if",
"(",
"m",
".",
"seq",
"not",
"in",
"self",
".",
"wps_received",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"gui_event_queue",
".",
"put",
"(",
"MissionEditorEvent",
"(",
"me_event",
".",
"MEGE_SET_MISS_ITEM",
",",
"num",
"=",
"m",
".",
"seq",
",",
"command",
"=",
"m",
".",
"command",
",",
"param1",
"=",
"m",
".",
"param1",
",",
"param2",
"=",
"m",
".",
"param2",
",",
"param3",
"=",
"m",
".",
"param3",
",",
"param4",
"=",
"m",
".",
"param4",
",",
"lat",
"=",
"m",
".",
"x",
",",
"lon",
"=",
"m",
".",
"y",
",",
"alt",
"=",
"m",
".",
"z",
",",
"frame",
"=",
"m",
".",
"frame",
")",
")",
"self",
".",
"wps_received",
"[",
"m",
".",
"seq",
"]",
"=",
"True"
]
| handle an incoming mavlink packet | [
"handle",
"an",
"incoming",
"mavlink",
"packet"
]
| python | train |
saltstack/salt | salt/modules/elasticsearch.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L965-L987 | def search_template_delete(id, hosts=None, profile=None):
'''
.. versionadded:: 2017.7.0
Delete existing search template definition.
id
Template ID
CLI example::
salt myminion elasticsearch.search_template_delete mytemplate
'''
es = _get_instance(hosts, profile)
try:
result = es.delete_template(id=id)
return result.get('acknowledged', False)
except elasticsearch.NotFoundError:
return True
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot delete search template {0}, server returned code {1} with message {2}".format(id, e.status_code, e.error)) | [
"def",
"search_template_delete",
"(",
"id",
",",
"hosts",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"es",
"=",
"_get_instance",
"(",
"hosts",
",",
"profile",
")",
"try",
":",
"result",
"=",
"es",
".",
"delete_template",
"(",
"id",
"=",
"id",
")",
"return",
"result",
".",
"get",
"(",
"'acknowledged'",
",",
"False",
")",
"except",
"elasticsearch",
".",
"NotFoundError",
":",
"return",
"True",
"except",
"elasticsearch",
".",
"TransportError",
"as",
"e",
":",
"raise",
"CommandExecutionError",
"(",
"\"Cannot delete search template {0}, server returned code {1} with message {2}\"",
".",
"format",
"(",
"id",
",",
"e",
".",
"status_code",
",",
"e",
".",
"error",
")",
")"
]
| .. versionadded:: 2017.7.0
Delete existing search template definition.
id
Template ID
CLI example::
salt myminion elasticsearch.search_template_delete mytemplate | [
"..",
"versionadded",
"::",
"2017",
".",
"7",
".",
"0"
]
| python | train |
googleapis/google-cloud-python | bigquery/google/cloud/bigquery/_helpers.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/_helpers.py#L536-L569 | def _del_sub_prop(container, keys):
"""Remove a nested key fro a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to clear the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
Examples:
Remove a top-level value (equivalent to ``del container['key']``).
>>> container = {'key': 'value'}
>>> _del_sub_prop(container, ['key'])
>>> container
{}
Remove a nested value.
>>> container = {'key': {'subkey': 'value'}}
>>> _del_sub_prop(container, ['key', 'subkey'])
>>> container
{'key': {}}
"""
sub_val = container
for key in keys[:-1]:
if key not in sub_val:
sub_val[key] = {}
sub_val = sub_val[key]
if keys[-1] in sub_val:
del sub_val[keys[-1]] | [
"def",
"_del_sub_prop",
"(",
"container",
",",
"keys",
")",
":",
"sub_val",
"=",
"container",
"for",
"key",
"in",
"keys",
"[",
":",
"-",
"1",
"]",
":",
"if",
"key",
"not",
"in",
"sub_val",
":",
"sub_val",
"[",
"key",
"]",
"=",
"{",
"}",
"sub_val",
"=",
"sub_val",
"[",
"key",
"]",
"if",
"keys",
"[",
"-",
"1",
"]",
"in",
"sub_val",
":",
"del",
"sub_val",
"[",
"keys",
"[",
"-",
"1",
"]",
"]"
]
| Remove a nested key from a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to clear the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
Examples:
Remove a top-level value (equivalent to ``del container['key']``).
>>> container = {'key': 'value'}
>>> _del_sub_prop(container, ['key'])
>>> container
{}
Remove a nested value.
>>> container = {'key': {'subkey': 'value'}}
>>> _del_sub_prop(container, ['key', 'subkey'])
>>> container
{'key': {}} | [
"Remove",
"a",
"nested",
"key",
"fro",
"a",
"dictionary",
"."
]
| python | train |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py#L845-L861 | def get_variable_group(self, project, group_id):
"""GetVariableGroup.
[Preview API] Get a variable group.
:param str project: Project ID or project name
:param int group_id: Id of the variable group.
:rtype: :class:`<VariableGroup> <azure.devops.v5_1.task-agent.models.VariableGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
response = self._send(http_method='GET',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('VariableGroup', response) | [
"def",
"get_variable_group",
"(",
"self",
",",
"project",
",",
"group_id",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"group_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'groupId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'group_id'",
",",
"group_id",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'f5b09dd5-9d54-45a1-8b5a-1c8287d634cc'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'VariableGroup'",
",",
"response",
")"
]
| GetVariableGroup.
[Preview API] Get a variable group.
:param str project: Project ID or project name
:param int group_id: Id of the variable group.
:rtype: :class:`<VariableGroup> <azure.devops.v5_1.task-agent.models.VariableGroup>` | [
"GetVariableGroup",
".",
"[",
"Preview",
"API",
"]",
"Get",
"a",
"variable",
"group",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"param",
"int",
"group_id",
":",
"Id",
"of",
"the",
"variable",
"group",
".",
":",
"rtype",
":",
":",
"class",
":",
"<VariableGroup",
">",
"<azure",
".",
"devops",
".",
"v5_1",
".",
"task",
"-",
"agent",
".",
"models",
".",
"VariableGroup",
">"
]
| python | train |
rocky/python3-trepan | trepan/lib/bytecode.py | https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/bytecode.py#L36-L49 | def next_opcode(code, offset):
'''Return the next opcode and offset as a tuple. Tuple (-100,
-1000) is returned when reaching the end.'''
n = len(code)
while offset < n:
op = code[offset]
offset += 1
if op >= HAVE_ARGUMENT:
offset += 2
pass
yield op, offset
pass
yield -100, -1000
pass | [
"def",
"next_opcode",
"(",
"code",
",",
"offset",
")",
":",
"n",
"=",
"len",
"(",
"code",
")",
"while",
"offset",
"<",
"n",
":",
"op",
"=",
"code",
"[",
"offset",
"]",
"offset",
"+=",
"1",
"if",
"op",
">=",
"HAVE_ARGUMENT",
":",
"offset",
"+=",
"2",
"pass",
"yield",
"op",
",",
"offset",
"pass",
"yield",
"-",
"100",
",",
"-",
"1000",
"pass"
]
| Return the next opcode and offset as a tuple. Tuple (-100,
-1000) is returned when reaching the end. | [
"Return",
"the",
"next",
"opcode",
"and",
"offset",
"as",
"a",
"tuple",
".",
"Tuple",
"(",
"-",
"100",
"-",
"1000",
")",
"is",
"returned",
"when",
"reaching",
"the",
"end",
"."
]
| python | test |
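A sketch of the generator above on a hypothetical byte string; the `offset += 2` step assumes 16-bit opcode arguments, i.e. the pre-3.6 bytecode layout, and it assumes next_opcode plus HAVE_ARGUMENT (dis.HAVE_ARGUMENT, 90) are defined in the session:
>>> import dis
>>> HAVE_ARGUMENT = dis.HAVE_ARGUMENT
>>> code = bytes([116, 1, 0, 83])  # old-layout LOAD_GLOBAL 1; RETURN_VALUE
>>> list(next_opcode(code, 0))
[(116, 3), (83, 4), (-100, -1000)]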
sernst/cauldron | cauldron/invoke/invoker.py | https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/invoke/invoker.py#L83-L109 | def run(action: str, args: dict) -> int:
"""
Runs the specified command action and returns the return status code
for exit.
:param action:
The action to run
:param args:
The arguments parsed for the specified action
"""
if args.get('show_version_info'):
return run_version(args)
actions = dict(
shell=run_shell,
kernel=run_kernel,
serve=run_kernel,
version=run_version
)
if action not in actions:
print('[ERROR]: Unrecognized sub command "{}"'.format(action))
parser = args['parser'] # type: ArgumentParser
parser.print_help()
return 1
return actions.get(action)(args) | [
"def",
"run",
"(",
"action",
":",
"str",
",",
"args",
":",
"dict",
")",
"->",
"int",
":",
"if",
"args",
".",
"get",
"(",
"'show_version_info'",
")",
":",
"return",
"run_version",
"(",
"args",
")",
"actions",
"=",
"dict",
"(",
"shell",
"=",
"run_shell",
",",
"kernel",
"=",
"run_kernel",
",",
"serve",
"=",
"run_kernel",
",",
"version",
"=",
"run_version",
")",
"if",
"action",
"not",
"in",
"actions",
":",
"print",
"(",
"'[ERROR]: Unrecognized sub command \"{}\"'",
".",
"format",
"(",
"action",
")",
")",
"parser",
"=",
"args",
"[",
"'parser'",
"]",
"# type: ArgumentParser",
"parser",
".",
"print_help",
"(",
")",
"return",
"1",
"return",
"actions",
".",
"get",
"(",
"action",
")",
"(",
"args",
")"
]
| Runs the specified command action and returns the return status code
for exit.
:param action:
The action to run
:param args:
The arguments parsed for the specified action | [
"Runs",
"the",
"specified",
"command",
"action",
"and",
"returns",
"the",
"return",
"status",
"code",
"for",
"exit",
".",
":",
"param",
"action",
":",
"The",
"action",
"to",
"run",
":",
"param",
"args",
":",
"The",
"arguments",
"parsed",
"for",
"the",
"specified",
"action"
]
| python | train |
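A stripped-down sketch of the dispatch-dict pattern used above, with stub handlers standing in for the real ones:
>>> def run_shell(args): return 0
>>> def run_version(args): return 2
>>> actions = dict(shell=run_shell, version=run_version)
>>> actions.get('version')({'show_version_info': True})
2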
androguard/androguard | androguard/core/api_specific_resources/__init__.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/api_specific_resources/__init__.py#L13-L65 | def load_permissions(apilevel, permtype='permissions'):
"""
Load the Permissions for the given apilevel.
The permissions lists are generated using this tool: https://github.com/U039b/aosp_permissions_extraction
Has a fallback to select the maximum or minimum available API level.
For example, if 28 is requested but only 26 is available, 26 is returned.
If 5 is requested but 16 is available, 16 is returned.
If an API level is requested which is in between of two API levels we got,
the lower level is returned. For example, if 5,6,7,10 is available and 8 is
requested, 7 is returned instead.
:param apilevel: integer value of the API level
:param permtype: either load permissions (:code:`'permissions'`) or
permission groups (:code:`'groups'`)
:return: a dictionary of {Permission Name: {Permission info}
"""
if permtype not in ['permissions', 'groups']:
raise ValueError("The type of permission list is not known.")
# Usually apilevel is supplied as string...
apilevel = int(apilevel)
root = os.path.dirname(os.path.realpath(__file__))
permissions_file = os.path.join(root, "aosp_permissions", "permissions_{}.json".format(apilevel))
levels = filter(lambda x: re.match(r'^permissions_\d+\.json$', x), os.listdir(os.path.join(root, "aosp_permissions")))
levels = list(map(lambda x: int(x[:-5].split('_')[1]), levels))
if not levels:
log.error("No Permissions available, can not load!")
return {}
log.debug("Available API levels: {}".format(", ".join(map(str, sorted(levels)))))
if not os.path.isfile(permissions_file):
if apilevel > max(levels):
log.warning("Requested API level {} is larger than maximum we have, returning API level {} instead.".format(apilevel, max(levels)))
return load_permissions(max(levels), permtype)
if apilevel < min(levels):
log.warning("Requested API level {} is smaller than minimal we have, returning API level {} instead.".format(apilevel, max(levels)))
return load_permissions(min(levels), permtype)
# Missing level between existing ones, return the lower level
lower_level = max(filter(lambda x: x < apilevel, levels))
log.warning("Requested API Level could not be found, using {} instead".format(lower_level))
return load_permissions(lower_level, permtype)
with open(permissions_file, "r") as fp:
return json.load(fp)[permtype] | [
"def",
"load_permissions",
"(",
"apilevel",
",",
"permtype",
"=",
"'permissions'",
")",
":",
"if",
"permtype",
"not",
"in",
"[",
"'permissions'",
",",
"'groups'",
"]",
":",
"raise",
"ValueError",
"(",
"\"The type of permission list is not known.\"",
")",
"# Usually apilevel is supplied as string...",
"apilevel",
"=",
"int",
"(",
"apilevel",
")",
"root",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"permissions_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"\"aosp_permissions\"",
",",
"\"permissions_{}.json\"",
".",
"format",
"(",
"apilevel",
")",
")",
"levels",
"=",
"filter",
"(",
"lambda",
"x",
":",
"re",
".",
"match",
"(",
"r'^permissions_\\d+\\.json$'",
",",
"x",
")",
",",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"\"aosp_permissions\"",
")",
")",
")",
"levels",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"int",
"(",
"x",
"[",
":",
"-",
"5",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
")",
",",
"levels",
")",
")",
"if",
"not",
"levels",
":",
"log",
".",
"error",
"(",
"\"No Permissions available, can not load!\"",
")",
"return",
"{",
"}",
"log",
".",
"debug",
"(",
"\"Available API levels: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"sorted",
"(",
"levels",
")",
")",
")",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"permissions_file",
")",
":",
"if",
"apilevel",
">",
"max",
"(",
"levels",
")",
":",
"log",
".",
"warning",
"(",
"\"Requested API level {} is larger than maximum we have, returning API level {} instead.\"",
".",
"format",
"(",
"apilevel",
",",
"max",
"(",
"levels",
")",
")",
")",
"return",
"load_permissions",
"(",
"max",
"(",
"levels",
")",
",",
"permtype",
")",
"if",
"apilevel",
"<",
"min",
"(",
"levels",
")",
":",
"log",
".",
"warning",
"(",
"\"Requested API level {} is smaller than minimal we have, returning API level {} instead.\"",
".",
"format",
"(",
"apilevel",
",",
"max",
"(",
"levels",
")",
")",
")",
"return",
"load_permissions",
"(",
"min",
"(",
"levels",
")",
",",
"permtype",
")",
"# Missing level between existing ones, return the lower level",
"lower_level",
"=",
"max",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"<",
"apilevel",
",",
"levels",
")",
")",
"log",
".",
"warning",
"(",
"\"Requested API Level could not be found, using {} instead\"",
".",
"format",
"(",
"lower_level",
")",
")",
"return",
"load_permissions",
"(",
"lower_level",
",",
"permtype",
")",
"with",
"open",
"(",
"permissions_file",
",",
"\"r\"",
")",
"as",
"fp",
":",
"return",
"json",
".",
"load",
"(",
"fp",
")",
"[",
"permtype",
"]"
]
| Load the Permissions for the given apilevel.
The permissions lists are generated using this tool: https://github.com/U039b/aosp_permissions_extraction
Has a fallback to select the maximum or minimal available API level.
For example, if 28 is requested but only 26 is available, 26 is returned.
If 5 is requested but 16 is available, 16 is returned.
If an API level is requested which is in between of two API levels we got,
the lower level is returned. For example, if 5,6,7,10 is available and 8 is
requested, 7 is returned instead.
:param apilevel: integer value of the API level
:param permtype: either load permissions (:code:`'permissions'`) or
permission groups (:code:`'groups'`)
:return: a dictionary of {Permission Name: {Permission info} | [
"Load",
"the",
"Permissions",
"for",
"the",
"given",
"apilevel",
"."
]
| python | train |
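The level-selection fallback described in the docstring, isolated as a sketch with the docstring's own example numbers:
>>> levels = [5, 6, 7, 10]
>>> apilevel = 8
>>> max(filter(lambda x: x < apilevel, levels))  # nearest lower level
7
>>> min(levels)  # clamp when the request is below the range
5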
ubc/ubcpi | ubcpi/serialize.py | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/serialize.py#L157-L189 | def parse_seeds_xml(root):
"""
Parse <seeds> element in the UBCPI XBlock's content XML.
Args:
root (lxml.etree.Element): The root of the <seeds> node in the tree.
Returns:
a list of deserialized representations of seeds. E.g.
[{
'answer': 1, # option index starting from one
'rationale': 'This is a seeded answer',
},
{....
}]
Raises:
ValidationError: The XML definition is invalid.
"""
seeds = []
for seed_el in root.findall('seed'):
seed_dict = dict()
seed_dict['rationale'] = _safe_get_text(seed_el)
if 'option' in seed_el.attrib:
seed_dict['answer'] = int(seed_el.attrib['option']) - 1
else:
raise ValidationError(_('Seed element must have an option attribute.'))
seeds.append(seed_dict)
return seeds | [
"def",
"parse_seeds_xml",
"(",
"root",
")",
":",
"seeds",
"=",
"[",
"]",
"for",
"seed_el",
"in",
"root",
".",
"findall",
"(",
"'seed'",
")",
":",
"seed_dict",
"=",
"dict",
"(",
")",
"seed_dict",
"[",
"'rationale'",
"]",
"=",
"_safe_get_text",
"(",
"seed_el",
")",
"if",
"'option'",
"in",
"seed_el",
".",
"attrib",
":",
"seed_dict",
"[",
"'answer'",
"]",
"=",
"int",
"(",
"seed_el",
".",
"attrib",
"[",
"'option'",
"]",
")",
"-",
"1",
"else",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Seed element must have an option attribute.'",
")",
")",
"seeds",
".",
"append",
"(",
"seed_dict",
")",
"return",
"seeds"
]
| Parse <seeds> element in the UBCPI XBlock's content XML.
Args:
root (lxml.etree.Element): The root of the <seeds> node in the tree.
Returns:
a list of deserialized representations of seeds. E.g.
[{
'answer': 1, # option index starting from one
'rationale': 'This is a seeded answer',
},
{....
}]
Raises:
ValidationError: The XML definition is invalid. | [
"Parse",
"<seeds",
">",
"element",
"in",
"the",
"UBCPI",
"XBlock",
"s",
"content",
"XML",
"."
]
| python | train |
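A sketch of the XML shape the parser above consumes, assuming lxml is available; note how the one-based option attribute becomes a zero-based answer index, as parse_seeds_xml does:
>>> from lxml import etree
>>> root = etree.fromstring(
...     '<seeds><seed option="1">This is a seeded answer</seed></seeds>')
>>> [(int(s.attrib['option']) - 1, s.text) for s in root.findall('seed')]
[(0, 'This is a seeded answer')]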
cloudendpoints/endpoints-python | endpoints/endpoints_dispatcher.py | https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/endpoints_dispatcher.py#L149-L172 | def dispatch(self, request, start_response):
"""Handles dispatch to apiserver handlers.
This typically ends up calling start_response and returning the entire
body of the response.
Args:
request: An ApiRequest, the request from the user.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the body of the response.
"""
# Check if this matches any of our special handlers.
dispatched_response = self.dispatch_non_api_requests(request,
start_response)
if dispatched_response is not None:
return dispatched_response
# Call the service.
try:
return self.call_backend(request, start_response)
except errors.RequestError as error:
return self._handle_request_error(request, error, start_response) | [
"def",
"dispatch",
"(",
"self",
",",
"request",
",",
"start_response",
")",
":",
"# Check if this matches any of our special handlers.",
"dispatched_response",
"=",
"self",
".",
"dispatch_non_api_requests",
"(",
"request",
",",
"start_response",
")",
"if",
"dispatched_response",
"is",
"not",
"None",
":",
"return",
"dispatched_response",
"# Call the service.",
"try",
":",
"return",
"self",
".",
"call_backend",
"(",
"request",
",",
"start_response",
")",
"except",
"errors",
".",
"RequestError",
"as",
"error",
":",
"return",
"self",
".",
"_handle_request_error",
"(",
"request",
",",
"error",
",",
"start_response",
")"
]
| Handles dispatch to apiserver handlers.
This typically ends up calling start_response and returning the entire
body of the response.
Args:
request: An ApiRequest, the request from the user.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the body of the response. | [
"Handles",
"dispatch",
"to",
"apiserver",
"handlers",
"."
]
| python | train |
aamalev/aiohttp_apiset | aiohttp_apiset/swagger/router.py | https://github.com/aamalev/aiohttp_apiset/blob/ba3492ce929e39be1325d506b727a8bfb34e7b33/aiohttp_apiset/swagger/router.py#L149-L181 | def _handler_swagger_ui(self, request, spec, version):
"""
---
parameters:
- name: spec
in: query
type: string
- name: version
in: query
type: integer
enum: [2,3]
"""
version = version or self._version_ui
if self._spec_url:
spec_url = self._spec_url
else:
spec_url = request.url.with_path(self['swagger:spec'].url())
proto = request.headers.get(hdrs.X_FORWARDED_PROTO)
if proto:
spec_url = spec_url.with_scheme(proto)
if isinstance(spec, str):
spec_url = spec_url.with_query(spec=spec)
elif len(self._swagger_data) == 1:
for basePath in self._swagger_data:
spec_url = spec_url.with_query(spec=basePath)
else:
spec_url = spec_url.with_query(spec='/')
spec_url = spec_url.human_repr()
return web.Response(
text=ui.rend_template(spec_url,
prefix=self._swagger_ui,
version=version),
content_type='text/html') | [
"def",
"_handler_swagger_ui",
"(",
"self",
",",
"request",
",",
"spec",
",",
"version",
")",
":",
"version",
"=",
"version",
"or",
"self",
".",
"_version_ui",
"if",
"self",
".",
"_spec_url",
":",
"spec_url",
"=",
"self",
".",
"_spec_url",
"else",
":",
"spec_url",
"=",
"request",
".",
"url",
".",
"with_path",
"(",
"self",
"[",
"'swagger:spec'",
"]",
".",
"url",
"(",
")",
")",
"proto",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"hdrs",
".",
"X_FORWARDED_PROTO",
")",
"if",
"proto",
":",
"spec_url",
"=",
"spec_url",
".",
"with_scheme",
"(",
"proto",
")",
"if",
"isinstance",
"(",
"spec",
",",
"str",
")",
":",
"spec_url",
"=",
"spec_url",
".",
"with_query",
"(",
"spec",
"=",
"spec",
")",
"elif",
"len",
"(",
"self",
".",
"_swagger_data",
")",
"==",
"1",
":",
"for",
"basePath",
"in",
"self",
".",
"_swagger_data",
":",
"spec_url",
"=",
"spec_url",
".",
"with_query",
"(",
"spec",
"=",
"basePath",
")",
"else",
":",
"spec_url",
"=",
"spec_url",
".",
"with_query",
"(",
"spec",
"=",
"'/'",
")",
"spec_url",
"=",
"spec_url",
".",
"human_repr",
"(",
")",
"return",
"web",
".",
"Response",
"(",
"text",
"=",
"ui",
".",
"rend_template",
"(",
"spec_url",
",",
"prefix",
"=",
"self",
".",
"_swagger_ui",
",",
"version",
"=",
"version",
")",
",",
"content_type",
"=",
"'text/html'",
")"
]
| ---
parameters:
- name: spec
in: query
type: string
- name: version
in: query
type: integer
enum: [2,3] | [
"---",
"parameters",
":",
"-",
"name",
":",
"spec",
"in",
":",
"query",
"type",
":",
"string",
"-",
"name",
":",
"version",
"in",
":",
"query",
"type",
":",
"integer",
"enum",
":",
"[",
"2",
"3",
"]"
]
| python | train |
portfors-lab/sparkle | sparkle/run/acquisition_manager.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L214-L221 | def set_threshold(self, threshold):
"""Sets spike detection threshold
:param threshold: electrical potential to determine spikes (V)
:type threshold: float
"""
self.explorer.set_threshold(threshold)
self.protocoler.set_threshold(threshold) | [
"def",
"set_threshold",
"(",
"self",
",",
"threshold",
")",
":",
"self",
".",
"explorer",
".",
"set_threshold",
"(",
"threshold",
")",
"self",
".",
"protocoler",
".",
"set_threshold",
"(",
"threshold",
")"
]
| Sets spike detection threshold
:param threshold: electrical potential to determine spikes (V)
:type threshold: float | [
"Sets",
"spike",
"detection",
"threshold"
]
| python | train |
nmdp-bioinformatics/SeqAnn | seqann/sequence_annotation.py | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/sequence_annotation.py#L622-L1122 | def ref_align(self, found_seqs, sequence: Seq=None,
locus: str=None, annotation: Annotation=None,
partial_ann: Annotation=None,
run: int=0,
cutoff: float=.90) -> Annotation:
"""
ref_align - Method for doing targeted alignments on partial annotations
:param found_seqs: The input sequence record.
:type found_seqs: Seq
:param sequence: The input sequence record.
:type sequence: Seq
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param annotation: The incomplete annotation from a previous iteration.
:type annotation: :ref:`ann`
:param partial_ann: The partial annotation after looping through all of the blast sequences.
:type partial_ann: :ref:`ann`
:rtype: :ref:`ann`
"""
if annotation and isinstance(annotation, Annotation):
if 0 in annotation.mapping \
and not isinstance(annotation.mapping[0], int):
ft = annotation.mapping[0]
start_order = self.refdata.structures[locus][ft]
else:
start_order = 0
# Check whether it's exons only
exon_only = True
if hasattr(annotation, 'annotation') and annotation.annotation:
for f in annotation.annotation:
if re.search("intron", f) or re.search("UTR", f):
exon_only = False
elif(len(sequence.seq) > 900):
exon_only = False
annoated = []
if hasattr(annotation, 'annotation') and annotation.annotation:
annoated = list(annotation.annotation.keys())
# Extract the missing blocks and
# only align those blocks to the known
# missing features
# Start with all blocks missing
# and then delete block if it is found
tmp_missing = []
missing_blocks = annotation.blocks
for b in sorted(annotation.blocks):
# **** Check if block equals full input sequence *** #
# - If it does, then just align the full sequence
start = b[0]-1 if b[0] != 0 else 0
seq_feat = \
SeqFeature(
FeatureLocation(
ExactPosition(start),
ExactPosition(b[len(b)-1]),
strand=1),
type="unmapped")
feat = seq_feat.extract(annotation.seq)
combosrecs, exons, fullrec = self._refseqs(locus,
start,
annotation,
feat,
b)
# Print out different sequence types being align
if self.verbose and self.verbosity > 3:
for combseqr in combosrecs:
self.logger.info(self.logname + " " + combseqr.id)
ic = 0
# Looping through the generated combos
# from the reference sequence. (ex. intron1-exon1)
for combseqr in combosrecs:
if self.verbose:
self.logger.info(self.logname + " aligning ->" + combseqr.id)
# Running the alignment
an, ins, dels = align_seqs(combseqr, feat, locus,
start,
annotation.missing,
len(annoated),
cutoff=cutoff,
verbose=self.align_verbose,
verbosity=self.align_verbosity)
# Checking if any features were mapped
mapped_feat = list(an.annotation.keys())
if len(mapped_feat) >= 1:
# loop through the annotated features
for f in an.annotation:
f_order = self.refdata.structures[locus][f]
# Only map features if they are in the correct order
if f in annotation.missing \
and f_order >= start_order:
# * HARD CODED LOGIC *
# Only accept the alignments if they are
# somewhat similar to what's been observed already
length, lengthsd = 0, 0
length = float(self.refdata.feature_lengths[locus][f][0])
lengthsd = float(self.refdata.feature_lengths[locus][f][1])
incr = 3 if not is_classII(locus) else 4
max_length = length + (lengthsd*incr) + ins
min_length = length - (lengthsd*incr) - dels
if f == "exon_8" and not is_classII(f):
max_length = 10
# Check ordering when it's only exon sequences
if exon_only:
f_order = self.refdata.structures[locus][f]
endp = an.features[f].location.end + 1
                                    # Make sure the order of alignment makes sense
if an.features[f].location.start == 0 \
and f != "five_prime_UTR" \
and not isexon(f):
del an.features[f]
continue
if endp in annotation.mapping and not isinstance(annotation.mapping[endp], int):
mf = annotation.mapping[endp]
expected_order = f_order + 1
expected_order2 = f_order + 2
if expected_order != self.refdata.structures[locus][mf] and expected_order2 != self.refdata.structures[locus][mf]:
self.logger.info(self.logname + " out of order1 -> " + mf + " " + f)
del an.features[f]
continue
startp = an.features[f].location.start - 1
if startp in annotation.mapping and not isinstance(annotation.mapping[startp], int):
mf = annotation.mapping[startp]
expected_order = f_order - 1
expected_order2 = f_order - 2
if expected_order != self.refdata.structures[locus][mf] and expected_order2 != self.refdata.structures[locus][mf]:
self.logger.info(self.logname + " out of order2 -> " + mf + " " + f)
del an.features[f]
continue
else:
                                    ## Make sure the order of alignment makes sense
if an.features[f].location.start == 0 \
and f != "five_prime_UTR" \
and 'three_prime_UTR' in annotation.annotation:
del an.features[f]
continue
f_order = self.refdata.structures[locus][f]
endp = an.features[f].location.end + 1
if endp in annotation.mapping and not isinstance(annotation.mapping[endp], int):
mf = annotation.mapping[endp]
expected_order = f_order + 1
if expected_order != self.refdata.structures[locus][mf]:
self.logger.info(self.logname + " out of order12 -> " + mf + " " + f)
del an.features[f]
continue
startp = an.features[f].location.start - 1
if startp in annotation.mapping and not isinstance(annotation.mapping[startp], int):
mf = annotation.mapping[startp]
expected_order = f_order - 1
if expected_order != self.refdata.structures[locus][mf]:
self.logger.info(self.logname + " out of order22 -> " + mf + " " + f)
del an.features[f]
continue
if self.verbose and self.verbosity > 0:
sl = str(len(an.annotation[f]))
self.logger.info(self.logname + " " + locus
+ " " + f
+ " len = " + sl
+ " | max = "
+ str(max_length)
+ " | min = "
+ str(min_length))
if len(an.annotation[f]) <= max_length:
self.logger.info(self.logname
+ " " + locus
+ " " + f
+ " " + sl + " <= "
+ str(max_length))
else:
self.logger.info(self.logname
+ " " + locus
+ " " + f
+ " " + sl + " ! <= !"
+ str(max_length))
if len(an.annotation[f]) >= min_length:
self.logger.info(self.logname
+ " " + locus
+ " " + f
+ " " + sl + " >= "
+ str(min_length))
else:
self.logger.info(self.logname + " "
+ locus
+ " " + f
+ " " + sl + " ! >= !"
+ str(min_length))
# Update the original annotation
# with the returned annotation
if(len(an.annotation[f]) <= max_length and
len(an.annotation[f]) >= min_length):
if self.verbose and self.verbosity > 0:
self.logger.info(self.logname
+ " Annotated " + f
+ " with clustalo using " +
combseqr.id)
self.logger.info(self.logname
+ " Coordinates for " + f
+ str(an.features[f].location.start)
+ " - " + str(an.features[f].location.end)
)
if annotation.annotation:
annotation.annotation.update({f:
an.annotation[f]
})
annotation.features.update({f:
an.features[f]
})
else:
annotation.annotation = {}
annotation.annotation.update({f:
an.annotation[f]
})
annotation.features.update({f:
an.features[f]
})
if f in annotation.refmissing:
i = annotation.refmissing.index(f)
del annotation.refmissing[i]
if f in annotation.missing:
del annotation.missing[f]
#if b in annotation.blocks:
# del annotation.blocks[annotation.blocks.index(b)]
# print(annotation.blocks)
# if an.blocks:
# #print("PARTIAL BLOCK")
# #print(an.blocks)
# if b in missing_blocks:
# del missing_blocks[missing_blocks.index(b)]
# if self.verbose and self.verbosity > 0:
# self.logger.info(self.logname
# + " Part of block mapped")
# else:
# if self.verbose and self.verbosity > 0:
# self.logger.info(self.logname
# + " All blocks mapped")
# del annotation.blocks[annotation.blocks.index(b)]
# if b in missing_blocks:
# del missing_blocks[missing_blocks.index(b)]
else:
self.logger.info(self.logname + " FAILED HERE!!!!!!!!!")
#elif b not in mbtmp and b in missing_blocks:
# mbtmp.append(b)
else:
self.logger.info(self.logname + " OUT OF ORDER !!!!!!!!!!!!!!!!!!!")
# Update the coordinates
coordinates = dict(map(lambda x: [x, 1], [i for i in range(0, len(sequence.seq)+1)]))
for f in annotation.features:
s = annotation.features[f].location.start
e = annotation.features[f].location.end
if s != 0:
s += 1
e += 1
else:
e += 1
for i in range(s, e):
annotation.mapping[i] = f
if i in coordinates:
del coordinates[i]
# Get any remaining blocks after updated
# annotation
blocks = getblocks(coordinates)
annotation.blocks = blocks
# Check to see if that annotation is complete
annotation.check_annotation()
if annotation.complete_annotation:
if self.verbose:
self.logger.info(self.logname
+ " Completed annotation"
+ " with targeted ref_align")
return annotation
else:
if an.features:
# for f in an.features:
# f_order = self.refdata.structures[locus][f]
# # Only add features if they are after the
# # first feature mapped
# if f_order >= start_order and f not in annotation.features \
# and f in annotation.annotation:
# annotation.features[f] = an.features[f]
# Rerunning seqsearch with
# new annotation from alignment
tmpann = self.seqsearch.search_seqs(found_seqs,
sequence,
locus,
partial_ann=annotation,
run=run)
if tmpann.complete_annotation:
for f in tmpann.annotation:
if f not in annotation.annotation:
annotation.annotation[f] = tmpann.annotation[f]
if self.verbose:
self.logger.info(self.logname
+ " Completed annotation"
+ " with targeted ref_align and seqsearch!")
return tmpann
annotation = tmpann
ic += 1
# Has to be missing exons
exons_n = 0
for f in annotation.missing:
if re.search("intron", f) or re.search("UTR", f):
exons_n += 1
# Run exon only alignment
if len(exons.seq) >= 4 and exons_n > 0:
exonan, ins, dels = align_seqs(exons, feat, locus, start,
annotation.missing,
                                                   len(annotated),
cutoff=cutoff,
verbose=self.align_verbose,
verbosity=self.align_verbosity)
mapped_exons = list(exonan.annotation.keys())
if len(mapped_exons) >= 1:
if self.verbose:
self.logger.info(self.logname
+ " Annotated exons with align")
for f in exonan.annotation:
if self.verbose and self.verbosity > 0:
self.logger.info(self.logname
+ " Annotated "
+ f + " len = "
+ str(len(exonan
.annotation[f])))
annotation.annotation.update({f: exonan.annotation[f]})
annotation.features.update({f: exonan.features[f]})
coordinates = dict(map(lambda x: [x, 1], [i for i in range(0, len(sequence.seq)+1)]))
for f in annotation.features:
s = annotation.features[f].location.start
e = annotation.features[f].location.end
if s != 0:
s += 1
e += 1
else:
e += 1
for i in range(s, e):
annotation.mapping[i] = f
if i in coordinates:
del coordinates[i]
blocks = getblocks(coordinates)
annotation.blocks = blocks
annotation.check_annotation()
if annotation.complete_annotation:
if self.verbose:
self.logger.info(self.logname + " Completed annotation with targeted exons ref_align")
return annotation
return annotation
elif partial_ann:
            annotated = []
if hasattr(partial_ann, 'annotation') and partial_ann.annotation:
                annotated = list(partial_ann.annotation.keys())
# Do full sequence alignments
            # and only extract out the part
# that couldn't be explained from above
if 0 in partial_ann.mapping \
and not isinstance(partial_ann.mapping[0], int):
ft = partial_ann.mapping[0]
start_order = self.refdata.structures[locus][ft]
else:
start_order = 0
# Extract the missing blocks and
# only align those blocks to the known
# missing features
# Start with all blocks missing
# and then delete block if it is found
tmp_missing = []
missing_blocks = partial_ann.blocks
for b in sorted(partial_ann.blocks):
# **** Check if block equals full input sequence *** #
                # - If it does, then just align the full sequence
start = b[0]-1 if b[0] != 0 else 0
seq_feat = \
SeqFeature(
FeatureLocation(
ExactPosition(start),
ExactPosition(b[len(b)-1]),
strand=1),
type="unmapped")
feat = seq_feat.extract(partial_ann.seq)
combosrecs, exons, fullrec = self._refseqs(locus,
start,
partial_ann,
feat,
b)
if len(fullrec.seq) >= 4:
fullref, ins, dels = align_seqs(fullrec, feat,
locus, start,
partial_ann.missing,
                                                    len(annotated),
cutoff=cutoff,
verbose=self.align_verbose,
verbosity=self.align_verbosity)
if hasattr(fullref, 'features') and fullref.features:
mapped_full = list(fullref.annotation.keys())
if len(mapped_full) >= 1:
if self.verbose:
self.logger.info(self.logname
+ " Annotated fullrec"
+ " with clustalo")
# If it wasn't found
del missing_blocks[missing_blocks.index(b)]
for f in fullref.annotation:
if self.verbose and self.verbosity > 0:
self.logger.info(self.logname + " Annotated " + f + " len = " + str(len(fullref.annotation[f])))
partial_ann.annotation.update({f: fullref.annotation[f]})
if b in missing_blocks:
del missing_blocks[missing_blocks.index(b)]
else:
for bm in tmp_missing:
if bm in missing_blocks:
del missing_blocks[missing_blocks.index(bm)]
for f in fullref.features:
f_order = self.refdata.structures[locus][f]
# Only add features if they are after the
# first feature mapped
if f_order >= start_order and f not in partial_ann.features \
and f in partial_ann.annotation:
partial_ann.features[f] = fullref.features[f]
coordinates = dict(map(lambda x: [x, 1], [i for i in range(0, len(sequence.seq)+1)]))
for f in partial_ann.features:
s = partial_ann.features[f].location.start
e = partial_ann.features[f].location.end
if s != 0:
s += 1
e += 1
else:
e += 1
for i in range(s, e):
partial_ann.mapping[i] = f
if i in coordinates:
del coordinates[i]
blocks = getblocks(coordinates)
partial_ann.check_annotation()
if partial_ann.complete_annotation:
if self.verbose:
self.logger.info(self.logname + " Annotated all features with clustalo")
return partial_ann
if self.verbose:
self.logger.info(self.logname
+ " Failed to annotate features")
return '' | [
"def",
"ref_align",
"(",
"self",
",",
"found_seqs",
",",
"sequence",
":",
"Seq",
"=",
"None",
",",
"locus",
":",
"str",
"=",
"None",
",",
"annotation",
":",
"Annotation",
"=",
"None",
",",
"partial_ann",
":",
"Annotation",
"=",
"None",
",",
"run",
":",
"int",
"=",
"0",
",",
"cutoff",
":",
"float",
"=",
".90",
")",
"->",
"Annotation",
":",
"if",
"annotation",
"and",
"isinstance",
"(",
"annotation",
",",
"Annotation",
")",
":",
"if",
"0",
"in",
"annotation",
".",
"mapping",
"and",
"not",
"isinstance",
"(",
"annotation",
".",
"mapping",
"[",
"0",
"]",
",",
"int",
")",
":",
"ft",
"=",
"annotation",
".",
"mapping",
"[",
"0",
"]",
"start_order",
"=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"ft",
"]",
"else",
":",
"start_order",
"=",
"0",
"# Check whether it's exons only",
"exon_only",
"=",
"True",
"if",
"hasattr",
"(",
"annotation",
",",
"'annotation'",
")",
"and",
"annotation",
".",
"annotation",
":",
"for",
"f",
"in",
"annotation",
".",
"annotation",
":",
"if",
"re",
".",
"search",
"(",
"\"intron\"",
",",
"f",
")",
"or",
"re",
".",
"search",
"(",
"\"UTR\"",
",",
"f",
")",
":",
"exon_only",
"=",
"False",
"elif",
"(",
"len",
"(",
"sequence",
".",
"seq",
")",
">",
"900",
")",
":",
"exon_only",
"=",
"False",
"annoated",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"annotation",
",",
"'annotation'",
")",
"and",
"annotation",
".",
"annotation",
":",
"annoated",
"=",
"list",
"(",
"annotation",
".",
"annotation",
".",
"keys",
"(",
")",
")",
"# Extract the missing blocks and",
"# only align those blocks to the known",
"# missing features",
"# Start with all blocks missing",
"# and then delete block if it is found",
"tmp_missing",
"=",
"[",
"]",
"missing_blocks",
"=",
"annotation",
".",
"blocks",
"for",
"b",
"in",
"sorted",
"(",
"annotation",
".",
"blocks",
")",
":",
"# **** Check if block equals full input sequence *** #",
"# - If it does, then just align the ful",
"start",
"=",
"b",
"[",
"0",
"]",
"-",
"1",
"if",
"b",
"[",
"0",
"]",
"!=",
"0",
"else",
"0",
"seq_feat",
"=",
"SeqFeature",
"(",
"FeatureLocation",
"(",
"ExactPosition",
"(",
"start",
")",
",",
"ExactPosition",
"(",
"b",
"[",
"len",
"(",
"b",
")",
"-",
"1",
"]",
")",
",",
"strand",
"=",
"1",
")",
",",
"type",
"=",
"\"unmapped\"",
")",
"feat",
"=",
"seq_feat",
".",
"extract",
"(",
"annotation",
".",
"seq",
")",
"combosrecs",
",",
"exons",
",",
"fullrec",
"=",
"self",
".",
"_refseqs",
"(",
"locus",
",",
"start",
",",
"annotation",
",",
"feat",
",",
"b",
")",
"# Print out different sequence types being align",
"if",
"self",
".",
"verbose",
"and",
"self",
".",
"verbosity",
">",
"3",
":",
"for",
"combseqr",
"in",
"combosrecs",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" \"",
"+",
"combseqr",
".",
"id",
")",
"ic",
"=",
"0",
"# Looping through the generated combos",
"# from the reference sequence. (ex. intron1-exon1)",
"for",
"combseqr",
"in",
"combosrecs",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" aligning ->\"",
"+",
"combseqr",
".",
"id",
")",
"# Running the alignment",
"an",
",",
"ins",
",",
"dels",
"=",
"align_seqs",
"(",
"combseqr",
",",
"feat",
",",
"locus",
",",
"start",
",",
"annotation",
".",
"missing",
",",
"len",
"(",
"annoated",
")",
",",
"cutoff",
"=",
"cutoff",
",",
"verbose",
"=",
"self",
".",
"align_verbose",
",",
"verbosity",
"=",
"self",
".",
"align_verbosity",
")",
"# Checking if any features were mapped",
"mapped_feat",
"=",
"list",
"(",
"an",
".",
"annotation",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"mapped_feat",
")",
">=",
"1",
":",
"# loop through the annotated features",
"for",
"f",
"in",
"an",
".",
"annotation",
":",
"f_order",
"=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"f",
"]",
"# Only map features if they are in the correct order",
"if",
"f",
"in",
"annotation",
".",
"missing",
"and",
"f_order",
">=",
"start_order",
":",
"# * HARD CODED LOGIC *",
"# Only accept the alignments if they are",
"# somewhat similar to what's been observed already",
"length",
",",
"lengthsd",
"=",
"0",
",",
"0",
"length",
"=",
"float",
"(",
"self",
".",
"refdata",
".",
"feature_lengths",
"[",
"locus",
"]",
"[",
"f",
"]",
"[",
"0",
"]",
")",
"lengthsd",
"=",
"float",
"(",
"self",
".",
"refdata",
".",
"feature_lengths",
"[",
"locus",
"]",
"[",
"f",
"]",
"[",
"1",
"]",
")",
"incr",
"=",
"3",
"if",
"not",
"is_classII",
"(",
"locus",
")",
"else",
"4",
"max_length",
"=",
"length",
"+",
"(",
"lengthsd",
"*",
"incr",
")",
"+",
"ins",
"min_length",
"=",
"length",
"-",
"(",
"lengthsd",
"*",
"incr",
")",
"-",
"dels",
"if",
"f",
"==",
"\"exon_8\"",
"and",
"not",
"is_classII",
"(",
"f",
")",
":",
"max_length",
"=",
"10",
"# Check ordering when it's only exon sequences",
"if",
"exon_only",
":",
"f_order",
"=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"f",
"]",
"endp",
"=",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"end",
"+",
"1",
"#Make sure order of alignment make sense",
"if",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"==",
"0",
"and",
"f",
"!=",
"\"five_prime_UTR\"",
"and",
"not",
"isexon",
"(",
"f",
")",
":",
"del",
"an",
".",
"features",
"[",
"f",
"]",
"continue",
"if",
"endp",
"in",
"annotation",
".",
"mapping",
"and",
"not",
"isinstance",
"(",
"annotation",
".",
"mapping",
"[",
"endp",
"]",
",",
"int",
")",
":",
"mf",
"=",
"annotation",
".",
"mapping",
"[",
"endp",
"]",
"expected_order",
"=",
"f_order",
"+",
"1",
"expected_order2",
"=",
"f_order",
"+",
"2",
"if",
"expected_order",
"!=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"mf",
"]",
"and",
"expected_order2",
"!=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"mf",
"]",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" out of order1 -> \"",
"+",
"mf",
"+",
"\" \"",
"+",
"f",
")",
"del",
"an",
".",
"features",
"[",
"f",
"]",
"continue",
"startp",
"=",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"-",
"1",
"if",
"startp",
"in",
"annotation",
".",
"mapping",
"and",
"not",
"isinstance",
"(",
"annotation",
".",
"mapping",
"[",
"startp",
"]",
",",
"int",
")",
":",
"mf",
"=",
"annotation",
".",
"mapping",
"[",
"startp",
"]",
"expected_order",
"=",
"f_order",
"-",
"1",
"expected_order2",
"=",
"f_order",
"-",
"2",
"if",
"expected_order",
"!=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"mf",
"]",
"and",
"expected_order2",
"!=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"mf",
"]",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" out of order2 -> \"",
"+",
"mf",
"+",
"\" \"",
"+",
"f",
")",
"del",
"an",
".",
"features",
"[",
"f",
"]",
"continue",
"else",
":",
"##Make sure order of alignment make sense",
"if",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"==",
"0",
"and",
"f",
"!=",
"\"five_prime_UTR\"",
"and",
"'three_prime_UTR'",
"in",
"annotation",
".",
"annotation",
":",
"del",
"an",
".",
"features",
"[",
"f",
"]",
"continue",
"f_order",
"=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"f",
"]",
"endp",
"=",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"end",
"+",
"1",
"if",
"endp",
"in",
"annotation",
".",
"mapping",
"and",
"not",
"isinstance",
"(",
"annotation",
".",
"mapping",
"[",
"endp",
"]",
",",
"int",
")",
":",
"mf",
"=",
"annotation",
".",
"mapping",
"[",
"endp",
"]",
"expected_order",
"=",
"f_order",
"+",
"1",
"if",
"expected_order",
"!=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"mf",
"]",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" out of order12 -> \"",
"+",
"mf",
"+",
"\" \"",
"+",
"f",
")",
"del",
"an",
".",
"features",
"[",
"f",
"]",
"continue",
"startp",
"=",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"-",
"1",
"if",
"startp",
"in",
"annotation",
".",
"mapping",
"and",
"not",
"isinstance",
"(",
"annotation",
".",
"mapping",
"[",
"startp",
"]",
",",
"int",
")",
":",
"mf",
"=",
"annotation",
".",
"mapping",
"[",
"startp",
"]",
"expected_order",
"=",
"f_order",
"-",
"1",
"if",
"expected_order",
"!=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"mf",
"]",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" out of order22 -> \"",
"+",
"mf",
"+",
"\" \"",
"+",
"f",
")",
"del",
"an",
".",
"features",
"[",
"f",
"]",
"continue",
"if",
"self",
".",
"verbose",
"and",
"self",
".",
"verbosity",
">",
"0",
":",
"sl",
"=",
"str",
"(",
"len",
"(",
"an",
".",
"annotation",
"[",
"f",
"]",
")",
")",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" \"",
"+",
"locus",
"+",
"\" \"",
"+",
"f",
"+",
"\" len = \"",
"+",
"sl",
"+",
"\" | max = \"",
"+",
"str",
"(",
"max_length",
")",
"+",
"\" | min = \"",
"+",
"str",
"(",
"min_length",
")",
")",
"if",
"len",
"(",
"an",
".",
"annotation",
"[",
"f",
"]",
")",
"<=",
"max_length",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" \"",
"+",
"locus",
"+",
"\" \"",
"+",
"f",
"+",
"\" \"",
"+",
"sl",
"+",
"\" <= \"",
"+",
"str",
"(",
"max_length",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" \"",
"+",
"locus",
"+",
"\" \"",
"+",
"f",
"+",
"\" \"",
"+",
"sl",
"+",
"\" ! <= !\"",
"+",
"str",
"(",
"max_length",
")",
")",
"if",
"len",
"(",
"an",
".",
"annotation",
"[",
"f",
"]",
")",
">=",
"min_length",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" \"",
"+",
"locus",
"+",
"\" \"",
"+",
"f",
"+",
"\" \"",
"+",
"sl",
"+",
"\" >= \"",
"+",
"str",
"(",
"min_length",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" \"",
"+",
"locus",
"+",
"\" \"",
"+",
"f",
"+",
"\" \"",
"+",
"sl",
"+",
"\" ! >= !\"",
"+",
"str",
"(",
"min_length",
")",
")",
"# Update the original annotation",
"# with the returned annotation",
"if",
"(",
"len",
"(",
"an",
".",
"annotation",
"[",
"f",
"]",
")",
"<=",
"max_length",
"and",
"len",
"(",
"an",
".",
"annotation",
"[",
"f",
"]",
")",
">=",
"min_length",
")",
":",
"if",
"self",
".",
"verbose",
"and",
"self",
".",
"verbosity",
">",
"0",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Annotated \"",
"+",
"f",
"+",
"\" with clustalo using \"",
"+",
"combseqr",
".",
"id",
")",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Coordinates for \"",
"+",
"f",
"+",
"str",
"(",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
")",
"+",
"\" - \"",
"+",
"str",
"(",
"an",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"end",
")",
")",
"if",
"annotation",
".",
"annotation",
":",
"annotation",
".",
"annotation",
".",
"update",
"(",
"{",
"f",
":",
"an",
".",
"annotation",
"[",
"f",
"]",
"}",
")",
"annotation",
".",
"features",
".",
"update",
"(",
"{",
"f",
":",
"an",
".",
"features",
"[",
"f",
"]",
"}",
")",
"else",
":",
"annotation",
".",
"annotation",
"=",
"{",
"}",
"annotation",
".",
"annotation",
".",
"update",
"(",
"{",
"f",
":",
"an",
".",
"annotation",
"[",
"f",
"]",
"}",
")",
"annotation",
".",
"features",
".",
"update",
"(",
"{",
"f",
":",
"an",
".",
"features",
"[",
"f",
"]",
"}",
")",
"if",
"f",
"in",
"annotation",
".",
"refmissing",
":",
"i",
"=",
"annotation",
".",
"refmissing",
".",
"index",
"(",
"f",
")",
"del",
"annotation",
".",
"refmissing",
"[",
"i",
"]",
"if",
"f",
"in",
"annotation",
".",
"missing",
":",
"del",
"annotation",
".",
"missing",
"[",
"f",
"]",
"#if b in annotation.blocks:",
"# del annotation.blocks[annotation.blocks.index(b)]",
"# print(annotation.blocks)",
"# if an.blocks:",
"# #print(\"PARTIAL BLOCK\")",
"# #print(an.blocks)",
"# if b in missing_blocks:",
"# del missing_blocks[missing_blocks.index(b)]",
"# if self.verbose and self.verbosity > 0:",
"# self.logger.info(self.logname",
"# + \" Part of block mapped\")",
"# else:",
"# if self.verbose and self.verbosity > 0:",
"# self.logger.info(self.logname",
"# + \" All blocks mapped\")",
"# del annotation.blocks[annotation.blocks.index(b)]",
"# if b in missing_blocks:",
"# del missing_blocks[missing_blocks.index(b)]",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" FAILED HERE!!!!!!!!!\"",
")",
"#elif b not in mbtmp and b in missing_blocks:",
"# mbtmp.append(b)",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" OUT OF ORDER !!!!!!!!!!!!!!!!!!!\"",
")",
"# Update the coordinates",
"coordinates",
"=",
"dict",
"(",
"map",
"(",
"lambda",
"x",
":",
"[",
"x",
",",
"1",
"]",
",",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sequence",
".",
"seq",
")",
"+",
"1",
")",
"]",
")",
")",
"for",
"f",
"in",
"annotation",
".",
"features",
":",
"s",
"=",
"annotation",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"e",
"=",
"annotation",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"end",
"if",
"s",
"!=",
"0",
":",
"s",
"+=",
"1",
"e",
"+=",
"1",
"else",
":",
"e",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"s",
",",
"e",
")",
":",
"annotation",
".",
"mapping",
"[",
"i",
"]",
"=",
"f",
"if",
"i",
"in",
"coordinates",
":",
"del",
"coordinates",
"[",
"i",
"]",
"# Get any remaining blocks after updated",
"# annotation",
"blocks",
"=",
"getblocks",
"(",
"coordinates",
")",
"annotation",
".",
"blocks",
"=",
"blocks",
"# Check to see if that annotation is complete",
"annotation",
".",
"check_annotation",
"(",
")",
"if",
"annotation",
".",
"complete_annotation",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Completed annotation\"",
"+",
"\" with targeted ref_align\"",
")",
"return",
"annotation",
"else",
":",
"if",
"an",
".",
"features",
":",
"# for f in an.features:",
"# f_order = self.refdata.structures[locus][f]",
"# # Only add features if they are after the",
"# # first feature mapped",
"# if f_order >= start_order and f not in annotation.features \\",
"# and f in annotation.annotation:",
"# annotation.features[f] = an.features[f]",
"# Rerunning seqsearch with",
"# new annotation from alignment",
"tmpann",
"=",
"self",
".",
"seqsearch",
".",
"search_seqs",
"(",
"found_seqs",
",",
"sequence",
",",
"locus",
",",
"partial_ann",
"=",
"annotation",
",",
"run",
"=",
"run",
")",
"if",
"tmpann",
".",
"complete_annotation",
":",
"for",
"f",
"in",
"tmpann",
".",
"annotation",
":",
"if",
"f",
"not",
"in",
"annotation",
".",
"annotation",
":",
"annotation",
".",
"annotation",
"[",
"f",
"]",
"=",
"tmpann",
".",
"annotation",
"[",
"f",
"]",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Completed annotation\"",
"+",
"\" with targeted ref_align and seqsearch!\"",
")",
"return",
"tmpann",
"annotation",
"=",
"tmpann",
"ic",
"+=",
"1",
"# Has to be missing exons",
"exons_n",
"=",
"0",
"for",
"f",
"in",
"annotation",
".",
"missing",
":",
"if",
"re",
".",
"search",
"(",
"\"intron\"",
",",
"f",
")",
"or",
"re",
".",
"search",
"(",
"\"UTR\"",
",",
"f",
")",
":",
"exons_n",
"+=",
"1",
"# Run exon only alignment",
"if",
"len",
"(",
"exons",
".",
"seq",
")",
">=",
"4",
"and",
"exons_n",
">",
"0",
":",
"exonan",
",",
"ins",
",",
"dels",
"=",
"align_seqs",
"(",
"exons",
",",
"feat",
",",
"locus",
",",
"start",
",",
"annotation",
".",
"missing",
",",
"len",
"(",
"annoated",
")",
",",
"cutoff",
"=",
"cutoff",
",",
"verbose",
"=",
"self",
".",
"align_verbose",
",",
"verbosity",
"=",
"self",
".",
"align_verbosity",
")",
"mapped_exons",
"=",
"list",
"(",
"exonan",
".",
"annotation",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"mapped_exons",
")",
">=",
"1",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Annotated exons with align\"",
")",
"for",
"f",
"in",
"exonan",
".",
"annotation",
":",
"if",
"self",
".",
"verbose",
"and",
"self",
".",
"verbosity",
">",
"0",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Annotated \"",
"+",
"f",
"+",
"\" len = \"",
"+",
"str",
"(",
"len",
"(",
"exonan",
".",
"annotation",
"[",
"f",
"]",
")",
")",
")",
"annotation",
".",
"annotation",
".",
"update",
"(",
"{",
"f",
":",
"exonan",
".",
"annotation",
"[",
"f",
"]",
"}",
")",
"annotation",
".",
"features",
".",
"update",
"(",
"{",
"f",
":",
"exonan",
".",
"features",
"[",
"f",
"]",
"}",
")",
"coordinates",
"=",
"dict",
"(",
"map",
"(",
"lambda",
"x",
":",
"[",
"x",
",",
"1",
"]",
",",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sequence",
".",
"seq",
")",
"+",
"1",
")",
"]",
")",
")",
"for",
"f",
"in",
"annotation",
".",
"features",
":",
"s",
"=",
"annotation",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"e",
"=",
"annotation",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"end",
"if",
"s",
"!=",
"0",
":",
"s",
"+=",
"1",
"e",
"+=",
"1",
"else",
":",
"e",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"s",
",",
"e",
")",
":",
"annotation",
".",
"mapping",
"[",
"i",
"]",
"=",
"f",
"if",
"i",
"in",
"coordinates",
":",
"del",
"coordinates",
"[",
"i",
"]",
"blocks",
"=",
"getblocks",
"(",
"coordinates",
")",
"annotation",
".",
"blocks",
"=",
"blocks",
"annotation",
".",
"check_annotation",
"(",
")",
"if",
"annotation",
".",
"complete_annotation",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Completed annotation with targeted exons ref_align\"",
")",
"return",
"annotation",
"return",
"annotation",
"elif",
"partial_ann",
":",
"annoated",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"partial_ann",
",",
"'annotation'",
")",
"and",
"partial_ann",
".",
"annotation",
":",
"annoated",
"=",
"list",
"(",
"partial_ann",
".",
"annotation",
".",
"keys",
"(",
")",
")",
"# Do full sequence alignments",
"# any only extract out the part",
"# that couldn't be explained from above",
"if",
"0",
"in",
"partial_ann",
".",
"mapping",
"and",
"not",
"isinstance",
"(",
"partial_ann",
".",
"mapping",
"[",
"0",
"]",
",",
"int",
")",
":",
"ft",
"=",
"partial_ann",
".",
"mapping",
"[",
"0",
"]",
"start_order",
"=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"ft",
"]",
"else",
":",
"start_order",
"=",
"0",
"# Extract the missing blocks and",
"# only align those blocks to the known",
"# missing features",
"# Start with all blocks missing",
"# and then delete block if it is found",
"tmp_missing",
"=",
"[",
"]",
"missing_blocks",
"=",
"partial_ann",
".",
"blocks",
"for",
"b",
"in",
"sorted",
"(",
"partial_ann",
".",
"blocks",
")",
":",
"# **** Check if block equals full input sequence *** #",
"# - If it does, then just align the ful",
"start",
"=",
"b",
"[",
"0",
"]",
"-",
"1",
"if",
"b",
"[",
"0",
"]",
"!=",
"0",
"else",
"0",
"seq_feat",
"=",
"SeqFeature",
"(",
"FeatureLocation",
"(",
"ExactPosition",
"(",
"start",
")",
",",
"ExactPosition",
"(",
"b",
"[",
"len",
"(",
"b",
")",
"-",
"1",
"]",
")",
",",
"strand",
"=",
"1",
")",
",",
"type",
"=",
"\"unmapped\"",
")",
"feat",
"=",
"seq_feat",
".",
"extract",
"(",
"partial_ann",
".",
"seq",
")",
"combosrecs",
",",
"exons",
",",
"fullrec",
"=",
"self",
".",
"_refseqs",
"(",
"locus",
",",
"start",
",",
"partial_ann",
",",
"feat",
",",
"b",
")",
"if",
"len",
"(",
"fullrec",
".",
"seq",
")",
">=",
"4",
":",
"fullref",
",",
"ins",
",",
"dels",
"=",
"align_seqs",
"(",
"fullrec",
",",
"feat",
",",
"locus",
",",
"start",
",",
"partial_ann",
".",
"missing",
",",
"len",
"(",
"annoated",
")",
",",
"cutoff",
"=",
"cutoff",
",",
"verbose",
"=",
"self",
".",
"align_verbose",
",",
"verbosity",
"=",
"self",
".",
"align_verbosity",
")",
"if",
"hasattr",
"(",
"fullref",
",",
"'features'",
")",
"and",
"fullref",
".",
"features",
":",
"mapped_full",
"=",
"list",
"(",
"fullref",
".",
"annotation",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"mapped_full",
")",
">=",
"1",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Annotated fullrec\"",
"+",
"\" with clustalo\"",
")",
"# If it wasn't found",
"del",
"missing_blocks",
"[",
"missing_blocks",
".",
"index",
"(",
"b",
")",
"]",
"for",
"f",
"in",
"fullref",
".",
"annotation",
":",
"if",
"self",
".",
"verbose",
"and",
"self",
".",
"verbosity",
">",
"0",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Annotated \"",
"+",
"f",
"+",
"\" len = \"",
"+",
"str",
"(",
"len",
"(",
"fullref",
".",
"annotation",
"[",
"f",
"]",
")",
")",
")",
"partial_ann",
".",
"annotation",
".",
"update",
"(",
"{",
"f",
":",
"fullref",
".",
"annotation",
"[",
"f",
"]",
"}",
")",
"if",
"b",
"in",
"missing_blocks",
":",
"del",
"missing_blocks",
"[",
"missing_blocks",
".",
"index",
"(",
"b",
")",
"]",
"else",
":",
"for",
"bm",
"in",
"tmp_missing",
":",
"if",
"bm",
"in",
"missing_blocks",
":",
"del",
"missing_blocks",
"[",
"missing_blocks",
".",
"index",
"(",
"bm",
")",
"]",
"for",
"f",
"in",
"fullref",
".",
"features",
":",
"f_order",
"=",
"self",
".",
"refdata",
".",
"structures",
"[",
"locus",
"]",
"[",
"f",
"]",
"# Only add features if they are after the",
"# first feature mapped",
"if",
"f_order",
">=",
"start_order",
"and",
"f",
"not",
"in",
"partial_ann",
".",
"features",
"and",
"f",
"in",
"partial_ann",
".",
"annotation",
":",
"partial_ann",
".",
"features",
"[",
"f",
"]",
"=",
"fullref",
".",
"features",
"[",
"f",
"]",
"coordinates",
"=",
"dict",
"(",
"map",
"(",
"lambda",
"x",
":",
"[",
"x",
",",
"1",
"]",
",",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sequence",
".",
"seq",
")",
"+",
"1",
")",
"]",
")",
")",
"for",
"f",
"in",
"partial_ann",
".",
"features",
":",
"s",
"=",
"partial_ann",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"start",
"e",
"=",
"partial_ann",
".",
"features",
"[",
"f",
"]",
".",
"location",
".",
"end",
"if",
"s",
"!=",
"0",
":",
"s",
"+=",
"1",
"e",
"+=",
"1",
"else",
":",
"e",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"s",
",",
"e",
")",
":",
"partial_ann",
".",
"mapping",
"[",
"i",
"]",
"=",
"f",
"if",
"i",
"in",
"coordinates",
":",
"del",
"coordinates",
"[",
"i",
"]",
"blocks",
"=",
"getblocks",
"(",
"coordinates",
")",
"partial_ann",
".",
"check_annotation",
"(",
")",
"if",
"partial_ann",
".",
"complete_annotation",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Annotated all features with clustalo\"",
")",
"return",
"partial_ann",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"logname",
"+",
"\" Failed to annotate features\"",
")",
"return",
"''"
]
| ref_align - Method for doing targeted alignments on partial annotations
:param found_seqs: The related sequences found by the BLAST search.
:type found_seqs: Seq
:param sequence: The input sequence record.
:type sequence: Seq
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param annotation: The incomplete annotation from a previous iteration.
:type annotation: :ref:`ann`
:param partial_ann: The partial annotation after looping through all of the blast sequences.
:type partial_ann: :ref:`ann`
:rtype: :ref:`ann` | [
"ref_align",
"-",
"Method",
"for",
"doing",
"targeted",
"alignments",
"on",
"partial",
"annotations"
]
| python | train |
bitshares/uptick | uptick/votes.py | https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/votes.py#L34-L107 | def votes(ctx, account, type):
""" List accounts vesting balances
"""
if not isinstance(type, (list, tuple)):
type = [type]
account = Account(account, full=True)
ret = {key: list() for key in Vote.types()}
for vote in account["votes"]:
t = Vote.vote_type_from_id(vote["id"])
ret[t].append(vote)
t = [["id", "url", "account"]]
for vote in ret["committee"]:
t.append(
[vote["id"], vote["url"], Account(vote["committee_member_account"])["name"]]
)
if "committee" in type:
t = [["id", "url", "account", "votes"]]
for vote in ret["committee"]:
t.append(
[
vote["id"],
vote["url"],
Account(vote["committee_member_account"])["name"],
str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
]
)
print_table(t)
if "witness" in type:
t = [
[
"id",
"account",
"url",
"votes",
"last_confirmed_block_num",
"total_missed",
"westing",
]
]
for vote in ret["witness"]:
t.append(
[
vote["id"],
Account(vote["witness_account"])["name"],
vote["url"],
str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
vote["last_confirmed_block_num"],
vote["total_missed"],
str(Vesting(vote.get("pay_vb")).claimable)
if vote.get("pay_vb")
else "",
]
)
print_table(t)
if "worker" in type:
t = [["id", "name/url", "daily_pay", "votes", "time", "account"]]
for vote in ret["worker"]:
votes = Amount({"amount": vote["total_votes_for"], "asset_id": "1.3.0"})
amount = Amount({"amount": vote["daily_pay"], "asset_id": "1.3.0"})
t.append(
[
vote["id"],
"{name}\n{url}".format(**vote),
str(amount),
str(votes),
"{work_begin_date}\n-\n{work_end_date}".format(**vote),
str(Account(vote["worker_account"])["name"]),
]
)
print_table(t) | [
"def",
"votes",
"(",
"ctx",
",",
"account",
",",
"type",
")",
":",
"if",
"not",
"isinstance",
"(",
"type",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"type",
"=",
"[",
"type",
"]",
"account",
"=",
"Account",
"(",
"account",
",",
"full",
"=",
"True",
")",
"ret",
"=",
"{",
"key",
":",
"list",
"(",
")",
"for",
"key",
"in",
"Vote",
".",
"types",
"(",
")",
"}",
"for",
"vote",
"in",
"account",
"[",
"\"votes\"",
"]",
":",
"t",
"=",
"Vote",
".",
"vote_type_from_id",
"(",
"vote",
"[",
"\"id\"",
"]",
")",
"ret",
"[",
"t",
"]",
".",
"append",
"(",
"vote",
")",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"url\"",
",",
"\"account\"",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"committee\"",
"]",
":",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"vote",
"[",
"\"url\"",
"]",
",",
"Account",
"(",
"vote",
"[",
"\"committee_member_account\"",
"]",
")",
"[",
"\"name\"",
"]",
"]",
")",
"if",
"\"committee\"",
"in",
"type",
":",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"url\"",
",",
"\"account\"",
",",
"\"votes\"",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"committee\"",
"]",
":",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"vote",
"[",
"\"url\"",
"]",
",",
"Account",
"(",
"vote",
"[",
"\"committee_member_account\"",
"]",
")",
"[",
"\"name\"",
"]",
",",
"str",
"(",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"total_votes\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
")",
",",
"]",
")",
"print_table",
"(",
"t",
")",
"if",
"\"witness\"",
"in",
"type",
":",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"account\"",
",",
"\"url\"",
",",
"\"votes\"",
",",
"\"last_confirmed_block_num\"",
",",
"\"total_missed\"",
",",
"\"westing\"",
",",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"witness\"",
"]",
":",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"Account",
"(",
"vote",
"[",
"\"witness_account\"",
"]",
")",
"[",
"\"name\"",
"]",
",",
"vote",
"[",
"\"url\"",
"]",
",",
"str",
"(",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"total_votes\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
")",
",",
"vote",
"[",
"\"last_confirmed_block_num\"",
"]",
",",
"vote",
"[",
"\"total_missed\"",
"]",
",",
"str",
"(",
"Vesting",
"(",
"vote",
".",
"get",
"(",
"\"pay_vb\"",
")",
")",
".",
"claimable",
")",
"if",
"vote",
".",
"get",
"(",
"\"pay_vb\"",
")",
"else",
"\"\"",
",",
"]",
")",
"print_table",
"(",
"t",
")",
"if",
"\"worker\"",
"in",
"type",
":",
"t",
"=",
"[",
"[",
"\"id\"",
",",
"\"name/url\"",
",",
"\"daily_pay\"",
",",
"\"votes\"",
",",
"\"time\"",
",",
"\"account\"",
"]",
"]",
"for",
"vote",
"in",
"ret",
"[",
"\"worker\"",
"]",
":",
"votes",
"=",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"total_votes_for\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
"amount",
"=",
"Amount",
"(",
"{",
"\"amount\"",
":",
"vote",
"[",
"\"daily_pay\"",
"]",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
")",
"t",
".",
"append",
"(",
"[",
"vote",
"[",
"\"id\"",
"]",
",",
"\"{name}\\n{url}\"",
".",
"format",
"(",
"*",
"*",
"vote",
")",
",",
"str",
"(",
"amount",
")",
",",
"str",
"(",
"votes",
")",
",",
"\"{work_begin_date}\\n-\\n{work_end_date}\"",
".",
"format",
"(",
"*",
"*",
"vote",
")",
",",
"str",
"(",
"Account",
"(",
"vote",
"[",
"\"worker_account\"",
"]",
")",
"[",
"\"name\"",
"]",
")",
",",
"]",
")",
"print_table",
"(",
"t",
")"
]
| List the votes of an account | [
"List",
"the",
"votes",
"of",
"an",
"account"
]
| python | train |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/itsdangerous.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/itsdangerous.py#L355-L362 | def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
try:
sig = base64_decode(sig)
except Exception:
return False
return self.algorithm.verify_signature(key, value, sig) | [
"def",
"verify_signature",
"(",
"self",
",",
"value",
",",
"sig",
")",
":",
"key",
"=",
"self",
".",
"derive_key",
"(",
")",
"try",
":",
"sig",
"=",
"base64_decode",
"(",
"sig",
")",
"except",
"Exception",
":",
"return",
"False",
"return",
"self",
".",
"algorithm",
".",
"verify_signature",
"(",
"key",
",",
"value",
",",
"sig",
")"
]
| Verifies the signature for the given value. | [
"Verifies",
"the",
"signature",
"for",
"the",
"given",
"value",
"."
]
| python | test |
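
A quick round-trip sketch of how verify_signature is exercised in practice: signing attaches a signature that unsign() later verifies. The snippet sticks to the public itsdangerous API (Signer, sign, unsign, BadSignature); the key and payload are made up for illustration.

# Illustrative usage; sign() and unsign() call the signature machinery shown above.
from itsdangerous import Signer, BadSignature

signer = Signer("a-made-up-secret-key")   # derive_key() builds the signing key from this
signed = signer.sign(b"my-value")         # b'my-value.<base64-encoded signature>'

try:
    value = signer.unsign(signed)         # returns b'my-value' when the signature checks out
except BadSignature:
    value = None                          # tampered payloads or a wrong key end up here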
bcbio/bcbio-nextgen | bcbio/galaxy/nglims.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L73-L88 | def _prepare_sample(data, run_folder):
"""Extract passed keywords from input LIMS information.
"""
want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"])
out = {}
for k, v in data.items():
if k in want:
out[k] = _relative_paths(v, run_folder)
if "algorithm" not in out:
analysis, algorithm = _select_default_algorithm(out.get("analysis"))
out["algorithm"] = algorithm
out["analysis"] = analysis
description = "%s-%s" % (out["name"], clean_name(out["description"]))
out["name"] = [out["name"], description]
out["description"] = description
return out | [
"def",
"_prepare_sample",
"(",
"data",
",",
"run_folder",
")",
":",
"want",
"=",
"set",
"(",
"[",
"\"description\"",
",",
"\"files\"",
",",
"\"genome_build\"",
",",
"\"name\"",
",",
"\"analysis\"",
",",
"\"upload\"",
",",
"\"algorithm\"",
"]",
")",
"out",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"want",
":",
"out",
"[",
"k",
"]",
"=",
"_relative_paths",
"(",
"v",
",",
"run_folder",
")",
"if",
"\"algorithm\"",
"not",
"in",
"out",
":",
"analysis",
",",
"algorithm",
"=",
"_select_default_algorithm",
"(",
"out",
".",
"get",
"(",
"\"analysis\"",
")",
")",
"out",
"[",
"\"algorithm\"",
"]",
"=",
"algorithm",
"out",
"[",
"\"analysis\"",
"]",
"=",
"analysis",
"description",
"=",
"\"%s-%s\"",
"%",
"(",
"out",
"[",
"\"name\"",
"]",
",",
"clean_name",
"(",
"out",
"[",
"\"description\"",
"]",
")",
")",
"out",
"[",
"\"name\"",
"]",
"=",
"[",
"out",
"[",
"\"name\"",
"]",
",",
"description",
"]",
"out",
"[",
"\"description\"",
"]",
"=",
"description",
"return",
"out"
]
| Extract passed keywords from input LIMS information. | [
"Extract",
"passed",
"keywords",
"from",
"input",
"LIMS",
"information",
"."
]
| python | train |
deginner/mq-client | mq_client.py | https://github.com/deginner/mq-client/blob/a20ab50ea18870c01e8d142b049233c355858872/mq_client.py#L371-L387 | def stop(self):
"""
Stop the mq publisher by closing the channel and connection. We
set a flag here so that we stop scheduling new messages to be
published. The IOLoop is started because this method is
invoked by the Try/Catch below when KeyboardInterrupt is caught.
Starting the IOLoop again will allow the publisher to cleanly
disconnect from RabbitMQ.
"""
self._logger.info('Stopping')
self._stopping = True
self.close_connection()
try:
self._connection.ioloop.start() # supposedly this is necessary...
except Exception as e:
pass
self._logger.info('Stopped') | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Stopping'",
")",
"self",
".",
"_stopping",
"=",
"True",
"self",
".",
"close_connection",
"(",
")",
"try",
":",
"self",
".",
"_connection",
".",
"ioloop",
".",
"start",
"(",
")",
"# supposedly this is necessary...",
"except",
"Exception",
"as",
"e",
":",
"pass",
"self",
".",
"_logger",
".",
"info",
"(",
"'Stopped'",
")"
]
| Stop the mq publisher by closing the channel and connection. We
set a flag here so that we stop scheduling new messages to be
published. The IOLoop is started because this method is
invoked by the Try/Catch below when KeyboardInterrupt is caught.
Starting the IOLoop again will allow the publisher to cleanly
disconnect from RabbitMQ. | [
"Stop",
"the",
"mq",
"publisher",
"by",
"closing",
"the",
"channel",
"and",
"connection",
".",
"We",
"set",
"a",
"flag",
"here",
"so",
"that",
"we",
"stop",
"scheduling",
"new",
"messages",
"to",
"be",
"published",
".",
"The",
"IOLoop",
"is",
"started",
"because",
"this",
"method",
"is",
"invoked",
"by",
"the",
"Try",
"/",
"Catch",
"below",
"when",
"KeyboardInterrupt",
"is",
"caught",
".",
"Starting",
"the",
"IOLoop",
"again",
"will",
"allow",
"the",
"publisher",
"to",
"cleanly",
"disconnect",
"from",
"RabbitMQ",
"."
]
| python | train |
Ffisegydd/whatis | whatis/_core.py | https://github.com/Ffisegydd/whatis/blob/eef780ced61aae6d001aeeef7574e5e27e613583/whatis/_core.py#L29-L45 | def this(obj, **kwargs):
"""Prints series of debugging steps to user.
Runs through pipeline of functions and print results of each.
"""
verbose = kwargs.get("verbose", True)
if verbose:
print('{:=^30}'.format(" whatis.this? "))
for func in pipeline:
s = func(obj, **kwargs)
if s is not None:
print(s)
if verbose:
print('{:=^30}\n'.format(" whatis.this? ")) | [
"def",
"this",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"verbose",
"=",
"kwargs",
".",
"get",
"(",
"\"verbose\"",
",",
"True",
")",
"if",
"verbose",
":",
"print",
"(",
"'{:=^30}'",
".",
"format",
"(",
"\" whatis.this? \"",
")",
")",
"for",
"func",
"in",
"pipeline",
":",
"s",
"=",
"func",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
"if",
"s",
"is",
"not",
"None",
":",
"print",
"(",
"s",
")",
"if",
"verbose",
":",
"print",
"(",
"'{:=^30}\\n'",
".",
"format",
"(",
"\" whatis.this? \"",
")",
")"
]
| Prints a series of debugging steps to the user.
Runs through a pipeline of functions and prints the results of each. | [
"Prints",
"a",
"series",
"of",
"debugging",
"steps",
"to",
"the",
"user",
"."
]
| python | train |
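
The this() function above just walks a module-level pipeline of inspector callables and prints every non-None result. A self-contained sketch of that pattern follows; the inspector functions here are invented stand-ins, not the ones whatis actually registers.

# Hypothetical inspectors standing in for whatis's real pipeline entries.
def show_type(obj, **kwargs):
    return "type: {}".format(type(obj).__name__)

def show_len(obj, **kwargs):
    # Inspectors that do not apply return None and are skipped by the loop.
    return "length: {}".format(len(obj)) if hasattr(obj, "__len__") else None

pipeline = [show_type, show_len]

def this(obj, **kwargs):
    for func in pipeline:
        s = func(obj, **kwargs)
        if s is not None:
            print(s)

this([1, 2, 3])   # prints "type: list" then "length: 3"
this(42)          # prints "type: int" only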
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py#L495-L560 | def is_LaTeX(flist,env,abspath):
"""Scan a file list to decide if it's TeX- or LaTeX-flavored."""
# We need to scan files that are included in case the
# \documentclass command is in them.
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print("is_LaTeX search path ",paths)
print("files to search :",flist)
# Now that we have the search path and file list, check each one
for f in flist:
if Verbose:
print(" checking for Latex source ",str(f))
content = f.get_text_contents()
if LaTeX_re.search(content):
if Verbose:
print("file %s is a LaTeX file" % str(f))
return 1
if Verbose:
print("file %s is not a LaTeX file" % str(f))
# now find included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print("files included by '%s': "%str(f),inc_files)
        # inc_files is a list of file names as given. We need to find them
# using TEXINPUTS paths.
# search the included files
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
# make this a list since is_LaTeX takes a list.
fileList = [srcNode,]
if Verbose:
print("FindFile found ",srcNode)
if srcNode is not None:
file_test = is_LaTeX(fileList, env, abspath)
                # return on the first file that shows LaTeX is needed.
if file_test:
return file_test
if Verbose:
print(" done scanning ",str(f))
return 0 | [
"def",
"is_LaTeX",
"(",
"flist",
",",
"env",
",",
"abspath",
")",
":",
"# We need to scan files that are included in case the",
"# \\documentclass command is in them.",
"# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']",
"savedpath",
"=",
"modify_env_var",
"(",
"env",
",",
"'TEXINPUTS'",
",",
"abspath",
")",
"paths",
"=",
"env",
"[",
"'ENV'",
"]",
"[",
"'TEXINPUTS'",
"]",
"if",
"SCons",
".",
"Util",
".",
"is_List",
"(",
"paths",
")",
":",
"pass",
"else",
":",
"# Split at os.pathsep to convert into absolute path",
"paths",
"=",
"paths",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"# now that we have the path list restore the env",
"if",
"savedpath",
"is",
"_null",
":",
"try",
":",
"del",
"env",
"[",
"'ENV'",
"]",
"[",
"'TEXINPUTS'",
"]",
"except",
"KeyError",
":",
"pass",
"# was never set",
"else",
":",
"env",
"[",
"'ENV'",
"]",
"[",
"'TEXINPUTS'",
"]",
"=",
"savedpath",
"if",
"Verbose",
":",
"print",
"(",
"\"is_LaTeX search path \"",
",",
"paths",
")",
"print",
"(",
"\"files to search :\"",
",",
"flist",
")",
"# Now that we have the search path and file list, check each one",
"for",
"f",
"in",
"flist",
":",
"if",
"Verbose",
":",
"print",
"(",
"\" checking for Latex source \"",
",",
"str",
"(",
"f",
")",
")",
"content",
"=",
"f",
".",
"get_text_contents",
"(",
")",
"if",
"LaTeX_re",
".",
"search",
"(",
"content",
")",
":",
"if",
"Verbose",
":",
"print",
"(",
"\"file %s is a LaTeX file\"",
"%",
"str",
"(",
"f",
")",
")",
"return",
"1",
"if",
"Verbose",
":",
"print",
"(",
"\"file %s is not a LaTeX file\"",
"%",
"str",
"(",
"f",
")",
")",
"# now find included files",
"inc_files",
"=",
"[",
"]",
"inc_files",
".",
"extend",
"(",
"include_re",
".",
"findall",
"(",
"content",
")",
")",
"if",
"Verbose",
":",
"print",
"(",
"\"files included by '%s': \"",
"%",
"str",
"(",
"f",
")",
",",
"inc_files",
")",
"# inc_files is list of file names as given. need to find them",
"# using TEXINPUTS paths.",
"# search the included files",
"for",
"src",
"in",
"inc_files",
":",
"srcNode",
"=",
"FindFile",
"(",
"src",
",",
"[",
"'.tex'",
",",
"'.ltx'",
",",
"'.latex'",
"]",
",",
"paths",
",",
"env",
",",
"requireExt",
"=",
"False",
")",
"# make this a list since is_LaTeX takes a list.",
"fileList",
"=",
"[",
"srcNode",
",",
"]",
"if",
"Verbose",
":",
"print",
"(",
"\"FindFile found \"",
",",
"srcNode",
")",
"if",
"srcNode",
"is",
"not",
"None",
":",
"file_test",
"=",
"is_LaTeX",
"(",
"fileList",
",",
"env",
",",
"abspath",
")",
"# return on first file that finds latex is needed.",
"if",
"file_test",
":",
"return",
"file_test",
"if",
"Verbose",
":",
"print",
"(",
"\" done scanning \"",
",",
"str",
"(",
"f",
")",
")",
"return",
"0"
]
| Scan a file list to decide if it's TeX- or LaTeX-flavored. | [
"Scan",
"a",
"file",
"list",
"to",
"decide",
"if",
"it",
"s",
"TeX",
"-",
"or",
"LaTeX",
"-",
"flavored",
"."
]
| python | train |
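
LaTeX_re and include_re above come from the surrounding tex.py module; a simplified stand-in for the detection idea looks like this (these regexes are guesses at the shape of SCons's patterns, not copies of them).

# Simplified sketch: detect LaTeX markers and collect \include/\input targets.
import re

LaTeX_re = re.compile(r"\\documentclass|\\begin\{document\}")
include_re = re.compile(r"\\(?:include|input)\{([^}]*)\}")

content = r"\documentclass{article} \input{chapter1} \include{appendix}"
print(bool(LaTeX_re.search(content)))   # True -> treat the source as LaTeX
print(include_re.findall(content))      # ['chapter1', 'appendix'] -> scan these recursively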
buildinspace/peru | peru/runtime.py | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/runtime.py#L16-L20 | async def Runtime(args, env):
'This is the async constructor for the _Runtime class.'
r = _Runtime(args, env)
await r._init_cache()
return r | [
"async",
"def",
"Runtime",
"(",
"args",
",",
"env",
")",
":",
"r",
"=",
"_Runtime",
"(",
"args",
",",
"env",
")",
"await",
"r",
".",
"_init_cache",
"(",
")",
"return",
"r"
]
| This is the async constructor for the _Runtime class. | [
"This",
"is",
"the",
"async",
"constructor",
"for",
"the",
"_Runtime",
"class",
"."
]
| python | train |
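
The Runtime coroutine above is the usual workaround for __init__ not being allowed to await anything: a plain class does the synchronous setup, and an async factory finishes the job. A generic sketch of the pattern, with class and method names invented for illustration:

import asyncio

class _Client:
    def __init__(self, url):
        self.url = url
        self.session = None

    async def _connect(self):
        # Stand-in for real async I/O that __init__ could not await.
        await asyncio.sleep(0)
        self.session = "connected"

async def Client(url):
    "Async factory: construct the object, then await its async initialization."
    c = _Client(url)
    await c._connect()
    return c

async def main():
    client = await Client("https://example.invalid")
    print(client.session)   # "connected"

asyncio.run(main())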
ehansis/ozelot | examples/leonardo/leonardo/common/input.py | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/leonardo/leonardo/common/input.py#L41-L53 | def load(self):
"""Load the data file, do some basic type conversions
"""
df = pd.read_csv(self.input_file,
encoding='utf8')
df['wiki_id'] = df['painting'].str.split('/').str[-1]
df['creator_wiki_id'] = df['creator'].str.split('/').str[-1]
df['decade'] = (df['inception'].str[:4].astype(float) / 10.).astype(int) * 10
df['area'] = df['width'] * df['height']
return df | [
"def",
"load",
"(",
"self",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"self",
".",
"input_file",
",",
"encoding",
"=",
"'utf8'",
")",
"df",
"[",
"'wiki_id'",
"]",
"=",
"df",
"[",
"'painting'",
"]",
".",
"str",
".",
"split",
"(",
"'/'",
")",
".",
"str",
"[",
"-",
"1",
"]",
"df",
"[",
"'creator_wiki_id'",
"]",
"=",
"df",
"[",
"'creator'",
"]",
".",
"str",
".",
"split",
"(",
"'/'",
")",
".",
"str",
"[",
"-",
"1",
"]",
"df",
"[",
"'decade'",
"]",
"=",
"(",
"df",
"[",
"'inception'",
"]",
".",
"str",
"[",
":",
"4",
"]",
".",
"astype",
"(",
"float",
")",
"/",
"10.",
")",
".",
"astype",
"(",
"int",
")",
"*",
"10",
"df",
"[",
"'area'",
"]",
"=",
"df",
"[",
"'width'",
"]",
"*",
"df",
"[",
"'height'",
"]",
"return",
"df"
]
| Load the data file, do some basic type conversions | [
"Load",
"the",
"data",
"file",
"do",
"some",
"basic",
"type",
"conversions"
]
| python | train |
etcher-be/elib_config | elib_config/_utils.py | https://github.com/etcher-be/elib_config/blob/5d8c839e84d70126620ab0186dc1f717e5868bd0/elib_config/_utils.py#L28-L39 | def friendly_type_name(raw_type: typing.Type) -> str:
"""
Returns a user-friendly type name
:param raw_type: raw type (str, int, ...)
:return: user friendly type as string
"""
try:
return _TRANSLATE_TYPE[raw_type]
except KeyError:
LOGGER.error('unmanaged value type: %s', raw_type)
return str(raw_type) | [
"def",
"friendly_type_name",
"(",
"raw_type",
":",
"typing",
".",
"Type",
")",
"->",
"str",
":",
"try",
":",
"return",
"_TRANSLATE_TYPE",
"[",
"raw_type",
"]",
"except",
"KeyError",
":",
"LOGGER",
".",
"error",
"(",
"'unmanaged value type: %s'",
",",
"raw_type",
")",
"return",
"str",
"(",
"raw_type",
")"
]
| Returns a user-friendly type name
:param raw_type: raw type (str, int, ...)
:return: user friendly type as string | [
"Returns",
"a",
"user",
"-",
"friendly",
"type",
"name"
]
| python | train |
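
_TRANSLATE_TYPE above is defined elsewhere in elib_config; the lookup-with-fallback pattern it relies on looks like this (the mapping contents here are invented, not the module's real table).

import typing

# Hypothetical stand-in for elib_config's _TRANSLATE_TYPE table.
_TRANSLATE_TYPE: typing.Dict[type, str] = {
    str: "string",
    int: "integer",
    float: "float",
    bool: "boolean",
    list: "list of values",
}

def friendly_type_name(raw_type: type) -> str:
    try:
        return _TRANSLATE_TYPE[raw_type]
    except KeyError:
        # Unmanaged types fall back to their raw repr, as the function above does.
        return str(raw_type)

print(friendly_type_name(int))   # integer
print(friendly_type_name(dict))  # <class 'dict'>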
pyupio/dparse | dparse/parser.py | https://github.com/pyupio/dparse/blob/0cd5aa7eb1f78c39da78b6c63dde6b49a1732cd2/dparse/parser.py#L365-L391 | def parse(self):
"""
Parse a Pipfile.lock (as seen in pipenv)
:return:
"""
try:
data = json.loads(self.obj.content, object_pairs_hook=OrderedDict)
if data:
for package_type in ['default', 'develop']:
if package_type in data:
for name, meta in data[package_type].items():
# skip VCS dependencies
if 'version' not in meta:
continue
specs = meta['version']
hashes = meta['hashes']
self.obj.dependencies.append(
Dependency(
name=name, specs=SpecifierSet(specs),
dependency_type=filetypes.pipfile_lock,
hashes=hashes,
line=''.join([name, specs]),
section=package_type
)
)
except ValueError:
pass | [
"def",
"parse",
"(",
"self",
")",
":",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"obj",
".",
"content",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"if",
"data",
":",
"for",
"package_type",
"in",
"[",
"'default'",
",",
"'develop'",
"]",
":",
"if",
"package_type",
"in",
"data",
":",
"for",
"name",
",",
"meta",
"in",
"data",
"[",
"package_type",
"]",
".",
"items",
"(",
")",
":",
"# skip VCS dependencies",
"if",
"'version'",
"not",
"in",
"meta",
":",
"continue",
"specs",
"=",
"meta",
"[",
"'version'",
"]",
"hashes",
"=",
"meta",
"[",
"'hashes'",
"]",
"self",
".",
"obj",
".",
"dependencies",
".",
"append",
"(",
"Dependency",
"(",
"name",
"=",
"name",
",",
"specs",
"=",
"SpecifierSet",
"(",
"specs",
")",
",",
"dependency_type",
"=",
"filetypes",
".",
"pipfile_lock",
",",
"hashes",
"=",
"hashes",
",",
"line",
"=",
"''",
".",
"join",
"(",
"[",
"name",
",",
"specs",
"]",
")",
",",
"section",
"=",
"package_type",
")",
")",
"except",
"ValueError",
":",
"pass"
]
| Parse a Pipfile.lock (as seen in pipenv)
:return: | [
"Parse",
"a",
"Pipfile",
".",
"lock",
"(",
"as",
"seen",
"in",
"pipenv",
")",
":",
"return",
":"
]
| python | train |
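
For orientation, a trimmed example of the Pipfile.lock structure the parser above walks: 'default' and 'develop' sections map package names to metadata carrying 'version' and 'hashes', and VCS entries without a 'version' key are skipped. All values below are made up.

# Hypothetical Pipfile.lock fragment (as a Python dict for readability).
pipfile_lock = {
    "default": {
        "requests": {
            "version": "==2.20.0",
            "hashes": ["sha256:aaaa...", "sha256:bbbb..."],
        },
        "some-vcs-package": {
            # no "version" key -> skipped by the parser above
            "git": "https://example.invalid/repo.git",
        },
    },
    "develop": {
        "pytest": {
            "version": ">=3.0",
            "hashes": ["sha256:cccc..."],
        },
    },
}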
MacHu-GWU/single_file_module-project | sfm/fingerprint.py | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/fingerprint.py#L184-L242 | def of_file(self, abspath, nbytes=0, chunk_size=1024):
"""
Use default hash method to return hash value of a piece of a file
        :type abspath: text_type
        :param abspath: the absolute path to the file.
        :type nbytes: int
        :param nbytes: only hash the first N bytes of the file. If 0, hash the whole file.
        :type chunk_size: int
        :param chunk_size: The max memory we use at one time.
        Estimate of processing time on:
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
        if you change the metadata (for example, the title or year
        information in audio or video) of a multi-media file, then the hash
        value will also change.
"""
if nbytes < 0:
raise ValueError("chunk_size cannot smaller than 0")
if chunk_size < 1:
raise ValueError("chunk_size cannot smaller than 1")
if (nbytes > 0) and (nbytes < chunk_size):
chunk_size = nbytes
m = self.hash_algo()
with open(abspath, "rb") as f:
if nbytes: # use first n bytes
have_reads = 0
while True:
have_reads += chunk_size
if have_reads > nbytes:
n = nbytes - (have_reads - chunk_size)
if n:
data = f.read(n)
m.update(data)
break
else:
data = f.read(chunk_size)
m.update(data)
else: # use entire content
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest() | [
"def",
"of_file",
"(",
"self",
",",
"abspath",
",",
"nbytes",
"=",
"0",
",",
"chunk_size",
"=",
"1024",
")",
":",
"if",
"nbytes",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"chunk_size cannot smaller than 0\"",
")",
"if",
"chunk_size",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"chunk_size cannot smaller than 1\"",
")",
"if",
"(",
"nbytes",
">",
"0",
")",
"and",
"(",
"nbytes",
"<",
"chunk_size",
")",
":",
"chunk_size",
"=",
"nbytes",
"m",
"=",
"self",
".",
"hash_algo",
"(",
")",
"with",
"open",
"(",
"abspath",
",",
"\"rb\"",
")",
"as",
"f",
":",
"if",
"nbytes",
":",
"# use first n bytes",
"have_reads",
"=",
"0",
"while",
"True",
":",
"have_reads",
"+=",
"chunk_size",
"if",
"have_reads",
">",
"nbytes",
":",
"n",
"=",
"nbytes",
"-",
"(",
"have_reads",
"-",
"chunk_size",
")",
"if",
"n",
":",
"data",
"=",
"f",
".",
"read",
"(",
"n",
")",
"m",
".",
"update",
"(",
"data",
")",
"break",
"else",
":",
"data",
"=",
"f",
".",
"read",
"(",
"chunk_size",
")",
"m",
".",
"update",
"(",
"data",
")",
"else",
":",
"# use entire content",
"while",
"True",
":",
"data",
"=",
"f",
".",
"read",
"(",
"chunk_size",
")",
"if",
"not",
"data",
":",
"break",
"m",
".",
"update",
"(",
"data",
")",
"return",
"m",
".",
"hexdigest",
"(",
")"
]
| Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:type abspath: text_type
:param abspath: the absolute path to the file.
:type nbytes: int
:param nbytes: only hash the first N bytes of the file. If 0, hash the whole file.
:type chunk_size: int
:param chunk_size: The max memory we use at one time.
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the metadata (for example, the title or year
information in audio or video) of a multi-media file, then the hash
value will also change. | [
"Use",
"default",
"hash",
"method",
"to",
"return",
"hash",
"value",
"of",
"a",
"piece",
"of",
"a",
"file"
]
| python | train |
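A usage sketch for of_file; the `fp` instance below is an assumption (a configured object of the class above, with a hash_algo such as hashlib.md5):

# Hash only the first 1 MB of a large file, 64 KB at a time.
digest = fp.of_file("/tmp/big.bin", nbytes=1024 * 1024, chunk_size=65536)
# Hash the entire file with the default chunk size.
full_digest = fp.of_file("/tmp/big.bin")
print(digest, full_digest)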
python-bugzilla/python-bugzilla | bugzilla/base.py | https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/base.py#L859-L892 | def getcomponents(self, product, force_refresh=False):
"""
Return a list of component names for the passed product.
This can be implemented with Product.get, but behind the
scenes it uses Bug.legal_values. Reason being that on bugzilla
instances with tons of components, like bugzilla.redhat.com
Product=Fedora for example, there's a 10x speed difference
even with properly limited Product.get calls.
On first invocation the value is cached, and subsequent calls
will return the cached data.
:param force_refresh: Force refreshing the cache, and return
the new data
"""
proddict = self._lookup_product_in_cache(product)
product_id = proddict.get("id", None)
if (force_refresh or
product_id is None or
product_id not in self._cache.component_names):
self.refresh_products(names=[product],
include_fields=["name", "id"])
proddict = self._lookup_product_in_cache(product)
if "id" not in proddict:
raise BugzillaError("Product '%s' not found" % product)
product_id = proddict["id"]
opts = {'product_id': product_id, 'field': 'component'}
names = self._proxy.Bug.legal_values(opts)["values"]
self._cache.component_names[product_id] = names
return self._cache.component_names[product_id] | [
"def",
"getcomponents",
"(",
"self",
",",
"product",
",",
"force_refresh",
"=",
"False",
")",
":",
"proddict",
"=",
"self",
".",
"_lookup_product_in_cache",
"(",
"product",
")",
"product_id",
"=",
"proddict",
".",
"get",
"(",
"\"id\"",
",",
"None",
")",
"if",
"(",
"force_refresh",
"or",
"product_id",
"is",
"None",
"or",
"product_id",
"not",
"in",
"self",
".",
"_cache",
".",
"component_names",
")",
":",
"self",
".",
"refresh_products",
"(",
"names",
"=",
"[",
"product",
"]",
",",
"include_fields",
"=",
"[",
"\"name\"",
",",
"\"id\"",
"]",
")",
"proddict",
"=",
"self",
".",
"_lookup_product_in_cache",
"(",
"product",
")",
"if",
"\"id\"",
"not",
"in",
"proddict",
":",
"raise",
"BugzillaError",
"(",
"\"Product '%s' not found\"",
"%",
"product",
")",
"product_id",
"=",
"proddict",
"[",
"\"id\"",
"]",
"opts",
"=",
"{",
"'product_id'",
":",
"product_id",
",",
"'field'",
":",
"'component'",
"}",
"names",
"=",
"self",
".",
"_proxy",
".",
"Bug",
".",
"legal_values",
"(",
"opts",
")",
"[",
"\"values\"",
"]",
"self",
".",
"_cache",
".",
"component_names",
"[",
"product_id",
"]",
"=",
"names",
"return",
"self",
".",
"_cache",
".",
"component_names",
"[",
"product_id",
"]"
]
| Return a list of component names for the passed product.
This can be implemented with Product.get, but behind the
scenes it uses Bug.legal_values. Reason being that on bugzilla
instances with tons of components, like bugzilla.redhat.com
Product=Fedora for example, there's a 10x speed difference
even with properly limited Product.get calls.
On first invocation the value is cached, and subsequent calls
will return the cached data.
:param force_refresh: Force refreshing the cache, and return
the new data | [
"Return",
"a",
"list",
"of",
"component",
"names",
"for",
"the",
"passed",
"product",
"."
]
| python | train |
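A usage sketch for getcomponents; network access to the Bugzilla instance is assumed (anonymous reads usually suffice):

import bugzilla

bzapi = bugzilla.Bugzilla("bugzilla.redhat.com")
# First call populates the cache via Bug.legal_values...
components = bzapi.getcomponents("Fedora")
# ...later calls are served from the cache unless explicitly refreshed.
fresh = bzapi.getcomponents("Fedora", force_refresh=True)
print(len(components))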
google/grr | grr/server/grr_response_server/databases/mysql_flows.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L975-L981 | def DeleteAllFlowRequestsAndResponses(self, client_id, flow_id, cursor=None):
"""Deletes all requests and responses for a given flow from the database."""
args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
res_query = "DELETE FROM flow_responses WHERE client_id=%s AND flow_id=%s"
cursor.execute(res_query, args)
req_query = "DELETE FROM flow_requests WHERE client_id=%s AND flow_id=%s"
cursor.execute(req_query, args) | [
"def",
"DeleteAllFlowRequestsAndResponses",
"(",
"self",
",",
"client_id",
",",
"flow_id",
",",
"cursor",
"=",
"None",
")",
":",
"args",
"=",
"[",
"db_utils",
".",
"ClientIDToInt",
"(",
"client_id",
")",
",",
"db_utils",
".",
"FlowIDToInt",
"(",
"flow_id",
")",
"]",
"res_query",
"=",
"\"DELETE FROM flow_responses WHERE client_id=%s AND flow_id=%s\"",
"cursor",
".",
"execute",
"(",
"res_query",
",",
"args",
")",
"req_query",
"=",
"\"DELETE FROM flow_requests WHERE client_id=%s AND flow_id=%s\"",
"cursor",
".",
"execute",
"(",
"req_query",
",",
"args",
")"
]
| Deletes all requests and responses for a given flow from the database. | [
"Deletes",
"all",
"requests",
"and",
"responses",
"for",
"a",
"given",
"flow",
"from",
"the",
"database",
"."
]
| python | train |
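The method above is an instance of the parameterized-delete pattern; a generic sketch with driver-escaped %s placeholders (table and column names are illustrative):

def delete_flow_rows(cursor, client_id, flow_id):
    args = [client_id, flow_id]
    # The driver substitutes and escapes %s itself, so values never need
    # to be interpolated into the SQL string by hand.
    cursor.execute(
        "DELETE FROM flow_responses WHERE client_id=%s AND flow_id=%s", args)
    cursor.execute(
        "DELETE FROM flow_requests WHERE client_id=%s AND flow_id=%s", args)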
borntyping/python-riemann-client | riemann_client/transport.py | https://github.com/borntyping/python-riemann-client/blob/3e181d90bdf685afd21c1ec5ee20e6840b011ea5/riemann_client/transport.py#L186-L195 | def send(self, message):
"""Adds a message to the list, returning a fake 'ok' response
:returns: A response message with ``ok = True``
"""
for event in message.events:
self.events.append(event)
reply = riemann_client.riemann_pb2.Msg()
reply.ok = True
return reply | [
"def",
"send",
"(",
"self",
",",
"message",
")",
":",
"for",
"event",
"in",
"message",
".",
"events",
":",
"self",
".",
"events",
".",
"append",
"(",
"event",
")",
"reply",
"=",
"riemann_client",
".",
"riemann_pb2",
".",
"Msg",
"(",
")",
"reply",
".",
"ok",
"=",
"True",
"return",
"reply"
]
| Adds a message to the list, returning a fake 'ok' response
:returns: A response message with ``ok = True`` | [
"Adds",
"a",
"message",
"to",
"the",
"list",
"returning",
"a",
"fake",
"ok",
"response"
]
| python | train |
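A sketch of how a fake transport like this is typically used in tests; FakeTransport names the class defined above, and the Client wiring is assumed to match riemann_client's usual API:

from riemann_client.client import Client

transport = FakeTransport()          # the transport defined above
client = Client(transport)
client.event(service="api", metric_f=1.0)
# Nothing crossed the network; the event was captured for assertions.
assert transport.events[0].service == "api"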
tmbo/questionary | questionary/prompts/confirm.py | https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/confirm.py#L16-L94 | def confirm(message: Text,
default: bool = True,
qmark: Text = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
**kwargs: Any) -> Question:
"""Prompt the user to confirm or reject.
This question type can be used to prompt the user for a confirmation
of a yes-or-no question. If the user just hits enter, the default
value will be returned.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
"""
merged_style = merge_styles([DEFAULT_STYLE, style])
status = {'answer': None}
def get_prompt_tokens():
tokens = []
tokens.append(("class:qmark", qmark))
tokens.append(("class:question", ' {} '.format(message)))
if status['answer'] is not None:
answer = ' {}'.format(YES if status['answer'] else NO)
tokens.append(("class:answer", answer))
else:
instruction = ' {}'.format(YES_OR_NO if default else NO_OR_YES)
tokens.append(("class:instruction", instruction))
return to_formatted_text(tokens)
bindings = KeyBindings()
@bindings.add(Keys.ControlQ, eager=True)
@bindings.add(Keys.ControlC, eager=True)
def _(event):
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
@bindings.add('n')
@bindings.add('N')
def key_n(event):
status['answer'] = False
event.app.exit(result=False)
@bindings.add('y')
@bindings.add('Y')
def key_y(event):
status['answer'] = True
event.app.exit(result=True)
@bindings.add(Keys.ControlM, eager=True)
def set_answer(event):
status['answer'] = default
event.app.exit(result=default)
@bindings.add(Keys.Any)
def other(event):
"""Disallow inserting other text."""
pass
return Question(PromptSession(get_prompt_tokens,
key_bindings=bindings,
style=merged_style,
**kwargs).app) | [
"def",
"confirm",
"(",
"message",
":",
"Text",
",",
"default",
":",
"bool",
"=",
"True",
",",
"qmark",
":",
"Text",
"=",
"DEFAULT_QUESTION_PREFIX",
",",
"style",
":",
"Optional",
"[",
"Style",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"Question",
":",
"merged_style",
"=",
"merge_styles",
"(",
"[",
"DEFAULT_STYLE",
",",
"style",
"]",
")",
"status",
"=",
"{",
"'answer'",
":",
"None",
"}",
"def",
"get_prompt_tokens",
"(",
")",
":",
"tokens",
"=",
"[",
"]",
"tokens",
".",
"append",
"(",
"(",
"\"class:qmark\"",
",",
"qmark",
")",
")",
"tokens",
".",
"append",
"(",
"(",
"\"class:question\"",
",",
"' {} '",
".",
"format",
"(",
"message",
")",
")",
")",
"if",
"status",
"[",
"'answer'",
"]",
"is",
"not",
"None",
":",
"answer",
"=",
"' {}'",
".",
"format",
"(",
"YES",
"if",
"status",
"[",
"'answer'",
"]",
"else",
"NO",
")",
"tokens",
".",
"append",
"(",
"(",
"\"class:answer\"",
",",
"answer",
")",
")",
"else",
":",
"instruction",
"=",
"' {}'",
".",
"format",
"(",
"YES_OR_NO",
"if",
"default",
"else",
"NO_OR_YES",
")",
"tokens",
".",
"append",
"(",
"(",
"\"class:instruction\"",
",",
"instruction",
")",
")",
"return",
"to_formatted_text",
"(",
"tokens",
")",
"bindings",
"=",
"KeyBindings",
"(",
")",
"@",
"bindings",
".",
"add",
"(",
"Keys",
".",
"ControlQ",
",",
"eager",
"=",
"True",
")",
"@",
"bindings",
".",
"add",
"(",
"Keys",
".",
"ControlC",
",",
"eager",
"=",
"True",
")",
"def",
"_",
"(",
"event",
")",
":",
"event",
".",
"app",
".",
"exit",
"(",
"exception",
"=",
"KeyboardInterrupt",
",",
"style",
"=",
"'class:aborting'",
")",
"@",
"bindings",
".",
"add",
"(",
"'n'",
")",
"@",
"bindings",
".",
"add",
"(",
"'N'",
")",
"def",
"key_n",
"(",
"event",
")",
":",
"status",
"[",
"'answer'",
"]",
"=",
"False",
"event",
".",
"app",
".",
"exit",
"(",
"result",
"=",
"False",
")",
"@",
"bindings",
".",
"add",
"(",
"'y'",
")",
"@",
"bindings",
".",
"add",
"(",
"'Y'",
")",
"def",
"key_y",
"(",
"event",
")",
":",
"status",
"[",
"'answer'",
"]",
"=",
"True",
"event",
".",
"app",
".",
"exit",
"(",
"result",
"=",
"True",
")",
"@",
"bindings",
".",
"add",
"(",
"Keys",
".",
"ControlM",
",",
"eager",
"=",
"True",
")",
"def",
"set_answer",
"(",
"event",
")",
":",
"status",
"[",
"'answer'",
"]",
"=",
"default",
"event",
".",
"app",
".",
"exit",
"(",
"result",
"=",
"default",
")",
"@",
"bindings",
".",
"add",
"(",
"Keys",
".",
"Any",
")",
"def",
"other",
"(",
"event",
")",
":",
"\"\"\"Disallow inserting other text.\"\"\"",
"pass",
"return",
"Question",
"(",
"PromptSession",
"(",
"get_prompt_tokens",
",",
"key_bindings",
"=",
"bindings",
",",
"style",
"=",
"merged_style",
",",
"*",
"*",
"kwargs",
")",
".",
"app",
")"
]
| Prompt the user to confirm or reject.
This question type can be used to prompt the user for a confirmation
of a yes-or-no question. If the user just hits enter, the default
value will be returned.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`). | [
"Prompt",
"the",
"user",
"to",
"confirm",
"or",
"reject",
"."
]
| python | train |
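Typical usage of the confirm prompt; hitting Enter accepts the default, while y/Y and n/N answer immediately:

import questionary

proceed = questionary.confirm("Deploy to production?", default=False).ask()
if proceed:
    print("deploying...")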
tensorflow/probability | tensorflow_probability/python/optimizer/nelder_mead.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L473-L497 | def _expansion_fn(objective_function,
simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected,
face_centroid,
expansion):
"""Creates the condition function pair for an expansion."""
def _expand_and_maybe_replace():
"""Performs the expansion step."""
expanded = face_centroid + expansion * (reflected - face_centroid)
expanded_objective_value = objective_function(expanded)
expanded_is_better = (expanded_objective_value <
objective_at_reflected)
accept_expanded_fn = lambda: (expanded, expanded_objective_value)
accept_reflected_fn = lambda: (reflected, objective_at_reflected)
next_pt, next_objective_value = prefer_static.cond(
expanded_is_better, accept_expanded_fn, accept_reflected_fn)
next_simplex = _replace_at_index(simplex, worst_index, next_pt)
next_objective_at_simplex = _replace_at_index(objective_values,
worst_index,
next_objective_value)
return False, next_simplex, next_objective_at_simplex, 1
return _expand_and_maybe_replace | [
"def",
"_expansion_fn",
"(",
"objective_function",
",",
"simplex",
",",
"objective_values",
",",
"worst_index",
",",
"reflected",
",",
"objective_at_reflected",
",",
"face_centroid",
",",
"expansion",
")",
":",
"def",
"_expand_and_maybe_replace",
"(",
")",
":",
"\"\"\"Performs the expansion step.\"\"\"",
"expanded",
"=",
"face_centroid",
"+",
"expansion",
"*",
"(",
"reflected",
"-",
"face_centroid",
")",
"expanded_objective_value",
"=",
"objective_function",
"(",
"expanded",
")",
"expanded_is_better",
"=",
"(",
"expanded_objective_value",
"<",
"objective_at_reflected",
")",
"accept_expanded_fn",
"=",
"lambda",
":",
"(",
"expanded",
",",
"expanded_objective_value",
")",
"accept_reflected_fn",
"=",
"lambda",
":",
"(",
"reflected",
",",
"objective_at_reflected",
")",
"next_pt",
",",
"next_objective_value",
"=",
"prefer_static",
".",
"cond",
"(",
"expanded_is_better",
",",
"accept_expanded_fn",
",",
"accept_reflected_fn",
")",
"next_simplex",
"=",
"_replace_at_index",
"(",
"simplex",
",",
"worst_index",
",",
"next_pt",
")",
"next_objective_at_simplex",
"=",
"_replace_at_index",
"(",
"objective_values",
",",
"worst_index",
",",
"next_objective_value",
")",
"return",
"False",
",",
"next_simplex",
",",
"next_objective_at_simplex",
",",
"1",
"return",
"_expand_and_maybe_replace"
]
| Creates the condition function pair for an expansion. | [
"Creates",
"the",
"condition",
"function",
"pair",
"for",
"an",
"expansion",
"."
]
| python | test |
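The expansion move itself is plain simplex geometry; a NumPy sketch of the same update outside the TensorFlow graph machinery (illustrative, not the library code):

import numpy as np

def expand_step(objective, simplex, values, worst, reflected, f_reflected,
                centroid, expansion=2.0):
    # Push further along the reflection direction...
    expanded = centroid + expansion * (reflected - centroid)
    f_expanded = objective(expanded)
    # ...then keep whichever of the two trial points is better.
    point, value = ((expanded, f_expanded) if f_expanded < f_reflected
                    else (reflected, f_reflected))
    simplex, values = simplex.copy(), values.copy()
    simplex[worst], values[worst] = point, value
    return simplex, values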
cons3rt/pycons3rt | pycons3rt/bash.py | https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/bash.py#L958-L998 | def remove_ifcfg_file(device_index='0'):
"""Removes the ifcfg file at the specified device index
and restarts the network service
:param device_index: (int) Device Index
:return: None
:raises CommandError
"""
log = logging.getLogger(mod_logger + '.remove_ifcfg_file')
if not isinstance(device_index, basestring):
msg = 'device_index argument must be a string'
log.error(msg)
raise CommandError(msg)
network_script = '/etc/sysconfig/network-scripts/ifcfg-eth{d}'.format(d=device_index)
if not os.path.isfile(network_script):
log.info('File does not exist, nothing will be removed: {n}'.format(n=network_script))
return
# Remove the network config script
log.info('Attempting to remove file: {n}'.format(n=network_script))
try:
os.remove(network_script)
except(IOError, OSError):
_, ex, trace = sys.exc_info()
msg = 'There was a problem removing network script file: {n}\n{e}'.format(n=network_script, e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully removed file: {n}'.format(n=network_script))
# Restart the network service
log.info('Restarting the network service...')
try:
service_network_restart()
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem restarting the network service\n{e}'.format(e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully restarted the network service') | [
"def",
"remove_ifcfg_file",
"(",
"device_index",
"=",
"'0'",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"mod_logger",
"+",
"'.remove_ifcfg_file'",
")",
"if",
"not",
"isinstance",
"(",
"device_index",
",",
"basestring",
")",
":",
"msg",
"=",
"'device_index argument must be a string'",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"CommandError",
"(",
"msg",
")",
"network_script",
"=",
"'/etc/sysconfig/network-scripts/ifcfg-eth{d}'",
".",
"format",
"(",
"d",
"=",
"device_index",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"network_script",
")",
":",
"log",
".",
"info",
"(",
"'File does not exist, nothing will be removed: {n}'",
".",
"format",
"(",
"n",
"=",
"network_script",
")",
")",
"return",
"# Remove the network config script",
"log",
".",
"info",
"(",
"'Attempting to remove file: {n}'",
".",
"format",
"(",
"n",
"=",
"network_script",
")",
")",
"try",
":",
"os",
".",
"remove",
"(",
"network_script",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"_",
",",
"ex",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"'There was a problem removing network script file: {n}\\n{e}'",
".",
"format",
"(",
"n",
"=",
"network_script",
",",
"e",
"=",
"str",
"(",
"ex",
")",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"OSError",
",",
"msg",
",",
"trace",
"else",
":",
"log",
".",
"info",
"(",
"'Successfully removed file: {n}'",
".",
"format",
"(",
"n",
"=",
"network_script",
")",
")",
"# Restart the network service",
"log",
".",
"info",
"(",
"'Restarting the network service...'",
")",
"try",
":",
"service_network_restart",
"(",
")",
"except",
"CommandError",
":",
"_",
",",
"ex",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"'There was a problem restarting the network service\\n{e}'",
".",
"format",
"(",
"e",
"=",
"str",
"(",
"ex",
")",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"OSError",
",",
"msg",
",",
"trace",
"else",
":",
"log",
".",
"info",
"(",
"'Successfully restarted the network service'",
")"
]
| Removes the ifcfg file at the specified device index
and restarts the network service
:param device_index: (int) Device Index
:return: None
:raises CommandError | [
"Removes",
"the",
"ifcfg",
"file",
"at",
"the",
"specified",
"device",
"index",
"and",
"restarts",
"the",
"network",
"service"
]
| python | train |
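A usage sketch; the import path follows the repo layout, and CommandError is assumed importable from the same module:

from pycons3rt.bash import remove_ifcfg_file, CommandError

try:
    # Drops /etc/sysconfig/network-scripts/ifcfg-eth1, then restarts networking.
    remove_ifcfg_file(device_index='1')
except (CommandError, OSError) as exc:
    print('Cleanup failed: {e}'.format(e=str(exc)))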
xhtml2pdf/xhtml2pdf | xhtml2pdf/util.py | https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/util.py#L732-L736 | def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetypes.guess_type(name)[0].split(";")[0] | [
"def",
"setMimeTypeByName",
"(",
"self",
",",
"name",
")",
":",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"name",
")",
"[",
"0",
"]",
"if",
"mimetype",
"is",
"not",
"None",
":",
"self",
".",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"name",
")",
"[",
"0",
"]",
".",
"split",
"(",
"\";\"",
")",
"[",
"0",
"]"
]
| Guess the mime type | [
"Guess",
"the",
"mime",
"type"
]
| python | train |
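The underlying stdlib call it wraps, for reference:

import mimetypes

print(mimetypes.guess_type("report.pdf"))    # ('application/pdf', None)
print(mimetypes.guess_type("unknown.xyz"))   # (None, None) -> mimetype left unchanged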
econ-ark/HARK | HARK/cAndCwithStickyE/StickyEtools.py | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/cAndCwithStickyE/StickyEtools.py#L46-L196 | def makeStickyEdataFile(Economy,ignore_periods,description='',filename=None,save_data=False,calc_micro_stats=True,meas_err_base=None):
'''
Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None
'''
# Extract time series data from the economy
if hasattr(Economy,'agents'): # If this is a heterogeneous agent specification...
if len(Economy.agents) > 1:
pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents],axis=1)
aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents],axis=1)
cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents],axis=1)
yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents],axis=1)
else: # Don't duplicate the data unless necessary (with one type, concatenating is useless)
pLvlAll_hist = Economy.agents[0].pLvlTrue_hist
aLvlAll_hist = Economy.agents[0].aLvlNow_hist
cLvlAll_hist = Economy.agents[0].cLvlNow_hist
yLvlAll_hist = Economy.agents[0].yLvlNow_hist
# PermShkAggHist needs to be shifted one period forward
PlvlAgg_hist = np.cumprod(np.concatenate(([1.0],Economy.PermShkAggHist[:-1]),axis=0))
AlvlAgg_hist = np.mean(aLvlAll_hist,axis=1) # Level of aggregate assets
AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate assets
ClvlAgg_hist = np.mean(cLvlAll_hist,axis=1) # Level of aggregate consumption
CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate consumption
YlvlAgg_hist = np.mean(yLvlAll_hist,axis=1) # Level of aggregate income
YnrmAgg_hist = YlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate income
if calc_micro_stats: # Only calculate stats if requested. This is a memory hog with many simulated periods
micro_stat_periods = int((Economy.agents[0].T_sim-ignore_periods)*0.1)
not_newborns = (np.concatenate([this_type.t_age_hist[(ignore_periods+1):(ignore_periods+micro_stat_periods),:] for this_type in Economy.agents],axis=1) > 1).flatten()
Logc = np.log(cLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLogc = (Logc[1:] - Logc[0:-1]).flatten()
DeltaLogc_trimmed = DeltaLogc[not_newborns]
Loga = np.log(aLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLoga = (Loga[1:] - Loga[0:-1]).flatten()
DeltaLoga_trimmed = DeltaLoga[not_newborns]
Logp = np.log(pLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLogp = (Logp[1:] - Logp[0:-1]).flatten()
DeltaLogp_trimmed = DeltaLogp[not_newborns]
Logy = np.log(yLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
Logy_trimmed = Logy
Logy_trimmed[np.isinf(Logy)] = np.nan
birth_events = np.concatenate([this_type.t_age_hist == 1 for this_type in Economy.agents],axis=1)
vBirth = calcValueAtBirth(cLvlAll_hist[ignore_periods:,:],birth_events[ignore_periods:,:],PlvlAgg_hist[ignore_periods:],Economy.MrkvNow_hist[ignore_periods:],Economy.agents[0].DiscFac,Economy.agents[0].CRRA)
BigTheta_hist = Economy.TranShkAggHist
if hasattr(Economy,'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
if not hasattr(Economy,'Rfree'): # If this is a markov DSGE specification...
# Find the expected interest rate - approximate by assuming growth = expected growth
ExpectedGrowth_hist = Economy.PermGroFacAgg[Mrkv_hist]
ExpectedKLRatio_hist = AnrmAgg_hist/ExpectedGrowth_hist
ExpectedR_hist = Economy.Rfunc(ExpectedKLRatio_hist)
else: # If this is a representative agent specification...
PlvlAgg_hist = Economy.pLvlTrue_hist.flatten()
ClvlAgg_hist = Economy.cLvlNow_hist.flatten()
CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist.flatten()
YnrmAgg_hist = Economy.yNrmTrue_hist.flatten()
YlvlAgg_hist = YnrmAgg_hist*PlvlAgg_hist.flatten()
AlvlAgg_hist = Economy.aLvlNow_hist.flatten()
AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist.flatten()
BigTheta_hist = Economy.TranShkNow_hist.flatten()
if hasattr(Economy,'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
# Process aggregate data into forms used by regressions
LogC = np.log(ClvlAgg_hist[ignore_periods:])
LogA = np.log(AlvlAgg_hist[ignore_periods:])
LogY = np.log(YlvlAgg_hist[ignore_periods:])
DeltaLogC = LogC[1:] - LogC[0:-1]
DeltaLogA = LogA[1:] - LogA[0:-1]
DeltaLogY = LogY[1:] - LogY[0:-1]
A = AnrmAgg_hist[(ignore_periods+1):] # This is a relabeling for the regression code
BigTheta = BigTheta_hist[(ignore_periods+1):]
if hasattr(Economy,'MrkvNow'):
Mrkv = Mrkv_hist[(ignore_periods+1):] # This is a relabeling for the regression code
if not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'): # If this is a markov DSGE specification...
R = ExpectedR_hist[(ignore_periods+1):]
Delta8LogC = (np.log(ClvlAgg_hist[8:]) - np.log(ClvlAgg_hist[:-8]))[(ignore_periods-7):]
Delta8LogY = (np.log(YlvlAgg_hist[8:]) - np.log(YlvlAgg_hist[:-8]))[(ignore_periods-7):]
# Add measurement error to LogC
if meas_err_base is None:
meas_err_base = np.std(DeltaLogC)
sigma_meas_err = meas_err_base*0.375 # This approximately matches the change in IV vs OLS in U.S. empirical coefficients
np.random.seed(10)
Measurement_Error = sigma_meas_err*np.random.normal(0.,1.,LogC.size)
LogC_me = LogC + Measurement_Error
DeltaLogC_me = LogC_me[1:] - LogC_me[0:-1]
# Apply measurement error to long delta LogC
LogC_long = np.log(ClvlAgg_hist)
LogC_long_me = LogC_long + sigma_meas_err*np.random.normal(0.,1.,LogC_long.size)
Delta8LogC_me = (LogC_long_me[8:] - LogC_long_me[:-8])[(ignore_periods-7):]
# Make summary statistics for the results file
csv_output_string = str(np.mean(AnrmAgg_hist[ignore_periods:])) +","+ str(np.mean(CnrmAgg_hist[ignore_periods:]))+ ","+str(np.std(np.log(AnrmAgg_hist[ignore_periods:])))+ ","+str(np.std(DeltaLogC))+ ","+str(np.std(DeltaLogY)) +","+ str(np.std(DeltaLogA))
if hasattr(Economy,'agents') and calc_micro_stats: # This block only runs for heterogeneous agents specifications
csv_output_string += ","+str(np.mean(np.std(Loga,axis=1)))+ ","+str(np.mean(np.std(Logc,axis=1))) + ","+str(np.mean(np.std(Logp,axis=1))) +","+ str(np.mean(np.nanstd(Logy_trimmed,axis=1))) +","+ str(np.std(DeltaLoga_trimmed))+","+ str(np.std(DeltaLogc_trimmed))+ ","+str(np.std(DeltaLogp_trimmed))
# Save the results to a logfile if requested
if filename is not None:
with open(results_dir + filename + 'Results.csv','w') as f:
f.write(csv_output_string)
f.close()
if calc_micro_stats and hasattr(Economy,'agents'):
with open(results_dir + filename + 'BirthValue.csv','w') as f:
my_writer = csv.writer(f, delimiter = ',')
my_writer.writerow(vBirth)
f.close()
if save_data:
DataArray = (np.vstack((np.arange(DeltaLogC.size),DeltaLogC_me,DeltaLogC,DeltaLogY,A,BigTheta,Delta8LogC,Delta8LogY,Delta8LogC_me,Measurement_Error[1:]))).transpose()
VarNames = ['time_period','DeltaLogC_me','DeltaLogC','DeltaLogY','A','BigTheta','Delta8LogC','Delta8LogY','Delta8LogC_me','Measurement_Error']
if hasattr(Economy,'MrkvNow'):
DataArray = np.hstack((DataArray,np.reshape(Mrkv,(Mrkv.size,1))))
VarNames.append('MrkvState')
if hasattr(Economy,'MrkvNow') & ~hasattr(Economy,'Rfree') and hasattr(Economy,'agents'):
DataArray = np.hstack((DataArray,np.reshape(R,(R.size,1))))
VarNames.append('R')
with open(results_dir + filename + 'Data.txt','w') as f:
my_writer = csv.writer(f, delimiter = '\t')
my_writer.writerow(VarNames)
for i in range(DataArray.shape[0]):
my_writer.writerow(DataArray[i,:])
f.close() | [
"def",
"makeStickyEdataFile",
"(",
"Economy",
",",
"ignore_periods",
",",
"description",
"=",
"''",
",",
"filename",
"=",
"None",
",",
"save_data",
"=",
"False",
",",
"calc_micro_stats",
"=",
"True",
",",
"meas_err_base",
"=",
"None",
")",
":",
"# Extract time series data from the economy",
"if",
"hasattr",
"(",
"Economy",
",",
"'agents'",
")",
":",
"# If this is a heterogeneous agent specification...",
"if",
"len",
"(",
"Economy",
".",
"agents",
")",
">",
"1",
":",
"pLvlAll_hist",
"=",
"np",
".",
"concatenate",
"(",
"[",
"this_type",
".",
"pLvlTrue_hist",
"for",
"this_type",
"in",
"Economy",
".",
"agents",
"]",
",",
"axis",
"=",
"1",
")",
"aLvlAll_hist",
"=",
"np",
".",
"concatenate",
"(",
"[",
"this_type",
".",
"aLvlNow_hist",
"for",
"this_type",
"in",
"Economy",
".",
"agents",
"]",
",",
"axis",
"=",
"1",
")",
"cLvlAll_hist",
"=",
"np",
".",
"concatenate",
"(",
"[",
"this_type",
".",
"cLvlNow_hist",
"for",
"this_type",
"in",
"Economy",
".",
"agents",
"]",
",",
"axis",
"=",
"1",
")",
"yLvlAll_hist",
"=",
"np",
".",
"concatenate",
"(",
"[",
"this_type",
".",
"yLvlNow_hist",
"for",
"this_type",
"in",
"Economy",
".",
"agents",
"]",
",",
"axis",
"=",
"1",
")",
"else",
":",
"# Don't duplicate the data unless necessary (with one type, concatenating is useless)",
"pLvlAll_hist",
"=",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"pLvlTrue_hist",
"aLvlAll_hist",
"=",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"aLvlNow_hist",
"cLvlAll_hist",
"=",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"cLvlNow_hist",
"yLvlAll_hist",
"=",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"yLvlNow_hist",
"# PermShkAggHist needs to be shifted one period forward",
"PlvlAgg_hist",
"=",
"np",
".",
"cumprod",
"(",
"np",
".",
"concatenate",
"(",
"(",
"[",
"1.0",
"]",
",",
"Economy",
".",
"PermShkAggHist",
"[",
":",
"-",
"1",
"]",
")",
",",
"axis",
"=",
"0",
")",
")",
"AlvlAgg_hist",
"=",
"np",
".",
"mean",
"(",
"aLvlAll_hist",
",",
"axis",
"=",
"1",
")",
"# Level of aggregate assets",
"AnrmAgg_hist",
"=",
"AlvlAgg_hist",
"/",
"PlvlAgg_hist",
"# Normalized level of aggregate assets",
"ClvlAgg_hist",
"=",
"np",
".",
"mean",
"(",
"cLvlAll_hist",
",",
"axis",
"=",
"1",
")",
"# Level of aggregate consumption",
"CnrmAgg_hist",
"=",
"ClvlAgg_hist",
"/",
"PlvlAgg_hist",
"# Normalized level of aggregate consumption",
"YlvlAgg_hist",
"=",
"np",
".",
"mean",
"(",
"yLvlAll_hist",
",",
"axis",
"=",
"1",
")",
"# Level of aggregate income",
"YnrmAgg_hist",
"=",
"YlvlAgg_hist",
"/",
"PlvlAgg_hist",
"# Normalized level of aggregate income",
"if",
"calc_micro_stats",
":",
"# Only calculate stats if requested. This is a memory hog with many simulated periods",
"micro_stat_periods",
"=",
"int",
"(",
"(",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"T_sim",
"-",
"ignore_periods",
")",
"*",
"0.1",
")",
"not_newborns",
"=",
"(",
"np",
".",
"concatenate",
"(",
"[",
"this_type",
".",
"t_age_hist",
"[",
"(",
"ignore_periods",
"+",
"1",
")",
":",
"(",
"ignore_periods",
"+",
"micro_stat_periods",
")",
",",
":",
"]",
"for",
"this_type",
"in",
"Economy",
".",
"agents",
"]",
",",
"axis",
"=",
"1",
")",
">",
"1",
")",
".",
"flatten",
"(",
")",
"Logc",
"=",
"np",
".",
"log",
"(",
"cLvlAll_hist",
"[",
"ignore_periods",
":",
"(",
"ignore_periods",
"+",
"micro_stat_periods",
")",
",",
":",
"]",
")",
"DeltaLogc",
"=",
"(",
"Logc",
"[",
"1",
":",
"]",
"-",
"Logc",
"[",
"0",
":",
"-",
"1",
"]",
")",
".",
"flatten",
"(",
")",
"DeltaLogc_trimmed",
"=",
"DeltaLogc",
"[",
"not_newborns",
"]",
"Loga",
"=",
"np",
".",
"log",
"(",
"aLvlAll_hist",
"[",
"ignore_periods",
":",
"(",
"ignore_periods",
"+",
"micro_stat_periods",
")",
",",
":",
"]",
")",
"DeltaLoga",
"=",
"(",
"Loga",
"[",
"1",
":",
"]",
"-",
"Loga",
"[",
"0",
":",
"-",
"1",
"]",
")",
".",
"flatten",
"(",
")",
"DeltaLoga_trimmed",
"=",
"DeltaLoga",
"[",
"not_newborns",
"]",
"Logp",
"=",
"np",
".",
"log",
"(",
"pLvlAll_hist",
"[",
"ignore_periods",
":",
"(",
"ignore_periods",
"+",
"micro_stat_periods",
")",
",",
":",
"]",
")",
"DeltaLogp",
"=",
"(",
"Logp",
"[",
"1",
":",
"]",
"-",
"Logp",
"[",
"0",
":",
"-",
"1",
"]",
")",
".",
"flatten",
"(",
")",
"DeltaLogp_trimmed",
"=",
"DeltaLogp",
"[",
"not_newborns",
"]",
"Logy",
"=",
"np",
".",
"log",
"(",
"yLvlAll_hist",
"[",
"ignore_periods",
":",
"(",
"ignore_periods",
"+",
"micro_stat_periods",
")",
",",
":",
"]",
")",
"Logy_trimmed",
"=",
"Logy",
"Logy_trimmed",
"[",
"np",
".",
"isinf",
"(",
"Logy",
")",
"]",
"=",
"np",
".",
"nan",
"birth_events",
"=",
"np",
".",
"concatenate",
"(",
"[",
"this_type",
".",
"t_age_hist",
"==",
"1",
"for",
"this_type",
"in",
"Economy",
".",
"agents",
"]",
",",
"axis",
"=",
"1",
")",
"vBirth",
"=",
"calcValueAtBirth",
"(",
"cLvlAll_hist",
"[",
"ignore_periods",
":",
",",
":",
"]",
",",
"birth_events",
"[",
"ignore_periods",
":",
",",
":",
"]",
",",
"PlvlAgg_hist",
"[",
"ignore_periods",
":",
"]",
",",
"Economy",
".",
"MrkvNow_hist",
"[",
"ignore_periods",
":",
"]",
",",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"DiscFac",
",",
"Economy",
".",
"agents",
"[",
"0",
"]",
".",
"CRRA",
")",
"BigTheta_hist",
"=",
"Economy",
".",
"TranShkAggHist",
"if",
"hasattr",
"(",
"Economy",
",",
"'MrkvNow'",
")",
":",
"Mrkv_hist",
"=",
"Economy",
".",
"MrkvNow_hist",
"if",
"~",
"hasattr",
"(",
"Economy",
",",
"'Rfree'",
")",
":",
"# If this is a markov DSGE specification...",
"# Find the expected interest rate - approximate by assuming growth = expected growth",
"ExpectedGrowth_hist",
"=",
"Economy",
".",
"PermGroFacAgg",
"[",
"Mrkv_hist",
"]",
"ExpectedKLRatio_hist",
"=",
"AnrmAgg_hist",
"/",
"ExpectedGrowth_hist",
"ExpectedR_hist",
"=",
"Economy",
".",
"Rfunc",
"(",
"ExpectedKLRatio_hist",
")",
"else",
":",
"# If this is a representative agent specification...",
"PlvlAgg_hist",
"=",
"Economy",
".",
"pLvlTrue_hist",
".",
"flatten",
"(",
")",
"ClvlAgg_hist",
"=",
"Economy",
".",
"cLvlNow_hist",
".",
"flatten",
"(",
")",
"CnrmAgg_hist",
"=",
"ClvlAgg_hist",
"/",
"PlvlAgg_hist",
".",
"flatten",
"(",
")",
"YnrmAgg_hist",
"=",
"Economy",
".",
"yNrmTrue_hist",
".",
"flatten",
"(",
")",
"YlvlAgg_hist",
"=",
"YnrmAgg_hist",
"*",
"PlvlAgg_hist",
".",
"flatten",
"(",
")",
"AlvlAgg_hist",
"=",
"Economy",
".",
"aLvlNow_hist",
".",
"flatten",
"(",
")",
"AnrmAgg_hist",
"=",
"AlvlAgg_hist",
"/",
"PlvlAgg_hist",
".",
"flatten",
"(",
")",
"BigTheta_hist",
"=",
"Economy",
".",
"TranShkNow_hist",
".",
"flatten",
"(",
")",
"if",
"hasattr",
"(",
"Economy",
",",
"'MrkvNow'",
")",
":",
"Mrkv_hist",
"=",
"Economy",
".",
"MrkvNow_hist",
"# Process aggregate data into forms used by regressions",
"LogC",
"=",
"np",
".",
"log",
"(",
"ClvlAgg_hist",
"[",
"ignore_periods",
":",
"]",
")",
"LogA",
"=",
"np",
".",
"log",
"(",
"AlvlAgg_hist",
"[",
"ignore_periods",
":",
"]",
")",
"LogY",
"=",
"np",
".",
"log",
"(",
"YlvlAgg_hist",
"[",
"ignore_periods",
":",
"]",
")",
"DeltaLogC",
"=",
"LogC",
"[",
"1",
":",
"]",
"-",
"LogC",
"[",
"0",
":",
"-",
"1",
"]",
"DeltaLogA",
"=",
"LogA",
"[",
"1",
":",
"]",
"-",
"LogA",
"[",
"0",
":",
"-",
"1",
"]",
"DeltaLogY",
"=",
"LogY",
"[",
"1",
":",
"]",
"-",
"LogY",
"[",
"0",
":",
"-",
"1",
"]",
"A",
"=",
"AnrmAgg_hist",
"[",
"(",
"ignore_periods",
"+",
"1",
")",
":",
"]",
"# This is a relabeling for the regression code",
"BigTheta",
"=",
"BigTheta_hist",
"[",
"(",
"ignore_periods",
"+",
"1",
")",
":",
"]",
"if",
"hasattr",
"(",
"Economy",
",",
"'MrkvNow'",
")",
":",
"Mrkv",
"=",
"Mrkv_hist",
"[",
"(",
"ignore_periods",
"+",
"1",
")",
":",
"]",
"# This is a relabeling for the regression code",
"if",
"~",
"hasattr",
"(",
"Economy",
",",
"'Rfree'",
")",
"and",
"hasattr",
"(",
"Economy",
",",
"'agents'",
")",
":",
"# If this is a markov DSGE specification...",
"R",
"=",
"ExpectedR_hist",
"[",
"(",
"ignore_periods",
"+",
"1",
")",
":",
"]",
"Delta8LogC",
"=",
"(",
"np",
".",
"log",
"(",
"ClvlAgg_hist",
"[",
"8",
":",
"]",
")",
"-",
"np",
".",
"log",
"(",
"ClvlAgg_hist",
"[",
":",
"-",
"8",
"]",
")",
")",
"[",
"(",
"ignore_periods",
"-",
"7",
")",
":",
"]",
"Delta8LogY",
"=",
"(",
"np",
".",
"log",
"(",
"YlvlAgg_hist",
"[",
"8",
":",
"]",
")",
"-",
"np",
".",
"log",
"(",
"YlvlAgg_hist",
"[",
":",
"-",
"8",
"]",
")",
")",
"[",
"(",
"ignore_periods",
"-",
"7",
")",
":",
"]",
"# Add measurement error to LogC",
"if",
"meas_err_base",
"is",
"None",
":",
"meas_err_base",
"=",
"np",
".",
"std",
"(",
"DeltaLogC",
")",
"sigma_meas_err",
"=",
"meas_err_base",
"*",
"0.375",
"# This approximately matches the change in IV vs OLS in U.S. empirical coefficients",
"np",
".",
"random",
".",
"seed",
"(",
"10",
")",
"Measurement_Error",
"=",
"sigma_meas_err",
"*",
"np",
".",
"random",
".",
"normal",
"(",
"0.",
",",
"1.",
",",
"LogC",
".",
"size",
")",
"LogC_me",
"=",
"LogC",
"+",
"Measurement_Error",
"DeltaLogC_me",
"=",
"LogC_me",
"[",
"1",
":",
"]",
"-",
"LogC_me",
"[",
"0",
":",
"-",
"1",
"]",
"# Apply measurement error to long delta LogC",
"LogC_long",
"=",
"np",
".",
"log",
"(",
"ClvlAgg_hist",
")",
"LogC_long_me",
"=",
"LogC_long",
"+",
"sigma_meas_err",
"*",
"np",
".",
"random",
".",
"normal",
"(",
"0.",
",",
"1.",
",",
"LogC_long",
".",
"size",
")",
"Delta8LogC_me",
"=",
"(",
"LogC_long_me",
"[",
"8",
":",
"]",
"-",
"LogC_long_me",
"[",
":",
"-",
"8",
"]",
")",
"[",
"(",
"ignore_periods",
"-",
"7",
")",
":",
"]",
"# Make summary statistics for the results file",
"csv_output_string",
"=",
"str",
"(",
"np",
".",
"mean",
"(",
"AnrmAgg_hist",
"[",
"ignore_periods",
":",
"]",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"mean",
"(",
"CnrmAgg_hist",
"[",
"ignore_periods",
":",
"]",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"np",
".",
"log",
"(",
"AnrmAgg_hist",
"[",
"ignore_periods",
":",
"]",
")",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"DeltaLogC",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"DeltaLogY",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"DeltaLogA",
")",
")",
"if",
"hasattr",
"(",
"Economy",
",",
"'agents'",
")",
"and",
"calc_micro_stats",
":",
"# This block only runs for heterogeneous agents specifications",
"csv_output_string",
"+=",
"\",\"",
"+",
"str",
"(",
"np",
".",
"mean",
"(",
"np",
".",
"std",
"(",
"Loga",
",",
"axis",
"=",
"1",
")",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"mean",
"(",
"np",
".",
"std",
"(",
"Logc",
",",
"axis",
"=",
"1",
")",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"mean",
"(",
"np",
".",
"std",
"(",
"Logp",
",",
"axis",
"=",
"1",
")",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"mean",
"(",
"np",
".",
"nanstd",
"(",
"Logy_trimmed",
",",
"axis",
"=",
"1",
")",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"DeltaLoga_trimmed",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"DeltaLogc_trimmed",
")",
")",
"+",
"\",\"",
"+",
"str",
"(",
"np",
".",
"std",
"(",
"DeltaLogp_trimmed",
")",
")",
"# Save the results to a logfile if requested",
"if",
"filename",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"results_dir",
"+",
"filename",
"+",
"'Results.csv'",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"csv_output_string",
")",
"f",
".",
"close",
"(",
")",
"if",
"calc_micro_stats",
"and",
"hasattr",
"(",
"Economy",
",",
"'agents'",
")",
":",
"with",
"open",
"(",
"results_dir",
"+",
"filename",
"+",
"'BirthValue.csv'",
",",
"'w'",
")",
"as",
"f",
":",
"my_writer",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"delimiter",
"=",
"','",
")",
"my_writer",
".",
"writerow",
"(",
"vBirth",
")",
"f",
".",
"close",
"(",
")",
"if",
"save_data",
":",
"DataArray",
"=",
"(",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"arange",
"(",
"DeltaLogC",
".",
"size",
")",
",",
"DeltaLogC_me",
",",
"DeltaLogC",
",",
"DeltaLogY",
",",
"A",
",",
"BigTheta",
",",
"Delta8LogC",
",",
"Delta8LogY",
",",
"Delta8LogC_me",
",",
"Measurement_Error",
"[",
"1",
":",
"]",
")",
")",
")",
".",
"transpose",
"(",
")",
"VarNames",
"=",
"[",
"'time_period'",
",",
"'DeltaLogC_me'",
",",
"'DeltaLogC'",
",",
"'DeltaLogY'",
",",
"'A'",
",",
"'BigTheta'",
",",
"'Delta8LogC'",
",",
"'Delta8LogY'",
",",
"'Delta8LogC_me'",
",",
"'Measurement_Error'",
"]",
"if",
"hasattr",
"(",
"Economy",
",",
"'MrkvNow'",
")",
":",
"DataArray",
"=",
"np",
".",
"hstack",
"(",
"(",
"DataArray",
",",
"np",
".",
"reshape",
"(",
"Mrkv",
",",
"(",
"Mrkv",
".",
"size",
",",
"1",
")",
")",
")",
")",
"VarNames",
".",
"append",
"(",
"'MrkvState'",
")",
"if",
"hasattr",
"(",
"Economy",
",",
"'MrkvNow'",
")",
"&",
"~",
"hasattr",
"(",
"Economy",
",",
"'Rfree'",
")",
"and",
"hasattr",
"(",
"Economy",
",",
"'agents'",
")",
":",
"DataArray",
"=",
"np",
".",
"hstack",
"(",
"(",
"DataArray",
",",
"np",
".",
"reshape",
"(",
"R",
",",
"(",
"R",
".",
"size",
",",
"1",
")",
")",
")",
")",
"VarNames",
".",
"append",
"(",
"'R'",
")",
"with",
"open",
"(",
"results_dir",
"+",
"filename",
"+",
"'Data.txt'",
",",
"'w'",
")",
"as",
"f",
":",
"my_writer",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
")",
"my_writer",
".",
"writerow",
"(",
"VarNames",
")",
"for",
"i",
"in",
"range",
"(",
"DataArray",
".",
"shape",
"[",
"0",
"]",
")",
":",
"my_writer",
".",
"writerow",
"(",
"DataArray",
"[",
"i",
",",
":",
"]",
")",
"f",
".",
"close",
"(",
")"
]
| Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None | [
"Makes",
"descriptive",
"statistics",
"and",
"macroeconomic",
"data",
"file",
".",
"Behaves",
"slightly",
"differently",
"for",
"heterogeneous",
"agents",
"vs",
"representative",
"agent",
"models",
"."
]
| python | train |
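The measurement-error construction buried in the middle of the function is worth isolating; a NumPy sketch of just that piece (0.375 is the calibration factor used above):

import numpy as np

def add_measurement_error(LogC, meas_err_base=None, seed=10):
    DeltaLogC = LogC[1:] - LogC[:-1]
    if meas_err_base is None:
        meas_err_base = np.std(DeltaLogC)
    sigma = meas_err_base * 0.375  # matches the IV-vs-OLS calibration above
    np.random.seed(seed)
    LogC_me = LogC + sigma * np.random.normal(0., 1., LogC.size)
    return LogC_me, LogC_me[1:] - LogC_me[:-1]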
stevearc/dql | dql/cli.py | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/cli.py#L346-L352 | def _print_enum_opt(self, option, choices):
""" Helper for enum options """
for key in choices:
if key == self.conf[option]:
print("* %s" % key)
else:
print(" %s" % key) | [
"def",
"_print_enum_opt",
"(",
"self",
",",
"option",
",",
"choices",
")",
":",
"for",
"key",
"in",
"choices",
":",
"if",
"key",
"==",
"self",
".",
"conf",
"[",
"option",
"]",
":",
"print",
"(",
"\"* %s\"",
"%",
"key",
")",
"else",
":",
"print",
"(",
"\" %s\"",
"%",
"key",
")"
]
| Helper for enum options | [
"Helper",
"for",
"enum",
"options"
]
| python | train |
numenta/nupic | examples/tm/tm_overlapping_sequences.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/tm/tm_overlapping_sequences.py#L180-L232 | def buildSequencePool(numSequences = 10,
seqLen = [2,3,4],
numPatterns = 5,
numOnBitsPerPattern = 3,
patternOverlap = 0,
**kwargs
):
""" Create a bunch of sequences of various lengths, all built from
a fixed set of patterns.
Parameters:
-----------------------------------------------------
numSequences: Number of training sequences to generate
seqLen: List of possible sequence lengths
numPatterns: How many possible patterns there are to use within
sequences
numOnBitsPerPattern: Number of ON bits in each TM input pattern
patternOverlap: Max number of bits of overlap between any 2 patterns
retval: (numCols, trainingSequences)
numCols - width of the patterns
trainingSequences - a list of training sequences
"""
# Create the table of patterns
patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)
# Total number of columns required
numCols = len(patterns[0])
# -----------------------------------------------------------------------
# Create the training sequences
trainingSequences = []
for _ in xrange(numSequences):
# Build it up from patterns
sequence = []
length = random.choice(seqLen)
for _ in xrange(length):
patIdx = random.choice(xrange(numPatterns))
sequence.append(patterns[patIdx])
# Put it in
trainingSequences.append(sequence)
if VERBOSITY >= 3:
print "\nTraining sequences"
printAllTrainingSequences(trainingSequences)
return (numCols, trainingSequences) | [
"def",
"buildSequencePool",
"(",
"numSequences",
"=",
"10",
",",
"seqLen",
"=",
"[",
"2",
",",
"3",
",",
"4",
"]",
",",
"numPatterns",
"=",
"5",
",",
"numOnBitsPerPattern",
"=",
"3",
",",
"patternOverlap",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"# Create the table of patterns",
"patterns",
"=",
"getSimplePatterns",
"(",
"numOnBitsPerPattern",
",",
"numPatterns",
",",
"patternOverlap",
")",
"# Total number of columns required",
"numCols",
"=",
"len",
"(",
"patterns",
"[",
"0",
"]",
")",
"# -----------------------------------------------------------------------",
"# Create the training sequences",
"trainingSequences",
"=",
"[",
"]",
"for",
"_",
"in",
"xrange",
"(",
"numSequences",
")",
":",
"# Build it up from patterns",
"sequence",
"=",
"[",
"]",
"length",
"=",
"random",
".",
"choice",
"(",
"seqLen",
")",
"for",
"_",
"in",
"xrange",
"(",
"length",
")",
":",
"patIdx",
"=",
"random",
".",
"choice",
"(",
"xrange",
"(",
"numPatterns",
")",
")",
"sequence",
".",
"append",
"(",
"patterns",
"[",
"patIdx",
"]",
")",
"# Put it in",
"trainingSequences",
".",
"append",
"(",
"sequence",
")",
"if",
"VERBOSITY",
">=",
"3",
":",
"print",
"\"\\nTraining sequences\"",
"printAllTrainingSequences",
"(",
"trainingSequences",
")",
"return",
"(",
"numCols",
",",
"trainingSequences",
")"
]
| Create a bunch of sequences of various lengths, all built from
a fixed set of patterns.
Parameters:
-----------------------------------------------------
numSequences: Number of training sequences to generate
seqLen: List of possible sequence lengths
numPatterns: How many possible patterns there are to use within
sequences
numOnBitsPerPattern: Number of ON bits in each TM input pattern
patternOverlap: Max number of bits of overlap between any 2 patterns
retval: (numCols, trainingSequences)
numCols - width of the patterns
trainingSequences - a list of training sequences | [
"Create",
"a",
"bunch",
"of",
"sequences",
"of",
"various",
"lengths",
"all",
"built",
"from",
"a",
"fixed",
"set",
"of",
"patterns",
"."
]
| python | valid |
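A call sketch, assuming getSimplePatterns and the module globals are in scope as in the example script (Python 2, matching the repo):

# 20 sequences of 3-5 patterns, drawn from 8 patterns of 4 ON bits each,
# with at most 1 bit of pairwise overlap.
numCols, sequences = buildSequencePool(numSequences=20,
                                       seqLen=[3, 4, 5],
                                       numPatterns=8,
                                       numOnBitsPerPattern=4,
                                       patternOverlap=1)
print numCols, len(sequences)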
ChrisBeaumont/soupy | soupy.py | https://github.com/ChrisBeaumont/soupy/blob/795f2f61f711f574d5218fc8a3375d02bda1104f/soupy.py#L1057-L1062 | def find_parents(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`parents`
"""
op = operator.methodcaller('find_parents', *args, **kwargs)
return self._wrap_multi(op) | [
"def",
"find_parents",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"op",
"=",
"operator",
".",
"methodcaller",
"(",
"'find_parents'",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_wrap_multi",
"(",
"op",
")"
]
| Like :meth:`find_all`, but searches through :attr:`parents` | [
"Like",
":",
"meth",
":",
"find_all",
"but",
"searches",
"through",
":",
"attr",
":",
"parents"
]
| python | test |
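A navigation sketch; Soupy wraps BeautifulSoup so failed lookups become Null values instead of exceptions (the val() unwrapping shown is an assumption about the wrapper API):

from soupy import Soupy

html = "<ul class='menu'><li><a href='/x'>x</a></li></ul>"
parents = Soupy(html).find('a').find_parents('ul')
print(len(parents.val()))   # 1 -- unwrap the collection to a plain list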
gem/oq-engine | openquake/baselib/node.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/node.py#L350-L352 | def parse(source, remove_comments=True, **kw):
"""Thin wrapper around ElementTree.parse"""
return ElementTree.parse(source, SourceLineParser(), **kw) | [
"def",
"parse",
"(",
"source",
",",
"remove_comments",
"=",
"True",
",",
"*",
"*",
"kw",
")",
":",
"return",
"ElementTree",
".",
"parse",
"(",
"source",
",",
"SourceLineParser",
"(",
")",
",",
"*",
"*",
"kw",
")"
]
| Thin wrapper around ElementTree.parse | [
"Thin",
"wrapper",
"around",
"ElementTree",
".",
"parse"
]
| python | train |
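A usage sketch; SourceLineParser is assumed to stamp parsed elements with their source line (the attribute name below is an assumption, hence the defensive getattr):

from openquake.baselib import node

tree = node.parse("job.xml")
root = tree.getroot()
print(root.tag, getattr(root, 'lineno', None))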
ga4gh/ga4gh-schemas | python/ga4gh/schemas/protocol.py | https://github.com/ga4gh/ga4gh-schemas/blob/30ec8db9b8dfdccf03274025f27920cb41d6d56e/python/ga4gh/schemas/protocol.py#L49-L72 | def setAttribute(values, value):
"""
Takes the values of an attribute value list and attempts to append
attributes of the proper type, inferred from their Python type.
"""
if isinstance(value, bool):
values.add().bool_value = value
elif isinstance(value, float):
values.add().double_value = value
elif isinstance(value, long):
values.add().int64_value = value
elif isinstance(value, str):
values.add().string_value = value
elif isinstance(value, int):
values.add().int32_value = value
elif isinstance(value, (list, tuple, array.array)):
for v in value:
setAttribute(values, v)
elif isinstance(value, dict):
for key in value:
setAttribute(
values.add().attributes.attr[key].values, value[key])
else:
values.add().string_value = str(value) | [
"def",
"setAttribute",
"(",
"values",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"values",
".",
"add",
"(",
")",
".",
"int32_value",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"values",
".",
"add",
"(",
")",
".",
"double_value",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"long",
")",
":",
"values",
".",
"add",
"(",
")",
".",
"int64_value",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"values",
".",
"add",
"(",
")",
".",
"string_value",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"values",
".",
"add",
"(",
")",
".",
"bool_value",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
",",
"array",
".",
"array",
")",
")",
":",
"for",
"v",
"in",
"value",
":",
"setAttribute",
"(",
"values",
",",
"v",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"for",
"key",
"in",
"value",
":",
"setAttribute",
"(",
"values",
".",
"add",
"(",
")",
".",
"attributes",
".",
"attr",
"[",
"key",
"]",
".",
"values",
",",
"value",
"[",
"key",
"]",
")",
"else",
":",
"values",
".",
"add",
"(",
")",
".",
"string_value",
"=",
"str",
"(",
"value",
")"
]
| Takes the values of an attribute value list and attempts to append
attributes of the proper type, inferred from their Python type. | [
"Takes",
"the",
"values",
"of",
"an",
"attribute",
"value",
"list",
"and",
"attempts",
"to",
"append",
"attributes",
"of",
"the",
"proper",
"type",
"inferred",
"from",
"their",
"Python",
"type",
"."
]
| python | train |
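The dispatch targets protobuf attribute-value lists; the duck-typed stand-in below exercises the same type routing without the GA4GH messages (purely illustrative):

class FakeValue(object):
    pass

class FakeValues(list):
    def add(self):
        v = FakeValue()
        self.append(v)
        return v

values = FakeValues()
setAttribute(values, [1, 2.5, "x", True])
# One entry per element, each holding the matching typed field:
# int32_value=1, double_value=2.5, string_value='x', bool_value=True
print([vars(v) for v in values])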
mdavidsaver/p4p | src/p4p/nt/__init__.py | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L155-L169 | def buildType(columns=[], extra=[]):
"""Build a table
:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
"""
return Type(id="epics:nt/NTTable:1.0",
spec=[
('labels', 'as'),
('value', ('S', None, columns)),
('descriptor', 's'),
('alarm', alarm),
('timeStamp', timeStamp),
] + extra) | [
"def",
"buildType",
"(",
"columns",
"=",
"[",
"]",
",",
"extra",
"=",
"[",
"]",
")",
":",
"return",
"Type",
"(",
"id",
"=",
"\"epics:nt/NTTable:1.0\"",
",",
"spec",
"=",
"[",
"(",
"'labels'",
",",
"'as'",
")",
",",
"(",
"'value'",
",",
"(",
"'S'",
",",
"None",
",",
"columns",
")",
")",
",",
"(",
"'descriptor'",
",",
"'s'",
")",
",",
"(",
"'alarm'",
",",
"alarm",
")",
",",
"(",
"'timeStamp'",
",",
"timeStamp",
")",
",",
"]",
"+",
"extra",
")"
]
| Build a table
:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type` | [
"Build",
"a",
"table"
]
| python | train |
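A construction sketch (printing the Type is just to inspect the resulting structure):

from p4p.nt import NTTable

# Two columns -- string IDs and double scores -- plus the standard
# labels/descriptor/alarm/timeStamp fields added by buildType.
T = NTTable.buildType(columns=[('id', 's'), ('score', 'd')])
print(T)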
RiotGames/cloud-inquisitor | backend/cloud_inquisitor/plugins/views/users.py | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/views/users.py#L59-L120 | def post(self):
"""Create a new user"""
self.reqparse.add_argument('username', type=str, required=True)
self.reqparse.add_argument('authSystem', type=str, required=True)
self.reqparse.add_argument('password', type=str, required=False, default=None)
self.reqparse.add_argument('roles', type=str, action='append', default=[])
args = self.reqparse.parse_args()
auditlog(event='user.create', actor=session['user'].username, data=args)
user = db.User.find_one(
User.username == args['username'],
User.auth_system == args['authSystem']
)
roles = []
if user:
return self.make_response('User already exists', HTTP.BAD_REQUEST)
if args['authSystem'] not in current_app.available_auth_systems:
return self.make_response(
'The {} auth system does not allow local edits'.format(args['authSystem']),
HTTP.BAD_REQUEST
)
if current_app.available_auth_systems[args['authSystem']].readonly:
return self.make_response(
'You cannot create users for the {} auth system as it is handled externally'.format(args['authSystem']),
HTTP.BAD_REQUEST
)
for roleName in args['roles']:
role = db.Role.find_one(Role.name == roleName)
if not role:
return self.make_response('No such role {}'.format(roleName), HTTP.BAD_REQUEST)
if roleName == ROLE_ADMIN and ROLE_ADMIN not in session['user'].roles:
self.log.error('User {} tried to grant admin privileges to {}'.format(
session['user'].username,
args['username']
))
return self.make_response('You do not have permission to grant admin privileges', HTTP.FORBIDDEN)
roles.append(role)
authSys = current_app.available_auth_systems[args['authSystem']]
password = args['password'] or generate_password()
user = User()
user.username = args['username']
user.password = hash_password(password)
user.auth_system = authSys.name
db.session.add(user)
db.session.commit()
db.session.refresh(user)
User.add_role(user, roles)
return self.make_response({
'message': 'User {}/{} has been created'.format(user.auth_system, user.username),
'user': user,
'password': password if not args['password'] else None
}) | [
"def",
"post",
"(",
"self",
")",
":",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'username'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'authSystem'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'password'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"default",
"=",
"None",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'roles'",
",",
"type",
"=",
"str",
",",
"action",
"=",
"'append'",
",",
"default",
"=",
"[",
"]",
")",
"args",
"=",
"self",
".",
"reqparse",
".",
"parse_args",
"(",
")",
"auditlog",
"(",
"event",
"=",
"'user.create'",
",",
"actor",
"=",
"session",
"[",
"'user'",
"]",
".",
"username",
",",
"data",
"=",
"args",
")",
"user",
"=",
"db",
".",
"User",
".",
"find_one",
"(",
"User",
".",
"username",
"==",
"args",
"[",
"'username'",
"]",
",",
"User",
".",
"auth_system",
"==",
"args",
"[",
"'authSystem'",
"]",
")",
"roles",
"=",
"[",
"]",
"if",
"user",
":",
"return",
"self",
".",
"make_response",
"(",
"'User already exists'",
",",
"HTTP",
".",
"BAD_REQUEST",
")",
"if",
"args",
"[",
"'authSystem'",
"]",
"not",
"in",
"current_app",
".",
"available_auth_systems",
":",
"return",
"self",
".",
"make_response",
"(",
"'The {} auth system does not allow local edits'",
".",
"format",
"(",
"args",
"[",
"'authSystem'",
"]",
")",
",",
"HTTP",
".",
"BAD_REQUEST",
")",
"if",
"current_app",
".",
"available_auth_systems",
"[",
"args",
"[",
"'authSystem'",
"]",
"]",
".",
"readonly",
":",
"return",
"self",
".",
"make_response",
"(",
"'You cannot create users for the {} auth system as it is handled externally'",
".",
"format",
"(",
"args",
"[",
"'authSystem'",
"]",
")",
",",
"HTTP",
".",
"BAD_REQUEST",
")",
"for",
"roleName",
"in",
"args",
"[",
"'roles'",
"]",
":",
"role",
"=",
"db",
".",
"Role",
".",
"find_one",
"(",
"Role",
".",
"name",
"==",
"roleName",
")",
"if",
"not",
"role",
":",
"return",
"self",
".",
"make_response",
"(",
"'No such role {}'",
".",
"format",
"(",
"roleName",
")",
",",
"HTTP",
".",
"BAD_REQUEST",
")",
"if",
"roleName",
"==",
"ROLE_ADMIN",
"and",
"ROLE_ADMIN",
"not",
"in",
"session",
"[",
"'user'",
"]",
".",
"roles",
":",
"self",
".",
"log",
".",
"error",
"(",
"'User {} tried to grant admin privileges to {}'",
".",
"format",
"(",
"session",
"[",
"'user'",
"]",
".",
"username",
",",
"args",
"[",
"'username'",
"]",
")",
")",
"return",
"self",
".",
"make_response",
"(",
"'You do not have permission to grant admin privileges'",
",",
"HTTP",
".",
"FORBIDDEN",
")",
"roles",
".",
"append",
"(",
"role",
")",
"authSys",
"=",
"current_app",
".",
"available_auth_systems",
"[",
"args",
"[",
"'authSystem'",
"]",
"]",
"password",
"=",
"args",
"[",
"'password'",
"]",
"or",
"generate_password",
"(",
")",
"user",
"=",
"User",
"(",
")",
"user",
".",
"username",
"=",
"args",
"[",
"'username'",
"]",
"user",
".",
"password",
"=",
"hash_password",
"(",
"password",
")",
"user",
".",
"auth_system",
"=",
"authSys",
".",
"name",
"db",
".",
"session",
".",
"add",
"(",
"user",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"db",
".",
"session",
".",
"refresh",
"(",
"user",
")",
"User",
".",
"add_role",
"(",
"user",
",",
"roles",
")",
"return",
"self",
".",
"make_response",
"(",
"{",
"'message'",
":",
"'User {}/{} has been created'",
".",
"format",
"(",
"user",
".",
"auth_system",
",",
"user",
".",
"username",
")",
",",
"'user'",
":",
"user",
",",
"'password'",
":",
"password",
"if",
"not",
"args",
"[",
"'password'",
"]",
"else",
"None",
"}",
")"
]
| Create a new user | [
"Create",
"a",
"new",
"user"
]
| python | train |
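A hedged client-side sketch of exercising the user-creation handler above; the route, host, and authentication are assumptions, since the record only shows the resource's post() body and its reqparse arguments.

import requests

# Hypothetical mount point for the resource above; the real route is not shown in the record.
API_URL = 'https://security.example.com/api/1.0/users'

payload = {
    'username': 'jdoe',
    'authSystem': 'builtin',  # must name a writable entry in current_app.available_auth_systems
    'roles': ['User'],        # requesting the admin role is rejected unless the caller is an admin
    # 'password' omitted: the handler generates one and returns it once in the response
}

# Session cookie / auth headers omitted for brevity.
response = requests.post(API_URL, json=payload)
print(response.status_code, response.json())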
bububa/pyTOP | pyTOP/item.py | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/item.py#L624-L643 | def add(self, cid, price, image, name, desc, major, market_time, property_alias, session, **kwargs):
'''taobao.product.add Upload a product, excluding non-primary product images and property images.
Obtain the category ID, which must be a leaf category ID; call taobao.itemcats.get.v2 to get it. Pass key properties with the structure pid:vid;pid:vid. Call taobao.itemprops.get.v2 to get pid and taobao.itempropvalues.get to get vid; for user-defined properties, use customer_props.'''
request = TOPRequest('taobao.product.add')
request['cid'] = cid
request['price'] = price
request['image'] = image
request['name'] = name
request['desc'] = desc
request['major'] = major
request['market_time'] = market_time
request['property_alias'] = property_alias
for k, v in kwargs.iteritems():
if k not in ('outer_id', 'props', 'binds', 'sale_props', 'customer_props', 'order_by', 'ww_status', 'post_free', 'location_state', 'location_city', 'is_3D', 'start_score', 'end_score', 'start_volume', 'end_volume', 'one_station', 'is_cod', 'is_mall', 'is_prepay', 'genuine_security', 'promoted_service', 'stuff_status', 'start_price', 'end_price', 'page_no', 'page_size', 'auction_flag', 'auto_post', 'has_discount', 'is_xinpin') and v==None: continue
if k == 'location_state': k = 'location.state'
if k == 'location_city': k = 'location.city'
request[k] = v
self.create(self.execute(request, session)['product'])
return self | [
"def",
"add",
"(",
"self",
",",
"cid",
",",
"price",
",",
"image",
",",
"name",
",",
"desc",
",",
"major",
",",
"market_time",
",",
"property_alias",
",",
"session",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
"=",
"TOPRequest",
"(",
"'taobao.product.add'",
")",
"request",
"[",
"'cid'",
"]",
"=",
"cid",
"request",
"[",
"'price'",
"]",
"=",
"price",
"request",
"[",
"'image'",
"]",
"=",
"image",
"request",
"[",
"'name'",
"]",
"=",
"name",
"request",
"[",
"'desc'",
"]",
"=",
"desc",
"request",
"[",
"'major'",
"]",
"=",
"major",
"request",
"[",
"'market_time'",
"]",
"=",
"market_time",
"request",
"[",
"'property_alias'",
"]",
"=",
"property_alias",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"(",
"'outer_id'",
",",
"'props'",
",",
"'binds'",
",",
"'sale_props'",
",",
"'customer_props'",
",",
"'order_by'",
",",
"'ww_status'",
",",
"'post_free'",
",",
"'location_state'",
",",
"'location_city'",
",",
"'is_3D'",
",",
"'start_score'",
",",
"'end_score'",
",",
"'start_volume'",
",",
"'end_volume'",
",",
"'one_station'",
",",
"'is_cod'",
",",
"'is_mall'",
",",
"'is_prepay'",
",",
"'genuine_security'",
",",
"'promoted_service'",
",",
"'stuff_status'",
",",
"'start_price'",
",",
"'end_price'",
",",
"'page_no'",
",",
"'page_size'",
",",
"'auction_flag'",
",",
"'auto_post'",
",",
"'has_discount'",
",",
"'is_xinpin'",
")",
"and",
"v",
"==",
"None",
":",
"continue",
"if",
"k",
"==",
"'location_state'",
":",
"k",
"=",
"'location.state'",
"if",
"k",
"==",
"'location_city'",
":",
"k",
"=",
"'location.city'",
"request",
"[",
"k",
"]",
"=",
"v",
"self",
".",
"create",
"(",
"self",
".",
"execute",
"(",
"request",
",",
"session",
")",
"[",
"'product'",
"]",
")",
"return",
"self"
]
| taobao.product.add Upload a product, excluding non-primary product images and property images.
Obtain the category ID, which must be a leaf category ID; call taobao.itemcats.get.v2 to get it. Pass key properties with the structure pid:vid;pid:vid. Call taobao.itemprops.get.v2 to get pid and taobao.itempropvalues.get to get vid; for user-defined properties, use customer_props. | [
"taobao",
".",
"product",
".",
"add",
"上传一个产品,不包括产品非主图和属性图片",
"获取类目ID,必需是叶子类目ID;调用taobao",
".",
"itemcats",
".",
"get",
".",
"v2获取",
"传入关键属性",
"结构",
":",
"pid",
":",
"vid",
";",
"pid",
":",
"vid",
".",
"调用taobao",
".",
"itemprops",
".",
"get",
".",
"v2获取pid",
"调用taobao",
".",
"itempropvalues",
".",
"get获取vid",
";",
"如果碰到用户自定义属性",
"请用customer_props",
"."
]
| python | train |
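A hedged usage sketch for the add() wrapper above. The Product class name and the session value are assumptions; only parameters visible in the signature are used, and note the original code is Python 2 (kwargs.iteritems()).

from pyTOP.item import Product  # assumed class name; the method lives in pyTOP/item.py

product = Product()
product.add(
    cid=50010850,                    # leaf category ID (see taobao.itemcats.get.v2)
    price='99.00',
    image=open('main.jpg', 'rb'),
    name='Example product',
    desc='Product description',
    major=True,
    market_time='2012-01-01 00:00:00',
    property_alias='',
    session='TOP_SESSION_KEY',       # authorized TOP session key, an assumption here
    outer_id='SKU-001',              # extra keyword arguments are passed through
)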
Tanganelli/CoAPthon3 | coapthon/server/coap.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/server/coap.py#L390-L409 | def notify(self, resource):
"""
Notifies the observers of a certain resource.
:param resource: the resource
"""
observers = self._observeLayer.notify(resource)
logger.debug("Notify")
for transaction in observers:
with transaction:
transaction.response = None
transaction = self._requestLayer.receive_request(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.response)
self.send_datagram(transaction.response) | [
"def",
"notify",
"(",
"self",
",",
"resource",
")",
":",
"observers",
"=",
"self",
".",
"_observeLayer",
".",
"notify",
"(",
"resource",
")",
"logger",
".",
"debug",
"(",
"\"Notify\"",
")",
"for",
"transaction",
"in",
"observers",
":",
"with",
"transaction",
":",
"transaction",
".",
"response",
"=",
"None",
"transaction",
"=",
"self",
".",
"_requestLayer",
".",
"receive_request",
"(",
"transaction",
")",
"transaction",
"=",
"self",
".",
"_observeLayer",
".",
"send_response",
"(",
"transaction",
")",
"transaction",
"=",
"self",
".",
"_blockLayer",
".",
"send_response",
"(",
"transaction",
")",
"transaction",
"=",
"self",
".",
"_messageLayer",
".",
"send_response",
"(",
"transaction",
")",
"if",
"transaction",
".",
"response",
"is",
"not",
"None",
":",
"if",
"transaction",
".",
"response",
".",
"type",
"==",
"defines",
".",
"Types",
"[",
"\"CON\"",
"]",
":",
"self",
".",
"_start_retransmission",
"(",
"transaction",
",",
"transaction",
".",
"response",
")",
"self",
".",
"send_datagram",
"(",
"transaction",
".",
"response",
")"
]
| Notifies the observers of a certain resource.
:param resource: the resource | [
"Notifies",
"the",
"observers",
"of",
"a",
"certain",
"resource",
"."
]
| python | train |
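A minimal sketch of driving notify() above, assuming the observable Resource base class from the same package; after application code changes a registered resource, notify() pushes the new representation to all observers.

from coapthon.server.coap import CoAP
from coapthon.resources.resource import Resource

class SensorResource(Resource):
    def __init__(self, name='SensorResource'):
        super(SensorResource, self).__init__(name, observable=True)
        self.payload = '0'

server = CoAP(('127.0.0.1', 5683))
resource = SensorResource()
server.add_resource('sensor/', resource)

# Later, when the application updates the resource:
resource.payload = '42'
server.notify(resource)  # CON notifications are retransmitted until acknowledged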
Pythonity/python-ivona-api | ivona_api/ivona_api.py | https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L145-L184 | def text_to_speech(self, text, file, voice_name=None, language=None):
"""
Saves the given text as a synthesized audio file, via the 'CreateSpeech' endpoint
Docs:
http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech
:param text: text to synthesize
:type text: str
:param file: file that will be used to save the audio
:type file: file
:param voice_name: voice name
:type voice_name: str
:param language: voice language
:type language: str
"""
endpoint = 'CreateSpeech'
data = {
'Input': {
'Data': text,
},
'OutputFormat': {
'Codec': self.codec.upper(),
},
'Parameters': {
'Rate': self.rate,
'Volume': self.volume,
'SentenceBreak': self.sentence_break,
'ParagraphBreak': self.paragraph_break,
},
'Voice': {
'Name': voice_name or self.voice_name,
'Language': language or self.language,
},
}
response = self._get_response('post', endpoint, data)
file.write(response.content) | [
"def",
"text_to_speech",
"(",
"self",
",",
"text",
",",
"file",
",",
"voice_name",
"=",
"None",
",",
"language",
"=",
"None",
")",
":",
"endpoint",
"=",
"'CreateSpeech'",
"data",
"=",
"{",
"'Input'",
":",
"{",
"'Data'",
":",
"text",
",",
"}",
",",
"'OutputFormat'",
":",
"{",
"'Codec'",
":",
"self",
".",
"codec",
".",
"upper",
"(",
")",
",",
"}",
",",
"'Parameters'",
":",
"{",
"'Rate'",
":",
"self",
".",
"rate",
",",
"'Volume'",
":",
"self",
".",
"volume",
",",
"'SentenceBreak'",
":",
"self",
".",
"sentence_break",
",",
"'ParagraphBreak'",
":",
"self",
".",
"paragraph_break",
",",
"}",
",",
"'Voice'",
":",
"{",
"'Name'",
":",
"voice_name",
"or",
"self",
".",
"voice_name",
",",
"'Language'",
":",
"language",
"or",
"self",
".",
"language",
",",
"}",
",",
"}",
"response",
"=",
"self",
".",
"_get_response",
"(",
"'post'",
",",
"endpoint",
",",
"data",
")",
"file",
".",
"write",
"(",
"response",
".",
"content",
")"
]
| Saves the given text as a synthesized audio file, via the 'CreateSpeech' endpoint
Docs:
http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech
:param text: text to synthesize
:type text: str
:param file: file that will be used to save the audio
:type file: file
:param voice_name: voice name
:type voice_name: str
:param language: voice language
:type language: str | [
"Saves",
"given",
"text",
"synthesized",
"audio",
"file",
"via",
"CreateSpeech",
"endpoint"
]
| python | train |
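A usage sketch for text_to_speech(); the IvonaAPI class name and its credential constructor follow the package README and should be treated as assumptions here.

from ivona_api import IvonaAPI  # assumed public import for the class defining text_to_speech()

ivona = IvonaAPI('IVONA_ACCESS_KEY', 'IVONA_SECRET_KEY')

# The synthesized audio is written to the passed file object.
with open('hello.mp3', 'wb') as f:
    ivona.text_to_speech('Hello world!', f, voice_name='Salli', language='en-US')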
systemd/python-systemd | systemd/journal.py | https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L365-L376 | def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid) | [
"def",
"this_boot",
"(",
"self",
",",
"bootid",
"=",
"None",
")",
":",
"if",
"bootid",
"is",
"None",
":",
"bootid",
"=",
"_id128",
".",
"get_boot",
"(",
")",
".",
"hex",
"else",
":",
"bootid",
"=",
"getattr",
"(",
"bootid",
",",
"'hex'",
",",
"bootid",
")",
"self",
".",
"add_match",
"(",
"_BOOT_ID",
"=",
"bootid",
")"
]
| Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid'). | [
"Add",
"match",
"for",
"_BOOT_ID",
"for",
"current",
"boot",
"or",
"the",
"specified",
"boot",
"ID",
"."
]
| python | train |
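this_boot() is one of the Reader's match helpers; a short sketch of reading only the current boot's journal entries:

from systemd import journal

reader = journal.Reader()
reader.this_boot()        # same as reader.add_match(_BOOT_ID=<hex id of the current boot>)
reader.log_level(journal.LOG_INFO)

for entry in reader:
    print(entry.get('MESSAGE', ''))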
ribozz/sphinx-argparse | sphinxarg/markdown.py | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L106-L113 | def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o | [
"def",
"strong",
"(",
"node",
")",
":",
"o",
"=",
"nodes",
".",
"strong",
"(",
")",
"for",
"n",
"in",
"MarkDown",
"(",
"node",
")",
":",
"o",
"+=",
"n",
"return",
"o"
]
| A bolded section | [
"A",
"bolded",
"section"
]
| python | train |
hatemile/hatemile-for-python | hatemile/implementation/css.py | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L423-L443 | def _get_regular_expression_of_symbols(self):
"""
Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str
"""
regular_expression = None
for symbol in self.symbols:
formated_symbol = self._get_formated_symbol(symbol['symbol'])
if regular_expression is None:
regular_expression = '(' + formated_symbol + ')'
else:
regular_expression = (
regular_expression +
'|(' +
formated_symbol +
')'
)
return regular_expression | [
"def",
"_get_regular_expression_of_symbols",
"(",
"self",
")",
":",
"regular_expression",
"=",
"None",
"for",
"symbol",
"in",
"self",
".",
"symbols",
":",
"formated_symbol",
"=",
"self",
".",
"_get_formated_symbol",
"(",
"symbol",
"[",
"'symbol'",
"]",
")",
"if",
"regular_expression",
"is",
"None",
":",
"regular_expression",
"=",
"'('",
"+",
"formated_symbol",
"+",
"')'",
"else",
":",
"regular_expression",
"=",
"(",
"regular_expression",
"+",
"'|('",
"+",
"formated_symbol",
"+",
"')'",
")",
"return",
"regular_expression"
]
| Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str | [
"Returns",
"the",
"regular",
"expression",
"to",
"search",
"all",
"symbols",
"."
]
| python | train |
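The method above folds each formatted symbol into a growing alternation; a standalone sketch of the same accumulation (the symbol list is illustrative, and re.escape stands in for _get_formated_symbol()):

import re

symbols = [{'symbol': '+'}, {'symbol': '-'}, {'symbol': '&'}]

regular_expression = None
for symbol in symbols:
    formated_symbol = re.escape(symbol['symbol'])
    if regular_expression is None:
        regular_expression = '(' + formated_symbol + ')'
    else:
        regular_expression = regular_expression + '|(' + formated_symbol + ')'

print(regular_expression)                       # one group per symbol, joined with '|'
print(re.findall(regular_expression, 'a+b-c'))  # tuples with the matched group filled in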
digidotcom/python-devicecloud | devicecloud/file_system_service.py | https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/file_system_service.py#L542-L573 | def send_command_block(self, target, command_block):
"""Send an arbitrary file system command block
The primary use for this method is to send multiple file system commands with a single
web service request. This can help to avoid throttling.
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param command_block: The block of commands to execute on the target
:type command_block: :class:`~FileSystemServiceCommandBlock`
:return: The response will be a dictionary where the keys are device_ids and the values are
the parsed responses of each command sent in the order listed in the command response for
that device. In practice it seems to be the same order as the commands were sent in, however,
Device Cloud documentation does not explicitly state anywhere that is the case so I cannot
guarantee it. This does mean that if you send different types of commands the response list
will be different types. Please see the commands parse_response functions for what those types
will be. (:meth:`LsCommand.parse_response`, :class:`GetCommand.parse_response`,
:class:`PutCommand.parse_response`, :class:`DeleteCommand.parse_response`)
"""
root = _parse_command_response(
self._sci_api.send_sci("file_system", target, command_block.get_command_string()))
out_dict = {}
for device in root.findall('./file_system/device'):
device_id = device.get('id')
results = []
for command in device.find('./commands'):
for command_class in FILE_SYSTEM_COMMANDS:
if command_class.command_name == command.tag.lower():
results.append(command_class.parse_response(command, fssapi=self, device_id=device_id))
out_dict[device_id] = results
return out_dict | [
"def",
"send_command_block",
"(",
"self",
",",
"target",
",",
"command_block",
")",
":",
"root",
"=",
"_parse_command_response",
"(",
"self",
".",
"_sci_api",
".",
"send_sci",
"(",
"\"file_system\"",
",",
"target",
",",
"command_block",
".",
"get_command_string",
"(",
")",
")",
")",
"out_dict",
"=",
"{",
"}",
"for",
"device",
"in",
"root",
".",
"findall",
"(",
"'./file_system/device'",
")",
":",
"device_id",
"=",
"device",
".",
"get",
"(",
"'id'",
")",
"results",
"=",
"[",
"]",
"for",
"command",
"in",
"device",
".",
"find",
"(",
"'./commands'",
")",
":",
"for",
"command_class",
"in",
"FILE_SYSTEM_COMMANDS",
":",
"if",
"command_class",
".",
"command_name",
"==",
"command",
".",
"tag",
".",
"lower",
"(",
")",
":",
"results",
".",
"append",
"(",
"command_class",
".",
"parse_response",
"(",
"command",
",",
"fssapi",
"=",
"self",
",",
"device_id",
"=",
"device_id",
")",
")",
"out_dict",
"[",
"device_id",
"]",
"=",
"results",
"return",
"out_dict"
]
| Send an arbitrary file system command block
The primary use for this method is to send multiple file system commands with a single
web service request. This can help to avoid throttling.
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param command_block: The block of commands to execute on the target
:type command_block: :class:`~FileSystemServiceCommandBlock`
:return: The response will be a dictionary where the keys are device_ids and the values are
the parsed responses of each command sent in the order listed in the command response for
that device. In practice it seems to be the same order as the commands were sent in, however,
Device Cloud documentation does not explicitly state anywhere that is the case so I cannot
guarantee it. This does mean that if you send different types of commands the response list
will be different types. Please see the commands parse_response functions for what those types
will be. (:meth:`LsCommand.parse_response`, :class:`GetCommand.parse_response`,
:class:`PutCommand.parse_response`, :class:`DeleteCommand.parse_response`) | [
"Send",
"an",
"arbitrary",
"file",
"system",
"command",
"block"
]
| python | train |
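A hedged sketch of batching file-system commands through send_command_block(). Only send_command_block() itself appears in the record; the DeviceCloud constructor, the FSS accessor, and the block/command construction calls are assumptions.

from devicecloud import DeviceCloud
from devicecloud.file_system_service import FileSystemServiceCommandBlock, LsCommand
from devicecloud.sci import DeviceTarget

dc = DeviceCloud('user', 'password')
fssapi = dc.get_fss_api()                        # accessor name is an assumption

target = DeviceTarget('00000000-00000000-00409DFF-FF123456')

block = FileSystemServiceCommandBlock()
block.add_command(LsCommand('~/'))               # add_command()/LsCommand signature assumed

# One web-service request carries the whole block, which helps avoid throttling.
results = fssapi.send_command_block(target, block)
for device_id, responses in results.items():
    print(device_id, responses)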
clalancette/pycdlib | pycdlib/pycdlib.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1137-L1154 | def _udf_name_and_parent_from_path(self, udf_path):
# type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry]
'''
An internal method to find the parent directory record and name given a
UDF path. If the parent is found, return a tuple containing the basename
of the path and the parent UDF File Entry object.
Parameters:
udf_path - The absolute UDF path to the entry on the ISO.
Returns:
A tuple containing just the name of the entry and a UDF File Entry
object representing the parent of the entry.
'''
splitpath = utils.split_path(udf_path)
name = splitpath.pop()
(parent_ident_unused, parent) = self._find_udf_record(b'/' + b'/'.join(splitpath))
return (name.decode('utf-8').encode('utf-8'), parent) | [
"def",
"_udf_name_and_parent_from_path",
"(",
"self",
",",
"udf_path",
")",
":",
"# type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry]",
"splitpath",
"=",
"utils",
".",
"split_path",
"(",
"udf_path",
")",
"name",
"=",
"splitpath",
".",
"pop",
"(",
")",
"(",
"parent_ident_unused",
",",
"parent",
")",
"=",
"self",
".",
"_find_udf_record",
"(",
"b'/'",
"+",
"b'/'",
".",
"join",
"(",
"splitpath",
")",
")",
"return",
"(",
"name",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parent",
")"
]
| An internal method to find the parent directory record and name given a
UDF path. If the parent is found, return a tuple containing the basename
of the path and the parent UDF File Entry object.
Parameters:
udf_path - The absolute UDF path to the entry on the ISO.
Returns:
A tuple containing just the name of the entry and a UDF File Entry
object representing the parent of the entry. | [
"An",
"internal",
"method",
"to",
"find",
"the",
"parent",
"directory",
"record",
"and",
"name",
"given",
"a",
"UDF",
"path",
".",
"If",
"the",
"parent",
"is",
"found",
"return",
"a",
"tuple",
"containing",
"the",
"basename",
"of",
"the",
"path",
"and",
"the",
"parent",
"UDF",
"File",
"Entry",
"object",
"."
]
| python | train |
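The private helper above only peels the basename off a bytes UDF path and resolves the remainder to a parent record; an equivalent standalone sketch of that split:

def split_udf_path(udf_path):
    # Mirror the name/parent split: drop empty components, pop the basename,
    # and rebuild the parent path that would be passed to the record lookup.
    components = [c for c in udf_path.split(b'/') if c]
    name = components.pop()
    parent_path = b'/' + b'/'.join(components)
    return name, parent_path

print(split_udf_path(b'/boot/grub/grub.cfg'))  # (b'grub.cfg', b'/boot/grub')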
dead-beef/markovchain | markovchain/base.py | https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/base.py#L56-L69 | def data(self, data, part=False, dataset=''):
"""Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
"""
links = self.parser(self.scanner(data, part), part, dataset)
self.storage.add_links(links) | [
"def",
"data",
"(",
"self",
",",
"data",
",",
"part",
"=",
"False",
",",
"dataset",
"=",
"''",
")",
":",
"links",
"=",
"self",
".",
"parser",
"(",
"self",
".",
"scanner",
"(",
"data",
",",
"part",
")",
",",
"part",
",",
"dataset",
")",
"self",
".",
"storage",
".",
"add_links",
"(",
"links",
")"
]
| Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: ''). | [
"Parse",
"data",
"and",
"update",
"links",
"."
]
| python | train |
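data() is the generic ingest path (scan, parse, store links); a likely front-end usage via the package's MarkovText class. The import and call pattern follow the project README and are otherwise assumptions.

from markovchain.text import MarkovText

markov = MarkovText()

# Stream a corpus in chunks: partial calls keep parser state open,
# and a final non-partial call flushes it.
markov.data('the quick brown fox ', part=True)
markov.data('jumps over the lazy dog. ', part=True)
markov.data('', part=False)

print(markov())  # generate text from the accumulated links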
tanghaibao/jcvi | jcvi/formats/base.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L582-L605 | def append(args):
"""
%prog append csvfile [tag]
Append a column with a fixed value. If tag is missing then just append the
filename.
"""
p = OptionParser(append.__doc__)
p.set_sep()
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
csvfile = args[0]
tag = args[1] if nargs == 2 else csvfile
fp = must_open(csvfile)
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip("\r\n")
row = opts.sep.join((row, tag))
print(row, file=fw) | [
"def",
"append",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"append",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"nargs",
"=",
"len",
"(",
"args",
")",
"if",
"nargs",
"not",
"in",
"(",
"1",
",",
"2",
")",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"csvfile",
"=",
"args",
"[",
"0",
"]",
"tag",
"=",
"args",
"[",
"1",
"]",
"if",
"nargs",
"==",
"2",
"else",
"csvfile",
"fp",
"=",
"must_open",
"(",
"csvfile",
")",
"fw",
"=",
"must_open",
"(",
"opts",
".",
"outfile",
",",
"\"w\"",
")",
"for",
"row",
"in",
"fp",
":",
"row",
"=",
"row",
".",
"rstrip",
"(",
"\"\\r\\n\"",
")",
"row",
"=",
"opts",
".",
"sep",
".",
"join",
"(",
"(",
"row",
",",
"tag",
")",
")",
"print",
"(",
"row",
",",
"file",
"=",
"fw",
")"
]
| %prog append csvfile [tag]
Append a column with a fixed value. If tag is missing then just append the
filename. | [
"%prog",
"append",
"csvfile",
"[",
"tag",
"]"
]
| python | train |
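append() is a %prog-style subcommand and is normally driven by an argv-like list; a sketch of both invocation styles (output goes to stdout unless --outfile is set, an option added by set_outfile()):

# Shell usage, as hinted by the %prog docstring:
#   python -m jcvi.formats.base append data.csv mytag    # append the literal tag
#   python -m jcvi.formats.base append data.csv          # append the filename itself

# Programmatic call with the same argument list:
from jcvi.formats.base import append

append(['data.csv', 'mytag'])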
yandex/yandex-tank | yandextank/common/util.py | https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/common/util.py#L435-L447 | def splitstring(string):
"""
>>> string = 'apple orange "banana tree" green'
>>> splitstring(string)
['apple', 'orange', 'green', '"banana tree"']
"""
patt = re.compile(r'"[\w ]+"')
if patt.search(string):
quoted_item = patt.search(string).group()
newstring = patt.sub('', string)
return newstring.split() + [quoted_item]
else:
return string.split() | [
"def",
"splitstring",
"(",
"string",
")",
":",
"patt",
"=",
"re",
".",
"compile",
"(",
"r'\"[\\w ]+\"'",
")",
"if",
"patt",
".",
"search",
"(",
"string",
")",
":",
"quoted_item",
"=",
"patt",
".",
"search",
"(",
"string",
")",
".",
"group",
"(",
")",
"newstring",
"=",
"patt",
".",
"sub",
"(",
"''",
",",
"string",
")",
"return",
"newstring",
".",
"split",
"(",
")",
"+",
"[",
"quoted_item",
"]",
"else",
":",
"return",
"string",
".",
"split",
"(",
")"
]
| >>> string = 'apple orange "banana tree" green'
>>> splitstring(string)
['apple', 'orange', 'green', '"banana tree"'] | [
">>>",
"string",
"=",
"apple",
"orange",
"banana",
"tree",
"green",
">>>",
"splitstring",
"(",
"string",
")",
"[",
"apple",
"orange",
"green",
"banana",
"tree",
"]"
]
| python | test |
WhyNotHugo/django-afip | django_afip/models.py | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L1052-L1075 | def is_validated(self):
"""
Returns True if this instance is validated.
Note that resolving this property requires a DB query, so if you have a
very large number of receipts you should prefetch (see Django's
``select_related``) the ``validation`` field. Even so, a DB query *may*
be triggered.
If you need a large list of validated receipts, you should actually
filter them via a QuerySet::
Receipt.objects.filter(validation__result=RESULT_APPROVED)
:rtype: bool
"""
# Avoid the DB lookup if possible:
if not self.receipt_number:
return False
try:
return self.validation.result == ReceiptValidation.RESULT_APPROVED
except ReceiptValidation.DoesNotExist:
return False | [
"def",
"is_validated",
"(",
"self",
")",
":",
"# Avoid the DB lookup if possible:",
"if",
"not",
"self",
".",
"receipt_number",
":",
"return",
"False",
"try",
":",
"return",
"self",
".",
"validation",
".",
"result",
"==",
"ReceiptValidation",
".",
"RESULT_APPROVED",
"except",
"ReceiptValidation",
".",
"DoesNotExist",
":",
"return",
"False"
]
| Returns True if this instance is validated.
Note that resolving this property requires a DB query, so if you have a
very large number of receipts you should prefetch (see Django's
``select_related``) the ``validation`` field. Even so, a DB query *may*
be triggered.
If you need a large list of validated receipts, you should actually
filter them via a QuerySet::
Receipt.objects.filter(validation__result=RESULT_APPROVED)
:rtype: bool | [
"Returns",
"True",
"if",
"this",
"instance",
"is",
"validated",
"."
]
| python | train |
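Following the docstring's advice, a short sketch of the two query patterns that keep is_validated cheap over many receipts:

from django_afip.models import Receipt, ReceiptValidation

# Prefetch the validation so each is_validated check avoids an extra query.
for receipt in Receipt.objects.select_related('validation'):
    print(receipt.pk, receipt.is_validated)

# Or filter at the database level and never touch the property at all.
approved = Receipt.objects.filter(
    validation__result=ReceiptValidation.RESULT_APPROVED,
)
print(approved.count())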
loli/medpy | medpy/filter/houghtransform.py | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/houghtransform.py#L156-L177 | def template_sphere (radius, dimensions):
r"""
Returns a spherical binary structure of the supplied radius that can be used as
template input to the generalized hough transform.
Parameters
----------
radius : integer
The circle's radius in voxels.
dimensions : integer
The dimensionality of the circle
Returns
-------
template_sphere : ndarray
A boolean array containing a sphere.
"""
if int(dimensions) != dimensions:
raise TypeError('The supplied dimension parameter must be of type integer.')
dimensions = int(dimensions)
return template_ellipsoid(dimensions * [radius * 2]) | [
"def",
"template_sphere",
"(",
"radius",
",",
"dimensions",
")",
":",
"if",
"int",
"(",
"dimensions",
")",
"!=",
"dimensions",
":",
"raise",
"TypeError",
"(",
"'The supplied dimension parameter must be of type integer.'",
")",
"dimensions",
"=",
"int",
"(",
"dimensions",
")",
"return",
"template_ellipsoid",
"(",
"dimensions",
"*",
"[",
"radius",
"*",
"2",
"]",
")"
]
| r"""
Returns a spherical binary structure of the supplied radius that can be used as
template input to the generalized hough transform.
Parameters
----------
radius : integer
The circle's radius in voxels.
dimensions : integer
The dimensionality of the circle
Returns
-------
template_sphere : ndarray
A boolean array containing a sphere. | [
"r",
"Returns",
"a",
"spherical",
"binary",
"structure",
"of",
"a",
"of",
"the",
"supplied",
"radius",
"that",
"can",
"be",
"used",
"as",
"template",
"input",
"to",
"the",
"generalized",
"hough",
"transform",
"."
]
| python | train |
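A quick check of template_sphere(): for radius r in d dimensions it delegates to template_ellipsoid() and yields a boolean array with every side equal to 2r:

from medpy.filter.houghtransform import template_sphere

template = template_sphere(5, 3)  # radius of 5 voxels, 3 dimensions
print(template.shape)             # (10, 10, 10)
print(template.dtype)             # expected: bool, per the docstring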
OpenTreeOfLife/peyotl | tutorials/ot-oti-find-tree.py | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-tree.py#L27-L36 | def print_matching_trees(arg_dict, tree_format, exact, verbose):
"""The `TreeRef` instance returned by the oti.find_trees(... wrap_response=True)
can be used as an argument to the phylesystem_api.get call.
If you pass in a string (instead of a TreeRef), the string will be interpreted as a study ID
"""
from peyotl.sugar import phylesystem_api
tree_list = ot_find_tree(arg_dict, exact=exact, verbose=verbose)
for tree_ref in tree_list:
print(tree_ref)
print(phylesystem_api.get(tree_ref, format=tree_format)) | [
"def",
"print_matching_trees",
"(",
"arg_dict",
",",
"tree_format",
",",
"exact",
",",
"verbose",
")",
":",
"from",
"peyotl",
".",
"sugar",
"import",
"phylesystem_api",
"tree_list",
"=",
"ot_find_tree",
"(",
"arg_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
")",
"for",
"tree_ref",
"in",
"tree_list",
":",
"print",
"(",
"tree_ref",
")",
"print",
"(",
"phylesystem_api",
".",
"get",
"(",
"tree_ref",
",",
"format",
"=",
"tree_format",
")",
")"
]
| The `TreeRef` instance returned by the oti.find_trees(... wrap_response=True)
can be used as an argument to the phylesystem_api.get call.
If you pass in a string (instead of a TreeRef), the string will be interpreted as a study ID | [
"The",
"TreeRef",
"instance",
"returned",
"by",
"the",
"oti",
".",
"find_trees",
"(",
"...",
"wrap_response",
"=",
"True",
")",
"can",
"be",
"used",
"as",
"an",
"argument",
"to",
"the",
"phylesystem_api",
".",
"get",
"call",
".",
"If",
"you",
"pass",
"in",
"a",
"string",
"(",
"instead",
"of",
"a",
"TreeRef",
")",
"the",
"string",
"will",
"be",
"interpreted",
"as",
"a",
"study",
"ID"
]
| python | train |
multiformats/py-multibase | multibase/multibase.py | https://github.com/multiformats/py-multibase/blob/8f435762b50a17f921c13b59eb0c7b9c52afc879/multibase/multibase.py#L84-L95 | def decode(data):
"""
Decode the multibase encoded data
:param data: multibase encoded data
:type data: str or bytes
:return: decoded data
:rtype: str
:raises ValueError: if the data is not multibase encoded
"""
data = ensure_bytes(data, 'utf8')
codec = get_codec(data)
return codec.converter.decode(data[CODE_LENGTH:]) | [
"def",
"decode",
"(",
"data",
")",
":",
"data",
"=",
"ensure_bytes",
"(",
"data",
",",
"'utf8'",
")",
"codec",
"=",
"get_codec",
"(",
"data",
")",
"return",
"codec",
".",
"converter",
".",
"decode",
"(",
"data",
"[",
"CODE_LENGTH",
":",
"]",
")"
]
| Decode the multibase encoded data
:param data: multibase encoded data
:type data: str or bytes
:return: decoded data
:rtype: str
:raises ValueError: if the data is not multibase encoded | [
"Decode",
"the",
"multibase",
"decoded",
"data",
":",
"param",
"data",
":",
"multibase",
"encoded",
"data",
":",
"type",
"data",
":",
"str",
"or",
"bytes",
":",
"return",
":",
"decoded",
"data",
":",
"rtype",
":",
"str",
":",
"raises",
"ValueError",
":",
"if",
"the",
"data",
"is",
"not",
"multibase",
"encoded"
]
| python | train |
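A round-trip sketch for decode(); the encode() counterpart and its argument order follow the package README and are otherwise an assumption:

from multibase import encode, decode

encoded = encode('base58btc', b'hello world')
print(encoded)            # e.g. b'zStV1DL6CwTryKyV' -- 'z' is the base58btc prefix

print(decode(encoded))    # b'hello world'; the codec is inferred from the prefix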
inasafe/inasafe | safe/gis/generic_expressions.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/generic_expressions.py#L130-L148 | def inasafe_exposure_summary_field_values(field, feature, parent):
"""Retrieve all values from a field in the exposure summary layer.
"""
_ = feature, parent # NOQA
layer = exposure_summary_layer()
if not layer:
return None
index = layer.fields().lookupField(field)
if index < 0:
return None
values = []
for feat in layer.getFeatures():
value = feat[index]
values.append(value)
return str(values) | [
"def",
"inasafe_exposure_summary_field_values",
"(",
"field",
",",
"feature",
",",
"parent",
")",
":",
"_",
"=",
"feature",
",",
"parent",
"# NOQA",
"layer",
"=",
"exposure_summary_layer",
"(",
")",
"if",
"not",
"layer",
":",
"return",
"None",
"index",
"=",
"layer",
".",
"fields",
"(",
")",
".",
"lookupField",
"(",
"field",
")",
"if",
"index",
"<",
"0",
":",
"return",
"None",
"values",
"=",
"[",
"]",
"for",
"feat",
"in",
"layer",
".",
"getFeatures",
"(",
")",
":",
"value",
"=",
"feat",
"[",
"index",
"]",
"values",
".",
"append",
"(",
"value",
")",
"return",
"str",
"(",
"values",
")"
]
| Retrieve all values from a field in the exposure summary layer. | [
"Retrieve",
"all",
"values",
"from",
"a",
"field",
"in",
"the",
"exposure",
"summary",
"layer",
"."
]
| python | train |
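The function above is an InaSAFE-provided QGIS expression function; a sketch of evaluating it from Python inside QGIS (the field name is illustrative, and InaSAFE's expressions must already be registered):

from qgis.core import QgsExpression

expression = QgsExpression(
    "inasafe_exposure_summary_field_values('exposure_class')"
)
result = expression.evaluate()  # string rendering of all values in that column
print(result)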