Dataset schema (one record per function):

| field | type |
|---|---|
| repo | string (7-54 chars) |
| path | string (4-192 chars) |
| url | string (87-284 chars) |
| code | string (78-104k chars) |
| code_tokens | list of strings |
| docstring | string (1-46.9k chars) |
| docstring_tokens | list of strings |
| language | string (1 class: `python`) |
| partition | string (3 classes: `train`, `valid`, `test`) |

Each record below gives the scalar fields as `repo | path | language | partition`, the source `url`, and the `code` string. The `code_tokens`, `docstring`, and `docstring_tokens` fields are token-level splits of the code and of the docstring already embedded in it, so they are not repeated per record.
**roll/interest-py** | `interest/service.py` | python | train
https://github.com/roll/interest-py/blob/e6e1def4f2999222aac2fb1d290ae94250673b89/interest/service.py#L112-L137

```python
def listen(self, *, host, port, override=False, forever=False, **kwargs):
    """Listen on TCP/IP socket.

    Parameters
    ----------
    host: str
        Host like '127.0.0.1'
    port:
        Port like 80.
    """
    if override:
        argv = dict(enumerate(sys.argv))
        host = argv.get(1, host)
        port = int(argv.get(2, port))
    server = self.loop.create_server(
        self.__handler.fork, host, port, **kwargs)
    server = self.loop.run_until_complete(server)
    self.log('info',
             'Start listening host="{host}" port="{port}"'.
             format(host=host, port=port))
    if forever:
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
    return server
```
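A minimal usage sketch, assuming the `interest` package is installed; the `Service` class name and its constructor argument are assumptions, only the `listen()` method above is from the source:

```python
# Hypothetical usage; Service and its constructor are assumed.
from interest import Service

service = Service(path='/api/v1')
# forever=True blocks the event loop until Ctrl-C.
service.listen(host='127.0.0.1', port=9000, forever=True)
```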
"def",
"listen",
"(",
"self",
",",
"*",
",",
"host",
",",
"port",
",",
"override",
"=",
"False",
",",
"forever",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"override",
":",
"argv",
"=",
"dict",
"(",
"enumerate",
"(",
"sys",
".",
"argv",
")",
")",
"host",
"=",
"argv",
".",
"get",
"(",
"1",
",",
"host",
")",
"port",
"=",
"int",
"(",
"argv",
".",
"get",
"(",
"2",
",",
"port",
")",
")",
"server",
"=",
"self",
".",
"loop",
".",
"create_server",
"(",
"self",
".",
"__handler",
".",
"fork",
",",
"host",
",",
"port",
",",
"*",
"*",
"kwargs",
")",
"server",
"=",
"self",
".",
"loop",
".",
"run_until_complete",
"(",
"server",
")",
"self",
".",
"log",
"(",
"'info'",
",",
"'Start listening host=\"{host}\" port=\"{port}\"'",
".",
"format",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
")",
"if",
"forever",
":",
"try",
":",
"self",
".",
"loop",
".",
"run_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"return",
"server"
] | Listen on TCP/IP socket.
Parameters
----------
host: str
Host like '127.0.0.1'
port:
Port like 80. | [
"Listen",
"on",
"TCP",
"/",
"IP",
"socket",
"."
] | python | train |
**saltstack/salt** | `salt/utils/network.py` | python | train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L561-L573

```python
def network_size(value, options=None, version=None):
    '''
    Get the size of a network.
    '''
    ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
    if not ipaddr_filter_out:
        return
    if not isinstance(value, (list, tuple, types.GeneratorType)):
        return _network_size(ipaddr_filter_out[0])
    return [
        _network_size(ip_a)
        for ip_a in ipaddr_filter_out
    ]
```
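A quick sketch of a direct call, assuming salt is importable; the CIDR and the expected count are illustrative, on the assumption that `_network_size` returns the address count of the network:

```python
# Hypothetical call; network_size computes the size of the given network.
from salt.utils.network import network_size

print(network_size('192.168.0.0/24'))   # expected: 256 for a /24 (assumption)
```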
"def",
"network_size",
"(",
"value",
",",
"options",
"=",
"None",
",",
"version",
"=",
"None",
")",
":",
"ipaddr_filter_out",
"=",
"_filter_ipaddr",
"(",
"value",
",",
"options",
"=",
"options",
",",
"version",
"=",
"version",
")",
"if",
"not",
"ipaddr_filter_out",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
",",
"types",
".",
"GeneratorType",
")",
")",
":",
"return",
"_network_size",
"(",
"ipaddr_filter_out",
"[",
"0",
"]",
")",
"return",
"[",
"_network_size",
"(",
"ip_a",
")",
"for",
"ip_a",
"in",
"ipaddr_filter_out",
"]"
] | Get the size of a network. | [
"Get",
"the",
"size",
"of",
"a",
"network",
"."
] | python | train |
**hdima/erlport** | `priv/python3/erlport/erlterms.py` | python | test
https://github.com/hdima/erlport/blob/246b7722d62b87b48be66d9a871509a537728962/priv/python3/erlport/erlterms.py#L168-L188

```python
def decode(string):
    """Decode Erlang external term."""
    if not string:
        raise IncompleteData(string)
    if string[0] != 131:
        raise ValueError("unknown protocol version: %r" % string[0])
    if string[1:2] == b'P':
        # compressed term
        if len(string) < 16:
            raise IncompleteData(string)
        d = decompressobj()
        term_string = d.decompress(string[6:]) + d.flush()
        uncompressed_size, = _int4_unpack(string[2:6])
        if len(term_string) != uncompressed_size:
            raise ValueError(
                "invalid compressed tag, "
                "%d bytes but got %d" % (uncompressed_size, len(term_string)))
        # tail data returned by decode_term() can be simply ignored
        term, _tail = decode_term(term_string)
        return term, d.unused_data
    return decode_term(string[1:])
```
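A small worked example, assuming the erlport package is importable. The byte string below encodes the Erlang small integer 5 (0x83 = version byte 131, 0x61 = SMALL_INTEGER_EXT tag 97); `decode` returns a `(term, tail)` pair for uncompressed input:

```python
# Hypothetical usage of erlport.erlterms.decode on an uncompressed term.
from erlport.erlterms import decode

term, tail = decode(b'\x83\x61\x05')
print(term, tail)   # expected: 5 b''
```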
"def",
"decode",
"(",
"string",
")",
":",
"if",
"not",
"string",
":",
"raise",
"IncompleteData",
"(",
"string",
")",
"if",
"string",
"[",
"0",
"]",
"!=",
"131",
":",
"raise",
"ValueError",
"(",
"\"unknown protocol version: %r\"",
"%",
"string",
"[",
"0",
"]",
")",
"if",
"string",
"[",
"1",
":",
"2",
"]",
"==",
"b'P'",
":",
"# compressed term",
"if",
"len",
"(",
"string",
")",
"<",
"16",
":",
"raise",
"IncompleteData",
"(",
"string",
")",
"d",
"=",
"decompressobj",
"(",
")",
"term_string",
"=",
"d",
".",
"decompress",
"(",
"string",
"[",
"6",
":",
"]",
")",
"+",
"d",
".",
"flush",
"(",
")",
"uncompressed_size",
",",
"=",
"_int4_unpack",
"(",
"string",
"[",
"2",
":",
"6",
"]",
")",
"if",
"len",
"(",
"term_string",
")",
"!=",
"uncompressed_size",
":",
"raise",
"ValueError",
"(",
"\"invalid compressed tag, \"",
"\"%d bytes but got %d\"",
"%",
"(",
"uncompressed_size",
",",
"len",
"(",
"term_string",
")",
")",
")",
"# tail data returned by decode_term() can be simple ignored",
"term",
",",
"_tail",
"=",
"decode_term",
"(",
"term_string",
")",
"return",
"term",
",",
"d",
".",
"unused_data",
"return",
"decode_term",
"(",
"string",
"[",
"1",
":",
"]",
")"
] | Decode Erlang external term. | [
"Decode",
"Erlang",
"external",
"term",
"."
] | python | test |
**pypa/pipenv** | `pipenv/vendor/jinja2/runtime.py` | python | train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/runtime.py#L372-L377

```python
def changed(self, *value):
    """Checks whether the value has changed since the last call."""
    if self._last_checked_value != value:
        self._last_checked_value = value
        return True
    return False
```
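This is the helper behind Jinja2's `loop.changed()` (available in Jinja2 2.10+). A template-side sketch, assuming Jinja2 is installed; the `group`/`name` fields are illustrative:

```python
# Hypothetical template; prints a group header only when the group changes.
from jinja2 import Template

tmpl = Template(
    "{% for e in entries %}"
    "{% if loop.changed(e.group) %}[{{ e.group }}]\n{% endif %}"
    "{{ e.name }}\n"
    "{% endfor %}")
entries = [dict(group='a', name='x'), dict(group='a', name='y'),
           dict(group='b', name='z')]
print(tmpl.render(entries=entries))
```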
"def",
"changed",
"(",
"self",
",",
"*",
"value",
")",
":",
"if",
"self",
".",
"_last_checked_value",
"!=",
"value",
":",
"self",
".",
"_last_checked_value",
"=",
"value",
"return",
"True",
"return",
"False"
] | Checks whether the value has changed since the last call. | [
"Checks",
"whether",
"the",
"value",
"has",
"changed",
"since",
"the",
"last",
"call",
"."
] | python | train |
**bwohlberg/sporco** | `sporco/linalg.py` | python | train
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L1190-L1217

```python
def proj_l2ball(b, s, r, axes=None):
    r"""
    Project :math:`\mathbf{b}` into the :math:`\ell_2` ball of radius
    :math:`r` about :math:`\mathbf{s}`, i.e.
    :math:`\{ \mathbf{x} : \|\mathbf{x} - \mathbf{s} \|_2 \leq r \}`.
    Note that ``proj_l2ball(b, s, r)`` is equivalent to
    :func:`.prox.proj_l2` ``(b - s, r) + s``.

    Parameters
    ----------
    b : array_like
        Vector :math:`\mathbf{b}` to be projected
    s : array_like
        Centre of :math:`\ell_2` ball :math:`\mathbf{s}`
    r : float
        Radius of ball
    axes : sequence of ints, optional (default all axes)
        Axes over which to compute :math:`\ell_2` norms

    Returns
    -------
    x : ndarray
        Projection of :math:`\mathbf{b}` into ball
    """
    d = np.sqrt(np.sum((b - s)**2, axis=axes, keepdims=True))
    p = zdivide(b - s, d)
    return np.asarray((d <= r) * b + (d > r) * (s + r*p), b.dtype)
```
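A quick numerical check, assuming sporco is installed; the numbers are illustrative. A point at distance 2 from the centre is pulled onto a radius-1 ball:

```python
# Check proj_l2ball: project b onto the unit ball around s.
import numpy as np
from sporco.linalg import proj_l2ball

b = np.array([3.0, 0.0])
s = np.array([1.0, 0.0])
x = proj_l2ball(b, s, 1.0)
print(x)                          # expected: [2. 0.]
print(np.linalg.norm(x - s))      # expected: 1.0
```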
"def",
"proj_l2ball",
"(",
"b",
",",
"s",
",",
"r",
",",
"axes",
"=",
"None",
")",
":",
"d",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"(",
"b",
"-",
"s",
")",
"**",
"2",
",",
"axis",
"=",
"axes",
",",
"keepdims",
"=",
"True",
")",
")",
"p",
"=",
"zdivide",
"(",
"b",
"-",
"s",
",",
"d",
")",
"return",
"np",
".",
"asarray",
"(",
"(",
"d",
"<=",
"r",
")",
"*",
"b",
"+",
"(",
"d",
">",
"r",
")",
"*",
"(",
"s",
"+",
"r",
"*",
"p",
")",
",",
"b",
".",
"dtype",
")"
] | r"""
Project :math:`\mathbf{b}` into the :math:`\ell_2` ball of radius
:math:`r` about :math:`\mathbf{s}`, i.e.
:math:`\{ \mathbf{x} : \|\mathbf{x} - \mathbf{s} \|_2 \leq r \}`.
Note that ``proj_l2ball(b, s, r)`` is equivalent to
:func:`.prox.proj_l2` ``(b - s, r) + s``.
Parameters
----------
b : array_like
Vector :math:`\mathbf{b}` to be projected
s : array_like
Centre of :math:`\ell_2` ball :math:`\mathbf{s}`
r : float
Radius of ball
axes : sequence of ints, optional (default all axes)
Axes over which to compute :math:`\ell_2` norms
Returns
-------
x : ndarray
Projection of :math:`\mathbf{b}` into ball | [
"r",
"Project",
":",
"math",
":",
"\\",
"mathbf",
"{",
"b",
"}",
"into",
"the",
":",
"math",
":",
"\\",
"ell_2",
"ball",
"of",
"radius",
":",
"math",
":",
"r",
"about",
":",
"math",
":",
"\\",
"mathbf",
"{",
"s",
"}",
"i",
".",
"e",
".",
":",
"math",
":",
"\\",
"{",
"\\",
"mathbf",
"{",
"x",
"}",
":",
"\\",
"|",
"\\",
"mathbf",
"{",
"x",
"}",
"-",
"\\",
"mathbf",
"{",
"s",
"}",
"\\",
"|_2",
"\\",
"leq",
"r",
"\\",
"}",
".",
"Note",
"that",
"proj_l2ball",
"(",
"b",
"s",
"r",
")",
"is",
"equivalent",
"to",
":",
"func",
":",
".",
"prox",
".",
"proj_l2",
"(",
"b",
"-",
"s",
"r",
")",
"+",
"s",
"."
] | python | train |
**squdle/baseconvert** | `baseconvert/__main__.py` | python | train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/__main__.py#L8-L47

```python
def main():
    """
    Main entry point for running baseconvert as a command.

    Examples:
        $ python -m baseconvert -n 0.5 -i 10 -o 20 -s True
        0.A

        $ echo 3.1415926 | python -m baseconvert -i 10 -o 16 -d 3 -s True
        3.243
    """
    # Parse arguments
    parser = argparse.ArgumentParser(description="Convert rational numbers between bases.")
    parser.add_argument("-n", "--number", default=None,
                        help="The number to convert as a string, else stdin used.")
    parser.add_argument("-i", "--input-base", default=10,
                        help="The input base (default 10).")
    parser.add_argument("-o", "--output-base", default=10,
                        help="The output base (default 10).")
    parser.add_argument("-d", "--max_depth", default=10, type=int,
                        help="The maximum fractional digits (default 10).")
    parser.add_argument("-r", "--recurring", default=True, type=bool,
                        help="Boolean, if True will attempt to find recurring decimals (default True).")
    parser.add_argument("-s", "--string", type=bool,
                        help="Boolean, if True will output number as String, else as tuple (default False).")
    args = parser.parse_args()

    args.input_base = float(args.input_base)
    args.output_base = float(args.output_base)
    if args.input_base == int(args.input_base):
        args.input_base = int(args.input_base)
    if args.output_base == int(args.output_base):
        args.output_base = int(args.output_base)

    if (args.number):
        return base(args.number, args.input_base, args.output_base, string=args.string,
                    max_depth=args.max_depth, recurring=args.recurring)
    elif not sys.stdin.isatty():
        return base(sys.stdin.read().strip(), args.input_base, args.output_base, string=args.string,
                    max_depth=args.max_depth, recurring=args.recurring)
    else:
        raise ValueError("Please input a number!")
```
"def",
"main",
"(",
")",
":",
"# Parse arguments",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Convert rational numbers between bases.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--number\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"The number to convert as a string, else stdin used.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--input-base\"",
",",
"default",
"=",
"10",
",",
"help",
"=",
"\"The input base (default 10).\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output-base\"",
",",
"default",
"=",
"10",
",",
"help",
"=",
"\"The output base (default 10).\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--max_depth\"",
",",
"default",
"=",
"10",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"The maximum fractional digits (default 10).\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--recurring\"",
",",
"default",
"=",
"True",
",",
"type",
"=",
"bool",
",",
"help",
"=",
"\"Boolean, if True will attempt to find recurring decimals (default True).\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--string\"",
",",
"type",
"=",
"bool",
",",
"help",
"=",
"\"Boolean, if True will output number as String, else as tuple (default False).\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"args",
".",
"input_base",
"=",
"float",
"(",
"args",
".",
"input_base",
")",
"args",
".",
"output_base",
"=",
"float",
"(",
"args",
".",
"output_base",
")",
"if",
"args",
".",
"input_base",
"==",
"int",
"(",
"args",
".",
"input_base",
")",
":",
"args",
".",
"input_base",
"=",
"int",
"(",
"args",
".",
"input_base",
")",
"if",
"args",
".",
"output_base",
"==",
"int",
"(",
"args",
".",
"output_base",
")",
":",
"args",
".",
"output_base",
"=",
"int",
"(",
"args",
".",
"output_base",
")",
"if",
"(",
"args",
".",
"number",
")",
":",
"return",
"base",
"(",
"args",
".",
"number",
",",
"args",
".",
"input_base",
",",
"args",
".",
"output_base",
",",
"string",
"=",
"args",
".",
"string",
",",
"max_depth",
"=",
"args",
".",
"max_depth",
",",
"recurring",
"=",
"args",
".",
"recurring",
")",
"elif",
"not",
"sys",
".",
"stdin",
".",
"isatty",
"(",
")",
":",
"return",
"base",
"(",
"sys",
".",
"stdin",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
",",
"args",
".",
"input_base",
",",
"args",
".",
"output_base",
",",
"string",
"=",
"args",
".",
"string",
",",
"max_depth",
"=",
"args",
".",
"max_depth",
",",
"recurring",
"=",
"args",
".",
"recurring",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Please input a number!\"",
")"
] | Main entry point for running baseconvert as a command.
Examples:
$ python -m baseconvert -n 0.5 -i 10 -o 20 -s True
0.A
$ echo 3.1415926 | python -m baseconvert -i 10 -o 16 -d 3 -s True
3.243 | [
"Main",
"entry",
"point",
"for",
"running",
"baseconvert",
"as",
"a",
"command",
"."
] | python | train |
**aiogram/aiogram** | `aiogram/dispatcher/webhook.py` | python | train
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/webhook.py#L254-L261

```python
def validate_ip(self):
    """
    Check ip if that is needed. Raise web.HTTPUnauthorized for not allowed hosts.
    """
    if self.request.app.get('_check_ip', False):
        ip_address, accept = self.check_ip()
        if not accept:
            raise web.HTTPUnauthorized()
```
"def",
"validate_ip",
"(",
"self",
")",
":",
"if",
"self",
".",
"request",
".",
"app",
".",
"get",
"(",
"'_check_ip'",
",",
"False",
")",
":",
"ip_address",
",",
"accept",
"=",
"self",
".",
"check_ip",
"(",
")",
"if",
"not",
"accept",
":",
"raise",
"web",
".",
"HTTPUnauthorized",
"(",
")"
] | Check ip if that is needed. Raise web.HTTPUnauthorized for not allowed hosts. | [
"Check",
"ip",
"if",
"that",
"is",
"needed",
".",
"Raise",
"web",
".",
"HTTPUnauthorized",
"for",
"not",
"allowed",
"hosts",
"."
] | python | train |
**angr/angr** | `angr/analyses/reassembler.py` | python | train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/reassembler.py#L1083-L1095

```python
def instruction_addresses(self):
    """
    Get all instruction addresses in the binary.

    :return: A list of sorted instruction addresses.
    :rtype: list
    """
    addrs = [ ]
    for b in sorted(self.blocks, key=lambda x: x.addr):  # type: BasicBlock
        addrs.extend(b.instruction_addresses())
    return sorted(set(addrs), key=lambda x: x[0])
```
"def",
"instruction_addresses",
"(",
"self",
")",
":",
"addrs",
"=",
"[",
"]",
"for",
"b",
"in",
"sorted",
"(",
"self",
".",
"blocks",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"addr",
")",
":",
"# type: BasicBlock",
"addrs",
".",
"extend",
"(",
"b",
".",
"instruction_addresses",
"(",
")",
")",
"return",
"sorted",
"(",
"set",
"(",
"addrs",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")"
] | Get all instruction addresses in the binary.
:return: A list of sorted instruction addresses.
:rtype: list | [
"Get",
"all",
"instruction",
"addresses",
"in",
"the",
"binary",
"."
] | python | train |
**ArangoDB-Community/pyArango** | `pyArango/collection.py` | python | train
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L369-L380

```python
def ensureFulltextIndex(self, fields, minLength=None):
    """Creates a fulltext index if it does not already exist, and returns it"""
    data = {
        "type": "fulltext",
        "fields": fields,
    }
    if minLength is not None:
        data["minLength"] = minLength
    ind = Index(self, creationData=data)
    self.indexes["fulltext"][ind.infos["id"]] = ind
    return ind
```
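A usage sketch, assuming pyArango is installed and an ArangoDB server is reachable; the connection details, database, and collection names are illustrative:

```python
# Hypothetical call; indexes the 'name' field for fulltext search.
from pyArango.connection import Connection

conn = Connection(arangoURL='http://127.0.0.1:8529', username='root', password='')
users = conn['mydb']['users']        # existing database and collection
idx = users.ensureFulltextIndex(['name'], minLength=3)
print(idx.infos['id'])
```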
"def",
"ensureFulltextIndex",
"(",
"self",
",",
"fields",
",",
"minLength",
"=",
"None",
")",
":",
"data",
"=",
"{",
"\"type\"",
":",
"\"fulltext\"",
",",
"\"fields\"",
":",
"fields",
",",
"}",
"if",
"minLength",
"is",
"not",
"None",
":",
"data",
"[",
"\"minLength\"",
"]",
"=",
"minLength",
"ind",
"=",
"Index",
"(",
"self",
",",
"creationData",
"=",
"data",
")",
"self",
".",
"indexes",
"[",
"\"fulltext\"",
"]",
"[",
"ind",
".",
"infos",
"[",
"\"id\"",
"]",
"]",
"=",
"ind",
"return",
"ind"
] | Creates a fulltext index if it does not already exist, and returns it | [
"Creates",
"a",
"fulltext",
"index",
"if",
"it",
"does",
"not",
"already",
"exist",
"and",
"returns",
"it"
] | python | train |
**materialsproject/pymatgen** | `pymatgen/analysis/elasticity/elastic.py` | python | train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L623-L638

```python
def get_gruneisen_parameter(self, temperature=None, structure=None,
                            quad=None):
    """
    Gets the single average gruneisen parameter from the TGT.

    Args:
        temperature (float): Temperature in kelvin, if not specified
            will return non-cv-normalized value
        structure (float): Structure to be used in directional heat
            capacity determination, only necessary if temperature
            is specified
        quad (dict): quadrature for integration, should be
            dictionary with "points" and "weights" keys defaults
            to quadpy.sphere.Lebedev(19) as read from file
    """
    return np.trace(self.get_tgt(temperature, structure, quad)) / 3.
```
"def",
"get_gruneisen_parameter",
"(",
"self",
",",
"temperature",
"=",
"None",
",",
"structure",
"=",
"None",
",",
"quad",
"=",
"None",
")",
":",
"return",
"np",
".",
"trace",
"(",
"self",
".",
"get_tgt",
"(",
"temperature",
",",
"structure",
",",
"quad",
")",
")",
"/",
"3."
] | Gets the single average gruneisen parameter from the TGT.
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
structure (float): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be
dictionary with "points" and "weights" keys defaults
to quadpy.sphere.Lebedev(19) as read from file | [
"Gets",
"the",
"single",
"average",
"gruneisen",
"parameter",
"from",
"the",
"TGT",
"."
] | python | train |
**earwig/mwparserfromhell** | `mwparserfromhell/nodes/template.py` | python | train
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/nodes/template.py#L93-L105

```python
def _select_theory(theories):
    """Return the most likely spacing convention given different options.

    Given a dictionary of convention options as keys and their occurrence
    as values, return the convention that occurs the most, or ``None`` if
    there is no clear preferred style.
    """
    if theories:
        values = tuple(theories.values())
        best = max(values)
        confidence = float(best) / sum(values)
        if confidence > 0.5:
            return tuple(theories.keys())[values.index(best)]
```
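The behaviour on concrete inputs, as a self-contained check (the function body above is all that is needed):

```python
# _select_theory picks the majority convention only when it clearly wins.
print(_select_theory({"  ": 3, " ": 1}))   # "  " occurs 3/4 > 0.5 -> returned
print(_select_theory({"a": 2, "b": 2}))    # 2/4 = 0.5, not > 0.5 -> None
print(_select_theory({}))                  # empty dict -> None (implicit)
```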
"def",
"_select_theory",
"(",
"theories",
")",
":",
"if",
"theories",
":",
"values",
"=",
"tuple",
"(",
"theories",
".",
"values",
"(",
")",
")",
"best",
"=",
"max",
"(",
"values",
")",
"confidence",
"=",
"float",
"(",
"best",
")",
"/",
"sum",
"(",
"values",
")",
"if",
"confidence",
">",
"0.5",
":",
"return",
"tuple",
"(",
"theories",
".",
"keys",
"(",
")",
")",
"[",
"values",
".",
"index",
"(",
"best",
")",
"]"
] | Return the most likely spacing convention given different options.
Given a dictionary of convention options as keys and their occurrence
as values, return the convention that occurs the most, or ``None`` if
there is no clear preferred style. | [
"Return",
"the",
"most",
"likely",
"spacing",
"convention",
"given",
"different",
"options",
"."
] | python | train |
**kronenthaler/mod-pbxproj** | `pbxproj/pbxextensions/ProjectFlags.py` | python | train
https://github.com/kronenthaler/mod-pbxproj/blob/8de3cbdd3210480ddbb1fa0f50a4f4ea87de6e71/pbxproj/pbxextensions/ProjectFlags.py#L89-L102

```python
def add_search_paths(self, path_type, paths, recursive=True, escape=False, target_name=None,
                     configuration_name=None):
    """
    Adds the given search paths to the path type section of the target on the configurations
    :param path_type: name of the flag to be added the values to
    :param paths: A string or array of strings
    :param recursive: Add the paths as recursive ones
    :param escape: Escape the path in case it contains spaces
    :param target_name: Target name or list of target names to add the flag to or None for every target
    :param configuration_name: Configuration name to add the flag to or None for every configuration
    :return: void
    """
    for configuration in self.objects.get_configurations_on_targets(target_name, configuration_name):
        configuration.add_search_paths(path_type, paths, recursive, escape)
```
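A hedged usage sketch, assuming the pbxproj package is installed; the project path, flag name, and search path are illustrative:

```python
# Hypothetical usage; adds a recursive header search path to every target.
from pbxproj import XcodeProject

project = XcodeProject.load('MyApp.xcodeproj/project.pbxproj')
project.add_search_paths('HEADER_SEARCH_PATHS', ['$(SRCROOT)/Vendor'],
                         recursive=True)
project.save()
```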
"def",
"add_search_paths",
"(",
"self",
",",
"path_type",
",",
"paths",
",",
"recursive",
"=",
"True",
",",
"escape",
"=",
"False",
",",
"target_name",
"=",
"None",
",",
"configuration_name",
"=",
"None",
")",
":",
"for",
"configuration",
"in",
"self",
".",
"objects",
".",
"get_configurations_on_targets",
"(",
"target_name",
",",
"configuration_name",
")",
":",
"configuration",
".",
"add_search_paths",
"(",
"path_type",
",",
"paths",
",",
"recursive",
",",
"escape",
")"
] | Adds the given search paths to the path type section of the target on the configurations
:param path_type: name of the flag to be added the values to
:param paths: A string or array of strings
:param recursive: Add the paths as recursive ones
:param escape: Escape the path in case it contains spaces
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void | [
"Adds",
"the",
"given",
"search",
"paths",
"to",
"the",
"path",
"type",
"section",
"of",
"the",
"target",
"on",
"the",
"configurations",
":",
"param",
"path_type",
":",
"name",
"of",
"the",
"flag",
"to",
"be",
"added",
"the",
"values",
"to",
":",
"param",
"paths",
":",
"A",
"string",
"or",
"array",
"of",
"strings",
":",
"param",
"recursive",
":",
"Add",
"the",
"paths",
"as",
"recursive",
"ones",
":",
"param",
"escape",
":",
"Escape",
"the",
"path",
"in",
"case",
"it",
"contains",
"spaces",
":",
"param",
"target_name",
":",
"Target",
"name",
"or",
"list",
"of",
"target",
"names",
"to",
"add",
"the",
"flag",
"to",
"or",
"None",
"for",
"every",
"target",
":",
"param",
"configuration_name",
":",
"Configuration",
"name",
"to",
"add",
"the",
"flag",
"to",
"or",
"None",
"for",
"every",
"configuration",
":",
"return",
":",
"void"
] | python | train |
**JIC-CSB/jicimagelib** | `jicimagelib/io.py` | python | train
https://github.com/JIC-CSB/jicimagelib/blob/fbd67accb2e6d55969c6d4ed7e8b4bb4ab65cd44/jicimagelib/io.py#L162-L173

```python
def metadata_from_fname(self, fname):
    """Return meta data extracted from file name.

    :param fname: metadata file name
    :returns: dynamically created :class:`collections.namedtuple`
    """
    MetaData = namedtuple('MetaData', self.split_order)
    base_name = os.path.basename(fname)               # e.g. 'test_S1_C2_Z3_T4.tif'
    name, suffix = base_name.split('.')               # e.g. 'test_S1_C2_Z3_T4', 'tif'
    data = name.split('_')[-len(self.split_order):]   # e.g. ['S1', 'C2', 'Z3', 'T4']
    args = [int(x[1:]) for x in data]                 # e.g. [1, 2, 3, 4]
    return MetaData(*args)
```
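A self-contained illustration of the parsing logic using a stand-in object; the `split_order` value is an assumption about how the owning class is configured, and the imports supply the names the function body uses:

```python
# Minimal stand-in reproducing metadata_from_fname's behaviour.
import os
from collections import namedtuple

class _Parser:
    split_order = ['s', 'c', 'z', 't']         # hypothetical configuration
    metadata_from_fname = metadata_from_fname  # reuse the function above

print(_Parser().metadata_from_fname('test_S1_C2_Z3_T4.tif'))
# expected: MetaData(s=1, c=2, z=3, t=4)
```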
"def",
"metadata_from_fname",
"(",
"self",
",",
"fname",
")",
":",
"MetaData",
"=",
"namedtuple",
"(",
"'MetaData'",
",",
"self",
".",
"split_order",
")",
"base_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"# e.g. 'test_S1_C2_Z3_T4.tif'",
"name",
",",
"suffix",
"=",
"base_name",
".",
"split",
"(",
"'.'",
")",
"# e.g. 'test_S1_C2_Z3_T4', 'tif'",
"data",
"=",
"name",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"len",
"(",
"self",
".",
"split_order",
")",
":",
"]",
"# e.g. ['S1', 'C2', 'Z3', 'T4']",
"args",
"=",
"[",
"int",
"(",
"x",
"[",
"1",
":",
"]",
")",
"for",
"x",
"in",
"data",
"]",
"# e.g. [1, 2, 3, 4]",
"return",
"MetaData",
"(",
"*",
"args",
")"
] | Return meta data extracted from file name.
:param fname: metadata file name
:returns: dynamically created :class:`collections.namedtuple` | [
"Return",
"meta",
"data",
"extracted",
"from",
"file",
"name",
".",
":",
"param",
"fname",
":",
"metadata",
"file",
"name",
":",
"returns",
":",
"dynamically",
"created",
":",
"class",
":",
"collections",
".",
"namedtuple"
] | python | train |
**zerotk/easyfs** | `zerotk/easyfs/_easyfs.py` | python | valid
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L228-L299

```python
def CopyFile(source_filename, target_filename, override=True, md5_check=False, copy_symlink=True):
    '''
    Copy a file from source to target.

    :param source_filename:
        @see _DoCopyFile

    :param target_filename:
        @see _DoCopyFile

    :param bool md5_check:
        If True, checks md5 files (of both source and target files), if they match, skip this copy
        and return MD5_SKIP

        Md5 files are assumed to be {source, target} + '.md5'

        If any file is missing (source, target or md5), the copy will always be made.

    :param copy_symlink:
        @see _DoCopyFile

    :raises FileAlreadyExistsError:
        If target_filename already exists, and override is False

    :raises NotImplementedProtocol:
        If file protocol is not accepted

        Protocols allowed are:
            source_filename: local, ftp, http
            target_filename: local, ftp

    :rtype: None | MD5_SKIP
    :returns:
        MD5_SKIP if the file was not copied because there was a matching .md5 file

    .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
    '''
    from ._exceptions import FileNotFoundError

    # Check override
    if not override and Exists(target_filename):
        from ._exceptions import FileAlreadyExistsError
        raise FileAlreadyExistsError(target_filename)

    # Don't do md5 check for md5 files themselves.
    md5_check = md5_check and not target_filename.endswith('.md5')

    # If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents.
    if md5_check:
        source_md5_filename = source_filename + '.md5'
        target_md5_filename = target_filename + '.md5'
        try:
            source_md5_contents = GetFileContents(source_md5_filename)
        except FileNotFoundError:
            source_md5_contents = None
        try:
            target_md5_contents = GetFileContents(target_md5_filename)
        except FileNotFoundError:
            target_md5_contents = None
        if source_md5_contents is not None and \
                source_md5_contents == target_md5_contents and \
                Exists(target_filename):
            return MD5_SKIP

    # Copy source file
    _DoCopyFile(source_filename, target_filename, copy_symlink=copy_symlink)

    # If we have a source_md5, but no target_md5, create the target_md5 file
    if md5_check and source_md5_contents is not None and source_md5_contents != target_md5_contents:
        CreateFile(target_md5_filename, source_md5_contents)
```
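A usage sketch, assuming the package is installed and re-exports these names at `zerotk.easyfs`; the paths are illustrative:

```python
# Hypothetical usage; re-copy is skipped when the .md5 sidecar files match.
from zerotk.easyfs import CopyFile, MD5_SKIP

result = CopyFile('data/input.csv', 'backup/input.csv', md5_check=True)
if result is MD5_SKIP:
    print('unchanged, copy skipped')
```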
"def",
"CopyFile",
"(",
"source_filename",
",",
"target_filename",
",",
"override",
"=",
"True",
",",
"md5_check",
"=",
"False",
",",
"copy_symlink",
"=",
"True",
")",
":",
"from",
".",
"_exceptions",
"import",
"FileNotFoundError",
"# Check override",
"if",
"not",
"override",
"and",
"Exists",
"(",
"target_filename",
")",
":",
"from",
".",
"_exceptions",
"import",
"FileAlreadyExistsError",
"raise",
"FileAlreadyExistsError",
"(",
"target_filename",
")",
"# Don't do md5 check for md5 files themselves.",
"md5_check",
"=",
"md5_check",
"and",
"not",
"target_filename",
".",
"endswith",
"(",
"'.md5'",
")",
"# If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents.",
"if",
"md5_check",
":",
"source_md5_filename",
"=",
"source_filename",
"+",
"'.md5'",
"target_md5_filename",
"=",
"target_filename",
"+",
"'.md5'",
"try",
":",
"source_md5_contents",
"=",
"GetFileContents",
"(",
"source_md5_filename",
")",
"except",
"FileNotFoundError",
":",
"source_md5_contents",
"=",
"None",
"try",
":",
"target_md5_contents",
"=",
"GetFileContents",
"(",
"target_md5_filename",
")",
"except",
"FileNotFoundError",
":",
"target_md5_contents",
"=",
"None",
"if",
"source_md5_contents",
"is",
"not",
"None",
"and",
"source_md5_contents",
"==",
"target_md5_contents",
"and",
"Exists",
"(",
"target_filename",
")",
":",
"return",
"MD5_SKIP",
"# Copy source file",
"_DoCopyFile",
"(",
"source_filename",
",",
"target_filename",
",",
"copy_symlink",
"=",
"copy_symlink",
")",
"# If we have a source_md5, but no target_md5, create the target_md5 file",
"if",
"md5_check",
"and",
"source_md5_contents",
"is",
"not",
"None",
"and",
"source_md5_contents",
"!=",
"target_md5_contents",
":",
"CreateFile",
"(",
"target_md5_filename",
",",
"source_md5_contents",
")"
] | Copy a file from source to target.
:param source_filename:
@see _DoCopyFile
:param target_filename:
@see _DoCopyFile
:param bool md5_check:
If True, checks md5 files (of both source and target files), if they match, skip this copy
and return MD5_SKIP
Md5 files are assumed to be {source, target} + '.md5'
If any file is missing (source, target or md5), the copy will always be made.
:param copy_symlink:
@see _DoCopyFile
:raises FileAlreadyExistsError:
If target_filename already exists, and override is False
:raises NotImplementedProtocol:
If file protocol is not accepted
Protocols allowed are:
source_filename: local, ftp, http
target_filename: local, ftp
:rtype: None | MD5_SKIP
:returns:
MD5_SKIP if the file was not copied because there was a matching .md5 file
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information | [
"Copy",
"a",
"file",
"from",
"source",
"to",
"target",
"."
] | python | valid |
**chriso/gauged** | `gauged/structures/sparse_map.py` | python | train
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L65-L71

```python
def free(self):
    """Free the map"""
    if self._ptr is None:
        return
    Gauged.map_free(self.ptr)
    SparseMap.ALLOCATIONS -= 1
    self._ptr = None
```
"def",
"free",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ptr",
"is",
"None",
":",
"return",
"Gauged",
".",
"map_free",
"(",
"self",
".",
"ptr",
")",
"SparseMap",
".",
"ALLOCATIONS",
"-=",
"1",
"self",
".",
"_ptr",
"=",
"None"
] | Free the map | [
"Free",
"the",
"map"
] | python | train |
**yyuu/botornado** | `boto/dynamodb/layer2.py` | python | train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/layer2.py#L474-L488

```python
def batch_get_item(self, batch_list):
    """
    Return a set of attributes for multiple items in
    multiple tables using their primary keys.

    :type batch_list: :class:`boto.dynamodb.batch.BatchList`
    :param batch_list: A BatchList object which consists of a
        list of :class:`boto.dynamodb.batch.Batch` objects.
        Each Batch object contains the information about one
        batch of objects that you wish to retrieve in this
        request.
    """
    request_items = self.dynamize_request_items(batch_list)
    return self.layer1.batch_get_item(request_items,
                                      object_hook=item_object_hook)
```
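A hedged sketch of the layer2 call pattern, assuming the boto DynamoDB API of that era; the table name, keys, and response shape are assumptions:

```python
# Hypothetical usage of the old boto layer2 batch API.
import boto

conn = boto.connect_dynamodb()
table = conn.get_table('messages')
batch = conn.new_batch_list()
batch.add_batch(table, keys=['k1', 'k2'])   # hash keys to fetch
response = conn.batch_get_item(batch)
for item in response['Responses']['messages']['Items']:
    print(item)
```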
"def",
"batch_get_item",
"(",
"self",
",",
"batch_list",
")",
":",
"request_items",
"=",
"self",
".",
"dynamize_request_items",
"(",
"batch_list",
")",
"return",
"self",
".",
"layer1",
".",
"batch_get_item",
"(",
"request_items",
",",
"object_hook",
"=",
"item_object_hook",
")"
] | Return a set of attributes for a multiple items in
multiple tables using their primary keys.
:type batch_list: :class:`boto.dynamodb.batch.BatchList`
:param batch_list: A BatchList object which consists of a
list of :class:`boto.dynamoddb.batch.Batch` objects.
Each Batch object contains the information about one
batch of objects that you wish to retrieve in this
request. | [
"Return",
"a",
"set",
"of",
"attributes",
"for",
"a",
"multiple",
"items",
"in",
"multiple",
"tables",
"using",
"their",
"primary",
"keys",
"."
] | python | train |
**rosenbrockc/ci** | `pyci/config.py` | python | train
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L468-L481

```python
def load_xml(self, filepath):
    """Loads the values of the configuration variables from an XML path."""
    from os import path
    import xml.etree.ElementTree as ET

    # Make sure the file exists and then import it as XML and read the values out.
    uxpath = path.expanduser(filepath)
    if path.isfile(uxpath):
        tree = ET.parse(uxpath)
        vms("Parsing global settings from {}.".format(uxpath))
        root = tree.getroot()
        for child in root:
            if child.tag == "var":
                self._vardict[child.attrib["name"]] = child.attrib["value"]
```
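The XML shape this expects can be read off the loop: a root element whose `<var>` children carry `name`/`value` attributes. A self-contained demonstration of that parsing (the root tag name and variable names are illustrative):

```python
# Reproduces load_xml's extraction on an in-memory document.
import xml.etree.ElementTree as ET

doc = '<config><var name="staging" value="~/ci/staging"/></config>'
vardict = {c.attrib["name"]: c.attrib["value"]
           for c in ET.fromstring(doc) if c.tag == "var"}
print(vardict)   # expected: {'staging': '~/ci/staging'}
```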
"def",
"load_xml",
"(",
"self",
",",
"filepath",
")",
":",
"from",
"os",
"import",
"path",
"import",
"xml",
".",
"etree",
".",
"ElementTree",
"as",
"ET",
"#Make sure the file exists and then import it as XML and read the values out.",
"uxpath",
"=",
"path",
".",
"expanduser",
"(",
"filepath",
")",
"if",
"path",
".",
"isfile",
"(",
"uxpath",
")",
":",
"tree",
"=",
"ET",
".",
"parse",
"(",
"uxpath",
")",
"vms",
"(",
"\"Parsing global settings from {}.\"",
".",
"format",
"(",
"uxpath",
")",
")",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"for",
"child",
"in",
"root",
":",
"if",
"child",
".",
"tag",
"==",
"\"var\"",
":",
"self",
".",
"_vardict",
"[",
"child",
".",
"attrib",
"[",
"\"name\"",
"]",
"]",
"=",
"child",
".",
"attrib",
"[",
"\"value\"",
"]"
] | Loads the values of the configuration variables from an XML path. | [
"Loads",
"the",
"values",
"of",
"the",
"configuration",
"variables",
"from",
"an",
"XML",
"path",
"."
] | python | train |
**src-d/modelforge** | `modelforge/slogging.py` | python | train
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/slogging.py#L164-L209

```python
def setup(level: Union[str, int], structured: bool, config_path: str = None):
    """
    Make stdout and stderr unicode friendly in case of misconfigured \
    environments, initializes the logging, structured logging and \
    enables colored logs if it is appropriate.

    :param level: The global logging level.
    :param structured: Output JSON logs to stdout.
    :param config_path: Path to a yaml file that configures the level of output of the loggers. \
                        Root logger level is set through the level argument and will override any \
                        root configuration found in the conf file.
    :return: None
    """
    global logs_are_structured
    logs_are_structured = structured

    if not isinstance(level, int):
        level = logging._nameToLevel[level]

    def ensure_utf8_stream(stream):
        if not isinstance(stream, io.StringIO) and hasattr(stream, "buffer"):
            stream = codecs.getwriter("utf-8")(stream.buffer)
            stream.encoding = "utf-8"
        return stream

    sys.stdout, sys.stderr = (ensure_utf8_stream(s)
                              for s in (sys.stdout, sys.stderr))

    # basicConfig is only called to make sure there is at least one handler for the root logger.
    # All the output level setting is done right afterwards.
    logging.basicConfig()
    logging.setLogRecordFactory(NumpyLogRecord)
    if config_path is not None and os.path.isfile(config_path):
        with open(config_path) as fh:
            config = yaml.safe_load(fh)
        for key, val in config.items():
            logging.getLogger(key).setLevel(logging._nameToLevel.get(val, level))
    root = logging.getLogger()
    root.setLevel(level)

    if not structured:
        if not sys.stdin.closed and sys.stdout.isatty():
            handler = root.handlers[0]
            handler.setFormatter(AwesomeFormatter())
    else:
        root.handlers[0] = StructuredHandler(level)
```
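A usage sketch, assuming modelforge is installed:

```python
# Hypothetical call; colored plain-text logs at INFO, no per-logger YAML file.
from modelforge.slogging import setup

setup("INFO", structured=False)
```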
"def",
"setup",
"(",
"level",
":",
"Union",
"[",
"str",
",",
"int",
"]",
",",
"structured",
":",
"bool",
",",
"config_path",
":",
"str",
"=",
"None",
")",
":",
"global",
"logs_are_structured",
"logs_are_structured",
"=",
"structured",
"if",
"not",
"isinstance",
"(",
"level",
",",
"int",
")",
":",
"level",
"=",
"logging",
".",
"_nameToLevel",
"[",
"level",
"]",
"def",
"ensure_utf8_stream",
"(",
"stream",
")",
":",
"if",
"not",
"isinstance",
"(",
"stream",
",",
"io",
".",
"StringIO",
")",
"and",
"hasattr",
"(",
"stream",
",",
"\"buffer\"",
")",
":",
"stream",
"=",
"codecs",
".",
"getwriter",
"(",
"\"utf-8\"",
")",
"(",
"stream",
".",
"buffer",
")",
"stream",
".",
"encoding",
"=",
"\"utf-8\"",
"return",
"stream",
"sys",
".",
"stdout",
",",
"sys",
".",
"stderr",
"=",
"(",
"ensure_utf8_stream",
"(",
"s",
")",
"for",
"s",
"in",
"(",
"sys",
".",
"stdout",
",",
"sys",
".",
"stderr",
")",
")",
"# basicConfig is only called to make sure there is at least one handler for the root logger.",
"# All the output level setting is down right afterwards.",
"logging",
".",
"basicConfig",
"(",
")",
"logging",
".",
"setLogRecordFactory",
"(",
"NumpyLogRecord",
")",
"if",
"config_path",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"config_path",
")",
":",
"with",
"open",
"(",
"config_path",
")",
"as",
"fh",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"fh",
")",
"for",
"key",
",",
"val",
"in",
"config",
".",
"items",
"(",
")",
":",
"logging",
".",
"getLogger",
"(",
"key",
")",
".",
"setLevel",
"(",
"logging",
".",
"_nameToLevel",
".",
"get",
"(",
"val",
",",
"level",
")",
")",
"root",
"=",
"logging",
".",
"getLogger",
"(",
")",
"root",
".",
"setLevel",
"(",
"level",
")",
"if",
"not",
"structured",
":",
"if",
"not",
"sys",
".",
"stdin",
".",
"closed",
"and",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
":",
"handler",
"=",
"root",
".",
"handlers",
"[",
"0",
"]",
"handler",
".",
"setFormatter",
"(",
"AwesomeFormatter",
"(",
")",
")",
"else",
":",
"root",
".",
"handlers",
"[",
"0",
"]",
"=",
"StructuredHandler",
"(",
"level",
")"
] | Make stdout and stderr unicode friendly in case of misconfigured \
environments, initializes the logging, structured logging and \
enables colored logs if it is appropriate.
:param level: The global logging level.
:param structured: Output JSON logs to stdout.
:param config_path: Path to a yaml file that configures the level of output of the loggers. \
Root logger level is set through the level argument and will override any \
root configuration found in the conf file.
:return: None | [
"Make",
"stdout",
"and",
"stderr",
"unicode",
"friendly",
"in",
"case",
"of",
"misconfigured",
"\\",
"environments",
"initializes",
"the",
"logging",
"structured",
"logging",
"and",
"\\",
"enables",
"colored",
"logs",
"if",
"it",
"is",
"appropriate",
"."
] | python | train |
**joshuaduffy/dota2api** | `dota2api/__init__.py` | python | train
https://github.com/joshuaduffy/dota2api/blob/03c9e1c609ec36728805bbd3ada0a53ec8f51e86/dota2api/__init__.py#L109-L122

```python
def get_match_details(self, match_id=None, **kwargs):
    """Returns a dictionary containing the details for a Dota 2 match

    :param match_id: (int, optional)
    :return: dictionary of matches, see :doc:`responses </responses>`
    """
    if 'match_id' not in kwargs:
        kwargs['match_id'] = match_id
    url = self.__build_url(urls.GET_MATCH_DETAILS, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    if not self.__check_http_err(req.status_code):
        return response.build(req, url, self.raw_mode)
```
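A hedged usage sketch, assuming dota2api is installed and a Steam API key is available; the match id and response key are illustrative:

```python
# Hypothetical usage; Initialise() may read the key from the environment,
# or it can be passed explicitly.
import dota2api

api = dota2api.Initialise('YOUR_STEAM_API_KEY')
match = api.get_match_details(match_id=1000193456)
print(match['radiant_win'])
```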
"def",
"get_match_details",
"(",
"self",
",",
"match_id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'match_id'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'match_id'",
"]",
"=",
"match_id",
"url",
"=",
"self",
".",
"__build_url",
"(",
"urls",
".",
"GET_MATCH_DETAILS",
",",
"*",
"*",
"kwargs",
")",
"req",
"=",
"self",
".",
"executor",
"(",
"url",
")",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'URL: {0}'",
".",
"format",
"(",
"url",
")",
")",
"if",
"not",
"self",
".",
"__check_http_err",
"(",
"req",
".",
"status_code",
")",
":",
"return",
"response",
".",
"build",
"(",
"req",
",",
"url",
",",
"self",
".",
"raw_mode",
")"
] | Returns a dictionary containing the details for a Dota 2 match
:param match_id: (int, optional)
:return: dictionary of matches, see :doc:`responses </responses>` | [
"Returns",
"a",
"dictionary",
"containing",
"the",
"details",
"for",
"a",
"Dota",
"2",
"match"
] | python | train |
**glue-viz/glue-vispy-viewers** | `glue_vispy_viewers/extern/vispy/scene/canvas.py` | python | train
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/canvas.py#L465-L479

```python
def on_resize(self, event):
    """Resize handler

    Parameters
    ----------
    event : instance of Event
        The resize event.
    """
    self._update_transforms()

    if self._central_widget is not None:
        self._central_widget.size = self.size

    if len(self._vp_stack) == 0:
        self.context.set_viewport(0, 0, *self.physical_size)
```
"def",
"on_resize",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"_update_transforms",
"(",
")",
"if",
"self",
".",
"_central_widget",
"is",
"not",
"None",
":",
"self",
".",
"_central_widget",
".",
"size",
"=",
"self",
".",
"size",
"if",
"len",
"(",
"self",
".",
"_vp_stack",
")",
"==",
"0",
":",
"self",
".",
"context",
".",
"set_viewport",
"(",
"0",
",",
"0",
",",
"*",
"self",
".",
"physical_size",
")"
] | Resize handler
Parameters
----------
event : instance of Event
The resize event. | [
"Resize",
"handler"
] | python | train |
**confirm/ansibleci** | `ansibleci/helper.py` | python | train
https://github.com/confirm/ansibleci/blob/6a53ae8c4a4653624977e146092422857f661b8f/ansibleci/helper.py#L49-L63

```python
def get_roles(self):
    '''
    Returns a key-value dict of roles, where the key is the role name
    and the value is the absolute role path.
    '''
    roles = {}
    paths = self.get_roles_paths()

    for path in paths:
        for entry in os.listdir(path):
            rolepath = os.path.join(path, entry)
            if os.path.isdir(rolepath):
                roles[entry] = rolepath

    return roles
```
"def",
"get_roles",
"(",
"self",
")",
":",
"roles",
"=",
"{",
"}",
"paths",
"=",
"self",
".",
"get_roles_paths",
"(",
")",
"for",
"path",
"in",
"paths",
":",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"rolepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"entry",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"rolepath",
")",
":",
"roles",
"[",
"entry",
"]",
"=",
"rolepath",
"return",
"roles"
] | Returns a key-value dict with a roles, while the key is the role name
and the value is the absolute role path. | [
"Returns",
"a",
"key",
"-",
"value",
"dict",
"with",
"a",
"roles",
"while",
"the",
"key",
"is",
"the",
"role",
"name",
"and",
"the",
"value",
"is",
"the",
"absolute",
"role",
"path",
"."
] | python | train |
**jonfaustman/django-frontend** | `djfrontend/templatetags/djfrontend.py` | python | test
https://github.com/jonfaustman/django-frontend/blob/897934d593fade0eb1998f8fadd18c91a89e5b9a/djfrontend/templatetags/djfrontend.py#L240-L253

```python
def djfrontend_twbs_css(version=None):
    """
    Returns Twitter Bootstrap CSS file.
    TEMPLATE_DEBUG returns full file, otherwise returns minified file.
    """
    if version is None:
        if not getattr(settings, 'DJFRONTEND_TWBS_CSS', False):
            version = getattr(settings, 'DJFRONTEND_TWBS_VERSION', DJFRONTEND_TWBS_VERSION_DEFAULT)
        else:
            version = getattr(settings, 'DJFRONTEND_TWBS_CSS', DJFRONTEND_TWBS_VERSION_DEFAULT)
    return format_html(
        '<link rel="stylesheet" href="{static}djfrontend/css/twbs/{v}/bootstrap{min}.css">',
        static=_static_url, v=version, min=_min)
```
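In a template this would presumably be used via `{% load djfrontend %}` followed by `{% djfrontend_twbs_css %}`. A hedged sketch of a direct call, assuming the app is installed and a Django settings module is configured (the version string is illustrative):

```python
# Hypothetical direct call; an explicit version bypasses the settings lookup.
from djfrontend.templatetags.djfrontend import djfrontend_twbs_css

html = djfrontend_twbs_css('3.3.4')
print(html)   # <link rel="stylesheet" href=".../twbs/3.3.4/bootstrap(.min).css">
```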
"def",
"djfrontend_twbs_css",
"(",
"version",
"=",
"None",
")",
":",
"if",
"version",
"is",
"None",
":",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'DJFRONTEND_TWBS_CSS'",
",",
"False",
")",
":",
"version",
"=",
"getattr",
"(",
"settings",
",",
"'DJFRONTEND_TWBS_VERSION'",
",",
"DJFRONTEND_TWBS_VERSION_DEFAULT",
")",
"else",
":",
"version",
"=",
"getattr",
"(",
"settings",
",",
"'DJFRONTEND_TWBS_CSS'",
",",
"DJFRONTEND_TWBS_VERSION_DEFAULT",
")",
"return",
"format_html",
"(",
"'<link rel=\"stylesheet\" href=\"{static}djfrontend/css/twbs/{v}/bootstrap{min}.css\">'",
",",
"static",
"=",
"_static_url",
",",
"v",
"=",
"version",
",",
"min",
"=",
"_min",
")"
] | Returns Twitter Bootstrap CSS file.
TEMPLATE_DEBUG returns full file, otherwise returns minified file. | [
"Returns",
"Twitter",
"Bootstrap",
"CSS",
"file",
".",
"TEMPLATE_DEBUG",
"returns",
"full",
"file",
"otherwise",
"returns",
"minified",
"file",
"."
] | python | test |
**googleapis/google-cloud-python** | `core/google/cloud/operation.py` | python | train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/operation.py#L252-L266

```python
def poll(self):
    """Check if the operation has finished.

    :rtype: bool
    :returns: A boolean indicating if the current operation has completed.
    :raises ValueError: if the operation
        has already completed.
    """
    if self.complete:
        raise ValueError("The operation has completed.")

    operation_pb = self._get_operation()
    self._update_state(operation_pb)

    return self.complete
```
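A polling-loop sketch against this interface; the `operation` object is assumed to come from a google-cloud client call, and the back-off interval is illustrative:

```python
# Hypothetical loop; poll() returns True once the long-running op finishes.
import time

while not operation.complete:
    if operation.poll():
        break
    time.sleep(1)   # back off between checks
print(operation.metadata)
```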
"def",
"poll",
"(",
"self",
")",
":",
"if",
"self",
".",
"complete",
":",
"raise",
"ValueError",
"(",
"\"The operation has completed.\"",
")",
"operation_pb",
"=",
"self",
".",
"_get_operation",
"(",
")",
"self",
".",
"_update_state",
"(",
"operation_pb",
")",
"return",
"self",
".",
"complete"
] | Check if the operation has finished.
:rtype: bool
:returns: A boolean indicating if the current operation has completed.
:raises ValueError: if the operation
has already completed. | [
"Check",
"if",
"the",
"operation",
"has",
"finished",
"."
] | python | train |
**empymod/empymod** | `empymod/utils.py` | python | train
https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/utils.py#L1316-L1402

```python
def get_abs(msrc, mrec, srcazm, srcdip, recazm, recdip, verb):
    r"""Get required ab's for given angles.

    This check-function is called from one of the modelling routines in
    :mod:`model`. Consult these modelling routines for a detailed description
    of the input parameters.

    Parameters
    ----------
    msrc, mrec : bool
        True if src/rec is magnetic, else False.

    srcazm, recazm : float
        Horizontal source/receiver angle (azimuth).

    srcdip, recdip : float
        Vertical source/receiver angle (dip).

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    ab_calc : array of int
        ab's to calculate for this bipole.
    """
    # Get required ab's (9 at most)
    ab_calc = np.array([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
    if msrc:
        ab_calc += 3
    if mrec:
        ab_calc += 30

        # Switch <ab> using reciprocity.
        if msrc:
            # G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
            ab_calc -= 33  # -30 : mrec->erec; -3: msrc->esrc
        else:
            # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
            ab_calc = ab_calc % 10*10 + ab_calc // 10  # Swap alpha/beta

    # Remove unnecessary ab's
    bab = np.asarray(ab_calc*0+1, dtype=bool)

    # Remove if source is x- or y-directed
    check = np.atleast_1d(srcazm)[0]
    if np.allclose(srcazm % (np.pi/2), 0):  # if all angles are multiples of 90
        if np.isclose(check // (np.pi/2) % 2, 0):  # Multiples of pi (180)
            bab[:, 1] *= False  # x-directed source, remove y
        else:  # Multiples of pi/2 (90)
            bab[:, 0] *= False  # y-directed source, remove x

    # Remove if source is vertical
    check = np.atleast_1d(srcdip)[0]
    if np.allclose(srcdip % (np.pi/2), 0):  # if all angles are multiples of 90
        if np.isclose(check // (np.pi/2) % 2, 0):  # Multiples of pi (180)
            bab[:, 2] *= False  # Horizontal, remove z
        else:  # Multiples of pi/2 (90)
            bab[:, :2] *= False  # Vertical, remove x/y

    # Remove if receiver is x- or y-directed
    check = np.atleast_1d(recazm)[0]
    if np.allclose(recazm % (np.pi/2), 0):  # if all angles are multiples of 90
        if np.isclose(check // (np.pi/2) % 2, 0):  # Multiples of pi (180)
            bab[1, :] *= False  # x-directed receiver, remove y
        else:  # Multiples of pi/2 (90)
            bab[0, :] *= False  # y-directed receiver, remove x

    # Remove if receiver is vertical
    check = np.atleast_1d(recdip)[0]
    if np.allclose(recdip % (np.pi/2), 0):  # if all angles are multiples of 90
        if np.isclose(check // (np.pi/2) % 2, 0):  # Multiples of pi (180)
            bab[2, :] *= False  # Horizontal, remove z
        else:  # Multiples of pi/2 (90)
            bab[:2, :] *= False  # Vertical, remove x/y

    # Reduce
    ab_calc = ab_calc[bab].ravel()

    # Print actual calculated <ab>
    if verb > 2:
        print("   Required ab's : ", _strvar(ab_calc))

    return ab_calc
```
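A worked example, assuming empymod is installed. An x-directed, horizontal electric source and receiver (all angles zero, both non-magnetic) leave only the `ab=11` coupling: the y column, z column, y row, and z row of the 3x3 grid are all masked out:

```python
# Hypothetical call; electric source and receiver, all angles zero.
from empymod.utils import get_abs

abs_ = get_abs(msrc=False, mrec=False, srcazm=0, srcdip=0,
               recazm=0, recdip=0, verb=0)
print(abs_)   # expected: [11]
```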
"def",
"get_abs",
"(",
"msrc",
",",
"mrec",
",",
"srcazm",
",",
"srcdip",
",",
"recazm",
",",
"recdip",
",",
"verb",
")",
":",
"# Get required ab's (9 at most)",
"ab_calc",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"11",
",",
"12",
",",
"13",
"]",
",",
"[",
"21",
",",
"22",
",",
"23",
"]",
",",
"[",
"31",
",",
"32",
",",
"33",
"]",
"]",
")",
"if",
"msrc",
":",
"ab_calc",
"+=",
"3",
"if",
"mrec",
":",
"ab_calc",
"+=",
"30",
"# Switch <ab> using reciprocity.",
"if",
"msrc",
":",
"# G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)",
"ab_calc",
"-=",
"33",
"# -30 : mrec->erec; -3: msrc->esrc",
"else",
":",
"# G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)",
"ab_calc",
"=",
"ab_calc",
"%",
"10",
"*",
"10",
"+",
"ab_calc",
"//",
"10",
"# Swap alpha/beta",
"# Remove unnecessary ab's",
"bab",
"=",
"np",
".",
"asarray",
"(",
"ab_calc",
"*",
"0",
"+",
"1",
",",
"dtype",
"=",
"bool",
")",
"# Remove if source is x- or y-directed",
"check",
"=",
"np",
".",
"atleast_1d",
"(",
"srcazm",
")",
"[",
"0",
"]",
"if",
"np",
".",
"allclose",
"(",
"srcazm",
"%",
"(",
"np",
".",
"pi",
"/",
"2",
")",
",",
"0",
")",
":",
"# if all angles are multiples of 90",
"if",
"np",
".",
"isclose",
"(",
"check",
"//",
"(",
"np",
".",
"pi",
"/",
"2",
")",
"%",
"2",
",",
"0",
")",
":",
"# Multiples of pi (180)",
"bab",
"[",
":",
",",
"1",
"]",
"*=",
"False",
"# x-directed source, remove y",
"else",
":",
"# Multiples of pi/2 (90)",
"bab",
"[",
":",
",",
"0",
"]",
"*=",
"False",
"# y-directed source, remove x",
"# Remove if source is vertical",
"check",
"=",
"np",
".",
"atleast_1d",
"(",
"srcdip",
")",
"[",
"0",
"]",
"if",
"np",
".",
"allclose",
"(",
"srcdip",
"%",
"(",
"np",
".",
"pi",
"/",
"2",
")",
",",
"0",
")",
":",
"# if all angles are multiples of 90",
"if",
"np",
".",
"isclose",
"(",
"check",
"//",
"(",
"np",
".",
"pi",
"/",
"2",
")",
"%",
"2",
",",
"0",
")",
":",
"# Multiples of pi (180)",
"bab",
"[",
":",
",",
"2",
"]",
"*=",
"False",
"# Horizontal, remove z",
"else",
":",
"# Multiples of pi/2 (90)",
"bab",
"[",
":",
",",
":",
"2",
"]",
"*=",
"False",
"# Vertical, remove x/y",
"# Remove if receiver is x- or y-directed",
"check",
"=",
"np",
".",
"atleast_1d",
"(",
"recazm",
")",
"[",
"0",
"]",
"if",
"np",
".",
"allclose",
"(",
"recazm",
"%",
"(",
"np",
".",
"pi",
"/",
"2",
")",
",",
"0",
")",
":",
"# if all angles are multiples of 90",
"if",
"np",
".",
"isclose",
"(",
"check",
"//",
"(",
"np",
".",
"pi",
"/",
"2",
")",
"%",
"2",
",",
"0",
")",
":",
"# Multiples of pi (180)",
"bab",
"[",
"1",
",",
":",
"]",
"*=",
"False",
"# x-directed receiver, remove y",
"else",
":",
"# Multiples of pi/2 (90)",
"bab",
"[",
"0",
",",
":",
"]",
"*=",
"False",
"# y-directed receiver, remove x",
"# Remove if receiver is vertical",
"check",
"=",
"np",
".",
"atleast_1d",
"(",
"recdip",
")",
"[",
"0",
"]",
"if",
"np",
".",
"allclose",
"(",
"recdip",
"%",
"(",
"np",
".",
"pi",
"/",
"2",
")",
",",
"0",
")",
":",
"# if all angles are multiples of 90",
"if",
"np",
".",
"isclose",
"(",
"check",
"//",
"(",
"np",
".",
"pi",
"/",
"2",
")",
"%",
"2",
",",
"0",
")",
":",
"# Multiples of pi (180)",
"bab",
"[",
"2",
",",
":",
"]",
"*=",
"False",
"# Horizontal, remove z",
"else",
":",
"# Multiples of pi/2 (90)",
"bab",
"[",
":",
"2",
",",
":",
"]",
"*=",
"False",
"# Vertical, remove x/y",
"# Reduce",
"ab_calc",
"=",
"ab_calc",
"[",
"bab",
"]",
".",
"ravel",
"(",
")",
"# Print actual calculated <ab>",
"if",
"verb",
">",
"2",
":",
"print",
"(",
"\" Required ab's : \"",
",",
"_strvar",
"(",
"ab_calc",
")",
")",
"return",
"ab_calc"
] | r"""Get required ab's for given angles.
This check-function is called from one of the modelling routines in
:mod:`model`. Consult these modelling routines for a detailed description
of the input parameters.
Parameters
----------
msrc, mrec : bool
True if src/rec is magnetic, else False.
srcazm, recazm : float
Horizontal source/receiver angle (azimuth).
srcdip, recdip : float
Vertical source/receiver angle (dip).
verb : {0, 1, 2, 3, 4}
Level of verbosity.
Returns
-------
ab_calc : array of int
ab's to calculate for this bipole. | [
"r",
"Get",
"required",
"ab",
"s",
"for",
"given",
"angles",
"."
] | python | train |
**scanny/python-pptx** | `pptx/chart/datalabel.py` | python | train
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/datalabel.py#L197-L210

```python
def position(self):
    """
    Read/write :ref:`XlDataLabelPosition` member specifying the position
    of this data label with respect to its data point, or |None| if no
    position is specified. Assigning |None| causes PowerPoint to choose
    the default position, which varies by chart type.
    """
    dLbl = self._dLbl
    if dLbl is None:
        return None
    dLblPos = dLbl.dLblPos
    if dLblPos is None:
        return None
    return dLblPos.val
```
"def",
"position",
"(",
"self",
")",
":",
"dLbl",
"=",
"self",
".",
"_dLbl",
"if",
"dLbl",
"is",
"None",
":",
"return",
"None",
"dLblPos",
"=",
"dLbl",
".",
"dLblPos",
"if",
"dLblPos",
"is",
"None",
":",
"return",
"None",
"return",
"dLblPos",
".",
"val"
] | Read/write :ref:`XlDataLabelPosition` member specifying the position
of this data label with respect to its data point, or |None| if no
position is specified. Assigning |None| causes PowerPoint to choose
the default position, which varies by chart type. | [
"Read",
"/",
"write",
":",
"ref",
":",
"XlDataLabelPosition",
"member",
"specifying",
"the",
"position",
"of",
"this",
"data",
"label",
"with",
"respect",
"to",
"its",
"data",
"point",
"or",
"|None|",
"if",
"no",
"position",
"is",
"specified",
".",
"Assigning",
"|None|",
"causes",
"PowerPoint",
"to",
"choose",
"the",
"default",
"position",
"which",
"varies",
"by",
"chart",
"type",
"."
] | python | train |
**tensorpack/tensorpack** | `examples/SpatialTransformer/mnist-addition.py` | python | train
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SpatialTransformer/mnist-addition.py#L21-L44

```python
def sample(img, coords):
    """
    Args:
        img: bxhxwxc
        coords: bxh2xw2x2. each coordinate is (y, x) integer.
            Out of boundary coordinates will be clipped.
    Return:
        bxh2xw2xc image
    """
    shape = img.get_shape().as_list()[1:]       # h, w, c
    batch = tf.shape(img)[0]
    shape2 = coords.get_shape().as_list()[1:3]  # h2, w2
    assert None not in shape2, coords.get_shape()
    max_coor = tf.constant([shape[0] - 1, shape[1] - 1], dtype=tf.float32)

    coords = tf.clip_by_value(coords, 0., max_coor)  # borderMode==repeat
    coords = tf.cast(coords, tf.int32)

    batch_index = tf.range(batch, dtype=tf.int32)
    batch_index = tf.reshape(batch_index, [-1, 1, 1, 1])
    batch_index = tf.tile(batch_index, [1, shape2[0], shape2[1], 1])  # bxh2xw2x1
    indices = tf.concat([batch_index, coords], axis=3)  # bxh2xw2x3
    sampled = tf.gather_nd(img, indices)
    return sampled
```
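A shape-level sketch under TensorFlow 1.x graph semantics (an assumption, since the example uses `tf.placeholder`); the image and grid sizes are illustrative:

```python
# Hypothetical TF1 usage; gathers pixels at integer (y, x) coordinates.
import tensorflow as tf  # assumes TensorFlow 1.x graph mode

img = tf.placeholder(tf.float32, [None, 28, 28, 3])      # b x h x w x c
coords = tf.placeholder(tf.float32, [None, 10, 10, 2])   # b x h2 x w2 x 2
out = sample(img, coords)                                # -> b x 10 x 10 x 3
print(out.shape)                                         # (?, 10, 10, 3)
```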
"def",
"sample",
"(",
"img",
",",
"coords",
")",
":",
"shape",
"=",
"img",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"# h, w, c",
"batch",
"=",
"tf",
".",
"shape",
"(",
"img",
")",
"[",
"0",
"]",
"shape2",
"=",
"coords",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"3",
"]",
"# h2, w2",
"assert",
"None",
"not",
"in",
"shape2",
",",
"coords",
".",
"get_shape",
"(",
")",
"max_coor",
"=",
"tf",
".",
"constant",
"(",
"[",
"shape",
"[",
"0",
"]",
"-",
"1",
",",
"shape",
"[",
"1",
"]",
"-",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"coords",
"=",
"tf",
".",
"clip_by_value",
"(",
"coords",
",",
"0.",
",",
"max_coor",
")",
"# borderMode==repeat",
"coords",
"=",
"tf",
".",
"cast",
"(",
"coords",
",",
"tf",
".",
"int32",
")",
"batch_index",
"=",
"tf",
".",
"range",
"(",
"batch",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"batch_index",
"=",
"tf",
".",
"reshape",
"(",
"batch_index",
",",
"[",
"-",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"batch_index",
"=",
"tf",
".",
"tile",
"(",
"batch_index",
",",
"[",
"1",
",",
"shape2",
"[",
"0",
"]",
",",
"shape2",
"[",
"1",
"]",
",",
"1",
"]",
")",
"# bxh2xw2x1",
"indices",
"=",
"tf",
".",
"concat",
"(",
"[",
"batch_index",
",",
"coords",
"]",
",",
"axis",
"=",
"3",
")",
"# bxh2xw2x3",
"sampled",
"=",
"tf",
".",
"gather_nd",
"(",
"img",
",",
"indices",
")",
"return",
"sampled"
] | Args:
img: bxhxwxc
coords: bxh2xw2x2. each coordinate is (y, x) integer.
Out of boundary coordinates will be clipped.
Return:
bxh2xw2xc image | [
"Args",
":",
"img",
":",
"bxhxwxc",
"coords",
":",
"bxh2xw2x2",
".",
"each",
"coordinate",
"is",
"(",
"y",
"x",
")",
"integer",
".",
"Out",
"of",
"boundary",
"coordinates",
"will",
"be",
"clipped",
".",
"Return",
":",
"bxh2xw2xc",
"image"
] | python | train |
**jupyterhub/kubespawner** | `kubespawner/spawner.py` | python | train
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1456-L1492

```python
def poll(self):
    """
    Check if the pod is still running.

    Uses the same interface as subprocess.Popen.poll(): if the pod is
    still running, returns None. If the pod has exited, return the
    exit code if we can determine it, or 1 if it has exited but we
    don't know how. These are the return values JupyterHub expects.

    Note that a clean exit will have an exit code of zero, so it is
    necessary to check that the returned value is None, rather than
    just Falsy, to determine that the pod is still running.
    """
    # have to wait for first load of data before we have a valid answer
    if not self.pod_reflector.first_load_future.done():
        yield self.pod_reflector.first_load_future
    data = self.pod_reflector.pods.get(self.pod_name, None)
    if data is not None:
        if data.status.phase == 'Pending':
            return None
        ctr_stat = data.status.container_statuses
        if ctr_stat is None:  # No status, no container (we hope)
            # This seems to happen when a pod is idle-culled.
            return 1
        for c in ctr_stat:
            # return exit code if notebook container has terminated
            if c.name == 'notebook':
                if c.state.terminated:
                    # call self.stop to delete the pod
                    if self.delete_stopped_pods:
                        yield self.stop(now=True)
                    return c.state.terminated.exit_code
                break
        # None means pod is running or starting up
        return None
    # pod doesn't exist or has been deleted
    return 1
```
"def",
"poll",
"(",
"self",
")",
":",
"# have to wait for first load of data before we have a valid answer",
"if",
"not",
"self",
".",
"pod_reflector",
".",
"first_load_future",
".",
"done",
"(",
")",
":",
"yield",
"self",
".",
"pod_reflector",
".",
"first_load_future",
"data",
"=",
"self",
".",
"pod_reflector",
".",
"pods",
".",
"get",
"(",
"self",
".",
"pod_name",
",",
"None",
")",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"data",
".",
"status",
".",
"phase",
"==",
"'Pending'",
":",
"return",
"None",
"ctr_stat",
"=",
"data",
".",
"status",
".",
"container_statuses",
"if",
"ctr_stat",
"is",
"None",
":",
"# No status, no container (we hope)",
"# This seems to happen when a pod is idle-culled.",
"return",
"1",
"for",
"c",
"in",
"ctr_stat",
":",
"# return exit code if notebook container has terminated",
"if",
"c",
".",
"name",
"==",
"'notebook'",
":",
"if",
"c",
".",
"state",
".",
"terminated",
":",
"# call self.stop to delete the pod",
"if",
"self",
".",
"delete_stopped_pods",
":",
"yield",
"self",
".",
"stop",
"(",
"now",
"=",
"True",
")",
"return",
"c",
".",
"state",
".",
"terminated",
".",
"exit_code",
"break",
"# None means pod is running or starting up",
"return",
"None",
"# pod doesn't exist or has been deleted",
"return",
"1"
] | Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running. | [
"Check",
"if",
"the",
"pod",
"is",
"still",
"running",
"."
] | python | train |
GNS3/gns3-server | gns3server/compute/vpcs/vpcs_vm.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L324-L337 | def read_vpcs_stdout(self):
"""
Reads the standard output of the VPCS process.
Only use when the process has been stopped or has crashed.
"""
output = ""
if self._vpcs_stdout_file:
try:
with open(self._vpcs_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("Could not read {}: {}".format(self._vpcs_stdout_file, e))
return output | [
"def",
"read_vpcs_stdout",
"(",
"self",
")",
":",
"output",
"=",
"\"\"",
"if",
"self",
".",
"_vpcs_stdout_file",
":",
"try",
":",
"with",
"open",
"(",
"self",
".",
"_vpcs_stdout_file",
",",
"\"rb\"",
")",
"as",
"file",
":",
"output",
"=",
"file",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
",",
"errors",
"=",
"\"replace\"",
")",
"except",
"OSError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"\"Could not read {}: {}\"",
".",
"format",
"(",
"self",
".",
"_vpcs_stdout_file",
",",
"e",
")",
")",
"return",
"output"
] | Reads the standard output of the VPCS process.
Only use when the process has been stopped or has crashed. | [
"Reads",
"the",
"standard",
"output",
"of",
"the",
"VPCS",
"process",
".",
"Only",
"use",
"when",
"the",
"process",
"has",
"been",
"stopped",
"or",
"has",
"crashed",
"."
] | python | train |
tdryer/hangups | hangups/client.py | https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/client.py#L374-L398 | async def _add_channel_services(self):
"""Add services to the channel.
The services we add to the channel determine what kind of data we will
receive on it.
The "babel" service includes what we need for Hangouts. If this fails
for some reason, hangups will never receive any events. The
"babel_presence_last_seen" service is also required to receive presence
notifications.
This needs to be re-called whenever we open a new channel (when there's
a new SID and client_id).
"""
logger.info('Adding channel services...')
# Based on what Hangouts for Chrome does over 2 requests, this is
# trimmed down to 1 request that includes the bare minimum to make
# things work.
services = ["babel", "babel_presence_last_seen"]
map_list = [
dict(p=json.dumps({"3": {"1": {"1": service}}}))
for service in services
]
await self._channel.send_maps(map_list)
logger.info('Channel services added') | [
"async",
"def",
"_add_channel_services",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Adding channel services...'",
")",
"# Based on what Hangouts for Chrome does over 2 requests, this is",
"# trimmed down to 1 request that includes the bare minimum to make",
"# things work.",
"services",
"=",
"[",
"\"babel\"",
",",
"\"babel_presence_last_seen\"",
"]",
"map_list",
"=",
"[",
"dict",
"(",
"p",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"3\"",
":",
"{",
"\"1\"",
":",
"{",
"\"1\"",
":",
"service",
"}",
"}",
"}",
")",
")",
"for",
"service",
"in",
"services",
"]",
"await",
"self",
".",
"_channel",
".",
"send_maps",
"(",
"map_list",
")",
"logger",
".",
"info",
"(",
"'Channel services added'",
")"
] | Add services to the channel.
The services we add to the channel determine what kind of data we will
receive on it.
The "babel" service includes what we need for Hangouts. If this fails
for some reason, hangups will never receive any events. The
"babel_presence_last_seen" service is also required to receive presence
notifications.
This needs to be re-called whenever we open a new channel (when there's
a new SID and client_id). | [
"Add",
"services",
"to",
"the",
"channel",
"."
] | python | valid |
uw-it-aca/uw-restclients-core | restclients_core/dao.py | https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L64-L74 | def _custom_response_edit(self, method, url, headers, body, response):
"""
This method allows a service to edit a response.
If you want to do this, you probably really want to use
_edit_mock_response - this method will operate on Live resources.
"""
if self.get_implementation().is_mock():
delay = self.get_setting("MOCKDATA_DELAY", 0.0)
time.sleep(delay)
self._edit_mock_response(method, url, headers, body, response) | [
"def",
"_custom_response_edit",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
",",
"body",
",",
"response",
")",
":",
"if",
"self",
".",
"get_implementation",
"(",
")",
".",
"is_mock",
"(",
")",
":",
"delay",
"=",
"self",
".",
"get_setting",
"(",
"\"MOCKDATA_DELAY\"",
",",
"0.0",
")",
"time",
".",
"sleep",
"(",
"delay",
")",
"self",
".",
"_edit_mock_response",
"(",
"method",
",",
"url",
",",
"headers",
",",
"body",
",",
"response",
")"
] | This method allows a service to edit a response.
If you want to do this, you probably really want to use
_edit_mock_response - this method will operate on Live resources. | [
"This",
"method",
"allows",
"a",
"service",
"to",
"edit",
"a",
"response",
"."
] | python | train |
genialis/resolwe | resolwe/flow/managers/dispatcher.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L60-L87 | def dependency_status(data):
"""Return abstracted status of dependencies.
- ``STATUS_ERROR`` .. one dependency has error status or was deleted
- ``STATUS_DONE`` .. all dependencies have done status
- ``None`` .. other
"""
parents_statuses = set(
DataDependency.objects.filter(
child=data, kind=DataDependency.KIND_IO
).distinct('parent__status').values_list('parent__status', flat=True)
)
if not parents_statuses:
return Data.STATUS_DONE
if None in parents_statuses:
# Some parents have been deleted.
return Data.STATUS_ERROR
if Data.STATUS_ERROR in parents_statuses:
return Data.STATUS_ERROR
if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses:
return Data.STATUS_DONE
return None | [
"def",
"dependency_status",
"(",
"data",
")",
":",
"parents_statuses",
"=",
"set",
"(",
"DataDependency",
".",
"objects",
".",
"filter",
"(",
"child",
"=",
"data",
",",
"kind",
"=",
"DataDependency",
".",
"KIND_IO",
")",
".",
"distinct",
"(",
"'parent__status'",
")",
".",
"values_list",
"(",
"'parent__status'",
",",
"flat",
"=",
"True",
")",
")",
"if",
"not",
"parents_statuses",
":",
"return",
"Data",
".",
"STATUS_DONE",
"if",
"None",
"in",
"parents_statuses",
":",
"# Some parents have been deleted.",
"return",
"Data",
".",
"STATUS_ERROR",
"if",
"Data",
".",
"STATUS_ERROR",
"in",
"parents_statuses",
":",
"return",
"Data",
".",
"STATUS_ERROR",
"if",
"len",
"(",
"parents_statuses",
")",
"==",
"1",
"and",
"Data",
".",
"STATUS_DONE",
"in",
"parents_statuses",
":",
"return",
"Data",
".",
"STATUS_DONE",
"return",
"None"
] | Return abstracted status of dependencies.
- ``STATUS_ERROR`` .. one dependency has error status or was deleted
- ``STATUS_DONE`` .. all dependencies have done status
- ``None`` .. other | [
"Return",
"abstracted",
"status",
"of",
"dependencies",
"."
] | python | train |
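The decision table above (a deleted or errored parent wins, all-done means done, anything else is still pending) can be exercised without the ORM; below is a minimal sketch over a plain iterable of parent statuses, with the status constants stubbed in as assumptions.

STATUS_DONE, STATUS_ERROR = 'OK', 'ER'  # stand-ins for Data.STATUS_*

def aggregate(parent_statuses):
    statuses = set(parent_statuses)
    if not statuses:
        return STATUS_DONE   # no dependencies at all
    if None in statuses:
        return STATUS_ERROR  # a parent was deleted
    if STATUS_ERROR in statuses:
        return STATUS_ERROR
    if statuses == {STATUS_DONE}:
        return STATUS_DONE
    return None              # still waiting on a parent

assert aggregate([]) == STATUS_DONE
assert aggregate(['OK', None]) == STATUS_ERROR
assert aggregate(['OK', 'PR']) is None  # 'PR' stands in for a processing status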
tensorlayer/tensorlayer | tensorlayer/prepro.py | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L1482-L1513 | def respective_zoom(x, h_range=(0.9, 1.1), w_range=(0.9, 1.1), flags=None, border_mode='constant'):
"""Zooming/Scaling a single image that height and width are changed independently.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
h_range : float or tuple of 2 floats
The zooming/scaling ratio of height, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
w_range : float or tuple of 2 floats
The zooming/scaling ratio of width, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
border_mode : str
- `constant`, pad the image with a constant value (i.e. black or 0)
- `replicate`, the row or column at the very edge of the original is replicated to the extra border.
Returns
-------
numpy.array
A processed image.
"""
zoom_matrix = affine_respective_zoom_matrix(h_range=h_range, w_range=w_range)
h, w = x.shape[0], x.shape[1]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = affine_transform_cv2(
x, transform_matrix, flags=flags, border_mode=border_mode
) #affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x | [
"def",
"respective_zoom",
"(",
"x",
",",
"h_range",
"=",
"(",
"0.9",
",",
"1.1",
")",
",",
"w_range",
"=",
"(",
"0.9",
",",
"1.1",
")",
",",
"flags",
"=",
"None",
",",
"border_mode",
"=",
"'constant'",
")",
":",
"zoom_matrix",
"=",
"affine_respective_zoom_matrix",
"(",
"h_range",
"=",
"h_range",
",",
"w_range",
"=",
"w_range",
")",
"h",
",",
"w",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"x",
".",
"shape",
"[",
"1",
"]",
"transform_matrix",
"=",
"transform_matrix_offset_center",
"(",
"zoom_matrix",
",",
"h",
",",
"w",
")",
"x",
"=",
"affine_transform_cv2",
"(",
"x",
",",
"transform_matrix",
",",
"flags",
"=",
"flags",
",",
"border_mode",
"=",
"border_mode",
")",
"#affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)",
"return",
"x"
] | Zooming/Scaling a single image that height and width are changed independently.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
h_range : float or tuple of 2 floats
The zooming/scaling ratio of height, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
w_range : float or tuple of 2 floats
The zooming/scaling ratio of width, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
border_mode : str
- `constant`, pad the image with a constant value (i.e. black or 0)
- `replicate`, the row or column at the very edge of the original is replicated to the extra border.
Returns
-------
numpy.array
A processed image. | [
"Zooming",
"/",
"Scaling",
"a",
"single",
"image",
"that",
"height",
"and",
"width",
"are",
"changed",
"independently",
"."
] | python | valid |
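As a rough illustration of what affine_respective_zoom_matrix combined with transform_matrix_offset_center computes, here is a NumPy sketch that builds an anisotropic zoom matrix and conjugates it so the zoom acts about the image centre; the exact matrix conventions of the tensorlayer helpers may differ, so treat the layout as an assumption.

import numpy as np

def respective_zoom_matrix_np(hz, wz):
    # scale rows by hz and columns by wz (homogeneous 3x3 form)
    return np.array([[hz, 0., 0.],
                     [0., wz, 0.],
                     [0., 0., 1.]])

def offset_center(matrix, h, w):
    # conjugate the transform so it acts about the image centre
    oy, ox = h / 2. + 0.5, w / 2. + 0.5
    to_center = np.array([[1., 0., oy], [0., 1., ox], [0., 0., 1.]])
    from_center = np.array([[1., 0., -oy], [0., 1., -ox], [0., 0., 1.]])
    return to_center @ matrix @ from_center

m = offset_center(respective_zoom_matrix_np(0.9, 1.1), h=100, w=200)
print(m.round(3))  # first two rows are what an affine warp would consume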
hollenstein/maspy | maspy/xml.py | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/xml.py#L503-L564 | def _parseMzml(self):
""" #TODO: docstring """
#TODO: this is already pretty nested, reduce that eg by using a function
# processRunNode
for event, element, elementTag in self:
if elementTag == 'mzML':
metadataNode = ETREE.Element(self.elementTag,
self.element.attrib
)
_, _, targetTag = next(self)
break
while targetTag != 'mzML':
if targetTag == 'run':
runNode = ETREE.Element('run', self.element.attrib)
next(self)
while self.event != 'end' or self.elementTag != 'run':
if self.elementTag == 'spectrumList':
#Add spectrumListNode
specListAttrib = {'defaultDataProcessingRef':
self.element.attrib['defaultDataProcessingRef']
}
specListNode = ETREE.Element('spectrumList', specListAttrib)
runNode.append(specListNode)
#Parse and yield spectrum xml elements
while self.event != 'end' or self.elementTag != 'spectrumList':
if self.event == 'end' and self.elementTag == 'spectrum':
yield self.element
clearParsedElements(self.element)
next(self)
elif self.elementTag == 'chromatogramList':
#Add chromatogramListNode
chromListAttrib = {'defaultDataProcessingRef':
self.element.attrib['defaultDataProcessingRef']
}
chromListNode = ETREE.Element('chromatogramList',
chromListAttrib
)
runNode.append(chromListNode)
#Parse and store chromatogram xml elements
while self.event != 'end' or self.elementTag != 'chromatogramList':
if self.event == 'end' and self.elementTag == 'chromatogram':
self.chromatogramList.append(self.element)
#Alternatively also the chromatogram xml
#elements could be yielded:
# yield self.element
# clearParsedElements(self.element)
next(self)
else:
runNode.append(self.element)
next(self)
metadataNode.append(runNode)
break
else:
while self.event != 'end' or self.elementTag != targetTag:
next(self)
metadataNode.append(self.element)
_, _, targetTag = next(self)
recClearTag(metadataNode)
recRemoveTreeFormating(metadataNode)
self.metadataNode = recCopyElement(metadataNode)
self.openfile.close() | [
"def",
"_parseMzml",
"(",
"self",
")",
":",
"#TODO: this is already pretty nested, reduce that eg by using a function",
"# processRunNode",
"for",
"event",
",",
"element",
",",
"elementTag",
"in",
"self",
":",
"if",
"elementTag",
"==",
"'mzML'",
":",
"metadataNode",
"=",
"ETREE",
".",
"Element",
"(",
"self",
".",
"elementTag",
",",
"self",
".",
"element",
".",
"attrib",
")",
"_",
",",
"_",
",",
"targetTag",
"=",
"next",
"(",
"self",
")",
"break",
"while",
"targetTag",
"!=",
"'mzML'",
":",
"if",
"targetTag",
"==",
"'run'",
":",
"runNode",
"=",
"ETREE",
".",
"Element",
"(",
"'run'",
",",
"self",
".",
"element",
".",
"attrib",
")",
"next",
"(",
"self",
")",
"while",
"self",
".",
"event",
"!=",
"'end'",
"or",
"self",
".",
"elementTag",
"!=",
"'run'",
":",
"if",
"self",
".",
"elementTag",
"==",
"'spectrumList'",
":",
"#Add spectrumListNode",
"specListAttrib",
"=",
"{",
"'defaultDataProcessingRef'",
":",
"self",
".",
"element",
".",
"attrib",
"[",
"'defaultDataProcessingRef'",
"]",
"}",
"specListNode",
"=",
"ETREE",
".",
"Element",
"(",
"'spectrumList'",
",",
"specListAttrib",
")",
"runNode",
".",
"append",
"(",
"specListNode",
")",
"#Parse and yield spectrum xml elements",
"while",
"self",
".",
"event",
"!=",
"'end'",
"or",
"self",
".",
"elementTag",
"!=",
"'spectrumList'",
":",
"if",
"self",
".",
"event",
"==",
"'end'",
"and",
"self",
".",
"elementTag",
"==",
"'spectrum'",
":",
"yield",
"self",
".",
"element",
"clearParsedElements",
"(",
"self",
".",
"element",
")",
"next",
"(",
"self",
")",
"elif",
"self",
".",
"elementTag",
"==",
"'chromatogramList'",
":",
"#Add chromatogramListNode",
"chromListAttrib",
"=",
"{",
"'defaultDataProcessingRef'",
":",
"self",
".",
"element",
".",
"attrib",
"[",
"'defaultDataProcessingRef'",
"]",
"}",
"chromListNode",
"=",
"ETREE",
".",
"Element",
"(",
"'chromatogramList'",
",",
"chromListAttrib",
")",
"runNode",
".",
"append",
"(",
"chromListNode",
")",
"#Parse and store chromatogram xml elements",
"while",
"self",
".",
"event",
"!=",
"'end'",
"or",
"self",
".",
"elementTag",
"!=",
"'chromatogramList'",
":",
"if",
"self",
".",
"event",
"==",
"'end'",
"and",
"self",
".",
"elementTag",
"==",
"'chromatogram'",
":",
"self",
".",
"chromatogramList",
".",
"append",
"(",
"self",
".",
"element",
")",
"#Alternatively also the chromatogram xml",
"#elements could be yielded:",
"# yield self.element",
"# clearParsedElements(self.element)",
"next",
"(",
"self",
")",
"else",
":",
"runNode",
".",
"append",
"(",
"self",
".",
"element",
")",
"next",
"(",
"self",
")",
"metadataNode",
".",
"append",
"(",
"runNode",
")",
"break",
"else",
":",
"while",
"self",
".",
"event",
"!=",
"'end'",
"or",
"self",
".",
"elementTag",
"!=",
"targetTag",
":",
"next",
"(",
"self",
")",
"metadataNode",
".",
"append",
"(",
"self",
".",
"element",
")",
"_",
",",
"_",
",",
"targetTag",
"=",
"next",
"(",
"self",
")",
"recClearTag",
"(",
"metadataNode",
")",
"recRemoveTreeFormating",
"(",
"metadataNode",
")",
"self",
".",
"metadataNode",
"=",
"recCopyElement",
"(",
"metadataNode",
")",
"self",
".",
"openfile",
".",
"close",
"(",
")"
] | Parse the mzML file. Yield spectrum xml elements, collect chromatogram xml elements and store the remaining metadata as self.metadataNode. | [
"#TODO",
":",
"docstring"
] | python | train |
peri-source/peri | peri/comp/comp.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L537-L549 | def sync_params(self):
""" Ensure that shared parameters are the same value everywhere """
def _normalize(comps, param):
vals = [c.get_values(param) for c in comps]
diff = any([vals[i] != vals[i+1] for i in range(len(vals)-1)])
if diff:
for c in comps:
c.set_values(param, vals[0])
for param, comps in iteritems(self.lmap):
if isinstance(comps, list) and len(comps) > 1:
_normalize(comps, param) | [
"def",
"sync_params",
"(",
"self",
")",
":",
"def",
"_normalize",
"(",
"comps",
",",
"param",
")",
":",
"vals",
"=",
"[",
"c",
".",
"get_values",
"(",
"param",
")",
"for",
"c",
"in",
"comps",
"]",
"diff",
"=",
"any",
"(",
"[",
"vals",
"[",
"i",
"]",
"!=",
"vals",
"[",
"i",
"+",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"vals",
")",
"-",
"1",
")",
"]",
")",
"if",
"diff",
":",
"for",
"c",
"in",
"comps",
":",
"c",
".",
"set_values",
"(",
"param",
",",
"vals",
"[",
"0",
"]",
")",
"for",
"param",
",",
"comps",
"in",
"iteritems",
"(",
"self",
".",
"lmap",
")",
":",
"if",
"isinstance",
"(",
"comps",
",",
"list",
")",
"and",
"len",
"(",
"comps",
")",
">",
"1",
":",
"_normalize",
"(",
"comps",
",",
"param",
")"
] | Ensure that shared parameters are the same value everywhere | [
"Ensure",
"that",
"shared",
"parameters",
"are",
"the",
"same",
"value",
"everywhere"
] | python | valid |
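A self-contained sketch of the same normalisation idea: when several components share a parameter and disagree, the first component's value is copied to the rest. The tiny Comp class below is hypothetical and only mimics the get_values/set_values interface used above.

class Comp:
    def __init__(self, **vals): self.vals = vals
    def get_values(self, p): return self.vals[p]
    def set_values(self, p, v): self.vals[p] = v

def sync(lmap):
    # lmap maps parameter name -> list of components sharing it
    for param, comps in lmap.items():
        if isinstance(comps, list) and len(comps) > 1:
            vals = [c.get_values(param) for c in comps]
            if any(vals[i] != vals[i + 1] for i in range(len(vals) - 1)):
                for c in comps:
                    c.set_values(param, vals[0])

a, b = Comp(sigma=1.0), Comp(sigma=2.0)
sync({'sigma': [a, b]})
print(a.get_values('sigma'), b.get_values('sigma'))  # 1.0 1.0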
gdestuynder/simple_bugzilla | bugzilla.py | https://github.com/gdestuynder/simple_bugzilla/blob/c69766a81fa7960a8f2b22287968fa4787f1bcfe/bugzilla.py#L35-L39 | def quick_search(self, terms):
'''Wrapper for search_bugs, for simple string searches'''
assert type(terms) is str
p = [{'quicksearch': terms}]
return self.search_bugs(p) | [
"def",
"quick_search",
"(",
"self",
",",
"terms",
")",
":",
"assert",
"type",
"(",
"terms",
")",
"is",
"str",
"p",
"=",
"[",
"{",
"'quicksearch'",
":",
"terms",
"}",
"]",
"return",
"self",
".",
"search_bugs",
"(",
"p",
")"
] | Wrapper for search_bugs, for simple string searches | [
"Wrapper",
"for",
"search_bugs",
"for",
"simple",
"string",
"searches"
] | python | train |
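Typical use, assuming the surrounding client class is constructed in the usual way; the constructor arguments below are guesses, not part of the snippet above.

bz = Bugzilla(url='https://bugzilla.example.org/rest/')  # hypothetical setup
for bug in bz.quick_search('crash on startup'):
    print(bug)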
singularityhub/sregistry-cli | sregistry/main/google_storage/__init__.py | https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/google_storage/__init__.py#L104-L121 | def _get_bucket(self):
'''get a bucket based on a bucket name. If it doesn't exist, create it.
'''
# Case 1: The bucket already exists
try:
self._bucket = self._bucket_service.get_bucket(self._bucket_name)
# Case 2: The bucket needs to be created
except google.cloud.exceptions.NotFound:
self._bucket = self._bucket_service.create_bucket(self._bucket_name)
# Case 3: The bucket name is already taken
except:
bot.error('Cannot get or create %s' %self._bucket_name)
sys.exit(1)
return self._bucket | [
"def",
"_get_bucket",
"(",
"self",
")",
":",
"# Case 1: The bucket already exists",
"try",
":",
"self",
".",
"_bucket",
"=",
"self",
".",
"_bucket_service",
".",
"get_bucket",
"(",
"self",
".",
"_bucket_name",
")",
"# Case 2: The bucket needs to be created",
"except",
"google",
".",
"cloud",
".",
"exceptions",
".",
"NotFound",
":",
"self",
".",
"_bucket",
"=",
"self",
".",
"_bucket_service",
".",
"create_bucket",
"(",
"self",
".",
"_bucket_name",
")",
"# Case 3: The bucket name is already taken",
"except",
":",
"bot",
".",
"error",
"(",
"'Cannot get or create %s'",
"%",
"self",
".",
"_bucket_name",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"self",
".",
"_bucket"
] | get a bucket based on a bucket name. If it doesn't exist, create it. | [
"get",
"a",
"bucket",
"based",
"on",
"a",
"bucket",
"name",
".",
"If",
"it",
"doesn",
"t",
"exist",
"create",
"it",
"."
] | python | test |
psd-tools/psd-tools | src/psd_tools/api/psd_image.py | https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/psd_image.py#L168-L176 | def has_preview(self):
"""
Returns whether the document has real merged data. When True, `topil()`
returns pre-composed data.
"""
version_info = self.image_resources.get_data('version_info')
if version_info:
return version_info.has_composite
return True | [
"def",
"has_preview",
"(",
"self",
")",
":",
"version_info",
"=",
"self",
".",
"image_resources",
".",
"get_data",
"(",
"'version_info'",
")",
"if",
"version_info",
":",
"return",
"version_info",
".",
"has_composite",
"return",
"True"
] | Returns whether the document has real merged data. When True, `topil()`
returns pre-composed data. | [
"Returns",
"if",
"the",
"document",
"has",
"real",
"merged",
"data",
".",
"When",
"True",
"topil",
"()",
"returns",
"pre",
"-",
"composed",
"data",
"."
] | python | train |
apache/airflow | airflow/models/connection.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/connection.py#L286-L296 | def extra_dejson(self):
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj | [
"def",
"extra_dejson",
"(",
"self",
")",
":",
"obj",
"=",
"{",
"}",
"if",
"self",
".",
"extra",
":",
"try",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"extra",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"exception",
"(",
"e",
")",
"self",
".",
"log",
".",
"error",
"(",
"\"Failed parsing the json for conn_id %s\"",
",",
"self",
".",
"conn_id",
")",
"return",
"obj"
] | Returns the extra property by deserializing json. | [
"Returns",
"the",
"extra",
"property",
"by",
"deserializing",
"json",
"."
] | python | test |
ptmcg/littletable | littletable.py | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1187-L1222 | def csv_export(self, csv_dest, fieldnames=None, encoding="UTF-8"):
"""Exports the contents of the table to a CSV-formatted file.
@param csv_dest: CSV file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left for the caller to be closed.
@type csv_dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if csv_dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string
"""
close_on_exit = False
if isinstance(csv_dest, basestring):
if PY_3:
csv_dest = open(csv_dest, 'w', newline='', encoding=encoding)
else:
csv_dest = open(csv_dest, 'wb')
close_on_exit = True
try:
if fieldnames is None:
fieldnames = list(_object_attrnames(self.obs[0]))
if isinstance(fieldnames, basestring):
fieldnames = fieldnames.split()
csv_dest.write(','.join(fieldnames) + NL)
csvout = csv.DictWriter(csv_dest, fieldnames, extrasaction='ignore', lineterminator=NL)
if hasattr(self.obs[0], "__dict__"):
csvout.writerows(o.__dict__ for o in self.obs)
else:
do_all(csvout.writerow(ODict(starmap(lambda obj, fld: (fld, getattr(obj, fld)),
zip(repeat(o), fieldnames)))) for o in self.obs)
finally:
if close_on_exit:
csv_dest.close() | [
"def",
"csv_export",
"(",
"self",
",",
"csv_dest",
",",
"fieldnames",
"=",
"None",
",",
"encoding",
"=",
"\"UTF-8\"",
")",
":",
"close_on_exit",
"=",
"False",
"if",
"isinstance",
"(",
"csv_dest",
",",
"basestring",
")",
":",
"if",
"PY_3",
":",
"csv_dest",
"=",
"open",
"(",
"csv_dest",
",",
"'w'",
",",
"newline",
"=",
"''",
",",
"encoding",
"=",
"encoding",
")",
"else",
":",
"csv_dest",
"=",
"open",
"(",
"csv_dest",
",",
"'wb'",
")",
"close_on_exit",
"=",
"True",
"try",
":",
"if",
"fieldnames",
"is",
"None",
":",
"fieldnames",
"=",
"list",
"(",
"_object_attrnames",
"(",
"self",
".",
"obs",
"[",
"0",
"]",
")",
")",
"if",
"isinstance",
"(",
"fieldnames",
",",
"basestring",
")",
":",
"fieldnames",
"=",
"fieldnames",
".",
"split",
"(",
")",
"csv_dest",
".",
"write",
"(",
"','",
".",
"join",
"(",
"fieldnames",
")",
"+",
"NL",
")",
"csvout",
"=",
"csv",
".",
"DictWriter",
"(",
"csv_dest",
",",
"fieldnames",
",",
"extrasaction",
"=",
"'ignore'",
",",
"lineterminator",
"=",
"NL",
")",
"if",
"hasattr",
"(",
"self",
".",
"obs",
"[",
"0",
"]",
",",
"\"__dict__\"",
")",
":",
"csvout",
".",
"writerows",
"(",
"o",
".",
"__dict__",
"for",
"o",
"in",
"self",
".",
"obs",
")",
"else",
":",
"do_all",
"(",
"csvout",
".",
"writerow",
"(",
"ODict",
"(",
"starmap",
"(",
"lambda",
"obj",
",",
"fld",
":",
"(",
"fld",
",",
"getattr",
"(",
"obj",
",",
"fld",
")",
")",
",",
"zip",
"(",
"repeat",
"(",
"o",
")",
",",
"fieldnames",
")",
")",
")",
")",
"for",
"o",
"in",
"self",
".",
"obs",
")",
"finally",
":",
"if",
"close_on_exit",
":",
"csv_dest",
".",
"close",
"(",
")"
] | Exports the contents of the table to a CSV-formatted file.
@param csv_dest: CSV file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left for the caller to be closed.
@type csv_dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if csv_dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string | [
"Exports",
"the",
"contents",
"of",
"the",
"table",
"to",
"a",
"CSV",
"-",
"formatted",
"file",
"."
] | python | train |
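A small usage sketch, assuming the surrounding Table class exposes littletable's usual insert() API (only csv_export itself is shown above); namedtuples exercise the getattr fallback branch because they have no __dict__.

from collections import namedtuple

Point = namedtuple('Point', 'x y label')
t = Table()  # hypothetical: the class this method belongs to
for rec in (Point(1, 2, 'a'), Point(3, 4, 'b')):
    t.insert(rec)
t.csv_export('points.csv', fieldnames='x y label')
# points.csv: header row "x,y,label" plus one line per record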
wuher/devil | devil/perm/management.py | https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/perm/management.py#L96-L104 | def _ensure_content_type():
""" Add the bulldog content type to the database if it's missing. """
from django.contrib.contenttypes.models import ContentType
try:
row = ContentType.objects.get(app_label=PERM_APP_NAME)
except ContentType.DoesNotExist:
row = ContentType(name=PERM_APP_NAME, app_label=PERM_APP_NAME, model=PERM_APP_NAME)
row.save()
return row.id | [
"def",
"_ensure_content_type",
"(",
")",
":",
"from",
"django",
".",
"contrib",
".",
"contenttypes",
".",
"models",
"import",
"ContentType",
"try",
":",
"row",
"=",
"ContentType",
".",
"objects",
".",
"get",
"(",
"app_label",
"=",
"PERM_APP_NAME",
")",
"except",
"ContentType",
".",
"DoesNotExist",
":",
"row",
"=",
"ContentType",
"(",
"name",
"=",
"PERM_APP_NAME",
",",
"app_label",
"=",
"PERM_APP_NAME",
",",
"model",
"=",
"PERM_APP_NAME",
")",
"row",
".",
"save",
"(",
")",
"return",
"row",
".",
"id"
] | Add the bulldog content type to the database if it's missing. | [
"Add",
"the",
"bulldog",
"content",
"type",
"to",
"the",
"database",
"if",
"it",
"s",
"missing",
"."
] | python | train |
trustar/trustar-python | trustar/examples/ingest_fireeye_alerts.py | https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/examples/ingest_fireeye_alerts.py#L114-L133 | def filter_webapp_attack(df, process_time):
"""
A function that filters out the METHODOLOGY - WEB APP ATTACK alert data obtained from FireEye
:param df: a DataFrame object
:param process_time:
:return:
"""
result = []
track = []
for o in df:
if 'METHODOLOGY - WEB APP ATTACK' in o['message']:
track.append(o)
else:
result.append(o)
trackfile = open('tracking_webAppAttack_' + process_time + '.txt', 'w')
numskip = 1
for item in track:
trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(numskip, item['displayId'], item))
numskip += 1
return result | [
"def",
"filter_webapp_attack",
"(",
"df",
",",
"process_time",
")",
":",
"result",
"=",
"[",
"]",
"track",
"=",
"[",
"]",
"for",
"o",
"in",
"df",
":",
"if",
"'METHODOLOGY - WEB APP ATTACK'",
"in",
"o",
"[",
"'message'",
"]",
":",
"track",
".",
"append",
"(",
"o",
")",
"else",
":",
"result",
".",
"append",
"(",
"o",
")",
"trackfile",
"=",
"open",
"(",
"'tracking_webAppAttack_'",
"+",
"process_time",
"+",
"'.txt'",
",",
"'w'",
")",
"numskip",
"=",
"1",
"for",
"item",
"in",
"track",
":",
"trackfile",
".",
"write",
"(",
"\"\\n\\n**** {:d}: Display ID {} ****\\n\\n{}\"",
".",
"format",
"(",
"numskip",
",",
"item",
"[",
"'displayId'",
"]",
",",
"item",
")",
")",
"numskip",
"+=",
"1",
"return",
"result"
] | A function that filters out the METHODOLOGY - WEB APP ATTACK alert data obtained from FireEye
:param df: a DataFrame object
:param process_time:
:return: | [
"A",
"function",
"that",
"filters",
"out",
"the",
"BASH",
"SHELLSHOCK",
"alert",
"data",
"obtained",
"from",
"FireEye",
":",
"param",
"df",
":",
"a",
"DataFrame",
"object",
":",
"param",
"process_time",
":",
":",
"return",
":"
] | python | train |
wgnet/webium | webium/cookie.py | https://github.com/wgnet/webium/blob/ccb09876a201e75f5c5810392d4db7a8708b90cb/webium/cookie.py#L20-L38 | def convert_cookie_to_dict(cookie, keys_map=WEB_DRIVER_COOKIE_KEYS_MAP):
"""
Converts an instance of Cookie class from cookielib to a dict.
The names of attributes can be changed according to keys_map.
For example, this method can be used to create a cookie which compatible with WebDriver format.
:param cookie: Cookie instance received from requests/sessions using url2lib or requests libraries.
:param keys_map: The dict to map cookie attributes for different schemas. By default WebDriver format is used.
:return:
"""
cookie_dict = dict()
for k in keys_map.keys():
key = _to_unicode_if_str(keys_map[k])
value = _to_unicode_if_str(getattr(cookie, k))
cookie_dict[key] = value
return cookie_dict | [
"def",
"convert_cookie_to_dict",
"(",
"cookie",
",",
"keys_map",
"=",
"WEB_DRIVER_COOKIE_KEYS_MAP",
")",
":",
"cookie_dict",
"=",
"dict",
"(",
")",
"for",
"k",
"in",
"keys_map",
".",
"keys",
"(",
")",
":",
"key",
"=",
"_to_unicode_if_str",
"(",
"keys_map",
"[",
"k",
"]",
")",
"value",
"=",
"_to_unicode_if_str",
"(",
"getattr",
"(",
"cookie",
",",
"k",
")",
")",
"cookie_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"cookie_dict"
] | Converts an instance of Cookie class from cookielib to a dict.
The names of attributes can be changed according to keys_map.
For example, this method can be used to create a cookie which compatible with WebDriver format.
:param cookie: Cookie instance received from requests/sessions using url2lib or requests libraries.
:param keys_map: The dict to map cookie attributes for different schemas. By default WebDriver format is used.
:return: | [
"Converts",
"an",
"instance",
"of",
"Cookie",
"class",
"from",
"cookielib",
"to",
"a",
"dict",
".",
"The",
"names",
"of",
"attributes",
"can",
"be",
"changed",
"according",
"to",
"keys_map",
":",
".",
"For",
"example",
"this",
"method",
"can",
"be",
"used",
"to",
"create",
"a",
"cookie",
"which",
"compatible",
"with",
"WebDriver",
"format",
"."
] | python | train |
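For example, cookies captured with the requests library (whose cookie jar yields cookielib Cookie objects) could be converted into WebDriver-style dicts like this; the Selenium hand-off in the comment is an illustrative assumption.

import requests

s = requests.Session()
s.get('https://example.org/')
wd_cookies = [convert_cookie_to_dict(c) for c in s.cookies]
# each dict can then be passed to selenium's driver.add_cookie(...)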
coopernurse/barrister | barrister/runtime.py | https://github.com/coopernurse/barrister/blob/0471b1d98d3327ba381684db496ec94c79c20848/barrister/runtime.py#L867-L911 | def validate(self, expected_type, is_array, val):
"""
Validates that the expected type matches the value
Returns a two-element tuple: (bool, string)
- `bool` - True if valid, False if not
- `string` - Description of validation error, or None if valid
:Parameters:
expected_type
string name of the type expected. This may be a Barrister primitive, or a user defined type.
is_array
If True then require that the val be a list
val
Value to validate against the expected type
"""
if val == None:
if expected_type.optional:
return True, None
else:
return False, "Value cannot be null"
elif is_array:
if not isinstance(val, list):
return self._type_err(val, "list")
else:
for v in val:
ok, msg = self.validate(expected_type, False, v)
if not ok:
return ok, msg
elif expected_type.type == "int":
if not isinstance(val, (long, int)):
return self._type_err(val, "int")
elif expected_type.type == "float":
if not isinstance(val, (float, int, long)):
return self._type_err(val, "float")
elif expected_type.type == "bool":
if not isinstance(val, bool):
return self._type_err(val, "bool")
elif expected_type.type == "string":
if not isinstance(val, (str, unicode)):
return self._type_err(val, "string")
else:
return self.get(expected_type.type).validate(val)
return True, None | [
"def",
"validate",
"(",
"self",
",",
"expected_type",
",",
"is_array",
",",
"val",
")",
":",
"if",
"val",
"==",
"None",
":",
"if",
"expected_type",
".",
"optional",
":",
"return",
"True",
",",
"None",
"else",
":",
"return",
"False",
",",
"\"Value cannot be null\"",
"elif",
"is_array",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"return",
"self",
".",
"_type_err",
"(",
"val",
",",
"\"list\"",
")",
"else",
":",
"for",
"v",
"in",
"val",
":",
"ok",
",",
"msg",
"=",
"self",
".",
"validate",
"(",
"expected_type",
",",
"False",
",",
"v",
")",
"if",
"not",
"ok",
":",
"return",
"ok",
",",
"msg",
"elif",
"expected_type",
".",
"type",
"==",
"\"int\"",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"(",
"long",
",",
"int",
")",
")",
":",
"return",
"self",
".",
"_type_err",
"(",
"val",
",",
"\"int\"",
")",
"elif",
"expected_type",
".",
"type",
"==",
"\"float\"",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"(",
"float",
",",
"int",
",",
"long",
")",
")",
":",
"return",
"self",
".",
"_type_err",
"(",
"val",
",",
"\"float\"",
")",
"elif",
"expected_type",
".",
"type",
"==",
"\"bool\"",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"return",
"self",
".",
"_type_err",
"(",
"val",
",",
"\"bool\"",
")",
"elif",
"expected_type",
".",
"type",
"==",
"\"string\"",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"return",
"self",
".",
"_type_err",
"(",
"val",
",",
"\"string\"",
")",
"else",
":",
"return",
"self",
".",
"get",
"(",
"expected_type",
".",
"type",
")",
".",
"validate",
"(",
"val",
")",
"return",
"True",
",",
"None"
] | Validates that the expected type matches the value
Returns a two-element tuple: (bool, string)
- `bool` - True if valid, False if not
- `string` - Description of validation error, or None if valid
:Parameters:
expected_type
string name of the type expected. This may be a Barrister primitive, or a user defined type.
is_array
If True then require that the val be a list
val
Value to validate against the expected type | [
"Validates",
"that",
"the",
"expected",
"type",
"matches",
"the",
"value"
] | python | train |
softlayer/softlayer-python | SoftLayer/managers/ipsec.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/ipsec.py#L209-L231 | def update_translation(self, context_id, translation_id, static_ip=None,
remote_ip=None, notes=None):
"""Updates an address translation entry using the given values.
:param int context_id: The id-value representing the context instance.
:param int translation_id: The id-value representing the translation instance.
:param string static_ip: The static IP address value to update.
:param string remote_ip: The remote IP address value to update.
:param string notes: The notes value to update.
:return bool: True if the update was successful.
"""
translation = self.get_translation(context_id, translation_id)
if static_ip is not None:
translation['internalIpAddress'] = static_ip
translation.pop('internalIpAddressId', None)
if remote_ip is not None:
translation['customerIpAddress'] = remote_ip
translation.pop('customerIpAddressId', None)
if notes is not None:
translation['notes'] = notes
self.context.editAddressTranslation(translation, id=context_id)
return True | [
"def",
"update_translation",
"(",
"self",
",",
"context_id",
",",
"translation_id",
",",
"static_ip",
"=",
"None",
",",
"remote_ip",
"=",
"None",
",",
"notes",
"=",
"None",
")",
":",
"translation",
"=",
"self",
".",
"get_translation",
"(",
"context_id",
",",
"translation_id",
")",
"if",
"static_ip",
"is",
"not",
"None",
":",
"translation",
"[",
"'internalIpAddress'",
"]",
"=",
"static_ip",
"translation",
".",
"pop",
"(",
"'internalIpAddressId'",
",",
"None",
")",
"if",
"remote_ip",
"is",
"not",
"None",
":",
"translation",
"[",
"'customerIpAddress'",
"]",
"=",
"remote_ip",
"translation",
".",
"pop",
"(",
"'customerIpAddressId'",
",",
"None",
")",
"if",
"notes",
"is",
"not",
"None",
":",
"translation",
"[",
"'notes'",
"]",
"=",
"notes",
"self",
".",
"context",
".",
"editAddressTranslation",
"(",
"translation",
",",
"id",
"=",
"context_id",
")",
"return",
"True"
] | Updates an address translation entry using the given values.
:param int context_id: The id-value representing the context instance.
:param int translation_id: The id-value representing the translation instance.
:param string static_ip: The static IP address value to update.
:param string remote_ip: The remote IP address value to update.
:param string notes: The notes value to update.
:return bool: True if the update was successful. | [
"Updates",
"an",
"address",
"translation",
"entry",
"using",
"the",
"given",
"values",
"."
] | python | train |
blockstack/blockstack-core | blockstack/lib/operations/transfer.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/transfer.py#L405-L423 | def canonicalize(parsed_op):
"""
Get the "canonical form" of this operation, putting it into a form where it can be serialized
to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases.
For NAME_TRANSFER, this means:
* add 'keep_data' flag
"""
assert 'op' in parsed_op
assert len(parsed_op['op']) == 2
if parsed_op['op'][1] == TRANSFER_KEEP_DATA:
parsed_op['keep_data'] = True
elif parsed_op['op'][1] == TRANSFER_REMOVE_DATA:
parsed_op['keep_data'] = False
else:
raise ValueError("Invalid op '{}'".format(parsed_op['op']))
return parsed_op | [
"def",
"canonicalize",
"(",
"parsed_op",
")",
":",
"assert",
"'op'",
"in",
"parsed_op",
"assert",
"len",
"(",
"parsed_op",
"[",
"'op'",
"]",
")",
"==",
"2",
"if",
"parsed_op",
"[",
"'op'",
"]",
"[",
"1",
"]",
"==",
"TRANSFER_KEEP_DATA",
":",
"parsed_op",
"[",
"'keep_data'",
"]",
"=",
"True",
"elif",
"parsed_op",
"[",
"'op'",
"]",
"[",
"1",
"]",
"==",
"TRANSFER_REMOVE_DATA",
":",
"parsed_op",
"[",
"'keep_data'",
"]",
"=",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid op '{}'\"",
".",
"format",
"(",
"parsed_op",
"[",
"'op'",
"]",
")",
")",
"return",
"parsed_op"
] | Get the "canonical form" of this operation, putting it into a form where it can be serialized
to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases.
For NAME_TRANSFER, this means:
* add 'keep_data' flag | [
"Get",
"the",
"canonical",
"form",
"of",
"this",
"operation",
"putting",
"it",
"into",
"a",
"form",
"where",
"it",
"can",
"be",
"serialized",
"to",
"form",
"a",
"consensus",
"hash",
".",
"This",
"method",
"is",
"meant",
"to",
"preserve",
"compatibility",
"across",
"blockstackd",
"releases",
"."
] | python | train |
lucapinello/Haystack | haystack/external.py | https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L272-L284 | def _parse_seqs(self, LOS):
"""
m._parse_seqs(LOS) -- [utility] Build a matrix of counts from a list of sequences
"""
self.nseqs = len(LOS)
self.width = len(LOS[0])
for i in range(self.width):
Dc = {'A': 0, 'C': 0, 'T': 0, 'G': 0, 'N': 0}
for seq in LOS:
key = seq[i]
Dc[key] = Dc[key] + 1
del(Dc['N'])
self.counts.append(Dc) | [
"def",
"_parse_seqs",
"(",
"self",
",",
"LOS",
")",
":",
"self",
".",
"nseqs",
"=",
"len",
"(",
"LOS",
")",
"self",
".",
"width",
"=",
"len",
"(",
"LOS",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"width",
")",
":",
"Dc",
"=",
"{",
"'A'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'N'",
":",
"0",
"}",
"for",
"seq",
"in",
"LOS",
":",
"key",
"=",
"seq",
"[",
"i",
"]",
"Dc",
"[",
"key",
"]",
"=",
"Dc",
"[",
"key",
"]",
"+",
"1",
"del",
"(",
"Dc",
"[",
"'N'",
"]",
")",
"self",
".",
"counts",
".",
"append",
"(",
"Dc",
")"
] | m._parse_seqs(LOS) -- [utility] Build a matrix of counts from a list of sequences | [
"m",
".",
"_parse_seqs",
"(",
"LOS",
")",
"--",
"[",
"utility",
"]",
"Build",
"a",
"matrix",
"of",
"counts",
"from",
"a",
"list",
"of",
"sequences"
] | python | train |
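The counting scheme is easy to replicate stand-alone; below is a minimal sketch in which Ns are tallied and then dropped per column, exactly as above.

def counts_from_seqs(seqs):
    counts = []
    for i in range(len(seqs[0])):
        col = {'A': 0, 'C': 0, 'T': 0, 'G': 0, 'N': 0}
        for seq in seqs:
            col[seq[i]] += 1
        del col['N']  # Ns are counted, then discarded
        counts.append(col)
    return counts

print(counts_from_seqs(['ACGT', 'ACGA']))
# [{'A': 2, 'C': 0, 'T': 0, 'G': 0}, {'A': 0, 'C': 2, 'T': 0, 'G': 0}, ...]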
pyviz/geoviews | geoviews/data/iris.py | https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/data/iris.py#L296-L300 | def length(cls, dataset):
"""
Returns the total number of samples in the dataset.
"""
return np.product([len(d.points) for d in dataset.data.coords(dim_coords=True)], dtype=np.intp) | [
"def",
"length",
"(",
"cls",
",",
"dataset",
")",
":",
"return",
"np",
".",
"product",
"(",
"[",
"len",
"(",
"d",
".",
"points",
")",
"for",
"d",
"in",
"dataset",
".",
"data",
".",
"coords",
"(",
"dim_coords",
"=",
"True",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"intp",
")"
] | Returns the total number of samples in the dataset. | [
"Returns",
"the",
"total",
"number",
"of",
"samples",
"in",
"the",
"dataset",
"."
] | python | train |
Fantomas42/django-blog-zinnia | zinnia/templatetags/zinnia.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L106-L112 | def get_draft_entries(number=5,
template='zinnia/tags/entries_draft.html'):
"""
Return the last draft entries.
"""
return {'template': template,
'entries': Entry.objects.filter(status=DRAFT)[:number]} | [
"def",
"get_draft_entries",
"(",
"number",
"=",
"5",
",",
"template",
"=",
"'zinnia/tags/entries_draft.html'",
")",
":",
"return",
"{",
"'template'",
":",
"template",
",",
"'entries'",
":",
"Entry",
".",
"objects",
".",
"filter",
"(",
"status",
"=",
"DRAFT",
")",
"[",
":",
"number",
"]",
"}"
] | Return the last draft entries. | [
"Return",
"the",
"last",
"draft",
"entries",
"."
] | python | train |
sbusard/wagoner | wagoner/utils.py | https://github.com/sbusard/wagoner/blob/7f83d66bbd0e009e4d4232ffdf319bd5a2a5683b/wagoner/utils.py#L62-L73 | def random_weighted_choice(choices):
"""
Return a random key of choices, weighted by their value.
:param choices: a dictionary mapping keys to positive integer weights;
:return: a random key of choices.
"""
choices, weights = zip(*choices.items())
cumdist = list(accumulate(weights))
x = random.random() * cumdist[-1]
element = bisect.bisect(cumdist, x)
return choices[element] | [
"def",
"random_weighted_choice",
"(",
"choices",
")",
":",
"choices",
",",
"weights",
"=",
"zip",
"(",
"*",
"choices",
".",
"items",
"(",
")",
")",
"cumdist",
"=",
"list",
"(",
"accumulate",
"(",
"weights",
")",
")",
"x",
"=",
"random",
".",
"random",
"(",
")",
"*",
"cumdist",
"[",
"-",
"1",
"]",
"element",
"=",
"bisect",
".",
"bisect",
"(",
"cumdist",
",",
"x",
")",
"return",
"choices",
"[",
"element",
"]"
] | Return a random key of choices, weighted by their value.
:param choices: a dictionary mapping keys to positive integer weights;
:return: a random key of choices. | [
"Return",
"a",
"random",
"key",
"of",
"choices",
"weighted",
"by",
"their",
"value",
"."
] | python | train |
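Usage is straightforward, and on Python 3.6+ the standard library offers the same weighted draw via random.choices, shown here for comparison.

import random
from collections import Counter

choices = {'a': 1, 'b': 3}
draws = Counter(random_weighted_choice(choices) for _ in range(10000))
print(draws)  # roughly 2500 'a' and 7500 'b'

key, = random.choices(list(choices), weights=list(choices.values()))  # stdlib equivalent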
Crypto-toolbox/btfxwss | btfxwss/connection.py | https://github.com/Crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/connection.py#L458-L491 | def _error_handler(self, data):
"""
Handle Error messages and log them accordingly.
:param data: error event dict containing the error code
"""
errors = {10000: 'Unknown event',
10001: 'Generic error',
10008: 'Concurrency error',
10020: 'Request parameters error',
10050: 'Configuration setup failed',
10100: 'Failed authentication',
10111: 'Error in authentication request payload',
10112: 'Error in authentication request signature',
10113: 'Error in authentication request encryption',
10114: 'Error in authentication request nonce',
10200: 'Error in un-authentication request',
10300: 'Subscription Failed (generic)',
10301: 'Already Subscribed',
10302: 'Unknown channel',
10400: 'Subscription Failed (generic)',
10401: 'Not subscribed',
11000: 'Not ready, try again later',
20000: 'User is invalid!',
20051: 'Websocket server stopping',
20060: 'Websocket server resyncing',
20061: 'Websocket server resync complete'
}
try:
self.log.error(errors[data['code']])
except KeyError:
self.log.error("Received unknown error Code in message %s! "
"Reconnecting..", data) | [
"def",
"_error_handler",
"(",
"self",
",",
"data",
")",
":",
"errors",
"=",
"{",
"10000",
":",
"'Unknown event'",
",",
"10001",
":",
"'Generic error'",
",",
"10008",
":",
"'Concurrency error'",
",",
"10020",
":",
"'Request parameters error'",
",",
"10050",
":",
"'Configuration setup failed'",
",",
"10100",
":",
"'Failed authentication'",
",",
"10111",
":",
"'Error in authentication request payload'",
",",
"10112",
":",
"'Error in authentication request signature'",
",",
"10113",
":",
"'Error in authentication request encryption'",
",",
"10114",
":",
"'Error in authentication request nonce'",
",",
"10200",
":",
"'Error in un-authentication request'",
",",
"10300",
":",
"'Subscription Failed (generic)'",
",",
"10301",
":",
"'Already Subscribed'",
",",
"10302",
":",
"'Unknown channel'",
",",
"10400",
":",
"'Subscription Failed (generic)'",
",",
"10401",
":",
"'Not subscribed'",
",",
"11000",
":",
"'Not ready, try again later'",
",",
"20000",
":",
"'User is invalid!'",
",",
"20051",
":",
"'Websocket server stopping'",
",",
"20060",
":",
"'Websocket server resyncing'",
",",
"20061",
":",
"'Websocket server resync complete'",
"}",
"try",
":",
"self",
".",
"log",
".",
"error",
"(",
"errors",
"[",
"data",
"[",
"'code'",
"]",
"]",
")",
"except",
"KeyError",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Received unknown error Code in message %s! \"",
"\"Reconnecting..\"",
",",
"data",
")"
] | Handle Error messages and log them accordingly.
:param data: error event dict containing the error code | [
"Handle",
"Error",
"messages",
"and",
"log",
"them",
"accordingly",
"."
] | python | test |
mikedh/trimesh | trimesh/voxel.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/voxel.py#L641-L666 | def points_to_indices(points, pitch, origin):
"""
Convert center points of an (n,m,p) matrix into its indices.
Parameters
----------
points: (q, 3) float, center points of voxel matrix (n,m,p)
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
indices: (q, 3) int, list of indices
"""
points = np.asanyarray(points, dtype=np.float64)
origin = np.asanyarray(origin, dtype=np.float64)
pitch = float(pitch)
if points.shape != (points.shape[0], 3):
raise ValueError('shape of points must be (q, 3)')
if origin.shape != (3,):
raise ValueError('shape of origin must be (3,)')
indices = np.round((points - origin) / pitch).astype(int)
return indices | [
"def",
"points_to_indices",
"(",
"points",
",",
"pitch",
",",
"origin",
")",
":",
"points",
"=",
"np",
".",
"asanyarray",
"(",
"points",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"origin",
"=",
"np",
".",
"asanyarray",
"(",
"origin",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"pitch",
"=",
"float",
"(",
"pitch",
")",
"if",
"points",
".",
"shape",
"!=",
"(",
"points",
".",
"shape",
"[",
"0",
"]",
",",
"3",
")",
":",
"raise",
"ValueError",
"(",
"'shape of points must be (q, 3)'",
")",
"if",
"origin",
".",
"shape",
"!=",
"(",
"3",
",",
")",
":",
"raise",
"ValueError",
"(",
"'shape of origin must be (3,)'",
")",
"indices",
"=",
"np",
".",
"round",
"(",
"(",
"points",
"-",
"origin",
")",
"/",
"pitch",
")",
".",
"astype",
"(",
"int",
")",
"return",
"indices"
] | Convert center points of an (n,m,p) matrix into its indices.
Parameters
----------
points: (q, 3) float, center points of voxel matrix (n,m,p)
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
indices: (q, 3) int, list of indices | [
"Convert",
"center",
"points",
"of",
"an",
"(",
"n",
"m",
"p",
")",
"matrix",
"into",
"its",
"indices",
"."
] | python | train |
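Round-tripping shows the convention: indices come from centre points, and the inverse mapping is just origin + indices * pitch (the inverse below is written inline for illustration).

import numpy as np

origin, pitch = np.zeros(3), 0.5
indices = np.array([[0, 1, 2], [3, 0, 1]])
points = origin + indices * pitch  # voxel centres for those indices
assert np.array_equal(points_to_indices(points, pitch, origin), indices)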
pybel/pybel-tools | src/pybel_tools/summary/provenance.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/provenance.py#L43-L55 | def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:
"""Prepare a citation data dictionary from a graph.
:return: A dictionary of dictionaries {citation type: {(source, target): set of citation references}}
"""
results = defaultdict(lambda: defaultdict(set))
for u, v, data in graph.edges(data=True):
if CITATION not in data:
continue
results[data[CITATION][CITATION_TYPE]][u, v].add(data[CITATION][CITATION_REFERENCE].strip())
return dict(results) | [
"def",
"_generate_citation_dict",
"(",
"graph",
":",
"BELGraph",
")",
"->",
"Mapping",
"[",
"str",
",",
"Mapping",
"[",
"Tuple",
"[",
"BaseEntity",
",",
"BaseEntity",
"]",
",",
"str",
"]",
"]",
":",
"results",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"set",
")",
")",
"for",
"u",
",",
"v",
",",
"data",
"in",
"graph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"if",
"CITATION",
"not",
"in",
"data",
":",
"continue",
"results",
"[",
"data",
"[",
"CITATION",
"]",
"[",
"CITATION_TYPE",
"]",
"]",
"[",
"u",
",",
"v",
"]",
".",
"add",
"(",
"data",
"[",
"CITATION",
"]",
"[",
"CITATION_REFERENCE",
"]",
".",
"strip",
"(",
")",
")",
"return",
"dict",
"(",
"results",
")"
] | Prepare a citation data dictionary from a graph.
:return: A dictionary of dictionaries {citation type: {(source, target): set of citation references}} | [
"Prepare",
"a",
"citation",
"data",
"dictionary",
"from",
"a",
"graph",
"."
] | python | valid |
PmagPy/PmagPy | programs/demag_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L6555-L6581 | def clear_boxes(self):
"""
Clear all boxes
"""
self.tmin_box.Clear()
self.tmin_box.SetStringSelection("")
if self.current_fit:
self.tmin_box.SetItems(self.T_list)
self.tmin_box.SetSelection(-1)
self.tmax_box.Clear()
self.tmax_box.SetStringSelection("")
if self.current_fit:
self.tmax_box.SetItems(self.T_list)
self.tmax_box.SetSelection(-1)
self.fit_box.Clear()
self.fit_box.SetStringSelection("")
if self.s in self.pmag_results_data['specimens'] and self.pmag_results_data['specimens'][self.s]:
self.fit_box.SetItems(
list([x.name for x in self.pmag_results_data['specimens'][self.s]]))
for parameter in ['dec', 'inc', 'n', 'mad', 'dang', 'alpha95']:
COMMAND = "self.s%s_window.SetValue('')" % parameter
exec(COMMAND)
COMMAND = "self.s%s_window.SetBackgroundColour(wx.Colour('grey'))" % parameter
exec(COMMAND) | [
"def",
"clear_boxes",
"(",
"self",
")",
":",
"self",
".",
"tmin_box",
".",
"Clear",
"(",
")",
"self",
".",
"tmin_box",
".",
"SetStringSelection",
"(",
"\"\"",
")",
"if",
"self",
".",
"current_fit",
":",
"self",
".",
"tmin_box",
".",
"SetItems",
"(",
"self",
".",
"T_list",
")",
"self",
".",
"tmin_box",
".",
"SetSelection",
"(",
"-",
"1",
")",
"self",
".",
"tmax_box",
".",
"Clear",
"(",
")",
"self",
".",
"tmax_box",
".",
"SetStringSelection",
"(",
"\"\"",
")",
"if",
"self",
".",
"current_fit",
":",
"self",
".",
"tmax_box",
".",
"SetItems",
"(",
"self",
".",
"T_list",
")",
"self",
".",
"tmax_box",
".",
"SetSelection",
"(",
"-",
"1",
")",
"self",
".",
"fit_box",
".",
"Clear",
"(",
")",
"self",
".",
"fit_box",
".",
"SetStringSelection",
"(",
"\"\"",
")",
"if",
"self",
".",
"s",
"in",
"self",
".",
"pmag_results_data",
"[",
"'specimens'",
"]",
"and",
"self",
".",
"pmag_results_data",
"[",
"'specimens'",
"]",
"[",
"self",
".",
"s",
"]",
":",
"self",
".",
"fit_box",
".",
"SetItems",
"(",
"list",
"(",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"self",
".",
"pmag_results_data",
"[",
"'specimens'",
"]",
"[",
"self",
".",
"s",
"]",
"]",
")",
")",
"for",
"parameter",
"in",
"[",
"'dec'",
",",
"'inc'",
",",
"'n'",
",",
"'mad'",
",",
"'dang'",
",",
"'alpha95'",
"]",
":",
"COMMAND",
"=",
"\"self.s%s_window.SetValue('')\"",
"%",
"parameter",
"exec",
"(",
"COMMAND",
")",
"COMMAND",
"=",
"\"self.s%s_window.SetBackgroundColour(wx.Colour('grey'))\"",
"%",
"parameter",
"exec",
"(",
"COMMAND",
")"
] | Clear all boxes | [
"Clear",
"all",
"boxes"
] | python | train |
PyGithub/PyGithub | github/AuthenticatedUser.py | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/AuthenticatedUser.py#L1024-L1035 | def has_in_starred(self, starred):
"""
:calls: `GET /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(starred, github.Repository.Repository), starred
status, headers, data = self._requester.requestJson(
"GET",
"/user/starred/" + starred._identity
)
return status == 204 | [
"def",
"has_in_starred",
"(",
"self",
",",
"starred",
")",
":",
"assert",
"isinstance",
"(",
"starred",
",",
"github",
".",
"Repository",
".",
"Repository",
")",
",",
"starred",
"status",
",",
"headers",
",",
"data",
"=",
"self",
".",
"_requester",
".",
"requestJson",
"(",
"\"GET\"",
",",
"\"/user/starred/\"",
"+",
"starred",
".",
"_identity",
")",
"return",
"status",
"==",
"204"
] | :calls: `GET /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: bool | [
":",
"calls",
":",
"GET",
"/",
"user",
"/",
"starred",
"/",
":",
"owner",
"/",
":",
"repo",
"<http",
":",
"//",
"developer",
".",
"github",
".",
"com",
"/",
"v3",
"/",
"activity",
"/",
"starring",
">",
"_",
":",
"param",
"starred",
":",
":",
"class",
":",
"github",
".",
"Repository",
".",
"Repository",
":",
"rtype",
":",
"bool"
] | python | train |
agile-geoscience/striplog | striplog/striplog.py | https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/striplog.py#L2147-L2183 | def crop(self, extent, copy=False):
"""
Crop to a new depth range.
Args:
extent (tuple): The new start and stop depth. Must be 'inside'
existing striplog.
copy (bool): Whether to operate in place or make a copy.
Returns:
Operates in place by deault; if copy is True, returns a striplog.
"""
try:
if extent[0] is None:
extent = (self.start.z, extent[1])
if extent[1] is None:
extent = (extent[0], self.stop.z)
except:
m = "You must provide a 2-tuple for the new extents. Use None for"
m += " the existing start or stop."
raise StriplogError(m)
first_ix = self.read_at(extent[0], index=True)
last_ix = self.read_at(extent[1], index=True)
first = self[first_ix].split_at(extent[0])[1]
last = self[last_ix].split_at(extent[1])[0]
new_list = self.__list[first_ix:last_ix+1].copy()
new_list[0] = first
new_list[-1] = last
if copy:
return Striplog(new_list)
else:
self.__list = new_list
return | [
"def",
"crop",
"(",
"self",
",",
"extent",
",",
"copy",
"=",
"False",
")",
":",
"try",
":",
"if",
"extent",
"[",
"0",
"]",
"is",
"None",
":",
"extent",
"=",
"(",
"self",
".",
"start",
".",
"z",
",",
"extent",
"[",
"1",
"]",
")",
"if",
"extent",
"[",
"1",
"]",
"is",
"None",
":",
"extent",
"=",
"(",
"extent",
"[",
"0",
"]",
",",
"self",
".",
"stop",
".",
"z",
")",
"except",
":",
"m",
"=",
"\"You must provide a 2-tuple for the new extents. Use None for\"",
"m",
"+=",
"\" the existing start or stop.\"",
"raise",
"StriplogError",
"(",
"m",
")",
"first_ix",
"=",
"self",
".",
"read_at",
"(",
"extent",
"[",
"0",
"]",
",",
"index",
"=",
"True",
")",
"last_ix",
"=",
"self",
".",
"read_at",
"(",
"extent",
"[",
"1",
"]",
",",
"index",
"=",
"True",
")",
"first",
"=",
"self",
"[",
"first_ix",
"]",
".",
"split_at",
"(",
"extent",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"last",
"=",
"self",
"[",
"last_ix",
"]",
".",
"split_at",
"(",
"extent",
"[",
"1",
"]",
")",
"[",
"0",
"]",
"new_list",
"=",
"self",
".",
"__list",
"[",
"first_ix",
":",
"last_ix",
"+",
"1",
"]",
".",
"copy",
"(",
")",
"new_list",
"[",
"0",
"]",
"=",
"first",
"new_list",
"[",
"-",
"1",
"]",
"=",
"last",
"if",
"copy",
":",
"return",
"Striplog",
"(",
"new_list",
")",
"else",
":",
"self",
".",
"__list",
"=",
"new_list",
"return"
] | Crop to a new depth range.
Args:
extent (tuple): The new start and stop depth. Must be 'inside'
existing striplog.
copy (bool): Whether to operate in place or make a copy.
Returns:
Operates in place by default; if copy is True, returns a striplog. | [
"Crop",
"to",
"a",
"new",
"depth",
"range",
"."
] | python | test |
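A short usage sketch for crop above; `strip` stands in for an existing Striplog and the depths are illustrative.

# keep the original and return only the interval down to 500
shallow = strip.crop((None, 500.0), copy=True)
# or trim `strip` itself in place, starting at 120
strip.crop((120.0, None))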
awslabs/aws-sam-cli | samcli/lib/logs/fetcher.py | https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/lib/logs/fetcher.py#L32-L85 | def fetch(self, log_group_name, start=None, end=None, filter_pattern=None):
"""
Fetch logs from all streams under the given CloudWatch Log Group and yields in the output. Optionally, caller
can filter the logs using a pattern or a start/end time.
Parameters
----------
log_group_name : string
Name of CloudWatch Logs Group to query.
start : datetime.datetime
Optional start time for logs.
end : datetime.datetime
Optional end time for logs.
filter_pattern : str
Expression to filter the logs by. This is passed directly to CloudWatch, so any expression supported by
CloudWatch Logs API is supported here.
Yields
------
samcli.lib.logs.event.LogEvent
Object containing the information from each log event returned by CloudWatch Logs
"""
kwargs = {
"logGroupName": log_group_name,
"interleaved": True
}
if start:
kwargs["startTime"] = to_timestamp(start)
if end:
kwargs["endTime"] = to_timestamp(end)
if filter_pattern:
kwargs["filterPattern"] = filter_pattern
while True:
LOG.debug("Fetching logs from CloudWatch with parameters %s", kwargs)
result = self.cw_client.filter_log_events(**kwargs)
# Several events will be returned. Yield one at a time
for event in result.get('events', []):
yield LogEvent(log_group_name, event)
# Keep iterating until there are no more logs left to query.
next_token = result.get("nextToken", None)
kwargs["nextToken"] = next_token
if not next_token:
break | [
"def",
"fetch",
"(",
"self",
",",
"log_group_name",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"filter_pattern",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"\"logGroupName\"",
":",
"log_group_name",
",",
"\"interleaved\"",
":",
"True",
"}",
"if",
"start",
":",
"kwargs",
"[",
"\"startTime\"",
"]",
"=",
"to_timestamp",
"(",
"start",
")",
"if",
"end",
":",
"kwargs",
"[",
"\"endTime\"",
"]",
"=",
"to_timestamp",
"(",
"end",
")",
"if",
"filter_pattern",
":",
"kwargs",
"[",
"\"filterPattern\"",
"]",
"=",
"filter_pattern",
"while",
"True",
":",
"LOG",
".",
"debug",
"(",
"\"Fetching logs from CloudWatch with parameters %s\"",
",",
"kwargs",
")",
"result",
"=",
"self",
".",
"cw_client",
".",
"filter_log_events",
"(",
"*",
"*",
"kwargs",
")",
"# Several events will be returned. Yield one at a time",
"for",
"event",
"in",
"result",
".",
"get",
"(",
"'events'",
",",
"[",
"]",
")",
":",
"yield",
"LogEvent",
"(",
"log_group_name",
",",
"event",
")",
"# Keep iterating until there are no more logs left to query.",
"next_token",
"=",
"result",
".",
"get",
"(",
"\"nextToken\"",
",",
"None",
")",
"kwargs",
"[",
"\"nextToken\"",
"]",
"=",
"next_token",
"if",
"not",
"next_token",
":",
"break"
] | Fetch logs from all streams under the given CloudWatch Log Group and yields in the output. Optionally, caller
can filter the logs using a pattern or a start/end time.
Parameters
----------
log_group_name : string
Name of CloudWatch Logs Group to query.
start : datetime.datetime
Optional start time for logs.
end : datetime.datetime
Optional end time for logs.
filter_pattern : str
Expression to filter the logs by. This is passed directly to CloudWatch, so any expression supported by
CloudWatch Logs API is supported here.
Yields
------
samcli.lib.logs.event.LogEvent
Object containing the information from each log event returned by CloudWatch Logs | [
"Fetch",
"logs",
"from",
"all",
"streams",
"under",
"the",
"given",
"CloudWatch",
"Log",
"Group",
"and",
"yields",
"in",
"the",
"output",
".",
"Optionally",
"caller",
"can",
"filter",
"the",
"logs",
"using",
"a",
"pattern",
"or",
"a",
"start",
"/",
"end",
"time",
"."
] | python | train |
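A hypothetical driver for the generator above; the LogsFetcher class name and constructor wiring are assumptions, only fetch() comes from the source.

import boto3
from datetime import datetime, timedelta
from samcli.lib.logs.fetcher import LogsFetcher  # assumed import path and name

fetcher = LogsFetcher(cw_client=boto3.client("logs"))  # assumed constructor
start = datetime.utcnow() - timedelta(hours=1)
for event in fetcher.fetch("/aws/lambda/my-func", start=start, filter_pattern="ERROR"):
    print(event)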
lablup/backend.ai-client-py | src/ai/backend/client/cli/__init__.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/__init__.py#L99-L112 | def run_alias():
"""
Quick aliases for run command.
"""
mode = Path(sys.argv[0]).stem
help = True if len(sys.argv) <= 1 else False
if mode == 'lcc':
sys.argv.insert(1, 'c')
elif mode == 'lpython':
sys.argv.insert(1, 'python')
sys.argv.insert(1, 'run')
if help:
sys.argv.append('--help')
main.main(prog_name='backend.ai') | [
"def",
"run_alias",
"(",
")",
":",
"mode",
"=",
"Path",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
".",
"stem",
"help",
"=",
"True",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<=",
"1",
"else",
"False",
"if",
"mode",
"==",
"'lcc'",
":",
"sys",
".",
"argv",
".",
"insert",
"(",
"1",
",",
"'c'",
")",
"elif",
"mode",
"==",
"'lpython'",
":",
"sys",
".",
"argv",
".",
"insert",
"(",
"1",
",",
"'python'",
")",
"sys",
".",
"argv",
".",
"insert",
"(",
"1",
",",
"'run'",
")",
"if",
"help",
":",
"sys",
".",
"argv",
".",
"append",
"(",
"'--help'",
")",
"main",
".",
"main",
"(",
"prog_name",
"=",
"'backend.ai'",
")"
] | Quick aliases for run command. | [
"Quick",
"aliases",
"for",
"run",
"command",
"."
] | python | train |
hyperledger/sawtooth-core | cli/sawtooth_cli/sawset.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/sawset.py#L163-L197 | def _do_config_proposal_vote(args):
"""Executes the 'proposal vote' subcommand. Given a key file, a proposal
id and a vote value, it generates a batch of sawtooth_settings transactions
in a BatchList instance. The BatchList is written to file or submitted to a
validator.
"""
signer = _read_signer(args.key)
rest_client = RestClient(args.url)
proposals = _get_proposals(rest_client)
proposal = None
for candidate in proposals.candidates:
if candidate.proposal_id == args.proposal_id:
proposal = candidate
break
if proposal is None:
raise CliException('No proposal exists with the given id')
for vote_record in proposal.votes:
if vote_record.public_key == signer.get_public_key().as_hex():
raise CliException(
'A vote has already been recorded with this signing key')
txn = _create_vote_txn(
signer,
args.proposal_id,
proposal.proposal.setting,
args.vote_value)
batch = _create_batch(signer, [txn])
batch_list = BatchList(batches=[batch])
rest_client.send_batches(batch_list) | [
"def",
"_do_config_proposal_vote",
"(",
"args",
")",
":",
"signer",
"=",
"_read_signer",
"(",
"args",
".",
"key",
")",
"rest_client",
"=",
"RestClient",
"(",
"args",
".",
"url",
")",
"proposals",
"=",
"_get_proposals",
"(",
"rest_client",
")",
"proposal",
"=",
"None",
"for",
"candidate",
"in",
"proposals",
".",
"candidates",
":",
"if",
"candidate",
".",
"proposal_id",
"==",
"args",
".",
"proposal_id",
":",
"proposal",
"=",
"candidate",
"break",
"if",
"proposal",
"is",
"None",
":",
"raise",
"CliException",
"(",
"'No proposal exists with the given id'",
")",
"for",
"vote_record",
"in",
"proposal",
".",
"votes",
":",
"if",
"vote_record",
".",
"public_key",
"==",
"signer",
".",
"get_public_key",
"(",
")",
".",
"as_hex",
"(",
")",
":",
"raise",
"CliException",
"(",
"'A vote has already been recorded with this signing key'",
")",
"txn",
"=",
"_create_vote_txn",
"(",
"signer",
",",
"args",
".",
"proposal_id",
",",
"proposal",
".",
"proposal",
".",
"setting",
",",
"args",
".",
"vote_value",
")",
"batch",
"=",
"_create_batch",
"(",
"signer",
",",
"[",
"txn",
"]",
")",
"batch_list",
"=",
"BatchList",
"(",
"batches",
"=",
"[",
"batch",
"]",
")",
"rest_client",
".",
"send_batches",
"(",
"batch_list",
")"
] | Executes the 'proposal vote' subcommand. Given a key file, a proposal
id and a vote value, it generates a batch of sawtooth_settings transactions
in a BatchList instance. The BatchList is written to file or submitted to a
validator. | [
"Executes",
"the",
"proposal",
"vote",
"subcommand",
".",
"Given",
"a",
"key",
"file",
"a",
"proposal",
"id",
"and",
"a",
"vote",
"value",
"it",
"generates",
"a",
"batch",
"of",
"sawtooth_settings",
"transactions",
"in",
"a",
"BatchList",
"instance",
".",
"The",
"BatchList",
"is",
"file",
"or",
"submitted",
"to",
"a",
"validator",
"."
] | python | train |
Robpol86/colorclass | colorclass/color.py | https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/color.py#L103-L112 | def yellow(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('yellow', string, auto=auto) | [
"def",
"yellow",
"(",
"cls",
",",
"string",
",",
"auto",
"=",
"False",
")",
":",
"return",
"cls",
".",
"colorize",
"(",
"'yellow'",
",",
"string",
",",
"auto",
"=",
"auto",
")"
] | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color | [
"Color",
"-",
"code",
"entire",
"string",
"."
] | python | train |
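A minimal sketch of the classmethod above.

from colorclass import Color

print(Color.yellow("disk usage at 91%"))
# auto=True picks the dark/light variant for the detected terminal
print(Color.yellow("disk usage at 91%", auto=True))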
pytroll/satpy | satpy/scene.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/scene.py#L696-L721 | def _read_datasets(self, dataset_nodes, **kwargs):
"""Read the given datasets from file."""
# Sort requested datasets by reader
reader_datasets = {}
for node in dataset_nodes:
ds_id = node.name
# if we already have this node loaded or the node was assigned
# by the user (node data is None) then don't try to load from a
# reader
if ds_id in self.datasets or not isinstance(node.data, dict):
continue
reader_name = node.data.get('reader_name')
if reader_name is None:
# This shouldn't be possible
raise RuntimeError("Dependency tree has a corrupt node.")
reader_datasets.setdefault(reader_name, set()).add(ds_id)
# load all datasets for one reader at a time
loaded_datasets = DatasetDict()
for reader_name, ds_ids in reader_datasets.items():
reader_instance = self.readers[reader_name]
new_datasets = reader_instance.load(ds_ids, **kwargs)
loaded_datasets.update(new_datasets)
self.datasets.update(loaded_datasets)
return loaded_datasets | [
"def",
"_read_datasets",
"(",
"self",
",",
"dataset_nodes",
",",
"*",
"*",
"kwargs",
")",
":",
"# Sort requested datasets by reader",
"reader_datasets",
"=",
"{",
"}",
"for",
"node",
"in",
"dataset_nodes",
":",
"ds_id",
"=",
"node",
".",
"name",
"# if we already have this node loaded or the node was assigned",
"# by the user (node data is None) then don't try to load from a",
"# reader",
"if",
"ds_id",
"in",
"self",
".",
"datasets",
"or",
"not",
"isinstance",
"(",
"node",
".",
"data",
",",
"dict",
")",
":",
"continue",
"reader_name",
"=",
"node",
".",
"data",
".",
"get",
"(",
"'reader_name'",
")",
"if",
"reader_name",
"is",
"None",
":",
"# This shouldn't be possible",
"raise",
"RuntimeError",
"(",
"\"Dependency tree has a corrupt node.\"",
")",
"reader_datasets",
".",
"setdefault",
"(",
"reader_name",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"ds_id",
")",
"# load all datasets for one reader at a time",
"loaded_datasets",
"=",
"DatasetDict",
"(",
")",
"for",
"reader_name",
",",
"ds_ids",
"in",
"reader_datasets",
".",
"items",
"(",
")",
":",
"reader_instance",
"=",
"self",
".",
"readers",
"[",
"reader_name",
"]",
"new_datasets",
"=",
"reader_instance",
".",
"load",
"(",
"ds_ids",
",",
"*",
"*",
"kwargs",
")",
"loaded_datasets",
".",
"update",
"(",
"new_datasets",
")",
"self",
".",
"datasets",
".",
"update",
"(",
"loaded_datasets",
")",
"return",
"loaded_datasets"
] | Read the given datasets from file. | [
"Read",
"the",
"given",
"datasets",
"from",
"file",
"."
] | python | train |
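A standalone sketch of the grouping step inside _read_datasets: dataset ids are bucketed per reader with dict.setdefault before each reader loads its own batch. The ids and reader names are illustrative.

nodes = [("C01", "abi_l1b"), ("C02", "abi_l1b"), ("I01", "viirs_sdr")]
reader_datasets = {}
for ds_id, reader_name in nodes:
    reader_datasets.setdefault(reader_name, set()).add(ds_id)
print(reader_datasets)  # {'abi_l1b': {'C01', 'C02'}, 'viirs_sdr': {'I01'}}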
w1ll1am23/pubnubsub-handler | pubnubsubhandler.py | https://github.com/w1ll1am23/pubnubsub-handler/blob/0283c191d6042727f55a748f69a485d751f4cacb/pubnubsubhandler.py#L56-L77 | def add_subscription(self, channel, callback_function):
"""
Add a channel to subscribe to and a callback function to
run when the channel receives an update.
If channel already exists, create a new "subscription"
and append another callback function.
Args:
channel (str): The channel to add a subscription to.
callback_function (func): The function to run on an
update to the passed in channel.
"""
if channel not in CHANNELS:
CHANNELS.append(channel)
SUBSCRIPTIONS[channel] = [callback_function]
else:
SUBSCRIPTIONS[channel].append(callback_function)
# If a channel gets added after subscription has already been called
# call subscribe on the individual channel, here.
if self._subscribed:
_LOGGER.info("New channel added after main subscribe call.")
self._pubnub.subscribe().channels(channel).execute() | [
"def",
"add_subscription",
"(",
"self",
",",
"channel",
",",
"callback_function",
")",
":",
"if",
"channel",
"not",
"in",
"CHANNELS",
":",
"CHANNELS",
".",
"append",
"(",
"channel",
")",
"SUBSCRIPTIONS",
"[",
"channel",
"]",
"=",
"[",
"callback_function",
"]",
"else",
":",
"SUBSCRIPTIONS",
"[",
"channel",
"]",
".",
"append",
"(",
"callback_function",
")",
"# If a channel gets added after subscription has already been called",
"# call subscribe on the individual channel, here.",
"if",
"self",
".",
"_subscribed",
":",
"_LOGGER",
".",
"info",
"(",
"\"New channel added after main subscribe call.\"",
")",
"self",
".",
"_pubnub",
".",
"subscribe",
"(",
")",
".",
"channels",
"(",
"channel",
")",
".",
"execute",
"(",
")"
] | Add a channel to subscribe to and a callback function to
run when the channel receives an update.
If channel already exists, create a new "subscription"
and append another callback function.
Args:
channel (str): The channel to add a subscription to.
callback_function (func): The function to run on an
update to the passed in channel. | [
"Add",
"a",
"channel",
"to",
"subscribe",
"to",
"and",
"a",
"callback",
"function",
"to",
"run",
"when",
"the",
"channel",
"receives",
"an",
"update",
".",
"If",
"channel",
"already",
"exists",
"create",
"a",
"new",
"subscription",
"and",
"append",
"another",
"callback",
"function",
"."
] | python | train |
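A hypothetical wiring of add_subscription above; the handler class name and constructor signature are assumptions inferred from the module name.

def on_thermostat_update(message):
    print("channel update:", message)

handler = PubNubSubscriptionHandler("sub-c-...")  # assumed class and signature
handler.add_subscription("thermostat.status", on_thermostat_update)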
EpistasisLab/tpot | tpot/gp_deap.py | https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/gp_deap.py#L41-L73 | def pick_two_individuals_eligible_for_crossover(population):
"""Pick two individuals from the population which can do crossover, that is, they share a primitive.
Parameters
----------
population: array of individuals
Returns
----------
tuple: (individual, individual)
Two individuals which are not the same, but share at least one primitive.
Alternatively, if no such pair exists in the population, (None, None) is returned instead.
"""
primitives_by_ind = [set([node.name for node in ind if isinstance(node, gp.Primitive)])
for ind in population]
pop_as_str = [str(ind) for ind in population]
eligible_pairs = [(i, i+1+j) for i, ind1_prims in enumerate(primitives_by_ind)
for j, ind2_prims in enumerate(primitives_by_ind[i+1:])
if not ind1_prims.isdisjoint(ind2_prims) and
pop_as_str[i] != pop_as_str[i+1+j]]
# Pairs are eligible in both orders, this ensures that both orders are considered
eligible_pairs += [(j, i) for (i, j) in eligible_pairs]
if not eligible_pairs:
# If there are no eligible pairs, the caller should decide what to do
return None, None
pair = np.random.randint(0, len(eligible_pairs))
idx1, idx2 = eligible_pairs[pair]
return population[idx1], population[idx2] | [
"def",
"pick_two_individuals_eligible_for_crossover",
"(",
"population",
")",
":",
"primitives_by_ind",
"=",
"[",
"set",
"(",
"[",
"node",
".",
"name",
"for",
"node",
"in",
"ind",
"if",
"isinstance",
"(",
"node",
",",
"gp",
".",
"Primitive",
")",
"]",
")",
"for",
"ind",
"in",
"population",
"]",
"pop_as_str",
"=",
"[",
"str",
"(",
"ind",
")",
"for",
"ind",
"in",
"population",
"]",
"eligible_pairs",
"=",
"[",
"(",
"i",
",",
"i",
"+",
"1",
"+",
"j",
")",
"for",
"i",
",",
"ind1_prims",
"in",
"enumerate",
"(",
"primitives_by_ind",
")",
"for",
"j",
",",
"ind2_prims",
"in",
"enumerate",
"(",
"primitives_by_ind",
"[",
"i",
"+",
"1",
":",
"]",
")",
"if",
"not",
"ind1_prims",
".",
"isdisjoint",
"(",
"ind2_prims",
")",
"and",
"pop_as_str",
"[",
"i",
"]",
"!=",
"pop_as_str",
"[",
"i",
"+",
"1",
"+",
"j",
"]",
"]",
"# Pairs are eligible in both orders, this ensures that both orders are considered",
"eligible_pairs",
"+=",
"[",
"(",
"j",
",",
"i",
")",
"for",
"(",
"i",
",",
"j",
")",
"in",
"eligible_pairs",
"]",
"if",
"not",
"eligible_pairs",
":",
"# If there are no eligible pairs, the caller should decide what to do",
"return",
"None",
",",
"None",
"pair",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"eligible_pairs",
")",
")",
"idx1",
",",
"idx2",
"=",
"eligible_pairs",
"[",
"pair",
"]",
"return",
"population",
"[",
"idx1",
"]",
",",
"population",
"[",
"idx2",
"]"
] | Pick two individuals from the population which can do crossover, that is, they share a primitive.
Parameters
----------
population: array of individuals
Returns
----------
tuple: (individual, individual)
Two individuals which are not the same, but share at least one primitive.
Alternatively, if no such pair exists in the population, (None, None) is returned instead. | [
"Pick",
"two",
"individuals",
"from",
"the",
"population",
"which",
"can",
"do",
"crossover",
"that",
"is",
"they",
"share",
"a",
"primitive",
"."
] | python | train |
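A standalone illustration of the eligibility test above: a pair qualifies only when the primitive-name sets intersect and the string forms differ.

prims = [{"add", "mul"}, {"sub"}, {"mul", "neg"}]
reprs = ["add(mul(x, y))", "sub(x, y)", "mul(neg(x), y)"]
pairs = [(i, j)
         for i in range(len(prims)) for j in range(len(prims))
         if i != j and prims[i] & prims[j] and reprs[i] != reprs[j]]
print(pairs)  # [(0, 2), (2, 0)] - only the two individuals sharing 'mul'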
ergoithz/browsepy | browsepy/__init__.py | https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/__init__.py#L90-L130 | def browse_sortkey_reverse(prop):
'''
Get sorting function for directory listing based on given attribute
name, with some caveats:
* Directories will be first.
* If *name* is given, link widget lowercase text will be used instead.
* If *size* is given, bytesize will be used.
:param prop: file attribute name
:returns: tuple with sorting function and reverse bool
:rtype: tuple of a function and a bool
'''
if prop.startswith('-'):
prop = prop[1:]
reverse = True
else:
reverse = False
if prop == 'text':
return (
lambda x: (
x.is_directory == reverse,
x.link.text.lower() if x.link and x.link.text else x.name
),
reverse
)
if prop == 'size':
return (
lambda x: (
x.is_directory == reverse,
x.stats.st_size
),
reverse
)
return (
lambda x: (
x.is_directory == reverse,
getattr(x, prop, None)
),
reverse
) | [
"def",
"browse_sortkey_reverse",
"(",
"prop",
")",
":",
"if",
"prop",
".",
"startswith",
"(",
"'-'",
")",
":",
"prop",
"=",
"prop",
"[",
"1",
":",
"]",
"reverse",
"=",
"True",
"else",
":",
"reverse",
"=",
"False",
"if",
"prop",
"==",
"'text'",
":",
"return",
"(",
"lambda",
"x",
":",
"(",
"x",
".",
"is_directory",
"==",
"reverse",
",",
"x",
".",
"link",
".",
"text",
".",
"lower",
"(",
")",
"if",
"x",
".",
"link",
"and",
"x",
".",
"link",
".",
"text",
"else",
"x",
".",
"name",
")",
",",
"reverse",
")",
"if",
"prop",
"==",
"'size'",
":",
"return",
"(",
"lambda",
"x",
":",
"(",
"x",
".",
"is_directory",
"==",
"reverse",
",",
"x",
".",
"stats",
".",
"st_size",
")",
",",
"reverse",
")",
"return",
"(",
"lambda",
"x",
":",
"(",
"x",
".",
"is_directory",
"==",
"reverse",
",",
"getattr",
"(",
"x",
",",
"prop",
",",
"None",
")",
")",
",",
"reverse",
")"
] | Get sorting function for directory listing based on given attribute
name, with some caveats:
* Directories will be first.
* If *name* is given, link widget lowercase text will be used instead.
* If *size* is given, bytesize will be used.
:param prop: file attribute name
:returns: tuple with sorting function and reverse bool
:rtype: tuple of a function and a bool | [
"Get",
"sorting",
"function",
"for",
"directory",
"listing",
"based",
"on",
"given",
"attribute",
"name",
"with",
"some",
"caveats",
":",
"*",
"Directories",
"will",
"be",
"first",
".",
"*",
"If",
"*",
"name",
"*",
"is",
"given",
"link",
"widget",
"lowercase",
"text",
"will",
"be",
"used",
"istead",
".",
"*",
"If",
"*",
"size",
"*",
"is",
"given",
"bytesize",
"will",
"be",
"used",
"."
] | python | train |
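A short sketch of consuming the (key, reverse) pair returned above; directory_nodes stands in for a list of entry objects.

key, reverse = browse_sortkey_reverse("-size")  # leading '-' flips the order
listing = sorted(directory_nodes, key=key, reverse=reverse)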
python-diamond/Diamond | src/diamond/handler/zmq_pubsub.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/zmq_pubsub.py#L69-L77 | def _bind(self):
"""
Create PUB socket and bind
"""
if not zmq:
return
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port) | [
"def",
"_bind",
"(",
"self",
")",
":",
"if",
"not",
"zmq",
":",
"return",
"self",
".",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"self",
".",
"socket",
"=",
"self",
".",
"context",
".",
"socket",
"(",
"zmq",
".",
"PUB",
")",
"self",
".",
"socket",
".",
"bind",
"(",
"\"tcp://*:%i\"",
"%",
"self",
".",
"port",
")"
] | Create PUB socket and bind | [
"Create",
"PUB",
"socket",
"and",
"bind"
] | python | train |
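A standalone pyzmq counterpart to the PUB socket above: a subscriber connecting to the same port receives whatever the handler publishes; the port number is an assumption.

import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:9999")       # must match the handler's port
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # empty prefix = receive everything
print(sub.recv_string())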
72squared/redpipe | redpipe/keyspaces.py | https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L1308-L1324 | def lpop(self, name):
"""
Pop the first object from the left.
:param name: str the name of the redis key
:return: Future()
"""
with self.pipe as pipe:
f = Future()
res = pipe.lpop(self.redis_key(name))
def cb():
f.set(self.valueparse.decode(res.result))
pipe.on_execute(cb)
return f | [
"def",
"lpop",
"(",
"self",
",",
"name",
")",
":",
"with",
"self",
".",
"pipe",
"as",
"pipe",
":",
"f",
"=",
"Future",
"(",
")",
"res",
"=",
"pipe",
".",
"lpop",
"(",
"self",
".",
"redis_key",
"(",
"name",
")",
")",
"def",
"cb",
"(",
")",
":",
"f",
".",
"set",
"(",
"self",
".",
"valueparse",
".",
"decode",
"(",
"res",
".",
"result",
")",
")",
"pipe",
".",
"on_execute",
"(",
"cb",
")",
"return",
"f"
] | Pop the first object from the left.
:param name: str the name of the redis key
:return: Future() | [
"Pop",
"the",
"first",
"object",
"from",
"the",
"left",
"."
] | python | train |
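A hedged sketch of the deferred-result pattern above: lpop returns a Future that only holds a value after the pipeline runs. The keyspace subclass and autoexec wiring follow redpipe's documented style, but the names here are assumptions.

import redpipe

class Jobs(redpipe.List):  # assumed keyspace declaration
    keyspace = "J"

with redpipe.autoexec() as pipe:
    first = Jobs(pipe=pipe).lpop("pending")
# the Future resolves once the block exits and the pipeline executes
print(first.result)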
svasilev94/GraphLibrary | graphlibrary/digraph.py | https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/digraph.py#L107-L113 | def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u]) | [
"def",
"has_predecessor",
"(",
"self",
",",
"u",
",",
"v",
")",
":",
"if",
"u",
"not",
"in",
"self",
".",
"vertices",
":",
"raise",
"GraphInsertError",
"(",
"\"Vertex %s doesn't exist.\"",
"%",
"(",
"u",
",",
")",
")",
"return",
"(",
"u",
"in",
"self",
".",
"pred",
"and",
"v",
"in",
"self",
".",
"pred",
"[",
"u",
"]",
")"
] | Check if vertex u has predecessor v | [
"Check",
"if",
"vertex",
"u",
"has",
"predecessor",
"v"
] | python | train |
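A hypothetical usage; the construction helpers are assumed names, only has_predecessor() comes from the method above.

g = DiGraph()
g.add_vertex("a"); g.add_vertex("b")  # assumed helper names
g.add_edge("b", "a")                  # edge b -> a makes 'b' a predecessor of 'a'
print(g.has_predecessor("a", "b"))    # True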
onelogin/python-saml | src/onelogin/saml2/logout_response.py | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/logout_response.py#L240-L252 | def get_response(self, deflate=True):
"""
Returns the Logout Response deflated, base64 encoded
:param deflate: It makes the deflate process optional
:type: bool
:return: Logout Response maybe deflated and base64 encoded
:rtype: string
"""
if deflate:
response = OneLogin_Saml2_Utils.deflate_and_base64_encode(self.__logout_response)
else:
response = b64encode(self.__logout_response)
return response | [
"def",
"get_response",
"(",
"self",
",",
"deflate",
"=",
"True",
")",
":",
"if",
"deflate",
":",
"response",
"=",
"OneLogin_Saml2_Utils",
".",
"deflate_and_base64_encode",
"(",
"self",
".",
"__logout_response",
")",
"else",
":",
"response",
"=",
"b64encode",
"(",
"self",
".",
"__logout_response",
")",
"return",
"response"
] | Returns the Logout Response deflated, base64 encoded
:param deflate: It makes the deflate process optional
:type: bool
:return: Logout Response maybe deflated and base64 encoded
:rtype: string | [
"Returns",
"the",
"Logout",
"Response",
"defated",
"base64encoded",
":",
"param",
"deflate",
":",
"It",
"makes",
"the",
"deflate",
"process",
"optional",
":",
"type",
":",
"bool",
":",
"return",
":",
"Logout",
"Response",
"maybe",
"deflated",
"and",
"base64",
"encoded",
":",
"rtype",
":",
"string"
] | python | train |
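A standard-library sketch of the deflate-and-base64 wire encoding performed above; raw DEFLATE means stripping the zlib header and checksum.

import base64
import zlib

xml = b"<samlp:LogoutResponse ID='...'/>"
deflated = zlib.compress(xml)[2:-4]  # drop 2-byte header and 4-byte checksum
encoded = base64.b64encode(deflated)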
amaas-fintech/amaas-core-sdk-python | amaascore/market_data/fx_rate.py | https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/market_data/fx_rate.py#L57-L67 | def business_date(self, business_date):
"""
Force the business_date to always be a date
:param business_date:
:return:
"""
if business_date is not None:
if isinstance(business_date, type_check):
self._business_date = parse(business_date).date()
else:
self._business_date = business_date
"def",
"business_date",
"(",
"self",
",",
"business_date",
")",
":",
"if",
"business_date",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"business_date",
",",
"type_check",
")",
":",
"self",
".",
"_business_date",
"=",
"parse",
"(",
"business_date",
")",
".",
"date",
"(",
")",
"else",
":",
"self",
".",
"_business_date",
"=",
"business_date"
] | Force the business_date to always be a date
:param business_date:
:return: | [
"Force",
"the",
"business_date",
"to",
"always",
"be",
"a",
"date",
":",
"param",
"business_date",
":",
":",
"return",
":"
] | python | train |
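A standalone sketch of the coercion above: strings are parsed into a date, while date objects pass through unchanged.

import datetime
from dateutil.parser import parse

for value in ("2017-05-12", datetime.date(2017, 5, 12)):
    business_date = parse(value).date() if isinstance(value, str) else value
    print(business_date)  # 2017-05-12 in both cases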
mcocdawc/chemcoord | src/chemcoord/internal_coordinates/_zmat_class_core.py | https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/internal_coordinates/_zmat_class_core.py#L377-L443 | def subs(self, *args, **kwargs):
"""Substitute a symbolic expression in ``['bond', 'angle', 'dihedral']``
This is a wrapper around the substitution mechanism of
`sympy <http://docs.sympy.org/latest/tutorial/basic_operations.html>`_.
Any symbolic expression in the columns
``['bond', 'angle', 'dihedral']`` of ``self`` will be substituted
with value.
.. note:: This function is not side-effect free.
If all symbolic expressions are evaluated and are concrete numbers
and ``perform_checks`` is True, a check for the transformation
to cartesian coordinates is performed.
If no :class:`~chemcoord.exceptions.InvalidReference`
exceptions are raised, the resulting cartesian is written to
``self._metadata['last_valid_cartesian']``.
Args:
symb_expr (sympy expression):
value :
perform_checks (bool): If ``perform_checks is True``,
it is asserted, that the resulting Zmatrix can be converted
to cartesian coordinates.
Dummy atoms will be inserted automatically if necessary.
Returns:
Zmat: Zmatrix with substituted symbolic expressions.
If all resulting sympy expressions in a column are numbers,
the column is recast to 64bit float.
"""
perform_checks = kwargs.pop('perform_checks', True)
cols = ['bond', 'angle', 'dihedral']
out = self.copy()
def get_subs_f(*args):
def subs_function(x):
if hasattr(x, 'subs'):
x = x.subs(*args)
try:
x = float(x)
except TypeError:
pass
return x
return subs_function
for col in cols:
if out.loc[:, col].dtype is np.dtype('O'):
out.unsafe_loc[:, col] = out.loc[:, col].map(get_subs_f(*args))
try:
out.unsafe_loc[:, col] = out.loc[:, col].astype('f8')
except (SystemError, TypeError):
pass
if perform_checks:
try:
new_cartesian = out.get_cartesian()
except (AttributeError, TypeError):
# Unevaluated symbolic expressions are remaining.
pass
except InvalidReference as e:
if out.dummy_manipulation_allowed:
out._manipulate_dummies(e, inplace=True)
else:
raise e
else:
out._metadata['last_valid_cartesian'] = new_cartesian
self._metadata['last_valid_cartesian'] = new_cartesian
return out | [
"def",
"subs",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"perform_checks",
"=",
"kwargs",
".",
"pop",
"(",
"'perform_checks'",
",",
"True",
")",
"cols",
"=",
"[",
"'bond'",
",",
"'angle'",
",",
"'dihedral'",
"]",
"out",
"=",
"self",
".",
"copy",
"(",
")",
"def",
"get_subs_f",
"(",
"*",
"args",
")",
":",
"def",
"subs_function",
"(",
"x",
")",
":",
"if",
"hasattr",
"(",
"x",
",",
"'subs'",
")",
":",
"x",
"=",
"x",
".",
"subs",
"(",
"*",
"args",
")",
"try",
":",
"x",
"=",
"float",
"(",
"x",
")",
"except",
"TypeError",
":",
"pass",
"return",
"x",
"return",
"subs_function",
"for",
"col",
"in",
"cols",
":",
"if",
"out",
".",
"loc",
"[",
":",
",",
"col",
"]",
".",
"dtype",
"is",
"np",
".",
"dtype",
"(",
"'O'",
")",
":",
"out",
".",
"unsafe_loc",
"[",
":",
",",
"col",
"]",
"=",
"out",
".",
"loc",
"[",
":",
",",
"col",
"]",
".",
"map",
"(",
"get_subs_f",
"(",
"*",
"args",
")",
")",
"try",
":",
"out",
".",
"unsafe_loc",
"[",
":",
",",
"col",
"]",
"=",
"out",
".",
"loc",
"[",
":",
",",
"col",
"]",
".",
"astype",
"(",
"'f8'",
")",
"except",
"(",
"SystemError",
",",
"TypeError",
")",
":",
"pass",
"if",
"perform_checks",
":",
"try",
":",
"new_cartesian",
"=",
"out",
".",
"get_cartesian",
"(",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"# Unevaluated symbolic expressions are remaining.",
"pass",
"except",
"InvalidReference",
"as",
"e",
":",
"if",
"out",
".",
"dummy_manipulation_allowed",
":",
"out",
".",
"_manipulate_dummies",
"(",
"e",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"raise",
"e",
"else",
":",
"out",
".",
"_metadata",
"[",
"'last_valid_cartesian'",
"]",
"=",
"new_cartesian",
"self",
".",
"_metadata",
"[",
"'last_valid_cartesian'",
"]",
"=",
"new_cartesian",
"return",
"out"
] | Substitute a symbolic expression in ``['bond', 'angle', 'dihedral']``
This is a wrapper around the substitution mechanism of
`sympy <http://docs.sympy.org/latest/tutorial/basic_operations.html>`_.
Any symbolic expression in the columns
``['bond', 'angle', 'dihedral']`` of ``self`` will be substituted
with value.
.. note:: This function is not side-effect free.
If all symbolic expressions are evaluated and are concrete numbers
and ``perform_checks`` is True, a check for the transformation
to cartesian coordinates is performed.
If no :class:`~chemcoord.exceptions.InvalidReference`
exceptions are raised, the resulting cartesian is written to
``self._metadata['last_valid_cartesian']``.
Args:
symb_expr (sympy expression):
value :
perform_checks (bool): If ``perform_checks is True``,
it is asserted, that the resulting Zmatrix can be converted
to cartesian coordinates.
Dummy atoms will be inserted automatically if necessary.
Returns:
Zmat: Zmatrix with substituted symbolic expressions.
If all resulting sympy expressions in a column are numbers,
the column is recast to 64bit float. | [
"Substitute",
"a",
"symbolic",
"expression",
"in",
"[",
"bond",
"angle",
"dihedral",
"]"
] | python | train |
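A hedged sketch of substituting a symbolic bond length through subs(); `zmat` is assumed to be a Zmat whose 'bond' column contains the sympy symbol r.

import sympy

r = sympy.Symbol("r")
stretched = zmat.subs(r, 1.54)  # every occurrence of r becomes 1.54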
SpriteLink/NIPAP | pynipap/pynipap.py | https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L1510-L1519 | def _fault_to_exception(f):
""" Converts XML-RPC Fault objects to Pynipap-exceptions.
TODO: Is this one necessary? Can be done inline...
"""
e = _fault_to_exception_map.get(f.faultCode)
if e is None:
e = NipapError
return e(f.faultString) | [
"def",
"_fault_to_exception",
"(",
"f",
")",
":",
"e",
"=",
"_fault_to_exception_map",
".",
"get",
"(",
"f",
".",
"faultCode",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"NipapError",
"return",
"e",
"(",
"f",
".",
"faultString",
")"
] | Converts XML-RPC Fault objects to Pynipap-exceptions.
TODO: Is this one necessary? Can be done inline... | [
"Converts",
"XML",
"-",
"RPC",
"Fault",
"objects",
"to",
"Pynipap",
"-",
"exceptions",
"."
] | python | train |
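A standalone sketch of the faultCode-to-exception mapping the helper above relies on; the code number and classes here are illustrative, not NIPAP's real table.

class NipapError(Exception):
    pass

class NipapValueError(NipapError):
    pass

_fault_to_exception_map = {1150: NipapValueError}

def fault_to_exception(fault):
    # fall back to the base class for unknown fault codes
    exc = _fault_to_exception_map.get(fault.faultCode, NipapError)
    return exc(fault.faultString)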
mjirik/imcut | imcut/models.py | https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/models.py#L397-L442 | def likelihood(self, x, cl):
"""
X = numpy.random.random([2,3,4])
# we have data 2x3 with feature vector with 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0)
"""
# sha = x.shape
# xr = x.reshape(-1, sha[-1])
# outsha = sha[:-1]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
logger.debug("likel " + str(x.shape))
if self.modelparams["type"] == "gmmsame":
px = self.mdl[cl].score_samples(x)
# todo handle multi-dimensional feature vectors
# px = px.reshape(outsha)
elif self.modelparams["type"] == "kernel":
px = self.mdl[cl].score_samples(x)
elif self.modelparams["type"] == "gaussian_kde":
# print x
# np.log because it is likelihood
# @TODO There is probably a problem with reshape here
# old
# px = np.log(self.mdl[cl](x.reshape(-1)))
# new
px = np.log(self.mdl[cl](x))
# px = px.reshape(outsha)
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
elif self.modelparams["type"] == "dpgmm":
# todo here is a hack
# dpgmm does not work for our data for some reason
# it always trains one component close to zero
# probably it has something to do with the alpha parameter
# if multiplied by a small number, it miraculously works
logger.warning(".score() replaced with .score_samples() . Check it.")
# px = self.mdl[cl].score(x * 0.01)
px = self.mdl[cl].score_samples(x * 0.01)
elif self.modelparams["type"] == "stored":
px = self.mdl[cl].score(x)
return px | [
"def",
"likelihood",
"(",
"self",
",",
"x",
",",
"cl",
")",
":",
"# sha = x.shape",
"# xr = x.reshape(-1, sha[-1])",
"# outsha = sha[:-1]",
"# from PyQt4.QtCore import pyqtRemoveInputHook",
"# pyqtRemoveInputHook()",
"logger",
".",
"debug",
"(",
"\"likel \"",
"+",
"str",
"(",
"x",
".",
"shape",
")",
")",
"if",
"self",
".",
"modelparams",
"[",
"\"type\"",
"]",
"==",
"\"gmmsame\"",
":",
"px",
"=",
"self",
".",
"mdl",
"[",
"cl",
"]",
".",
"score_samples",
"(",
"x",
")",
"# todo ošetřit více dimenzionální fv",
"# px = px.reshape(outsha)",
"elif",
"self",
".",
"modelparams",
"[",
"\"type\"",
"]",
"==",
"\"kernel\"",
":",
"px",
"=",
"self",
".",
"mdl",
"[",
"cl",
"]",
".",
"score_samples",
"(",
"x",
")",
"elif",
"self",
".",
"modelparams",
"[",
"\"type\"",
"]",
"==",
"\"gaussian_kde\"",
":",
"# print x",
"# np.log because it is likelihood",
"# @TODO Zde je patrně problém s reshape",
"# old",
"# px = np.log(self.mdl[cl](x.reshape(-1)))",
"# new",
"px",
"=",
"np",
".",
"log",
"(",
"self",
".",
"mdl",
"[",
"cl",
"]",
"(",
"x",
")",
")",
"# px = px.reshape(outsha)",
"# from PyQt4.QtCore import pyqtRemoveInputHook",
"# pyqtRemoveInputHook()",
"elif",
"self",
".",
"modelparams",
"[",
"\"type\"",
"]",
"==",
"\"dpgmm\"",
":",
"# todo here is a hack",
"# dpgmm z nějakého důvodu nefunguje pro naše data",
"# vždy natrénuje jednu složku v blízkosti nuly",
"# patrně to bude mít něco společného s parametrem alpha",
"# přenásobí-li se to malým číslem, zázračně to chodí",
"logger",
".",
"warning",
"(",
"\".score() replaced with .score_samples() . Check it.\"",
")",
"# px = self.mdl[cl].score(x * 0.01)",
"px",
"=",
"self",
".",
"mdl",
"[",
"cl",
"]",
".",
"score_samples",
"(",
"x",
"*",
"0.01",
")",
"elif",
"self",
".",
"modelparams",
"[",
"\"type\"",
"]",
"==",
"\"stored\"",
":",
"px",
"=",
"self",
".",
"mdl",
"[",
"cl",
"]",
".",
"score",
"(",
"x",
")",
"return",
"px"
] | X = numpy.random.random([2,3,4])
# we have data 2x3 with feature vector with 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0) | [
"X",
"=",
"numpy",
".",
"random",
".",
"random",
"(",
"[",
"2",
"3",
"4",
"]",
")",
"#",
"we",
"have",
"data",
"2x3",
"with",
"fature",
"vector",
"with",
"4",
"fatures"
] | python | train |
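A standalone sketch of the 'gmmsame' branch above: per-sample log-likelihoods from a fitted mixture, using scikit-learn directly.

import numpy as np
from sklearn.mixture import GaussianMixture

X = np.random.random((6, 4))                  # 6 samples, 4-feature vectors
mdl = GaussianMixture(n_components=2).fit(X)
log_px = mdl.score_samples(X)                 # one log-density per row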
postlund/pyatv | pyatv/dmap/daap.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/daap.py#L72-L84 | async def post(self, cmd, data=None, timeout=None, **args):
"""Perform DAAP POST command with optional data."""
def _post_request():
headers = copy(_DMAP_HEADERS)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
return self.http.post_data(
self._mkurl(cmd, *args),
data=data,
headers=headers,
timeout=timeout)
await self._assure_logged_in()
return await self._do(_post_request) | [
"async",
"def",
"post",
"(",
"self",
",",
"cmd",
",",
"data",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"def",
"_post_request",
"(",
")",
":",
"headers",
"=",
"copy",
"(",
"_DMAP_HEADERS",
")",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/x-www-form-urlencoded'",
"return",
"self",
".",
"http",
".",
"post_data",
"(",
"self",
".",
"_mkurl",
"(",
"cmd",
",",
"*",
"args",
")",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"timeout",
")",
"await",
"self",
".",
"_assure_logged_in",
"(",
")",
"return",
"await",
"self",
".",
"_do",
"(",
"_post_request",
")"
] | Perform DAAP POST command with optional data. | [
"Perform",
"DAAP",
"POST",
"command",
"with",
"optional",
"data",
"."
] | python | train |
fhcrc/taxtastic | taxtastic/refpkg.py | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L66-L82 | def scratch_file(unlink=True, **kwargs):
"""Create a temporary file and return its name.
Additional arguments are passed to :class:`tempfile.NamedTemporaryFile`
At the start of the with block a secure, temporary file is created
and its name returned. At the end of the with block it is
deleted.
"""
kwargs['delete'] = False
tf = tempfile.NamedTemporaryFile(**kwargs)
tf.close()
try:
yield tf.name
finally:
if unlink:
os.unlink(tf.name) | [
"def",
"scratch_file",
"(",
"unlink",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'delete'",
"]",
"=",
"False",
"tf",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"*",
"*",
"kwargs",
")",
"tf",
".",
"close",
"(",
")",
"try",
":",
"yield",
"tf",
".",
"name",
"finally",
":",
"if",
"unlink",
":",
"os",
".",
"unlink",
"(",
"tf",
".",
"name",
")"
] | Create a temporary file and return its name.
Additional arguments are passed to :class:`tempfile.NamedTemporaryFile`
At the start of the with block a secure, temporary file is created
and its name returned. At the end of the with block it is
deleted. | [
"Create",
"a",
"temporary",
"file",
"and",
"return",
"its",
"name",
"."
] | python | train |
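A usage sketch of the context manager above; the temporary file exists only inside the block and is unlinked on exit.

from taxtastic.refpkg import scratch_file

with scratch_file(suffix=".csv") as path:
    with open(path, "w") as handle:
        handle.write("id,name\n1,test\n")
    # hand `path` to an external tool here
# the file has already been deleted at this point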
jstitch/MambuPy | MambuPy/rest/mambustruct.py | https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambustruct.py#L278-L283 | def get(self, key, default=None):
"""Dict-like behaviour"""
if type(self.attrs) == dict:
return self.attrs.get(key, default)
else:
raise NotImplementedError | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"if",
"type",
"(",
"self",
".",
"attrs",
")",
"==",
"dict",
":",
"return",
"self",
".",
"attrs",
".",
"get",
"(",
"key",
",",
"default",
")",
"else",
":",
"raise",
"NotImplementedError"
] | Dict-like behaviour | [
"Dict",
"-",
"like",
"behaviour"
] | python | train |
senaite/senaite.core | bika/lims/upgrade/v01_02_009.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_02_009.py#L95-L112 | def delete_orphaned_attachments(portal):
"""Delete attachments where the Analysis was removed
https://github.com/senaite/senaite.core/issues/1025
"""
attachments = api.search({"portal_type": "Attachment"})
total = len(attachments)
logger.info("Integrity checking %d attachments" % total)
for num, attachment in enumerate(attachments):
obj = api.get_object(attachment)
# The method `getRequest` from the attachment tries to get the AR
# either directly or from one of the linked Analyses. If it returns
# `None`, we can be sure that the attachment is neither assigned
# directly to an AR nor to an Analysis.
ar = obj.getRequest()
if ar is None:
obj_id = api.get_id(obj)
api.get_parent(obj).manage_delObjects(obj_id)
logger.info("Deleted orphaned Attachment {}".format(obj_id)) | [
"def",
"delete_orphaned_attachments",
"(",
"portal",
")",
":",
"attachments",
"=",
"api",
".",
"search",
"(",
"{",
"\"portal_type\"",
":",
"\"Attachment\"",
"}",
")",
"total",
"=",
"len",
"(",
"attachments",
")",
"logger",
".",
"info",
"(",
"\"Integrity checking %d attachments\"",
"%",
"total",
")",
"for",
"num",
",",
"attachment",
"in",
"enumerate",
"(",
"attachments",
")",
":",
"obj",
"=",
"api",
".",
"get_object",
"(",
"attachment",
")",
"# The method `getRequest` from the attachment tries to get the AR",
"# either directly or from one of the linked Analyses. If it returns",
"# `None`, we can be sure that the attachment is neither assigned",
"# directly to an AR nor to an Analysis.",
"ar",
"=",
"obj",
".",
"getRequest",
"(",
")",
"if",
"ar",
"is",
"None",
":",
"obj_id",
"=",
"api",
".",
"get_id",
"(",
"obj",
")",
"api",
".",
"get_parent",
"(",
"obj",
")",
".",
"manage_delObjects",
"(",
"obj_id",
")",
"logger",
".",
"info",
"(",
"\"Deleted orphaned Attachment {}\"",
".",
"format",
"(",
"obj_id",
")",
")"
] | Delete attachments where the Analysis was removed
https://github.com/senaite/senaite.core/issues/1025 | [
"Delete",
"attachments",
"where",
"the",
"Analysis",
"was",
"removed",
"https",
":",
"//",
"github",
".",
"com",
"/",
"senaite",
"/",
"senaite",
".",
"core",
"/",
"issues",
"/",
"1025"
] | python | train |
saltstack/salt | salt/fileserver/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L581-L627 | def find_file(self, path, saltenv, back=None):
'''
Find the path and return the fnd structure, this structure is passed
to other backend interfaces.
'''
path = salt.utils.stringutils.to_unicode(path)
saltenv = salt.utils.stringutils.to_unicode(saltenv)
back = self.backends(back)
kwargs = {}
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path):
return fnd
if '../' in path:
return fnd
if salt.utils.url.is_escaped(path):
# don't attempt to find URL query arguments in the path
path = salt.utils.url.unescape(path)
else:
if '?' in path:
hcomps = path.split('?')
path = hcomps[0]
comps = hcomps[1].split('&')
for comp in comps:
if '=' not in comp:
# Invalid option, skip it
continue
args = comp.split('=', 1)
kwargs[args[0]] = args[1]
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
if 'saltenv' in kwargs:
saltenv = kwargs.pop('saltenv')
if not isinstance(saltenv, six.string_types):
saltenv = six.text_type(saltenv)
for fsb in back:
fstr = '{0}.find_file'.format(fsb)
if fstr in self.servers:
fnd = self.servers[fstr](path, saltenv, **kwargs)
if fnd.get('path'):
fnd['back'] = fsb
return fnd
return fnd | [
"def",
"find_file",
"(",
"self",
",",
"path",
",",
"saltenv",
",",
"back",
"=",
"None",
")",
":",
"path",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"path",
")",
"saltenv",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"saltenv",
")",
"back",
"=",
"self",
".",
"backends",
"(",
"back",
")",
"kwargs",
"=",
"{",
"}",
"fnd",
"=",
"{",
"'path'",
":",
"''",
",",
"'rel'",
":",
"''",
"}",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"return",
"fnd",
"if",
"'../'",
"in",
"path",
":",
"return",
"fnd",
"if",
"salt",
".",
"utils",
".",
"url",
".",
"is_escaped",
"(",
"path",
")",
":",
"# don't attempt to find URL query arguments in the path",
"path",
"=",
"salt",
".",
"utils",
".",
"url",
".",
"unescape",
"(",
"path",
")",
"else",
":",
"if",
"'?'",
"in",
"path",
":",
"hcomps",
"=",
"path",
".",
"split",
"(",
"'?'",
")",
"path",
"=",
"hcomps",
"[",
"0",
"]",
"comps",
"=",
"hcomps",
"[",
"1",
"]",
".",
"split",
"(",
"'&'",
")",
"for",
"comp",
"in",
"comps",
":",
"if",
"'='",
"not",
"in",
"comp",
":",
"# Invalid option, skip it",
"continue",
"args",
"=",
"comp",
".",
"split",
"(",
"'='",
",",
"1",
")",
"kwargs",
"[",
"args",
"[",
"0",
"]",
"]",
"=",
"args",
"[",
"1",
"]",
"if",
"'env'",
"in",
"kwargs",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"kwargs",
".",
"pop",
"(",
"'env'",
")",
"if",
"'saltenv'",
"in",
"kwargs",
":",
"saltenv",
"=",
"kwargs",
".",
"pop",
"(",
"'saltenv'",
")",
"if",
"not",
"isinstance",
"(",
"saltenv",
",",
"six",
".",
"string_types",
")",
":",
"saltenv",
"=",
"six",
".",
"text_type",
"(",
"saltenv",
")",
"for",
"fsb",
"in",
"back",
":",
"fstr",
"=",
"'{0}.find_file'",
".",
"format",
"(",
"fsb",
")",
"if",
"fstr",
"in",
"self",
".",
"servers",
":",
"fnd",
"=",
"self",
".",
"servers",
"[",
"fstr",
"]",
"(",
"path",
",",
"saltenv",
",",
"*",
"*",
"kwargs",
")",
"if",
"fnd",
".",
"get",
"(",
"'path'",
")",
":",
"fnd",
"[",
"'back'",
"]",
"=",
"fsb",
"return",
"fnd",
"return",
"fnd"
] | Find the path and return the fnd structure, this structure is passed
to other backend interfaces. | [
"Find",
"the",
"path",
"and",
"return",
"the",
"fnd",
"structure",
"this",
"structure",
"is",
"passed",
"to",
"other",
"backend",
"interfaces",
"."
] | python | train |
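A standalone sketch of the query-string handling in find_file(): anything after '?' becomes keyword arguments, with saltenv hoisted out.

path = "states/top.sls?saltenv=dev&foo=bar"
path, _, query = path.partition("?")
kwargs = dict(part.split("=", 1) for part in query.split("&") if "=" in part)
saltenv = kwargs.pop("saltenv", "base")
print(path, saltenv, kwargs)  # states/top.sls dev {'foo': 'bar'}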
fumitoh/modelx | modelx/core/model.py | https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/model.py#L58-L65 | def clear_obj(self, obj):
""""Remove all nodes with `obj` and their descendants."""
obj_nodes = self.get_nodes_with(obj)
removed = set()
for node in obj_nodes:
if self.has_node(node):
removed.update(self.clear_descendants(node))
return removed | [
"def",
"clear_obj",
"(",
"self",
",",
"obj",
")",
":",
"obj_nodes",
"=",
"self",
".",
"get_nodes_with",
"(",
"obj",
")",
"removed",
"=",
"set",
"(",
")",
"for",
"node",
"in",
"obj_nodes",
":",
"if",
"self",
".",
"has_node",
"(",
"node",
")",
":",
"removed",
".",
"update",
"(",
"self",
".",
"clear_descendants",
"(",
"node",
")",
")",
"return",
"removed"
] | Remove all nodes with `obj` and their descendants. | [
"Remove",
"all",
"nodes",
"with",
"obj",
"and",
"their",
"descendants",
"."
] | python | valid |
neurodata/ndio | ndio/remote/metadata.py | https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L136-L158 | def get_image_size(self, token, resolution=0):
"""
Return the size of the volume (3D). Convenient for when you want
to download the entirety of a dataset.
Arguments:
token (str): The token for which to find the dataset image bounds
resolution (int : 0): The resolution at which to get image bounds.
Defaults to 0, to get the largest area available.
Returns:
int[3]: The size of the bounds. Should == get_volume.shape
Raises:
RemoteDataNotFoundError: If the token is invalid, or if the
metadata at that resolution is unavailable in projinfo.
"""
info = self.get_proj_info(token)
res = str(resolution)
if res not in info['dataset']['imagesize']:
raise RemoteDataNotFoundError("Resolution " + res +
" is not available.")
return info['dataset']['imagesize'][str(resolution)] | [
"def",
"get_image_size",
"(",
"self",
",",
"token",
",",
"resolution",
"=",
"0",
")",
":",
"info",
"=",
"self",
".",
"get_proj_info",
"(",
"token",
")",
"res",
"=",
"str",
"(",
"resolution",
")",
"if",
"res",
"not",
"in",
"info",
"[",
"'dataset'",
"]",
"[",
"'imagesize'",
"]",
":",
"raise",
"RemoteDataNotFoundError",
"(",
"\"Resolution \"",
"+",
"res",
"+",
"\" is not available.\"",
")",
"return",
"info",
"[",
"'dataset'",
"]",
"[",
"'imagesize'",
"]",
"[",
"str",
"(",
"resolution",
")",
"]"
] | Return the size of the volume (3D). Convenient for when you want
to download the entirety of a dataset.
Arguments:
token (str): The token for which to find the dataset image bounds
resolution (int : 0): The resolution at which to get image bounds.
Defaults to 0, to get the largest area available.
Returns:
int[3]: The size of the bounds. Should == get_volume.shape
Raises:
RemoteDataNotFoundError: If the token is invalid, or if the
metadata at that resolution is unavailable in projinfo. | [
"Return",
"the",
"size",
"of",
"the",
"volume",
"(",
"3D",
")",
".",
"Convenient",
"for",
"when",
"you",
"want",
"to",
"download",
"the",
"entirety",
"of",
"a",
"dataset",
"."
] | python | test |
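A hypothetical call pattern for the method above; the remote class import and public token are assumptions.

from ndio.remote.neurodata import neurodata  # assumed import path

nd = neurodata()
x, y, z = nd.get_image_size("kasthuri11", resolution=1)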
twneale/hercules | hercules/lazylist.py | https://github.com/twneale/hercules/blob/cd61582ef7e593093e9b28b56798df4203d1467a/hercules/lazylist.py#L77-L95 | def exhaust(self, index = None):
"""Exhaust the iterator generating this LazyList's values.
If index is None, this will exhaust the iterator completely.
Otherwise, it will iterate over the iterator until either the list
has a value for index or the iterator is exhausted.
"""
if self._exhausted:
return
if index is None:
ind_range = itertools.count(len(self))
else:
ind_range = range(len(self), index + 1)
for ind in ind_range:
try:
self._data.append(next(self._iterator))
except StopIteration: #iterator is fully exhausted
self._exhausted = True
break | [
"def",
"exhaust",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"self",
".",
"_exhausted",
":",
"return",
"if",
"index",
"is",
"None",
":",
"ind_range",
"=",
"itertools",
".",
"count",
"(",
"len",
"(",
"self",
")",
")",
"else",
":",
"ind_range",
"=",
"range",
"(",
"len",
"(",
"self",
")",
",",
"index",
"+",
"1",
")",
"for",
"ind",
"in",
"ind_range",
":",
"try",
":",
"self",
".",
"_data",
".",
"append",
"(",
"next",
"(",
"self",
".",
"_iterator",
")",
")",
"except",
"StopIteration",
":",
"#iterator is fully exhausted",
"self",
".",
"_exhausted",
"=",
"True",
"break"
] | Exhaust the iterator generating this LazyList's values.
If index is None, this will exhaust the iterator completely.
Otherwise, it will iterate over the iterator until either the list
has a value for index or the iterator is exhausted. | [
"Exhaust",
"the",
"iterator",
"generating",
"this",
"LazyList",
"s",
"values",
".",
"if",
"index",
"is",
"None",
"this",
"will",
"exhaust",
"the",
"iterator",
"completely",
".",
"Otherwise",
"it",
"will",
"iterate",
"over",
"the",
"iterator",
"until",
"either",
"the",
"list",
"has",
"a",
"value",
"for",
"index",
"or",
"the",
"iterator",
"is",
"exhausted",
"."
] | python | train |
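A short sketch of exhaust() above; the generator-wrapping constructor is an assumption.

from hercules.lazylist import LazyList  # assumed import path

squares = LazyList(x * x for x in range(10))
squares.exhaust(3)   # materialize values up to index 3 only
squares.exhaust()    # then drain the iterator completely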
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/flask/blueprints.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/blueprints.py#L345-L352 | def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator | [
"def",
"app_errorhandler",
"(",
"self",
",",
"code",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"self",
".",
"record_once",
"(",
"lambda",
"s",
":",
"s",
".",
"app",
".",
"errorhandler",
"(",
"code",
")",
"(",
"f",
")",
")",
"return",
"f",
"return",
"decorator"
] | Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint. | [
"Like",
":",
"meth",
":",
"Flask",
".",
"errorhandler",
"but",
"for",
"a",
"blueprint",
".",
"This",
"handler",
"is",
"used",
"for",
"all",
"requests",
"even",
"if",
"outside",
"of",
"the",
"blueprint",
"."
] | python | test |
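A minimal sketch of app_errorhandler above: unlike errorhandler, the registered handler fires for requests outside the blueprint too.

from flask import Blueprint, jsonify

bp = Blueprint("api", __name__)

@bp.app_errorhandler(404)
def handle_404(error):
    return jsonify(error="not found"), 404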
apache/incubator-mxnet | example/reinforcement-learning/dqn/base.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/dqn/base.py#L190-L222 | def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
"""
View the internal symbols using the forward function.
:param sym_name:
:param bucket_kwargs:
:param arg_dict:
:return:
"""
data_shapes = {k: v.shape for k, v in arg_dict.items()}
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
internal_sym = self.sym.get_internals()[sym_name]
data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
for k, v in self.data_shapes.items()
if k in internal_sym.list_arguments()}
params = {k: v for k, v in self.params.items() if
k in internal_sym.list_arguments()}
aux_states = {k: v for k, v in self.aux_states.items()
if k in internal_sym.list_auxiliary_states()}
exe = internal_sym.bind(ctx=self.ctx,
args=dict(params, **data_inputs),
args_grad=None,
grad_req='null',
aux_states=aux_states,
shared_exec=self.exe)
for k, v in arg_dict.items():
exe.arg_dict[k][:] = v
exe.forward(is_train=False)
assert 1 == len(exe.outputs)
for output in exe.outputs:
output.wait_to_read()
return exe.outputs[0] | [
"def",
"compute_internal",
"(",
"self",
",",
"sym_name",
",",
"bucket_kwargs",
"=",
"None",
",",
"*",
"*",
"arg_dict",
")",
":",
"data_shapes",
"=",
"{",
"k",
":",
"v",
".",
"shape",
"for",
"k",
",",
"v",
"in",
"arg_dict",
".",
"items",
"(",
")",
"}",
"self",
".",
"switch_bucket",
"(",
"bucket_kwargs",
"=",
"bucket_kwargs",
",",
"data_shapes",
"=",
"data_shapes",
")",
"internal_sym",
"=",
"self",
".",
"sym",
".",
"get_internals",
"(",
")",
"[",
"sym_name",
"]",
"data_inputs",
"=",
"{",
"k",
":",
"mx",
".",
"nd",
".",
"empty",
"(",
"v",
",",
"ctx",
"=",
"self",
".",
"ctx",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"data_shapes",
".",
"items",
"(",
")",
"if",
"k",
"in",
"internal_sym",
".",
"list_arguments",
"(",
")",
"}",
"params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"params",
".",
"items",
"(",
")",
"if",
"k",
"in",
"internal_sym",
".",
"list_arguments",
"(",
")",
"}",
"aux_states",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"aux_states",
".",
"items",
"(",
")",
"if",
"k",
"in",
"internal_sym",
".",
"list_auxiliary_states",
"(",
")",
"}",
"exe",
"=",
"internal_sym",
".",
"bind",
"(",
"ctx",
"=",
"self",
".",
"ctx",
",",
"args",
"=",
"dict",
"(",
"params",
",",
"*",
"*",
"data_inputs",
")",
",",
"args_grad",
"=",
"None",
",",
"grad_req",
"=",
"'null'",
",",
"aux_states",
"=",
"aux_states",
",",
"shared_exec",
"=",
"self",
".",
"exe",
")",
"for",
"k",
",",
"v",
"in",
"arg_dict",
".",
"items",
"(",
")",
":",
"exe",
".",
"arg_dict",
"[",
"k",
"]",
"[",
":",
"]",
"=",
"v",
"exe",
".",
"forward",
"(",
"is_train",
"=",
"False",
")",
"assert",
"1",
"==",
"len",
"(",
"exe",
".",
"outputs",
")",
"for",
"output",
"in",
"exe",
".",
"outputs",
":",
"output",
".",
"wait_to_read",
"(",
")",
"return",
"exe",
".",
"outputs",
"[",
"0",
"]"
] | View the internal symbols using the forward function.
:param sym_name:
:param bucket_kwargs:
:param arg_dict:
:return: | [
"View",
"the",
"internal",
"symbols",
"using",
"the",
"forward",
"function",
"."
] | python | train |
newville/wxmplot | wxmplot/basepanel.py | https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/basepanel.py#L161-L163 | def set_xlabel(self, s, delay_draw=False):
"set plot xlabel"
self.conf.relabel(xlabel=s, delay_draw=delay_draw) | [
"def",
"set_xlabel",
"(",
"self",
",",
"s",
",",
"delay_draw",
"=",
"False",
")",
":",
"self",
".",
"conf",
".",
"relabel",
"(",
"xlabel",
"=",
"s",
",",
"delay_draw",
"=",
"delay_draw",
")"
] | set plot xlabel | [
"set",
"plot",
"xlabel"
] | python | train |
GuiltyTargets/ppi-network-annotation | src/ppi_network_annotation/model/filtered_network.py | https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/filtered_network.py#L43-L57 | def get_downregulated_genes_network(self) -> Graph:
"""Get the graph of down-regulated genes.
:return Graph: Graph of down-regulated genes.
"""
logger.info("In get_downregulated_genes_network()")
deg_graph = self.graph.copy() # deep copy graph
not_diff_expr = self.graph.vs(down_regulated_eq=False)
# delete genes which are not differentially expressed or have no connections to others
deg_graph.delete_vertices(not_diff_expr.indices)
deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))
return deg_graph | [
"def",
"get_downregulated_genes_network",
"(",
"self",
")",
"->",
"Graph",
":",
"logger",
".",
"info",
"(",
"\"In get_downregulated_genes_network()\"",
")",
"deg_graph",
"=",
"self",
".",
"graph",
".",
"copy",
"(",
")",
"# deep copy graph",
"not_diff_expr",
"=",
"self",
".",
"graph",
".",
"vs",
"(",
"down_regulated_eq",
"=",
"False",
")",
"# delete genes which are not differentially expressed or have no connections to others",
"deg_graph",
".",
"delete_vertices",
"(",
"not_diff_expr",
".",
"indices",
")",
"deg_graph",
".",
"delete_vertices",
"(",
"deg_graph",
".",
"vs",
".",
"select",
"(",
"_degree_eq",
"=",
"0",
")",
")",
"return",
"deg_graph"
] | Get the graph of down-regulated genes.
:return Graph: Graph of down-regulated genes. | [
"Get",
"the",
"graph",
"of",
"down",
"-",
"regulated",
"genes",
"."
] | python | train |
PyCQA/astroid | astroid/rebuilder.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L696-L700 | def visit_lambda(self, node, parent):
"""visit a Lambda node by returning a fresh instance of it"""
newnode = nodes.Lambda(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.args, newnode), self.visit(node.body, newnode))
return newnode | [
"def",
"visit_lambda",
"(",
"self",
",",
"node",
",",
"parent",
")",
":",
"newnode",
"=",
"nodes",
".",
"Lambda",
"(",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"parent",
")",
"newnode",
".",
"postinit",
"(",
"self",
".",
"visit",
"(",
"node",
".",
"args",
",",
"newnode",
")",
",",
"self",
".",
"visit",
"(",
"node",
".",
"body",
",",
"newnode",
")",
")",
"return",
"newnode"
] | visit a Lambda node by returning a fresh instance of it | [
"visit",
"a",
"Lambda",
"node",
"by",
"returning",
"a",
"fresh",
"instance",
"of",
"it"
] | python | train |
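A small usage sketch showing the node this visitor produces, assuming the astroid package is installed (the printed strings are indicative of current astroid versions, not guaranteed output):

```python
import astroid

assign = astroid.extract_node("double = lambda x: 2 * x")
lam = assign.value             # a nodes.Lambda built by visit_lambda
print(lam.args.as_string())    # the arguments node  -> "x"
print(lam.body.as_string())    # the body node       -> "2 * x"
```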
baruwa-enterprise/BaruwaAPI | BaruwaAPI/resource.py | https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L326-L330 | def get_ldapsettings(self, domainid, serverid, settingsid):
"""Get LDAP settings"""
return self.api_call(
ENDPOINTS['ldapsettings']['get'],
dict(domainid=domainid, serverid=serverid, settingsid=settingsid)) | [
"def",
"get_ldapsettings",
"(",
"self",
",",
"domainid",
",",
"serverid",
",",
"settingsid",
")",
":",
"return",
"self",
".",
"api_call",
"(",
"ENDPOINTS",
"[",
"'ldapsettings'",
"]",
"[",
"'get'",
"]",
",",
"dict",
"(",
"domainid",
"=",
"domainid",
",",
"serverid",
"=",
"serverid",
",",
"settingsid",
"=",
"settingsid",
")",
")"
] | Get LDAP settings | [
"Get",
"LDAP",
"settings"
] | python | train |
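A hedged usage sketch for `get_ldapsettings`; the client construction below follows the project's documented pattern but should be treated as an assumption, and the token, URL, and IDs are placeholders:

```python
from BaruwaAPI import BaruwaAPIClient  # assumed import path

# Placeholder credentials and IDs -- not real values
api = BaruwaAPIClient("my-api-token", "https://baruwa.example.com")
settings = api.get_ldapsettings(domainid=1, serverid=2, settingsid=3)
print(settings)
```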
myint/rstcheck | rstcheck.py | https://github.com/myint/rstcheck/blob/2f975906b75f3b88d501ef3b13d213815cf7079a/rstcheck.py#L743-L765 | def _add_check(self, node, run, language, is_code_node):
"""Add checker that will be run."""
def run_check():
"""Yield errors."""
all_results = run()
if all_results is not None:
if all_results:
for result in all_results:
error_offset = result[0] - 1
line_number = getattr(node, 'line', None)
if line_number is not None:
yield (
beginning_of_code_block(
node=node,
line_number=line_number,
full_contents=self.contents,
is_code_node=is_code_node) +
error_offset,
'({}) {}'.format(language, result[1]))
else:
yield (self.filename, 0, 'unknown error')
self.checkers.append(run_check) | [
"def",
"_add_check",
"(",
"self",
",",
"node",
",",
"run",
",",
"language",
",",
"is_code_node",
")",
":",
"def",
"run_check",
"(",
")",
":",
"\"\"\"Yield errors.\"\"\"",
"all_results",
"=",
"run",
"(",
")",
"if",
"all_results",
"is",
"not",
"None",
":",
"if",
"all_results",
":",
"for",
"result",
"in",
"all_results",
":",
"error_offset",
"=",
"result",
"[",
"0",
"]",
"-",
"1",
"line_number",
"=",
"getattr",
"(",
"node",
",",
"'line'",
",",
"None",
")",
"if",
"line_number",
"is",
"not",
"None",
":",
"yield",
"(",
"beginning_of_code_block",
"(",
"node",
"=",
"node",
",",
"line_number",
"=",
"line_number",
",",
"full_contents",
"=",
"self",
".",
"contents",
",",
"is_code_node",
"=",
"is_code_node",
")",
"+",
"error_offset",
",",
"'({}) {}'",
".",
"format",
"(",
"language",
",",
"result",
"[",
"1",
"]",
")",
")",
"else",
":",
"yield",
"(",
"self",
".",
"filename",
",",
"0",
",",
"'unknown error'",
")",
"self",
".",
"checkers",
".",
"append",
"(",
"run_check",
")"
] | Add checker that will be run. | [
"Add",
"checker",
"that",
"will",
"be",
"run",
"."
] | python | train |
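The deferred-checker pattern in `_add_check` can be shown in isolation; a simplified sketch (names hypothetical) of how each `run` callable is wrapped so errors are produced only when the checker list is drained:

```python
checkers = []

def add_check(run, language):
    def run_check():
        results = run()                # executed lazily, on drain
        for offset, message in results or ():
            yield offset, "({}) {}".format(language, message)
    checkers.append(run_check)

add_check(lambda: [(1, "bad syntax")], "python")
for check in checkers:
    print(list(check()))               # [(1, '(python) bad syntax')]
```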
CybOXProject/mixbox | mixbox/entities.py | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L201-L214 | def typed_fields(cls):
"""Return a tuple of this entity's TypedFields."""
# Checking cls._typed_fields could return a superclass _typed_fields
# value. So we check our class __dict__ which does not include
# inherited attributes.
klassdict = cls.__dict__
try:
return klassdict["_typed_fields"]
except KeyError:
fields = cls.typed_fields_with_attrnames()
cls._typed_fields = tuple(field for _, field in fields)
return cls._typed_fields | [
"def",
"typed_fields",
"(",
"cls",
")",
":",
"# Checking cls._typed_fields could return a superclass _typed_fields",
"# value. So we check our class __dict__ which does not include",
"# inherited attributes.",
"klassdict",
"=",
"cls",
".",
"__dict__",
"try",
":",
"return",
"klassdict",
"[",
"\"_typed_fields\"",
"]",
"except",
"KeyError",
":",
"fields",
"=",
"cls",
".",
"typed_fields_with_attrnames",
"(",
")",
"cls",
".",
"_typed_fields",
"=",
"tuple",
"(",
"field",
"for",
"_",
",",
"field",
"in",
"fields",
")",
"return",
"cls",
".",
"_typed_fields"
] | Return a tuple of this entity's TypedFields. | [
"Return",
"a",
"tuple",
"of",
"this",
"entity",
"s",
"TypedFields",
"."
] | python | train |
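The reason `typed_fields` reads `cls.__dict__` instead of doing a plain attribute lookup is that each subclass must build its own cache rather than inherit the parent's; a self-contained sketch of the idiom (class names hypothetical):

```python
class Entity:
    @classmethod
    def typed_fields(cls):
        try:
            return cls.__dict__["_typed_fields"]   # only *this* class's cache
        except KeyError:
            # getattr() here would wrongly return a superclass's cache
            cls._typed_fields = tuple(
                name for name in dir(cls) if not name.startswith("_"))
            return cls._typed_fields

class Address(Entity):
    street = None

print(Entity.typed_fields())   # ('typed_fields',)
print(Address.typed_fields())  # ('street', 'typed_fields')
```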
dopefishh/pympi | pympi/Elan.py | https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L877-L893 | def get_ref_annotation_at_time(self, tier, time):
"""Give the ref annotations at the given time of the form
``[(start, end, value, refvalue)]``
:param str tier: Name of the tier.
:param int time: Time of the annotation of the parent.
:returns: List of annotations at that time.
:raises KeyError: If the tier is non-existent.
"""
bucket = []
for aid, (ref, value, _, _) in self.tiers[tier][1].items():
begin, end, rvalue, _ = self.tiers[self.annotations[ref]][0][ref]
begin = self.timeslots[begin]
end = self.timeslots[end]
if begin <= time and end >= time:
bucket.append((begin, end, value, rvalue))
return bucket | [
"def",
"get_ref_annotation_at_time",
"(",
"self",
",",
"tier",
",",
"time",
")",
":",
"bucket",
"=",
"[",
"]",
"for",
"aid",
",",
"(",
"ref",
",",
"value",
",",
"_",
",",
"_",
")",
"in",
"self",
".",
"tiers",
"[",
"tier",
"]",
"[",
"1",
"]",
".",
"items",
"(",
")",
":",
"begin",
",",
"end",
",",
"rvalue",
",",
"_",
"=",
"self",
".",
"tiers",
"[",
"self",
".",
"annotations",
"[",
"ref",
"]",
"]",
"[",
"0",
"]",
"[",
"ref",
"]",
"begin",
"=",
"self",
".",
"timeslots",
"[",
"begin",
"]",
"end",
"=",
"self",
".",
"timeslots",
"[",
"end",
"]",
"if",
"begin",
"<=",
"time",
"and",
"end",
">=",
"time",
":",
"bucket",
".",
"append",
"(",
"(",
"begin",
",",
"end",
",",
"value",
",",
"rvalue",
")",
")",
"return",
"bucket"
] | Give the ref annotations at the given time of the form
``[(start, end, value, refvalue)]``
:param str tier: Name of the tier.
:param int time: Time of the annotation of the parent.
:returns: List of annotations at that time.
:raises KeyError: If the tier is non-existent. | [
"Give",
"the",
"ref",
"annotations",
"at",
"the",
"given",
"time",
"of",
"the",
"form",
"[",
"(",
"start",
"end",
"value",
"refvalue",
")",
"]"
] | python | test |
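A usage sketch, assuming the pympi-ling package is installed; the file name, tier name, and time are placeholders:

```python
from pympi import Eaf

eaf = Eaf("recording.eaf")   # placeholder path to an ELAN file
# Ref annotations on tier "words" overlapping t = 1500 ms
for begin, end, value, refvalue in eaf.get_ref_annotation_at_time("words", 1500):
    print(begin, end, value, refvalue)
```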
gem/oq-engine | openquake/server/views.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L226-L276 | def validate_nrml(request):
"""
Leverage oq-risklib to check if a given XML text is a valid NRML
:param request:
a `django.http.HttpRequest` object containing the mandatory
parameter 'xml_text': the text of the XML to be validated as NRML
:returns: a JSON object, containing:
* 'valid': a boolean indicating if the provided text is a valid NRML
* 'error_msg': the error message, if any error was found
(None otherwise)
* 'error_line': line of the given XML where the error was found
(None if no error was found or if it was not a
validation error)
"""
xml_text = request.POST.get('xml_text')
if not xml_text:
return HttpResponseBadRequest(
'Please provide the "xml_text" parameter')
xml_file = gettemp(xml_text, suffix='.xml')
try:
nrml.to_python(xml_file)
except ExpatError as exc:
return _make_response(error_msg=str(exc),
error_line=exc.lineno,
valid=False)
except Exception as exc:
# get the exception message
exc_msg = exc.args[0]
if isinstance(exc_msg, bytes):
exc_msg = exc_msg.decode('utf-8') # make it a unicode object
elif isinstance(exc_msg, str):
pass
else:
# if it is another kind of object, it is not obvious a priori how
# to extract the error line from it
return _make_response(
error_msg=str(exc_msg), error_line=None, valid=False)
# if the line is not mentioned, the whole message is taken
error_msg = exc_msg.split(', line')[0]
# check if the exc_msg contains a line number indication
search_match = re.search(r'line \d+', exc_msg)
if search_match:
error_line = int(search_match.group(0).split()[1])
else:
error_line = None
return _make_response(
error_msg=error_msg, error_line=error_line, valid=False)
else:
return _make_response(error_msg=None, error_line=None, valid=True) | [
"def",
"validate_nrml",
"(",
"request",
")",
":",
"xml_text",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'xml_text'",
")",
"if",
"not",
"xml_text",
":",
"return",
"HttpResponseBadRequest",
"(",
"'Please provide the \"xml_text\" parameter'",
")",
"xml_file",
"=",
"gettemp",
"(",
"xml_text",
",",
"suffix",
"=",
"'.xml'",
")",
"try",
":",
"nrml",
".",
"to_python",
"(",
"xml_file",
")",
"except",
"ExpatError",
"as",
"exc",
":",
"return",
"_make_response",
"(",
"error_msg",
"=",
"str",
"(",
"exc",
")",
",",
"error_line",
"=",
"exc",
".",
"lineno",
",",
"valid",
"=",
"False",
")",
"except",
"Exception",
"as",
"exc",
":",
"# get the exception message",
"exc_msg",
"=",
"exc",
".",
"args",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"exc_msg",
",",
"bytes",
")",
":",
"exc_msg",
"=",
"exc_msg",
".",
"decode",
"(",
"'utf-8'",
")",
"# make it a unicode object",
"elif",
"isinstance",
"(",
"exc_msg",
",",
"str",
")",
":",
"pass",
"else",
":",
"# if it is another kind of object, it is not obvious a priori how",
"# to extract the error line from it",
"return",
"_make_response",
"(",
"error_msg",
"=",
"str",
"(",
"exc_msg",
")",
",",
"error_line",
"=",
"None",
",",
"valid",
"=",
"False",
")",
"# if the line is not mentioned, the whole message is taken",
"error_msg",
"=",
"exc_msg",
".",
"split",
"(",
"', line'",
")",
"[",
"0",
"]",
"# check if the exc_msg contains a line number indication",
"search_match",
"=",
"re",
".",
"search",
"(",
"r'line \\d+'",
",",
"exc_msg",
")",
"if",
"search_match",
":",
"error_line",
"=",
"int",
"(",
"search_match",
".",
"group",
"(",
"0",
")",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"else",
":",
"error_line",
"=",
"None",
"return",
"_make_response",
"(",
"error_msg",
"=",
"error_msg",
",",
"error_line",
"=",
"error_line",
",",
"valid",
"=",
"False",
")",
"else",
":",
"return",
"_make_response",
"(",
"error_msg",
"=",
"None",
",",
"error_line",
"=",
"None",
",",
"valid",
"=",
"True",
")"
] | Leverage oq-risklib to check if a given XML text is a valid NRML
:param request:
a `django.http.HttpRequest` object containing the mandatory
parameter 'xml_text': the text of the XML to be validated as NRML
:returns: a JSON object, containing:
* 'valid': a boolean indicating if the provided text is a valid NRML
* 'error_msg': the error message, if any error was found
(None otherwise)
* 'error_line': line of the given XML where the error was found
(None if no error was found or if it was not a
validation error) | [
"Leverage",
"oq",
"-",
"risklib",
"to",
"check",
"if",
"a",
"given",
"XML",
"text",
"is",
"a",
"valid",
"NRML"
] | python | train |
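The message-parsing tail of `validate_nrml` is easy to exercise on its own; a standalone re-implementation sketch of just that logic:

```python
import re

def split_error(exc_msg):
    # Keep the text before any ', line N' suffix, as validate_nrml does
    error_msg = exc_msg.split(', line')[0]
    match = re.search(r'line \d+', exc_msg)
    error_line = int(match.group(0).split()[1]) if match else None
    return error_msg, error_line

print(split_error("mismatched tag, line 12"))  # ('mismatched tag', 12)
print(split_error("could not convert value"))  # ('could not convert value', None)
```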
Qiskit/qiskit-terra | qiskit/qasm/qasmparser.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qasm/qasmparser.py#L121-L152 | def verify_as_gate(self, obj, bitlist, arglist=None):
"""Verify a user defined gate call."""
if obj.name not in self.global_symtab:
raise QasmError("Cannot find gate definition for '" + obj.name
+ "', line", str(obj.line), 'file', obj.file)
g_sym = self.global_symtab[obj.name]
if not (g_sym.type == 'gate' or g_sym.type == 'opaque'):
raise QasmError("'" + obj.name + "' is used as a gate "
+ "or opaque call but the symbol is neither;"
+ " it is a '" + g_sym.type + "' line",
str(obj.line), 'file', obj.file)
if g_sym.n_bits() != bitlist.size():
raise QasmError("Gate or opaque call to '" + obj.name
+ "' uses", str(bitlist.size()),
"qubits but is declared for",
str(g_sym.n_bits()), "qubits", "line",
str(obj.line), 'file', obj.file)
if arglist:
if g_sym.n_args() != arglist.size():
raise QasmError("Gate or opaque call to '" + obj.name
+ "' uses", str(arglist.size()),
"qubits but is declared for",
str(g_sym.n_args()), "qubits", "line",
str(obj.line), 'file', obj.file)
else:
if g_sym.n_args() > 0:
raise QasmError("Gate or opaque call to '" + obj.name
+ "' has no arguments but is declared for",
str(g_sym.n_args()), "qubits", "line",
str(obj.line), 'file', obj.file) | [
"def",
"verify_as_gate",
"(",
"self",
",",
"obj",
",",
"bitlist",
",",
"arglist",
"=",
"None",
")",
":",
"if",
"obj",
".",
"name",
"not",
"in",
"self",
".",
"global_symtab",
":",
"raise",
"QasmError",
"(",
"\"Cannot find gate definition for '\"",
"+",
"obj",
".",
"name",
"+",
"\"', line\"",
",",
"str",
"(",
"obj",
".",
"line",
")",
",",
"'file'",
",",
"obj",
".",
"file",
")",
"g_sym",
"=",
"self",
".",
"global_symtab",
"[",
"obj",
".",
"name",
"]",
"if",
"not",
"(",
"g_sym",
".",
"type",
"==",
"'gate'",
"or",
"g_sym",
".",
"type",
"==",
"'opaque'",
")",
":",
"raise",
"QasmError",
"(",
"\"'\"",
"+",
"obj",
".",
"name",
"+",
"\"' is used as a gate \"",
"+",
"\"or opaque call but the symbol is neither;\"",
"+",
"\" it is a '\"",
"+",
"g_sym",
".",
"type",
"+",
"\"' line\"",
",",
"str",
"(",
"obj",
".",
"line",
")",
",",
"'file'",
",",
"obj",
".",
"file",
")",
"if",
"g_sym",
".",
"n_bits",
"(",
")",
"!=",
"bitlist",
".",
"size",
"(",
")",
":",
"raise",
"QasmError",
"(",
"\"Gate or opaque call to '\"",
"+",
"obj",
".",
"name",
"+",
"\"' uses\"",
",",
"str",
"(",
"bitlist",
".",
"size",
"(",
")",
")",
",",
"\"qubits but is declared for\"",
",",
"str",
"(",
"g_sym",
".",
"n_bits",
"(",
")",
")",
",",
"\"qubits\"",
",",
"\"line\"",
",",
"str",
"(",
"obj",
".",
"line",
")",
",",
"'file'",
",",
"obj",
".",
"file",
")",
"if",
"arglist",
":",
"if",
"g_sym",
".",
"n_args",
"(",
")",
"!=",
"arglist",
".",
"size",
"(",
")",
":",
"raise",
"QasmError",
"(",
"\"Gate or opaque call to '\"",
"+",
"obj",
".",
"name",
"+",
"\"' uses\"",
",",
"str",
"(",
"arglist",
".",
"size",
"(",
")",
")",
",",
"\"qubits but is declared for\"",
",",
"str",
"(",
"g_sym",
".",
"n_args",
"(",
")",
")",
",",
"\"qubits\"",
",",
"\"line\"",
",",
"str",
"(",
"obj",
".",
"line",
")",
",",
"'file'",
",",
"obj",
".",
"file",
")",
"else",
":",
"if",
"g_sym",
".",
"n_args",
"(",
")",
">",
"0",
":",
"raise",
"QasmError",
"(",
"\"Gate or opaque call to '\"",
"+",
"obj",
".",
"name",
"+",
"\"' has no arguments but is declared for\"",
",",
"str",
"(",
"g_sym",
".",
"n_args",
"(",
")",
")",
",",
"\"qubits\"",
",",
"\"line\"",
",",
"str",
"(",
"obj",
".",
"line",
")",
",",
"'file'",
",",
"obj",
".",
"file",
")"
] | Verify a user defined gate call. | [
"Verify",
"a",
"user",
"defined",
"gate",
"call",
"."
] | python | test |
zetaops/zengine | zengine/messaging/views.py | https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/views.py#L72-L97 | def _paginate(self, current_page, query_set, per_page=10):
"""
Handles pagination of object listings.
Args:
current_page int:
Current page number
query_set (:class:`QuerySet<pyoko:pyoko.db.queryset.QuerySet>`):
Object listing queryset.
per_page int:
Objects per page.
Returns:
QuerySet object, pagination data dict as a tuple
"""
total_objects = query_set.count()
total_pages = int(total_objects / per_page or 1)
# add orphans to last page
current_per_page = per_page + (
total_objects % per_page if current_page == total_pages else 0)
pagination_data = dict(page=current_page,
total_pages=total_pages,
total_objects=total_objects,
per_page=current_per_page)
query_set = query_set.set_params(rows=current_per_page, start=(current_page - 1) * per_page)
return query_set, pagination_data | [
"def",
"_paginate",
"(",
"self",
",",
"current_page",
",",
"query_set",
",",
"per_page",
"=",
"10",
")",
":",
"total_objects",
"=",
"query_set",
".",
"count",
"(",
")",
"total_pages",
"=",
"int",
"(",
"total_objects",
"/",
"per_page",
"or",
"1",
")",
"# add orphans to last page",
"current_per_page",
"=",
"per_page",
"+",
"(",
"total_objects",
"%",
"per_page",
"if",
"current_page",
"==",
"total_pages",
"else",
"0",
")",
"pagination_data",
"=",
"dict",
"(",
"page",
"=",
"current_page",
",",
"total_pages",
"=",
"total_pages",
",",
"total_objects",
"=",
"total_objects",
",",
"per_page",
"=",
"current_per_page",
")",
"query_set",
"=",
"query_set",
".",
"set_params",
"(",
"rows",
"=",
"current_per_page",
",",
"start",
"=",
"(",
"current_page",
"-",
"1",
")",
"*",
"per_page",
")",
"return",
"query_set",
",",
"pagination_data"
] | Handles pagination of object listings.
Args:
current_page int:
Current page number
query_set (:class:`QuerySet<pyoko:pyoko.db.queryset.QuerySet>`):
Object listing queryset.
per_page int:
Objects per page.
Returns:
QuerySet object, pagination data dict as a tuple | [
"Handles",
"pagination",
"of",
"object",
"listings",
"."
] | python | train |
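The orphan-folding arithmetic is the interesting part of `_paginate`; a pure-Python sketch mirroring it (including the source's floor-style page count):

```python
def page_stats(total_objects, current_page, per_page=10):
    total_pages = int(total_objects / per_page or 1)
    # Orphans are folded into the last page instead of getting their own page
    current_per_page = per_page + (
        total_objects % per_page if current_page == total_pages else 0)
    return total_pages, current_per_page

print(page_stats(23, 1))  # (2, 10) -- first page holds 10 objects
print(page_stats(23, 2))  # (2, 13) -- last page absorbs the 3 orphans
```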
gccxml/pygccxml | pygccxml/parser/project_reader.py | https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/project_reader.py#L213-L234 | def get_os_file_names(files):
"""
returns file names
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list
"""
fnames = []
for f in files:
if utils.is_str(f):
fnames.append(f)
elif isinstance(f, file_configuration_t):
if f.content_type in (
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE,
file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE):
fnames.append(f.data)
else:
pass
return fnames | [
"def",
"get_os_file_names",
"(",
"files",
")",
":",
"fnames",
"=",
"[",
"]",
"for",
"f",
"in",
"files",
":",
"if",
"utils",
".",
"is_str",
"(",
"f",
")",
":",
"fnames",
".",
"append",
"(",
"f",
")",
"elif",
"isinstance",
"(",
"f",
",",
"file_configuration_t",
")",
":",
"if",
"f",
".",
"content_type",
"in",
"(",
"file_configuration_t",
".",
"CONTENT_TYPE",
".",
"STANDARD_SOURCE_FILE",
",",
"file_configuration_t",
".",
"CONTENT_TYPE",
".",
"CACHED_SOURCE_FILE",
")",
":",
"fnames",
".",
"append",
"(",
"f",
".",
"data",
")",
"else",
":",
"pass",
"return",
"fnames"
] | returns file names
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list | [
"returns",
"file",
"names"
] | python | train |
basho/riak-python-client | riak/client/__init__.py | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/__init__.py#L299-L317 | def table(self, name):
"""
Gets the table by the specified name. Tables do
not always exist (unlike buckets), but this will always return
a :class:`Table <riak.table.Table>` object.
:param name: the table name
:type name: str
:rtype: :class:`Table <riak.table.Table>`
"""
if not isinstance(name, string_types):
raise TypeError('Table name must be a string')
if name in self._tables:
return self._tables[name]
else:
table = Table(self, name)
self._tables[name] = table
return table | [
"def",
"table",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'Table name must be a string'",
")",
"if",
"name",
"in",
"self",
".",
"_tables",
":",
"return",
"self",
".",
"_tables",
"[",
"name",
"]",
"else",
":",
"table",
"=",
"Table",
"(",
"self",
",",
"name",
")",
"self",
".",
"_tables",
"[",
"name",
"]",
"=",
"table",
"return",
"table"
] | Gets the table by the specified name. Tables do
not always exist (unlike buckets), but this will always return
a :class:`Table <riak.table.Table>` object.
:param name: the table name
:type name: str
:rtype: :class:`Table <riak.table.Table>` | [
"Gets",
"the",
"table",
"by",
"the",
"specified",
"name",
".",
"Tables",
"do",
"not",
"always",
"exist",
"(",
"unlike",
"buckets",
")",
"but",
"this",
"will",
"always",
"return",
"a",
":",
"class",
":",
"Table",
"<riak",
".",
"table",
".",
"Table",
">",
"object",
"."
] | python | train |
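`table` is a memoizing factory: repeated calls with the same name hand back the identical object. A stripped-down sketch of the pattern (class and payload hypothetical):

```python
class Client:
    def __init__(self):
        self._tables = {}

    def table(self, name):
        if not isinstance(name, str):
            raise TypeError('Table name must be a string')
        if name not in self._tables:
            self._tables[name] = ("table", name)  # stand-in for Table(self, name)
        return self._tables[name]

c = Client()
assert c.table("ts") is c.table("ts")   # same object on every call
```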
itamarst/eliot | eliot/_action.py | https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/_action.py#L390-L400 | def context(self):
"""
Create a context manager that ensures code runs within action's context.
The action does NOT finish when the context is exited.
"""
parent = _ACTION_CONTEXT.set(self)
try:
yield self
finally:
_ACTION_CONTEXT.reset(parent) | [
"def",
"context",
"(",
"self",
")",
":",
"parent",
"=",
"_ACTION_CONTEXT",
".",
"set",
"(",
"self",
")",
"try",
":",
"yield",
"self",
"finally",
":",
"_ACTION_CONTEXT",
".",
"reset",
"(",
"parent",
")"
] | Create a context manager that ensures code runs within action's context.
The action does NOT finish when the context is exited. | [
"Create",
"a",
"context",
"manager",
"that",
"ensures",
"code",
"runs",
"within",
"action",
"s",
"context",
"."
] | python | train |
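The set/reset pairing in `context` has the same shape the standard library's contextvars module supports directly; an analogous self-contained sketch (not eliot's actual internals; names hypothetical):

```python
import contextvars
from contextlib import contextmanager

_ACTION = contextvars.ContextVar("action", default=None)

@contextmanager
def action_context(action):
    token = _ACTION.set(action)   # remember the previous value
    try:
        yield action
    finally:
        _ACTION.reset(token)      # restore it even if the body raises

with action_context("outer"):
    with action_context("inner"):
        assert _ACTION.get() == "inner"
    assert _ACTION.get() == "outer"   # the action does NOT finish on exit
```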
wummel/linkchecker | linkcheck/checker/urlbase.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L618-L631 | def get_content (self):
"""Precondition: url_connection is an opened URL."""
if self.data is None:
log.debug(LOG_CHECK, "Get content of %r", self.url)
t = time.time()
self.data = self.read_content()
self.size = len(self.data)
self.dltime = time.time() - t
if self.size == 0:
self.add_warning(_("Content size is zero."),
tag=WARN_URL_CONTENT_SIZE_ZERO)
else:
self.aggregate.add_downloaded_bytes(self.size)
return self.data | [
"def",
"get_content",
"(",
"self",
")",
":",
"if",
"self",
".",
"data",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"LOG_CHECK",
",",
"\"Get content of %r\"",
",",
"self",
".",
"url",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"data",
"=",
"self",
".",
"read_content",
"(",
")",
"self",
".",
"size",
"=",
"len",
"(",
"self",
".",
"data",
")",
"self",
".",
"dltime",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t",
"if",
"self",
".",
"size",
"==",
"0",
":",
"self",
".",
"add_warning",
"(",
"_",
"(",
"\"Content size is zero.\"",
")",
",",
"tag",
"=",
"WARN_URL_CONTENT_SIZE_ZERO",
")",
"else",
":",
"self",
".",
"aggregate",
".",
"add_downloaded_bytes",
"(",
"self",
".",
"size",
")",
"return",
"self",
".",
"data"
] | Precondition: url_connection is an opened URL. | [
"Precondition",
":",
"url_connection",
"is",
"an",
"opened",
"URL",
"."
] | python | train |
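A reduced sketch of the download-once caching shape of `get_content` (names hypothetical; the warning and byte-accounting hooks are stubbed out):

```python
import time

class Resource:
    def __init__(self, reader):
        self._reader = reader
        self.data = None
        self.size = 0
        self.dltime = 0.0

    def get_content(self):
        if self.data is None:              # first call downloads...
            start = time.time()
            self.data = self._reader()
            self.size = len(self.data)
            self.dltime = time.time() - start
        return self.data                   # ...later calls reuse the cache

r = Resource(lambda: b"<html></html>")
assert r.get_content() is r.get_content()  # read exactly once
```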
miyakogi/wdom | wdom/element.py | https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L766-L774 | def draggable(self, value: Union[bool, str]) -> None:
"""Set ``draggable`` property.
``value`` is boolean or string.
"""
if value is False:
self.removeAttribute('draggable')
else:
self.setAttribute('draggable', value) | [
"def",
"draggable",
"(",
"self",
",",
"value",
":",
"Union",
"[",
"bool",
",",
"str",
"]",
")",
"->",
"None",
":",
"if",
"value",
"is",
"False",
":",
"self",
".",
"removeAttribute",
"(",
"'draggable'",
")",
"else",
":",
"self",
".",
"setAttribute",
"(",
"'draggable'",
",",
"value",
")"
] | Set ``draggable`` property.
``value`` is boolean or string. | [
"Set",
"draggable",
"property",
"."
] | python | train |
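The setter implements the usual boolean-attribute convention: `False` removes the attribute entirely, while `True` or a string is written through. A minimal sketch of the convention outside wdom:

```python
class Element:
    def __init__(self):
        self.attributes = {}

    def setAttribute(self, name, value):
        self.attributes[name] = value

    def removeAttribute(self, name):
        self.attributes.pop(name, None)

    @property
    def draggable(self):
        return self.attributes.get('draggable', False)

    @draggable.setter
    def draggable(self, value):
        if value is False:
            self.removeAttribute('draggable')  # absent attribute == not draggable
        else:
            self.setAttribute('draggable', value)

e = Element()
e.draggable = True
e.draggable = False
assert 'draggable' not in e.attributes
```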
lavr/flask-emails | flask_emails/config.py | https://github.com/lavr/flask-emails/blob/a1a47108ce7d109fe6c32b6f967445e62f7e5ef6/flask_emails/config.py#L147-L156 | def message_options(self):
"""
Convert config namespace to emails.Message namespace
"""
o = {}
options = self.options
for key in self._default_message_options:
if key in options:
o[key] = options[key]
return o | [
"def",
"message_options",
"(",
"self",
")",
":",
"o",
"=",
"{",
"}",
"options",
"=",
"self",
".",
"options",
"for",
"key",
"in",
"self",
".",
"_default_message_options",
":",
"if",
"key",
"in",
"options",
":",
"o",
"[",
"key",
"]",
"=",
"options",
"[",
"key",
"]",
"return",
"o"
] | Convert config namespace to emails.Message namespace | [
"Convert",
"config",
"namespace",
"to",
"emails",
".",
"Message",
"namespace"
] | python | train |
merll/docker-fabric | dockerfabric/apiclient.py | https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/apiclient.py#L166-L171 | def copy_resource(self, container, resource, local_filename):
"""
Identical to :meth:`dockermap.client.base.DockerClientWrapper.copy_resource` with additional logging.
"""
self.push_log("Receiving tarball for resource '{0}:{1}' and storing as {2}".format(container, resource, local_filename))
super(DockerFabricClient, self).copy_resource(container, resource, local_filename) | [
"def",
"copy_resource",
"(",
"self",
",",
"container",
",",
"resource",
",",
"local_filename",
")",
":",
"self",
".",
"push_log",
"(",
"\"Receiving tarball for resource '{0}:{1}' and storing as {2}\"",
".",
"format",
"(",
"container",
",",
"resource",
",",
"local_filename",
")",
")",
"super",
"(",
"DockerFabricClient",
",",
"self",
")",
".",
"copy_resource",
"(",
"container",
",",
"resource",
",",
"local_filename",
")"
] | Identical to :meth:`dockermap.client.base.DockerClientWrapper.copy_resource` with additional logging. | [
"Identical",
"to",
":",
"meth",
":",
"dockermap",
".",
"client",
".",
"base",
".",
"DockerClientWrapper",
".",
"copy_resource",
"with",
"additional",
"logging",
"."
] | python | train |
jobovy/galpy | galpy/potential/IsochronePotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/IsochronePotential.py#L84-L103 | def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2013-09-08 - Written - Bovy (IAS)
"""
r2= R**2.+z**2.
rb= nu.sqrt(r2+self.b2)
dPhidrr= -1./rb/(self.b+rb)**2.
return dPhidrr*R | [
"def",
"_Rforce",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"r2",
"=",
"R",
"**",
"2.",
"+",
"z",
"**",
"2.",
"rb",
"=",
"nu",
".",
"sqrt",
"(",
"r2",
"+",
"self",
".",
"b2",
")",
"dPhidrr",
"=",
"-",
"1.",
"/",
"rb",
"/",
"(",
"self",
".",
"b",
"+",
"rb",
")",
"**",
"2.",
"return",
"dPhidrr",
"*",
"R"
] | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2013-09-08 - Written - Bovy (IAS) | [
"NAME",
":",
"_Rforce",
"PURPOSE",
":",
"evaluate",
"the",
"radial",
"force",
"for",
"this",
"potential",
"INPUT",
":",
"R",
"-",
"Galactocentric",
"cylindrical",
"radius",
"z",
"-",
"vertical",
"height",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"the",
"radial",
"force",
"HISTORY",
":",
"2013",
"-",
"09",
"-",
"08",
"-",
"Written",
"-",
"Bovy",
"(",
"IAS",
")"
] | python | train |
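The value returned is -dPhi/dR for the isochrone potential Phi(r) = -1/(b + sqrt(r^2 + b^2)) in natural units with the amplitude normalized to one (how galpy's internal `_Rforce` methods are written); a quick numpy finite-difference check of that claim:

```python
import numpy as np

b = 1.0

def Phi(R, z):
    return -1.0 / (b + np.sqrt(R**2 + z**2 + b**2))

def Rforce(R, z):                    # same expression as _Rforce above
    rb = np.sqrt(R**2 + z**2 + b**2)
    return -R / (rb * (b + rb)**2)

R, z, h = 1.3, 0.4, 1e-6
numeric = -(Phi(R + h, z) - Phi(R - h, z)) / (2 * h)   # -dPhi/dR, central diff
assert np.isclose(numeric, Rforce(R, z))
```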
openstack/quark | quark/quota_driver.py | https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/quota_driver.py#L29-L37 | def delete_tenant_quota(context, tenant_id):
"""Delete the quota entries for a given tenant_id.
After deletion, this tenant will use default quota values in conf.
"""
tenant_quotas = context.session.query(Quota)
tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
tenant_quotas.delete() | [
"def",
"delete_tenant_quota",
"(",
"context",
",",
"tenant_id",
")",
":",
"tenant_quotas",
"=",
"context",
".",
"session",
".",
"query",
"(",
"Quota",
")",
"tenant_quotas",
"=",
"tenant_quotas",
".",
"filter_by",
"(",
"tenant_id",
"=",
"tenant_id",
")",
"tenant_quotas",
".",
"delete",
"(",
")"
] | Delete the quota entries for a given tenant_id.
After deletion, this tenant will use default quota values in conf. | [
"Delete",
"the",
"quota",
"entries",
"for",
"a",
"given",
"tenant_id",
"."
] | python | valid |
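`filter_by(...).delete()` issues a single bulk DELETE rather than loading rows one by one; a self-contained sketch of the same pattern against an in-memory SQLite database (model and values hypothetical; written for SQLAlchemy 1.4+):

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Quota(Base):
    __tablename__ = 'quotas'
    id = Column(Integer, primary_key=True)
    tenant_id = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Quota(tenant_id='a'), Quota(tenant_id='a'),
                     Quota(tenant_id='b')])
    session.flush()
    session.query(Quota).filter_by(tenant_id='a').delete()  # one bulk DELETE
    session.commit()
    assert [q.tenant_id for q in session.query(Quota)] == ['b']
```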