repo stringlengths 7-54 | path stringlengths 4-192 | url stringlengths 87-284 | code stringlengths 78-104k | code_tokens list | docstring stringlengths 1-46.9k | docstring_tokens list | language stringclasses 1 value | partition stringclasses 3 values |
---|---|---|---|---|---|---|---|---|
google/grr | grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py#L380-L408 | def _ListDir(dirpath, pathtype):
"""Returns children of a given directory.
This function is intended to be used by the `PathComponent` subclasses to get
an initial list of potential children that then need to be filtered according to
the rules of a specific component.
Args:
dirpath: A path to the directory.
pathtype: The pathtype to use.
Raises:
ValueError: in case of unsupported path types.
"""
pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype)
childpaths = []
try:
file_obj = vfs.VFSOpen(pathspec)
for path in file_obj.ListNames():
# For Windows registry, ignore the empty string which corresponds to the
# default value in the current key. Otherwise, globbing a key will yield
# the key itself, because joining the name of the default value u"" with
# a key name yields the key name again.
if pathtype != rdf_paths.PathSpec.PathType.REGISTRY or path:
childpaths.append(path)
except IOError:
pass
return childpaths | [
"def",
"_ListDir",
"(",
"dirpath",
",",
"pathtype",
")",
":",
"pathspec",
"=",
"rdf_paths",
".",
"PathSpec",
"(",
"path",
"=",
"dirpath",
",",
"pathtype",
"=",
"pathtype",
")",
"childpaths",
"=",
"[",
"]",
"try",
":",
"file_obj",
"=",
"vfs",
".",
"VFSOpen",
"(",
"pathspec",
")",
"for",
"path",
"in",
"file_obj",
".",
"ListNames",
"(",
")",
":",
"# For Windows registry, ignore the empty string which corresponds to the",
"# default value in the current key. Otherwise, globbing a key will yield",
"# the key itself, because joining the name of the default value u\"\" with",
"# a key name yields the key name again.",
"if",
"pathtype",
"!=",
"rdf_paths",
".",
"PathSpec",
".",
"PathType",
".",
"REGISTRY",
"or",
"path",
":",
"childpaths",
".",
"append",
"(",
"path",
")",
"except",
"IOError",
":",
"pass",
"return",
"childpaths"
]
| Returns children of a given directory.
This function is intended to be used by the `PathComponent` subclasses to get
an initial list of potential children that then need to be filtered according to
the rules of a specific component.
Args:
dirpath: A path to the directory.
pathtype: The pathtype to use.
Raises:
ValueError: in case of unsupported path types. | [
"Returns",
"children",
"of",
"a",
"given",
"directory",
"."
]
| python | train |
gabstopper/smc-python | smc/elements/other.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/other.py#L409-L474 | def prepare_blacklist(src, dst, duration=3600, src_port1=None,
src_port2=None, src_proto='predefined_tcp',
dst_port1=None, dst_port2=None,
dst_proto='predefined_tcp'):
"""
Create a blacklist entry.
A blacklist can be added directly from the engine node, or from
the system context. If submitting from the system context, it becomes
a global blacklist. This will return the properly formatted json
to submit.
:param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any'
:param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any'
:param int duration: length of time to blacklist
Both the system and engine context blacklist allow kw to be passed
to provide additional functionality such as adding source and destination
ports or port ranges and specifying the protocol. The following parameters
define the ``kw`` that can be passed.
The following example shows creating an engine context blacklist
using additional kw::
engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600,
src_port1=1000, src_port2=1500, src_proto='predefined_udp',
dst_port1=3, dst_port2=3000, dst_proto='predefined_udp')
:param int src_port1: start source port to limit blacklist
:param int src_port2: end source port to limit blacklist
:param str src_proto: source protocol. Either 'predefined_tcp'
or 'predefined_udp'. (default: 'predefined_tcp')
:param int dst_port1: start dst port to limit blacklist
:param int dst_port2: end dst port to limit blacklist
:param str dst_proto: dst protocol. Either 'predefined_tcp'
or 'predefined_udp'. (default: 'predefined_tcp')
.. note:: if blocking a range of ports, use both src_port1 and
src_port2, otherwise providing only src_port1 is adequate. The
same applies to dst_port1 / dst_port2. In addition, if you provide
src_portX but not dst_portX (or vice versa), the undefined port
side definition will default to all ports.
"""
json = {}
directions = {src: 'end_point1', dst: 'end_point2'}
for direction, key in directions.items():
json[key] = {'address_mode': 'any'} if \
'any' in direction.lower() else {'address_mode': 'address', 'ip_network': direction}
if src_port1:
json.setdefault('end_point1').update(
port1=src_port1,
port2=src_port2 or src_port1,
port_mode=src_proto)
if dst_port1:
json.setdefault('end_point2').update(
port1=dst_port1,
port2=dst_port2 or dst_port1,
port_mode=dst_proto)
json.update(duration=duration)
return json | [
"def",
"prepare_blacklist",
"(",
"src",
",",
"dst",
",",
"duration",
"=",
"3600",
",",
"src_port1",
"=",
"None",
",",
"src_port2",
"=",
"None",
",",
"src_proto",
"=",
"'predefined_tcp'",
",",
"dst_port1",
"=",
"None",
",",
"dst_port2",
"=",
"None",
",",
"dst_proto",
"=",
"'predefined_tcp'",
")",
":",
"json",
"=",
"{",
"}",
"directions",
"=",
"{",
"src",
":",
"'end_point1'",
",",
"dst",
":",
"'end_point2'",
"}",
"for",
"direction",
",",
"key",
"in",
"directions",
".",
"items",
"(",
")",
":",
"json",
"[",
"key",
"]",
"=",
"{",
"'address_mode'",
":",
"'any'",
"}",
"if",
"'any'",
"in",
"direction",
".",
"lower",
"(",
")",
"else",
"{",
"'address_mode'",
":",
"'address'",
",",
"'ip_network'",
":",
"direction",
"}",
"if",
"src_port1",
":",
"json",
".",
"setdefault",
"(",
"'end_point1'",
")",
".",
"update",
"(",
"port1",
"=",
"src_port1",
",",
"port2",
"=",
"src_port2",
"or",
"src_port1",
",",
"port_mode",
"=",
"src_proto",
")",
"if",
"dst_port1",
":",
"json",
".",
"setdefault",
"(",
"'end_point2'",
")",
".",
"update",
"(",
"port1",
"=",
"dst_port1",
",",
"port2",
"=",
"dst_port2",
"or",
"dst_port1",
",",
"port_mode",
"=",
"dst_proto",
")",
"json",
".",
"update",
"(",
"duration",
"=",
"duration",
")",
"return",
"json"
]
| Create a blacklist entry.
A blacklist can be added directly from the engine node, or from
the system context. If submitting from the system context, it becomes
a global blacklist. This will return the properly formatted json
to submit.
:param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any'
:param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any'
:param int duration: length of time to blacklist
Both the system and engine context blacklist allow kw to be passed
to provide additional functionality such as adding source and destination
ports or port ranges and specifying the protocol. The following parameters
define the ``kw`` that can be passed.
The following example shows creating an engine context blacklist
using additional kw::
engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600,
src_port1=1000, src_port2=1500, src_proto='predefined_udp',
dst_port1=3, dst_port2=3000, dst_proto='predefined_udp')
:param int src_port1: start source port to limit blacklist
:param int src_port2: end source port to limit blacklist
:param str src_proto: source protocol. Either 'predefined_tcp'
or 'predefined_udp'. (default: 'predefined_tcp')
:param int dst_port1: start dst port to limit blacklist
:param int dst_port2: end dst port to limit blacklist
:param str dst_proto: dst protocol. Either 'predefined_tcp'
or 'predefined_udp'. (default: 'predefined_tcp')
.. note:: if blocking a range of ports, use both src_port1 and
src_port2, otherwise providing only src_port1 is adequate. The
same applies to dst_port1 / dst_port2. In addition, if you provide
src_portX but not dst_portX (or vice versa), the undefined port
side definition will default to all ports. | [
"Create",
"a",
"blacklist",
"entry",
".",
"A",
"blacklist",
"can",
"be",
"added",
"directly",
"from",
"the",
"engine",
"node",
"or",
"from",
"the",
"system",
"context",
".",
"If",
"submitting",
"from",
"the",
"system",
"context",
"it",
"becomes",
"a",
"global",
"blacklist",
".",
"This",
"will",
"return",
"the",
"properly",
"formatted",
"json",
"to",
"submit",
".",
":",
"param",
"src",
":",
"source",
"address",
"with",
"cidr",
"i",
".",
"e",
".",
"10",
".",
"10",
".",
"10",
".",
"10",
"/",
"32",
"or",
"any",
":",
"param",
"dst",
":",
"destination",
"address",
"with",
"cidr",
"i",
".",
"e",
".",
"1",
".",
"1",
".",
"1",
".",
"1",
"/",
"32",
"or",
"any",
":",
"param",
"int",
"duration",
":",
"length",
"of",
"time",
"to",
"blacklist",
"Both",
"the",
"system",
"and",
"engine",
"context",
"blacklist",
"allow",
"kw",
"to",
"be",
"passed",
"to",
"provide",
"additional",
"functionality",
"such",
"as",
"adding",
"source",
"and",
"destination",
"ports",
"or",
"port",
"ranges",
"and",
"specifying",
"the",
"protocol",
".",
"The",
"following",
"parameters",
"define",
"the",
"kw",
"that",
"can",
"be",
"passed",
".",
"The",
"following",
"example",
"shows",
"creating",
"an",
"engine",
"context",
"blacklist",
"using",
"additional",
"kw",
"::",
"engine",
".",
"blacklist",
"(",
"1",
".",
"1",
".",
"1",
".",
"1",
"/",
"32",
"2",
".",
"2",
".",
"2",
".",
"2",
"/",
"32",
"duration",
"=",
"3600",
"src_port1",
"=",
"1000",
"src_port2",
"=",
"1500",
"src_proto",
"=",
"predefined_udp",
"dst_port1",
"=",
"3",
"dst_port2",
"=",
"3000",
"dst_proto",
"=",
"predefined_udp",
")",
":",
"param",
"int",
"src_port1",
":",
"start",
"source",
"port",
"to",
"limit",
"blacklist",
":",
"param",
"int",
"src_port2",
":",
"end",
"source",
"port",
"to",
"limit",
"blacklist",
":",
"param",
"str",
"src_proto",
":",
"source",
"protocol",
".",
"Either",
"predefined_tcp",
"or",
"predefined_udp",
".",
"(",
"default",
":",
"predefined_tcp",
")",
":",
"param",
"int",
"dst_port1",
":",
"start",
"dst",
"port",
"to",
"limit",
"blacklist",
":",
"param",
"int",
"dst_port2",
":",
"end",
"dst",
"port",
"to",
"limit",
"blacklist",
":",
"param",
"str",
"dst_proto",
":",
"dst",
"protocol",
".",
"Either",
"predefined_tcp",
"or",
"predefined_udp",
".",
"(",
"default",
":",
"predefined_tcp",
")",
"..",
"note",
"::",
"if",
"blocking",
"a",
"range",
"of",
"ports",
"use",
"both",
"src_port1",
"and",
"src_port2",
"otherwise",
"providing",
"only",
"src_port1",
"is",
"adequate",
".",
"The",
"same",
"applies",
"to",
"dst_port1",
"/",
"dst_port2",
".",
"In",
"addition",
"if",
"you",
"provide",
"src_portX",
"but",
"not",
"dst_portX",
"(",
"or",
"vice",
"versa",
")",
"the",
"undefined",
"port",
"side",
"definition",
"will",
"default",
"to",
"all",
"ports",
"."
]
| python | train |
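One way to sanity-check the docstring above is to trace a single call by hand. The driver below is a hypothetical sketch that assumes only `prepare_blacklist` as defined in this row; the expected dictionary in the comments is read off the function body, not taken from smc-python documentation:

```python
# Hypothetical driver for prepare_blacklist as defined above.
entry = prepare_blacklist('1.1.1.1/32', 'any', duration=600, src_port1=80)

# Tracing the body above, `entry` should be:
# {
#     'end_point1': {'address_mode': 'address', 'ip_network': '1.1.1.1/32',
#                    'port1': 80, 'port2': 80, 'port_mode': 'predefined_tcp'},
#     'end_point2': {'address_mode': 'any'},
#     'duration': 600,
# }
assert entry['end_point2'] == {'address_mode': 'any'}
assert entry['end_point1']['port2'] == 80  # src_port2 falls back to src_port1
```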
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/page.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/page.py#L51-L71 | def page_dumb(strng, start=0, screen_lines=25):
"""Very dumb 'pager' in Python, for when nothing else works.
Only moves forward, same interface as page(), except for pager_cmd and
mode."""
out_ln = strng.splitlines()[start:]
screens = chop(out_ln,screen_lines-1)
if len(screens) == 1:
print >>io.stdout, os.linesep.join(screens[0])
else:
last_escape = ""
for scr in screens[0:-1]:
hunk = os.linesep.join(scr)
print >>io.stdout, last_escape + hunk
if not page_more():
return
esc_list = esc_re.findall(hunk)
if len(esc_list) > 0:
last_escape = esc_list[-1]
print >>io.stdout, last_escape + os.linesep.join(screens[-1]) | [
"def",
"page_dumb",
"(",
"strng",
",",
"start",
"=",
"0",
",",
"screen_lines",
"=",
"25",
")",
":",
"out_ln",
"=",
"strng",
".",
"splitlines",
"(",
")",
"[",
"start",
":",
"]",
"screens",
"=",
"chop",
"(",
"out_ln",
",",
"screen_lines",
"-",
"1",
")",
"if",
"len",
"(",
"screens",
")",
"==",
"1",
":",
"print",
">>",
"io",
".",
"stdout",
",",
"os",
".",
"linesep",
".",
"join",
"(",
"screens",
"[",
"0",
"]",
")",
"else",
":",
"last_escape",
"=",
"\"\"",
"for",
"scr",
"in",
"screens",
"[",
"0",
":",
"-",
"1",
"]",
":",
"hunk",
"=",
"os",
".",
"linesep",
".",
"join",
"(",
"scr",
")",
"print",
">>",
"io",
".",
"stdout",
",",
"last_escape",
"+",
"hunk",
"if",
"not",
"page_more",
"(",
")",
":",
"return",
"esc_list",
"=",
"esc_re",
".",
"findall",
"(",
"hunk",
")",
"if",
"len",
"(",
"esc_list",
")",
">",
"0",
":",
"last_escape",
"=",
"esc_list",
"[",
"-",
"1",
"]",
"print",
">>",
"io",
".",
"stdout",
",",
"last_escape",
"+",
"os",
".",
"linesep",
".",
"join",
"(",
"screens",
"[",
"-",
"1",
"]",
")"
]
| Very dumb 'pager' in Python, for when nothing else works.
Only moves forward, same interface as page(), except for pager_cmd and
mode. | [
"Very",
"dumb",
"pager",
"in",
"Python",
"for",
"when",
"nothing",
"else",
"works",
"."
]
| python | test |
pre-commit/pre-commit | pre_commit/make_archives.py | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/make_archives.py#L30-L53 | def make_archive(name, repo, ref, destdir):
"""Makes an archive of a repository in the given destdir.
:param text name: Name to give the archive. For instance foo. The file
that is created will be called foo.tar.gz.
:param text repo: Repository to clone.
:param text ref: Tag/SHA/branch to check out.
:param text destdir: Directory to place archives in.
"""
output_path = os.path.join(destdir, name + '.tar.gz')
with tmpdir() as tempdir:
# Clone the repository to the temporary directory
cmd_output('git', 'clone', repo, tempdir)
cmd_output('git', 'checkout', ref, cwd=tempdir)
# We don't want the '.git' directory
# It adds a bunch of size to the archive and we don't use it at
# runtime
rmtree(os.path.join(tempdir, '.git'))
with tarfile.open(output_path, 'w|gz') as tf:
tf.add(tempdir, name)
return output_path | [
"def",
"make_archive",
"(",
"name",
",",
"repo",
",",
"ref",
",",
"destdir",
")",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destdir",
",",
"name",
"+",
"'.tar.gz'",
")",
"with",
"tmpdir",
"(",
")",
"as",
"tempdir",
":",
"# Clone the repository to the temporary directory",
"cmd_output",
"(",
"'git'",
",",
"'clone'",
",",
"repo",
",",
"tempdir",
")",
"cmd_output",
"(",
"'git'",
",",
"'checkout'",
",",
"ref",
",",
"cwd",
"=",
"tempdir",
")",
"# We don't want the '.git' directory",
"# It adds a bunch of size to the archive and we don't use it at",
"# runtime",
"rmtree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'.git'",
")",
")",
"with",
"tarfile",
".",
"open",
"(",
"output_path",
",",
"'w|gz'",
")",
"as",
"tf",
":",
"tf",
".",
"add",
"(",
"tempdir",
",",
"name",
")",
"return",
"output_path"
]
| Makes an archive of a repository in the given destdir.
:param text name: Name to give the archive. For instance foo. The file
that is created will be called foo.tar.gz.
:param text repo: Repository to clone.
:param text ref: Tag/SHA/branch to check out.
:param text destdir: Directory to place archives in. | [
"Makes",
"an",
"archive",
"of",
"a",
"repository",
"in",
"the",
"given",
"destdir",
"."
]
| python | train |
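For orientation, a hypothetical invocation of the function above; the repository URL, tag, and destination directory are placeholders rather than values from pre-commit itself:

```python
# Hypothetical call; needs git on PATH and network access to clone.
path = make_archive(
    name='pre-commit-hooks',
    repo='https://github.com/pre-commit/pre-commit-hooks',
    ref='v2.1.0',  # placeholder tag
    destdir='/tmp/archives',
)
# Per the body above, path == '/tmp/archives/pre-commit-hooks.tar.gz', and the
# tarball holds the checkout under a top-level 'pre-commit-hooks/' directory
# with the .git directory removed.
```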
SiLab-Bonn/pyBAR | pybar/fei4_run_base.py | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4_run_base.py#L1445-L1457 | def interval_timer(interval, func, *args, **kwargs):
'''Interval timer function.
Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708
'''
stopped = Event()
def loop():
while not stopped.wait(interval): # the first call is after interval
func(*args, **kwargs)
Thread(name='IntervalTimerThread', target=loop).start()
return stopped.set | [
"def",
"interval_timer",
"(",
"interval",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"stopped",
"=",
"Event",
"(",
")",
"def",
"loop",
"(",
")",
":",
"while",
"not",
"stopped",
".",
"wait",
"(",
"interval",
")",
":",
"# the first call is after interval",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"Thread",
"(",
"name",
"=",
"'IntervalTimerThread'",
",",
"target",
"=",
"loop",
")",
".",
"start",
"(",
")",
"return",
"stopped",
".",
"set"
]
| Interval timer function.
Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708 | [
"Interval",
"timer",
"function",
"."
]
| python | train |
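The detail worth noticing above is the return value: the function returns the Event's bound `set` method, so calling the returned object is how a caller cancels the timer. A minimal sketch, assuming `interval_timer` as defined above is in scope:

```python
import time

# Schedule a callback every second; `cancel` is the Event's set() method.
cancel = interval_timer(1.0, print, 'tick')
time.sleep(3.5)   # roughly three 'tick' lines appear (first fires after 1 s)
cancel()          # Event is set, wait() returns True, the loop thread exits
```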
androguard/androguard | androguard/core/bytecodes/apk.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L1127-L1159 | def get_intent_filters(self, itemtype, name):
"""
Find intent filters for a given item and name.
Intent filters are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items
"""
d = {"action": [], "category": []}
for i in self.xml:
# TODO: this can probably be solved using a single xpath
for item in self.xml[i].findall(".//" + itemtype):
if self._format_value(item.get(self._ns("name"))) == name:
for sitem in item.findall(".//intent-filter"):
for ssitem in sitem.findall("action"):
if ssitem.get(self._ns("name")) not in d["action"]:
d["action"].append(ssitem.get(self._ns("name")))
for ssitem in sitem.findall("category"):
if ssitem.get(self._ns("name")) not in d["category"]:
d["category"].append(ssitem.get(self._ns("name")))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d | [
"def",
"get_intent_filters",
"(",
"self",
",",
"itemtype",
",",
"name",
")",
":",
"d",
"=",
"{",
"\"action\"",
":",
"[",
"]",
",",
"\"category\"",
":",
"[",
"]",
"}",
"for",
"i",
"in",
"self",
".",
"xml",
":",
"# TODO: this can probably be solved using a single xpath",
"for",
"item",
"in",
"self",
".",
"xml",
"[",
"i",
"]",
".",
"findall",
"(",
"\".//\"",
"+",
"itemtype",
")",
":",
"if",
"self",
".",
"_format_value",
"(",
"item",
".",
"get",
"(",
"self",
".",
"_ns",
"(",
"\"name\"",
")",
")",
")",
"==",
"name",
":",
"for",
"sitem",
"in",
"item",
".",
"findall",
"(",
"\".//intent-filter\"",
")",
":",
"for",
"ssitem",
"in",
"sitem",
".",
"findall",
"(",
"\"action\"",
")",
":",
"if",
"ssitem",
".",
"get",
"(",
"self",
".",
"_ns",
"(",
"\"name\"",
")",
")",
"not",
"in",
"d",
"[",
"\"action\"",
"]",
":",
"d",
"[",
"\"action\"",
"]",
".",
"append",
"(",
"ssitem",
".",
"get",
"(",
"self",
".",
"_ns",
"(",
"\"name\"",
")",
")",
")",
"for",
"ssitem",
"in",
"sitem",
".",
"findall",
"(",
"\"category\"",
")",
":",
"if",
"ssitem",
".",
"get",
"(",
"self",
".",
"_ns",
"(",
"\"name\"",
")",
")",
"not",
"in",
"d",
"[",
"\"category\"",
"]",
":",
"d",
"[",
"\"category\"",
"]",
".",
"append",
"(",
"ssitem",
".",
"get",
"(",
"self",
".",
"_ns",
"(",
"\"name\"",
")",
")",
")",
"if",
"not",
"d",
"[",
"\"action\"",
"]",
":",
"del",
"d",
"[",
"\"action\"",
"]",
"if",
"not",
"d",
"[",
"\"category\"",
"]",
":",
"del",
"d",
"[",
"\"category\"",
"]",
"return",
"d"
]
| Find intent filters for a given item and name.
Intent filters are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items | [
"Find",
"intent",
"filters",
"for",
"a",
"given",
"item",
"and",
"name",
"."
]
| python | train |
jmbhughes/suvi-trainer | suvitrainer/fileio.py | https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/fileio.py#L609-L638 | def make_three_color(self, upper_percentile=100, lower_percentile=0):
"""
Load the configured input channel images and create a three color image
:param upper_percentile: pixels above this percentile are suppressed
:param lower_percentile: pixels below this percentile are suppressed
:return: a numpy array (m,n,3) representing a three-color image
"""
order = {'red': 0, 'green': 1, 'blue': 2}
shape = self.thmap.shape
three_color = np.zeros((shape[0], shape[1], 3))
channel_colors = {color: self.config.default[color] for color in ['red', 'green', 'blue']}
data = Fetcher(self.date, products=list(channel_colors.values()), verbose=False).fetch()
for color, channel in channel_colors.items():
three_color[:, :, order[color]] = data[channel][1]
# scale the image by the power
three_color[:, :, order[color]] = np.power(three_color[:, :, order[color]],
self.config.default["{}_power".format(color)])
# adjust the percentile thresholds
lower = np.nanpercentile(three_color[:, :, order[color]], lower_percentile)
upper = np.nanpercentile(three_color[:, :, order[color]], upper_percentile)
three_color[np.where(three_color[:, :, order[color]] < lower)] = lower
three_color[np.where(three_color[:, :, order[color]] > upper)] = upper
# image values must be between (0,1) so scale image
for color, index in order.items():
three_color[:, :, index] /= np.nanmax(three_color[:, :, index])
return three_color | [
"def",
"make_three_color",
"(",
"self",
",",
"upper_percentile",
"=",
"100",
",",
"lower_percentile",
"=",
"0",
")",
":",
"order",
"=",
"{",
"'red'",
":",
"0",
",",
"'green'",
":",
"1",
",",
"'blue'",
":",
"2",
"}",
"shape",
"=",
"self",
".",
"thmap",
".",
"shape",
"three_color",
"=",
"np",
".",
"zeros",
"(",
"(",
"shape",
"[",
"0",
"]",
",",
"shape",
"[",
"1",
"]",
",",
"3",
")",
")",
"channel_colors",
"=",
"{",
"color",
":",
"self",
".",
"config",
".",
"default",
"[",
"color",
"]",
"for",
"color",
"in",
"[",
"'red'",
",",
"'green'",
",",
"'blue'",
"]",
"}",
"data",
"=",
"Fetcher",
"(",
"self",
".",
"date",
",",
"products",
"=",
"list",
"(",
"channel_colors",
".",
"values",
"(",
")",
")",
",",
"verbose",
"=",
"False",
")",
".",
"fetch",
"(",
")",
"for",
"color",
",",
"channel",
"in",
"channel_colors",
".",
"items",
"(",
")",
":",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
"=",
"data",
"[",
"channel",
"]",
"[",
"1",
"]",
"# scale the image by the power",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
"=",
"np",
".",
"power",
"(",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
",",
"self",
".",
"config",
".",
"default",
"[",
"\"{}_power\"",
".",
"format",
"(",
"color",
")",
"]",
")",
"# adjust the percentile thresholds",
"lower",
"=",
"np",
".",
"nanpercentile",
"(",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
",",
"lower_percentile",
")",
"upper",
"=",
"np",
".",
"nanpercentile",
"(",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
",",
"upper_percentile",
")",
"three_color",
"[",
"np",
".",
"where",
"(",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
"<",
"lower",
")",
"]",
"=",
"lower",
"three_color",
"[",
"np",
".",
"where",
"(",
"three_color",
"[",
":",
",",
":",
",",
"order",
"[",
"color",
"]",
"]",
">",
"upper",
")",
"]",
"=",
"upper",
"# image values must be between (0,1) so scale image",
"for",
"color",
",",
"index",
"in",
"order",
".",
"items",
"(",
")",
":",
"three_color",
"[",
":",
",",
":",
",",
"index",
"]",
"/=",
"np",
".",
"nanmax",
"(",
"three_color",
"[",
":",
",",
":",
",",
"index",
"]",
")",
"return",
"three_color"
]
| Load the configured input channel images and create a three color image
:param upper_percentile: pixels above this percentile are suppressed
:param lower_percentile: pixels below this percentile are suppressed
:return: a numpy array (m,n,3) representing a three-color image | [
"Load",
"the",
"configured",
"input",
"channel",
"images",
"and",
"create",
"a",
"three",
"color",
"image",
":",
"param",
"upper_percentile",
":",
"pixels",
"above",
"this",
"percentile",
"are",
"suppressed",
":",
"param",
"lower_percentile",
":",
"pixels",
"below",
"this",
"percentile",
"are",
"suppressed",
":",
"return",
":",
"a",
"numpy",
"array",
"(",
"m",
"n",
"3",
")",
"representing",
"a",
"three",
"-",
"color",
"image"
]
| python | train |
pypa/pipenv | pipenv/vendor/pipdeptree.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pipdeptree.py#L75-L87 | def find_tree_root(tree, key):
"""Find a root in a tree by its key
:param dict tree: the pkg dependency tree obtained by calling
`construct_tree` function
:param str key: key of the root node to find
:returns: a root node if found else None
:rtype: mixed
"""
result = [p for p in tree.keys() if p.key == key]
assert len(result) in [0, 1]
return None if len(result) == 0 else result[0] | [
"def",
"find_tree_root",
"(",
"tree",
",",
"key",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"tree",
".",
"keys",
"(",
")",
"if",
"p",
".",
"key",
"==",
"key",
"]",
"assert",
"len",
"(",
"result",
")",
"in",
"[",
"0",
",",
"1",
"]",
"return",
"None",
"if",
"len",
"(",
"result",
")",
"==",
"0",
"else",
"result",
"[",
"0",
"]"
]
| Find a root in a tree by its key
:param dict tree: the pkg dependency tree obtained by calling
`construct_tree` function
:param str key: key of the root node to find
:returns: a root node if found else None
:rtype: mixed | [
"Find",
"a",
"root",
"in",
"a",
"tree",
"by",
"its",
"key"
]
| python | train |
idmillington/layout | layout/datatypes/output.py | https://github.com/idmillington/layout/blob/c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5/layout/datatypes/output.py#L79-L83 | def draw_image(
self, img_filename:str, x:float, y:float, w:float, h:float
) -> None:
"""Draws the given image."""
pass | [
"def",
"draw_image",
"(",
"self",
",",
"img_filename",
":",
"str",
",",
"x",
":",
"float",
",",
"y",
":",
"float",
",",
"w",
":",
"float",
",",
"h",
":",
"float",
")",
"->",
"None",
":",
"pass"
]
| Draws the given image. | [
"Draws",
"the",
"given",
"image",
"."
]
| python | train |
acutesoftware/AIKIF | aikif/cls_log.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L148-L153 | def record_command(self, cmd, prg=''):
"""
record the command passed - this is usually the name of the program
being run or task being run
"""
self._log(self.logFileCommand , force_to_string(cmd), prg) | [
"def",
"record_command",
"(",
"self",
",",
"cmd",
",",
"prg",
"=",
"''",
")",
":",
"self",
".",
"_log",
"(",
"self",
".",
"logFileCommand",
",",
"force_to_string",
"(",
"cmd",
")",
",",
"prg",
")"
]
| record the command passed - this is usually the name of the program
being run or task being run | [
"record",
"the",
"command",
"passed",
"-",
"this",
"is",
"usually",
"the",
"name",
"of",
"the",
"program",
"being",
"run",
"or",
"task",
"being",
"run"
]
| python | train |
arcticfoxnv/slackminion | slackminion/plugin/base.py | https://github.com/arcticfoxnv/slackminion/blob/62ea77aba5ac5ba582793e578a379a76f7d26cdb/slackminion/plugin/base.py#L65-L76 | def start_timer(self, duration, func, *args):
"""
Schedules a function to be called after some period of time.
* duration - time in seconds to wait before firing
* func - function to be called
* args - arguments to pass to the function
"""
t = threading.Timer(duration, self._timer_callback, (func, args))
self._timer_callbacks[func] = t
t.start()
self.log.info("Scheduled call to %s in %ds", func.__name__, duration) | [
"def",
"start_timer",
"(",
"self",
",",
"duration",
",",
"func",
",",
"*",
"args",
")",
":",
"t",
"=",
"threading",
".",
"Timer",
"(",
"duration",
",",
"self",
".",
"_timer_callback",
",",
"(",
"func",
",",
"args",
")",
")",
"self",
".",
"_timer_callbacks",
"[",
"func",
"]",
"=",
"t",
"t",
".",
"start",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Scheduled call to %s in %ds\"",
",",
"func",
".",
"__name__",
",",
"duration",
")"
]
| Schedules a function to be called after some period of time.
* duration - time in seconds to wait before firing
* func - function to be called
* args - arguments to pass to the function | [
"Schedules",
"a",
"function",
"to",
"be",
"called",
"after",
"some",
"period",
"of",
"time",
"."
]
| python | valid |
ambitioninc/rabbitmq-admin | rabbitmq_admin/api.py | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L468-L474 | def list_policies_for_vhost(self, vhost):
"""
A list of all policies for a vhost.
"""
return self._api_get('/api/policies/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | [
"def",
"list_policies_for_vhost",
"(",
"self",
",",
"vhost",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/policies/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
")",
")"
]
| A list of all policies for a vhost. | [
"A",
"list",
"of",
"all",
"policies",
"for",
"a",
"vhost",
"."
]
| python | train |
idlesign/django-admirarchy | admirarchy/utils.py | https://github.com/idlesign/django-admirarchy/blob/723e4fd212fdebcc156492cb16b9d65356f5ca73/admirarchy/utils.py#L169-L180 | def init_hierarchy(cls, model_admin):
"""Initializes model admin with hierarchy data."""
hierarchy = getattr(model_admin, 'hierarchy')
if hierarchy:
if not isinstance(hierarchy, Hierarchy):
hierarchy = AdjacencyList() # For `True` and etc. TODO heuristics maybe.
else:
hierarchy = NoHierarchy()
model_admin.hierarchy = hierarchy | [
"def",
"init_hierarchy",
"(",
"cls",
",",
"model_admin",
")",
":",
"hierarchy",
"=",
"getattr",
"(",
"model_admin",
",",
"'hierarchy'",
")",
"if",
"hierarchy",
":",
"if",
"not",
"isinstance",
"(",
"hierarchy",
",",
"Hierarchy",
")",
":",
"hierarchy",
"=",
"AdjacencyList",
"(",
")",
"# For `True` and etc. TODO heuristics maybe.",
"else",
":",
"hierarchy",
"=",
"NoHierarchy",
"(",
")",
"model_admin",
".",
"hierarchy",
"=",
"hierarchy"
]
| Initializes model admin with hierarchy data. | [
"Initializes",
"model",
"admin",
"with",
"hierarchy",
"data",
"."
]
| python | train |
UDST/orca | orca/orca.py | https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L1649-L1658 | def _all_reachable_tables(t):
"""
A generator that provides all the names of tables that can be
reached via merges starting at the given target table.
"""
for k, v in t.items():
for tname in _all_reachable_tables(v):
yield tname
yield k | [
"def",
"_all_reachable_tables",
"(",
"t",
")",
":",
"for",
"k",
",",
"v",
"in",
"t",
".",
"items",
"(",
")",
":",
"for",
"tname",
"in",
"_all_reachable_tables",
"(",
"v",
")",
":",
"yield",
"tname",
"yield",
"k"
]
| A generator that provides all the names of tables that can be
reached via merges starting at the given target table. | [
"A",
"generator",
"that",
"provides",
"all",
"the",
"names",
"of",
"tables",
"that",
"can",
"be",
"reached",
"via",
"merges",
"starting",
"at",
"the",
"given",
"target",
"table",
"."
]
| python | train |
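The recursion above yields leaves before their ancestors. A tiny hypothetical merge tree makes the traversal order concrete (dict insertion order is assumed, i.e. Python 3.7+):

```python
# Hypothetical nested merge tree: parcels <- buildings <- {households, jobs}
tree = {'parcels': {'buildings': {'households': {}, 'jobs': {}}}}
print(list(_all_reachable_tables(tree)))
# -> ['households', 'jobs', 'buildings', 'parcels']
```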
MichaelAquilina/S4 | s4/clients/__init__.py | https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/clients/__init__.py#L191-L201 | def get_action(self, key):
"""
returns the action to perform on this key based on its
state before the last sync.
"""
index_local_timestamp = self.get_index_local_timestamp(key)
real_local_timestamp = self.get_real_local_timestamp(key)
remote_timestamp = self.get_remote_timestamp(key)
return get_sync_state(
index_local_timestamp, real_local_timestamp, remote_timestamp
) | [
"def",
"get_action",
"(",
"self",
",",
"key",
")",
":",
"index_local_timestamp",
"=",
"self",
".",
"get_index_local_timestamp",
"(",
"key",
")",
"real_local_timestamp",
"=",
"self",
".",
"get_real_local_timestamp",
"(",
"key",
")",
"remote_timestamp",
"=",
"self",
".",
"get_remote_timestamp",
"(",
"key",
")",
"return",
"get_sync_state",
"(",
"index_local_timestamp",
",",
"real_local_timestamp",
",",
"remote_timestamp",
")"
]
| returns the action to perform on this key based on its
state before the last sync. | [
"returns",
"the",
"action",
"to",
"perform",
"on",
"this",
"key",
"based",
"on",
"its",
"state",
"before",
"the",
"last",
"sync",
"."
]
| python | train |
uogbuji/amara3-xml | pylib/uxml/uxpath/ast.py | https://github.com/uogbuji/amara3-xml/blob/88c18876418cffc89bb85b4a3193e5002b6b39a6/pylib/uxml/uxpath/ast.py#L135-L157 | def to_boolean(obj):
'''
Cast an arbitrary sequence to a boolean type
'''
#if hasattr(obj, '__iter__'):
if isinstance(obj, LiteralWrapper):
val = obj.obj
elif isinstance(obj, Iterable) and not isinstance(obj, str):
val = next(obj, None)
else:
val = obj
if val is None:
yield False
elif isinstance(val, bool):
yield val
elif isinstance(val, str):
yield bool(val)
elif isinstance(val, node):
yield True
elif isinstance(val, float) or isinstance(val, int):
yield bool(val)
else:
raise RuntimeError('Unknown type for boolean conversion: {}'.format(val)) | [
"def",
"to_boolean",
"(",
"obj",
")",
":",
"#if hasattr(obj, '__iter__'):",
"if",
"isinstance",
"(",
"obj",
",",
"LiteralWrapper",
")",
":",
"val",
"=",
"obj",
".",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"val",
"=",
"next",
"(",
"obj",
",",
"None",
")",
"else",
":",
"val",
"=",
"obj",
"if",
"val",
"is",
"None",
":",
"yield",
"False",
"elif",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"yield",
"val",
"elif",
"isinstance",
"(",
"val",
",",
"str",
")",
":",
"yield",
"bool",
"(",
"val",
")",
"elif",
"isinstance",
"(",
"val",
",",
"node",
")",
":",
"yield",
"True",
"elif",
"isinstance",
"(",
"val",
",",
"float",
")",
"or",
"isinstance",
"(",
"val",
",",
"int",
")",
":",
"yield",
"bool",
"(",
"val",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Unknown type for boolean conversion: {}'",
".",
"format",
"(",
"val",
")",
")"
]
| Cast an arbitrary sequence to a boolean type | [
"Cast",
"an",
"arbitrary",
"sequence",
"to",
"a",
"boolean",
"type"
]
| python | test |
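Since the function above is a generator that yields a single boolean, callers pull the value out with `next()`. A few hedged spot checks, assuming the function is imported from its own module so that `LiteralWrapper` and `node` resolve there:

```python
assert next(to_boolean('xpath')) is True       # non-empty string
assert next(to_boolean(iter([]))) is False     # empty stream -> None -> False
assert next(to_boolean(iter([0.0]))) is False  # first item of the stream decides
assert next(to_boolean(3)) is True             # truthy number
```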
LaoLiulaoliu/pgwrapper | pgwrapper/pgwrapper.py | https://github.com/LaoLiulaoliu/pgwrapper/blob/063a164713b79bfadb56a01c4ae19911f508d01e/pgwrapper/pgwrapper.py#L173-L200 | def joint(self, table, fields,
join_table, join_fields,
condition_field, condition_join_field,
join_method='left_join'):
""".. :py:method::
Usage::
>>> joint('user', 'name, id_number', 'medical_card', 'number', 'id', 'user_id', 'inner_join')
select u.name, u.id_number, v.number from user as u inner join medical_card as v on u.id=v.user_id;
"""
import string
fields = map(string.strip, fields.split(','))
select = ', '.join( ['u.{}'.format(field) for field in fields] )
join_fields = map(string.strip, join_fields.split(','))
join_select = ', '.join( ['v.{}'.format(field) for field in join_fields] )
sql = "select {select}, {join_select} from {table} as u {join_method}"\
" {join_table} as v on u.{condition_field}="\
"v.{condition_join_field};".format(select=select,
join_select=join_select,
table=table,
join_method=join_method,
join_table=join_table,
condition_field=condition_field,
condition_join_field=condition_join_field)
return super(PGWrapper, self).execute(sql, result=True).results | [
"def",
"joint",
"(",
"self",
",",
"table",
",",
"fields",
",",
"join_table",
",",
"join_fields",
",",
"condition_field",
",",
"condition_join_field",
",",
"join_method",
"=",
"'left_join'",
")",
":",
"import",
"string",
"fields",
"=",
"map",
"(",
"string",
".",
"strip",
",",
"fields",
".",
"split",
"(",
"','",
")",
")",
"select",
"=",
"', '",
".",
"join",
"(",
"[",
"'u.{}'",
".",
"format",
"(",
"field",
")",
"for",
"field",
"in",
"fields",
"]",
")",
"join_fields",
"=",
"map",
"(",
"string",
".",
"strip",
",",
"join_fields",
".",
"split",
"(",
"','",
")",
")",
"join_select",
"=",
"', '",
".",
"join",
"(",
"[",
"'v.{}'",
".",
"format",
"(",
"field",
")",
"for",
"field",
"in",
"join_fields",
"]",
")",
"sql",
"=",
"\"select {select}, {join_select} from {table} as u {join_method}\"",
"\" {join_table} as v on u.{condition_field}=\"",
"\"v.{condition_join_field};\"",
".",
"format",
"(",
"select",
"=",
"select",
",",
"join_select",
"=",
"join_select",
",",
"table",
"=",
"table",
",",
"join_method",
"=",
"join_method",
",",
"join_table",
"=",
"join_table",
",",
"condition_field",
"=",
"condition_field",
",",
"condition_join_field",
"=",
"condition_join_field",
")",
"return",
"super",
"(",
"PGWrapper",
",",
"self",
")",
".",
"execute",
"(",
"sql",
",",
"result",
"=",
"True",
")",
".",
"results"
]
| .. :py:method::
Usage::
>>> joint('user', 'name, id_number', 'medical_card', 'number', 'id', 'user_id', 'inner_join')
select u.name, u.id_number, v.number from user as u inner join medical_card as v on u.id=v.user_id; | [
"..",
":",
"py",
":",
"method",
"::"
]
| python | train |
josuebrunel/yahoo-oauth | yahoo_oauth/utils.py | https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L29-L34 | def get_data(filename):
"""Calls right function according to file extension
"""
name, ext = get_file_extension(filename)
func = json_get_data if ext == '.json' else yaml_get_data
return func(filename) | [
"def",
"get_data",
"(",
"filename",
")",
":",
"name",
",",
"ext",
"=",
"get_file_extension",
"(",
"filename",
")",
"func",
"=",
"json_get_data",
"if",
"ext",
"==",
"'.json'",
"else",
"yaml_get_data",
"return",
"func",
"(",
"filename",
")"
]
| Calls right function according to file extension | [
"Calls",
"right",
"function",
"according",
"to",
"file",
"extension"
]
| python | valid |
cuihantao/andes | andes/models/wind.py | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/wind.py#L82-L101 | def windspeed(self, t):
"""Return the wind speed list at time `t`"""
ws = [0] * self.n
for i in range(self.n):
q = ceil(t / self.dt[i])
q_prev = 0 if q == 0 else q - 1
r = t % self.dt[i]
r = 0 if abs(r) < 1e-6 else r
if r == 0:
ws[i] = self.speed[i][q]
else:
t1 = self.time[i][q_prev]
s1 = self.speed[i][q_prev]
s2 = self.speed[i][q]
ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i]
return matrix(ws) | [
"def",
"windspeed",
"(",
"self",
",",
"t",
")",
":",
"ws",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"n",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n",
")",
":",
"q",
"=",
"ceil",
"(",
"t",
"/",
"self",
".",
"dt",
"[",
"i",
"]",
")",
"q_prev",
"=",
"0",
"if",
"q",
"==",
"0",
"else",
"q",
"-",
"1",
"r",
"=",
"t",
"%",
"self",
".",
"dt",
"[",
"i",
"]",
"r",
"=",
"0",
"if",
"abs",
"(",
"r",
")",
"<",
"1e-6",
"else",
"r",
"if",
"r",
"==",
"0",
":",
"ws",
"[",
"i",
"]",
"=",
"self",
".",
"speed",
"[",
"i",
"]",
"[",
"q",
"]",
"else",
":",
"t1",
"=",
"self",
".",
"time",
"[",
"i",
"]",
"[",
"q_prev",
"]",
"s1",
"=",
"self",
".",
"speed",
"[",
"i",
"]",
"[",
"q_prev",
"]",
"s2",
"=",
"self",
".",
"speed",
"[",
"i",
"]",
"[",
"q",
"]",
"ws",
"[",
"i",
"]",
"=",
"s1",
"+",
"(",
"t",
"-",
"t1",
")",
"*",
"(",
"s2",
"-",
"s1",
")",
"/",
"self",
".",
"dt",
"[",
"i",
"]",
"return",
"matrix",
"(",
"ws",
")"
]
| Return the wind speed list at time `t` | [
"Return",
"the",
"wind",
"speed",
"list",
"at",
"time",
"t"
]
| python | train |
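The `else` branch above is plain linear interpolation between the bracketing samples `s1` at `t1` and `s2` at `t1 + dt`. A standalone restatement of that arithmetic, with illustrative names rather than the ANDES API:

```python
def lerp_speed(t, t1, s1, s2, dt):
    # Same formula as the body above: s1 + (t - t1) * (s2 - s1) / dt
    return s1 + (t - t1) * (s2 - s1) / dt

# Halfway between a 10 m/s sample at t=2.0 and a 14 m/s sample at t=3.0:
assert lerp_speed(2.5, 2.0, 10.0, 14.0, 1.0) == 12.0
```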
dossier/dossier.models | dossier/models/web/routes.py | https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L345-L393 | def v1_highlights_get(response, kvlclient, file_id_str, max_elapsed = 300):
'''Obtain highlights for a document POSTed previously to this end
point. See documentation for v1_highlights_post for further
details. If the `state` is still `pending` for more than
`max_elapsed` after the start of the `WorkUnit`, then this reports
an error, although the `WorkUnit` may continue in the background.
'''
file_id = make_file_id(file_id_str)
kvlclient.setup_namespace(highlights_kvlayer_tables)
payload_strs = list(kvlclient.get('highlights', file_id))
if not (payload_strs and payload_strs[0][1]):
response.status = 500
payload = {
'state': ERROR,
'error': {
'code': 8,
'message': 'unknown error'}}
logger.critical('got bogus info for %r: %r', file_id, payload_strs)
else:
payload_str = payload_strs[0][1]
try:
payload = json.loads(payload_str)
if payload['state'] == HIGHLIGHTS_PENDING:
elapsed = time.time() - payload.get('start', 0)
if elapsed > max_elapsed:
response.status = 500
payload = {
'state': ERROR,
'error': {
'code': 8,
'message': 'hit timeout'}}
logger.critical('hit timeout on %r', file_id)
kvlclient.put('highlights', (file_id, json.dumps(payload)))
else:
payload['elapsed'] = elapsed
logger.info('returning stored payload for %r', file_id)
except Exception, exc:
logger.critical('failed to decode out of %r',
payload_str, exc_info=True)
response.status = 400
payload = {
'state': ERROR,
'error': {
'code': 9,
'message': 'nothing known about file_id=%r' % file_id}
}
# only place where payload is returned
return payload | [
"def",
"v1_highlights_get",
"(",
"response",
",",
"kvlclient",
",",
"file_id_str",
",",
"max_elapsed",
"=",
"300",
")",
":",
"file_id",
"=",
"make_file_id",
"(",
"file_id_str",
")",
"kvlclient",
".",
"setup_namespace",
"(",
"highlights_kvlayer_tables",
")",
"payload_strs",
"=",
"list",
"(",
"kvlclient",
".",
"get",
"(",
"'highlights'",
",",
"file_id",
")",
")",
"if",
"not",
"(",
"payload_strs",
"and",
"payload_strs",
"[",
"0",
"]",
"[",
"1",
"]",
")",
":",
"response",
".",
"status",
"=",
"500",
"payload",
"=",
"{",
"'state'",
":",
"ERROR",
",",
"'error'",
":",
"{",
"'code'",
":",
"8",
",",
"'message'",
":",
"'unknown error'",
"}",
"}",
"logger",
".",
"critical",
"(",
"'got bogus info for %r: %r'",
",",
"file_id",
",",
"payload_strs",
")",
"else",
":",
"payload_str",
"=",
"payload_strs",
"[",
"0",
"]",
"[",
"1",
"]",
"try",
":",
"payload",
"=",
"json",
".",
"loads",
"(",
"payload_str",
")",
"if",
"payload",
"[",
"'state'",
"]",
"==",
"HIGHLIGHTS_PENDING",
":",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"payload",
".",
"get",
"(",
"'start'",
",",
"0",
")",
"if",
"elapsed",
">",
"max_elapsed",
":",
"response",
".",
"status",
"=",
"500",
"payload",
"=",
"{",
"'state'",
":",
"ERROR",
",",
"'error'",
":",
"{",
"'code'",
":",
"8",
",",
"'message'",
":",
"'hit timeout'",
"}",
"}",
"logger",
".",
"critical",
"(",
"'hit timeout on %r'",
",",
"file_id",
")",
"kvlclient",
".",
"put",
"(",
"'highlights'",
",",
"(",
"file_id",
",",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
")",
"else",
":",
"payload",
"[",
"'elapsed'",
"]",
"=",
"elapsed",
"logger",
".",
"info",
"(",
"'returning stored payload for %r'",
",",
"file_id",
")",
"except",
"Exception",
",",
"exc",
":",
"logger",
".",
"critical",
"(",
"'failed to decode out of %r'",
",",
"payload_str",
",",
"exc_info",
"=",
"True",
")",
"response",
".",
"status",
"=",
"400",
"payload",
"=",
"{",
"'state'",
":",
"ERROR",
",",
"'error'",
":",
"{",
"'code'",
":",
"9",
",",
"'message'",
":",
"'nothing known about file_id=%r'",
"%",
"file_id",
"}",
"}",
"# only place where payload is returned",
"return",
"payload"
]
| Obtain highlights for a document POSTed previously to this end
point. See documentation for v1_highlights_post for further
details. If the `state` is still `pending` for more than
`max_elapsed` after the start of the `WorkUnit`, then this reports
an error, although the `WorkUnit` may continue in the background. | [
"Obtain",
"highlights",
"for",
"a",
"document",
"POSTed",
"previously",
"to",
"this",
"end",
"point",
".",
"See",
"documentation",
"for",
"v1_highlights_post",
"for",
"further",
"details",
".",
"If",
"the",
"state",
"is",
"still",
"pending",
"for",
"more",
"than",
"max_elapsed",
"after",
"the",
"start",
"of",
"the",
"WorkUnit",
"then",
"this",
"reports",
"an",
"error",
"although",
"the",
"WorkUnit",
"may",
"continue",
"in",
"the",
"background",
"."
]
| python | train |
limodou/uliweb | uliweb/utils/xltools.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/xltools.py#L945-L952 | def read(self):
"""
:param find: whether to use find mode. True means recursive lookup. Defaults to False
:param begin: starting row number. Defaults to None, meaning the position computed from the template is used
"""
for sheet in self.get_sheet():
for row in self.template.read_data(sheet, begin=self.begin):
yield row | [
"def",
"read",
"(",
"self",
")",
":",
"for",
"sheet",
"in",
"self",
".",
"get_sheet",
"(",
")",
":",
"for",
"row",
"in",
"self",
".",
"template",
".",
"read_data",
"(",
"sheet",
",",
"begin",
"=",
"self",
".",
"begin",
")",
":",
"yield",
"row"
]
| :param find: whether to use find mode. True means recursive lookup. Defaults to False
:param begin: starting row number. Defaults to None, meaning the position computed from the template is used | [
":",
"param",
"find",
":",
"whether",
"to",
"use",
"find",
"mode",
".",
"True",
"means",
"recursive",
"lookup",
".",
"Defaults",
"to",
"False",
":",
"param",
"begin",
":",
"starting",
"row",
"number",
".",
"Defaults",
"to",
"None",
"meaning",
"the",
"position",
"computed",
"from",
"the",
"template",
"is",
"used"
]
| python | train |
david-caro/python-autosemver | autosemver/api.py | https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/api.py#L223-L267 | def tag_versions(repo_path):
"""
Given a repo will add a tag for each major version.
Args:
repo_path(str): path to the git repository to tag.
"""
repo = dulwich.repo.Repo(repo_path)
tags = get_tags(repo)
maj_version = 0
feat_version = 0
fix_version = 0
last_maj_version = 0
last_feat_version = 0
result = []
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
maj_version, feat_version, fix_version = get_version(
commit=commit,
tags=tags,
maj_version=maj_version,
feat_version=feat_version,
fix_version=fix_version,
children=children,
)
if (
last_maj_version != maj_version or
last_feat_version != feat_version
):
last_maj_version = maj_version
last_feat_version = feat_version
tag_name = 'refs/tags/v%d.%d' % (maj_version, feat_version)
if ON_PYTHON3:
repo[str.encode(tag_name)] = commit
else:
repo[tag_name] = commit
result.append(
'v%d.%d -> %s' % (maj_version, feat_version, commit_sha)
)
return '\n'.join(result) | [
"def",
"tag_versions",
"(",
"repo_path",
")",
":",
"repo",
"=",
"dulwich",
".",
"repo",
".",
"Repo",
"(",
"repo_path",
")",
"tags",
"=",
"get_tags",
"(",
"repo",
")",
"maj_version",
"=",
"0",
"feat_version",
"=",
"0",
"fix_version",
"=",
"0",
"last_maj_version",
"=",
"0",
"last_feat_version",
"=",
"0",
"result",
"=",
"[",
"]",
"for",
"commit_sha",
",",
"children",
"in",
"reversed",
"(",
"get_children_per_first_parent",
"(",
"repo_path",
")",
".",
"items",
"(",
")",
")",
":",
"commit",
"=",
"get_repo_object",
"(",
"repo",
",",
"commit_sha",
")",
"maj_version",
",",
"feat_version",
",",
"fix_version",
"=",
"get_version",
"(",
"commit",
"=",
"commit",
",",
"tags",
"=",
"tags",
",",
"maj_version",
"=",
"maj_version",
",",
"feat_version",
"=",
"feat_version",
",",
"fix_version",
"=",
"fix_version",
",",
"children",
"=",
"children",
",",
")",
"if",
"(",
"last_maj_version",
"!=",
"maj_version",
"or",
"last_feat_version",
"!=",
"feat_version",
")",
":",
"last_maj_version",
"=",
"maj_version",
"last_feat_version",
"=",
"feat_version",
"tag_name",
"=",
"'refs/tags/v%d.%d'",
"%",
"(",
"maj_version",
",",
"feat_version",
")",
"if",
"ON_PYTHON3",
":",
"repo",
"[",
"str",
".",
"encode",
"(",
"tag_name",
")",
"]",
"=",
"commit",
"else",
":",
"repo",
"[",
"tag_name",
"]",
"=",
"commit",
"result",
".",
"append",
"(",
"'v%d.%d -> %s'",
"%",
"(",
"maj_version",
",",
"feat_version",
",",
"commit_sha",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"result",
")"
]
| Given a repo will add a tag for each major version.
Args:
repo_path(str): path to the git repository to tag. | [
"Given",
"a",
"repo",
"will",
"add",
"a",
"tag",
"for",
"each",
"major",
"version",
"."
]
| python | train |
gem/oq-engine | openquake/calculators/getters.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/getters.py#L189-L212 | def get_mean(self, grp=None):
"""
Compute the mean curve as a ProbabilityMap
:param grp:
if not None must be a string of the form "grp-XX"; in that case
returns the mean considering only the contribution for group XX
"""
self.init()
if len(self.weights) == 1: # one realization
# the standard deviation is zero
pmap = self.get(0, grp)
for sid, pcurve in pmap.items():
array = numpy.zeros(pcurve.array.shape[:-1] + (2,))
array[:, 0] = pcurve.array[:, 0]
pcurve.array = array
return pmap
else: # multiple realizations
dic = ({g: self.dstore['poes/' + g] for g in self.dstore['poes']}
if grp is None else {grp: self.dstore['poes/' + grp]})
pmaps = self.rlzs_assoc.combine_pmaps(dic)
return stats.compute_pmap_stats(
pmaps, [stats.mean_curve, stats.std_curve],
self.weights, self.imtls) | [
"def",
"get_mean",
"(",
"self",
",",
"grp",
"=",
"None",
")",
":",
"self",
".",
"init",
"(",
")",
"if",
"len",
"(",
"self",
".",
"weights",
")",
"==",
"1",
":",
"# one realization",
"# the standard deviation is zero",
"pmap",
"=",
"self",
".",
"get",
"(",
"0",
",",
"grp",
")",
"for",
"sid",
",",
"pcurve",
"in",
"pmap",
".",
"items",
"(",
")",
":",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"pcurve",
".",
"array",
".",
"shape",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"2",
",",
")",
")",
"array",
"[",
":",
",",
"0",
"]",
"=",
"pcurve",
".",
"array",
"[",
":",
",",
"0",
"]",
"pcurve",
".",
"array",
"=",
"array",
"return",
"pmap",
"else",
":",
"# multiple realizations",
"dic",
"=",
"(",
"{",
"g",
":",
"self",
".",
"dstore",
"[",
"'poes/'",
"+",
"g",
"]",
"for",
"g",
"in",
"self",
".",
"dstore",
"[",
"'poes'",
"]",
"}",
"if",
"grp",
"is",
"None",
"else",
"{",
"grp",
":",
"self",
".",
"dstore",
"[",
"'poes/'",
"+",
"grp",
"]",
"}",
")",
"pmaps",
"=",
"self",
".",
"rlzs_assoc",
".",
"combine_pmaps",
"(",
"dic",
")",
"return",
"stats",
".",
"compute_pmap_stats",
"(",
"pmaps",
",",
"[",
"stats",
".",
"mean_curve",
",",
"stats",
".",
"std_curve",
"]",
",",
"self",
".",
"weights",
",",
"self",
".",
"imtls",
")"
]
| Compute the mean curve as a ProbabilityMap
:param grp:
if not None must be a string of the form "grp-XX"; in that case
returns the mean considering only the contribution for group XX | [
"Compute",
"the",
"mean",
"curve",
"as",
"a",
"ProbabilityMap"
]
| python | train |
stevelittlefish/littlefish | littlefish/colourutil.py | https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/colourutil.py#L13-L49 | def rgb_to_hsl(r, g, b):
"""
Converts an RGB color value to HSL.
:param r: The red color value
:param g: The green color value
:param b: The blue color value
:return: The HSL representation
"""
r = float(r) / 255.0
g = float(g) / 255.0
b = float(b) / 255.0
max_value = max(r, g, b)
min_value = min(r, g, b)
h = None
s = None
l = (max_value + min_value) / 2
d = max_value - min_value
if d == 0:
# achromatic
h = 0
s = 0
else:
s = d / (1 - abs(2 * l - 1))
if r == max_value:
h = 60 * ((g - b) % 6)
if b > g:
h += 360
if g == max_value:
h = 60 * ((b - r) / d + 2)
if b == max_value:
h = 60 * ((r - g) / d + 4)
return round(h, 2), round(s, 2), round(l, 2) | [
"def",
"rgb_to_hsl",
"(",
"r",
",",
"g",
",",
"b",
")",
":",
"r",
"=",
"float",
"(",
"r",
")",
"/",
"255.0",
"g",
"=",
"float",
"(",
"g",
")",
"/",
"255.0",
"b",
"=",
"float",
"(",
"b",
")",
"/",
"255.0",
"max_value",
"=",
"max",
"(",
"r",
",",
"g",
",",
"b",
")",
"min_value",
"=",
"min",
"(",
"r",
",",
"g",
",",
"b",
")",
"h",
"=",
"None",
"s",
"=",
"None",
"l",
"=",
"(",
"max_value",
"+",
"min_value",
")",
"/",
"2",
"d",
"=",
"max_value",
"-",
"min_value",
"if",
"d",
"==",
"0",
":",
"# achromatic",
"h",
"=",
"0",
"s",
"=",
"0",
"else",
":",
"s",
"=",
"d",
"/",
"(",
"1",
"-",
"abs",
"(",
"2",
"*",
"l",
"-",
"1",
")",
")",
"if",
"r",
"==",
"max_value",
":",
"h",
"=",
"60",
"*",
"(",
"(",
"g",
"-",
"b",
")",
"%",
"6",
")",
"if",
"b",
">",
"g",
":",
"h",
"+=",
"360",
"if",
"g",
"==",
"max_value",
":",
"h",
"=",
"60",
"*",
"(",
"(",
"b",
"-",
"r",
")",
"/",
"d",
"+",
"2",
")",
"if",
"b",
"==",
"max_value",
":",
"h",
"=",
"60",
"*",
"(",
"(",
"r",
"-",
"g",
")",
"/",
"d",
"+",
"4",
")",
"return",
"round",
"(",
"h",
",",
"2",
")",
",",
"round",
"(",
"s",
",",
"2",
")",
",",
"round",
"(",
"l",
",",
"2",
")"
]
| Converts an RGB color value to HSL.
:param r: The red color value
:param g: The green color value
:param b: The blue color value
:return: The HSL representation | [
"Converts",
"an",
"RGB",
"color",
"value",
"to",
"HSL",
".",
":",
"param",
"r",
":",
"The",
"red",
"color",
"value",
":",
"param",
"g",
":",
"The",
"green",
"color",
"value",
":",
"param",
"b",
":",
"The",
"blue",
"color",
"value",
":",
"return",
":",
"The",
"HSL",
"representation"
]
| python | test |
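A few spot checks of the conversion above against well-known colours; the expected tuples follow directly from the arithmetic in the body (hue in degrees, saturation and lightness in [0, 1]):

```python
assert rgb_to_hsl(255, 0, 0) == (0.0, 1.0, 0.5)    # pure red
assert rgb_to_hsl(0, 0, 255) == (240.0, 1.0, 0.5)  # pure blue
assert rgb_to_hsl(128, 128, 128) == (0, 0, 0.5)    # achromatic grey
```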
Chilipp/model-organization | model_organization/__init__.py | https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/__init__.py#L418-L471 | def init(self, projectname=None, description=None, **kwargs):
"""
Initialize a new experiment
Parameters
----------
projectname: str
The name of the project that shall be used. If None, the last one
created will be used
description: str
A short summary of the experiment
``**kwargs``
Keyword arguments passed to the :meth:`app_main` method
Notes
-----
If the experiment is None, a new experiment will be created
"""
self.app_main(**kwargs)
experiments = self.config.experiments
experiment = self._experiment
if experiment is None and not experiments:
experiment = self.name + '_exp0'
elif experiment is None:
try:
experiment = utils.get_next_name(self.experiment)
except ValueError:
raise ValueError(
"Could not estimate an experiment id! Please use the "
"experiment argument to provide an id.")
self.experiment = experiment
if self.is_archived(experiment):
raise ValueError(
"The specified experiment has already been archived! Run "
"``%s -id %s unarchive`` first" % (self.name, experiment))
if projectname is None:
projectname = self.projectname
else:
self.projectname = projectname
self.logger.info("Initializing experiment %s of project %s",
experiment, projectname)
exp_dict = experiments.setdefault(experiment, OrderedDict())
if description is not None:
exp_dict['description'] = description
exp_dict['project'] = projectname
exp_dict['expdir'] = exp_dir = osp.join('experiments', experiment)
exp_dir = osp.join(self.config.projects[projectname]['root'], exp_dir)
exp_dict['timestamps'] = OrderedDict()
if not os.path.exists(exp_dir):
self.logger.debug(" Creating experiment directory %s", exp_dir)
os.makedirs(exp_dir)
self.fix_paths(exp_dict)
return exp_dict | [
"def",
"init",
"(",
"self",
",",
"projectname",
"=",
"None",
",",
"description",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"app_main",
"(",
"*",
"*",
"kwargs",
")",
"experiments",
"=",
"self",
".",
"config",
".",
"experiments",
"experiment",
"=",
"self",
".",
"_experiment",
"if",
"experiment",
"is",
"None",
"and",
"not",
"experiments",
":",
"experiment",
"=",
"self",
".",
"name",
"+",
"'_exp0'",
"elif",
"experiment",
"is",
"None",
":",
"try",
":",
"experiment",
"=",
"utils",
".",
"get_next_name",
"(",
"self",
".",
"experiment",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Could not estimate an experiment id! Please use the \"",
"\"experiment argument to provide an id.\"",
")",
"self",
".",
"experiment",
"=",
"experiment",
"if",
"self",
".",
"is_archived",
"(",
"experiment",
")",
":",
"raise",
"ValueError",
"(",
"\"The specified experiment has already been archived! Run \"",
"\"``%s -id %s unarchive`` first\"",
"%",
"(",
"self",
".",
"name",
",",
"experiment",
")",
")",
"if",
"projectname",
"is",
"None",
":",
"projectname",
"=",
"self",
".",
"projectname",
"else",
":",
"self",
".",
"projectname",
"=",
"projectname",
"self",
".",
"logger",
".",
"info",
"(",
"\"Initializing experiment %s of project %s\"",
",",
"experiment",
",",
"projectname",
")",
"exp_dict",
"=",
"experiments",
".",
"setdefault",
"(",
"experiment",
",",
"OrderedDict",
"(",
")",
")",
"if",
"description",
"is",
"not",
"None",
":",
"exp_dict",
"[",
"'description'",
"]",
"=",
"description",
"exp_dict",
"[",
"'project'",
"]",
"=",
"projectname",
"exp_dict",
"[",
"'expdir'",
"]",
"=",
"exp_dir",
"=",
"osp",
".",
"join",
"(",
"'experiments'",
",",
"experiment",
")",
"exp_dir",
"=",
"osp",
".",
"join",
"(",
"self",
".",
"config",
".",
"projects",
"[",
"projectname",
"]",
"[",
"'root'",
"]",
",",
"exp_dir",
")",
"exp_dict",
"[",
"'timestamps'",
"]",
"=",
"OrderedDict",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exp_dir",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\" Creating experiment directory %s\"",
",",
"exp_dir",
")",
"os",
".",
"makedirs",
"(",
"exp_dir",
")",
"self",
".",
"fix_paths",
"(",
"exp_dict",
")",
"return",
"exp_dict"
]
| Initialize a new experiment
Parameters
----------
projectname: str
The name of the project that shall be used. If None, the last one
created will be used
description: str
A short summary of the experiment
``**kwargs``
Keyword arguments passed to the :meth:`app_main` method
Notes
-----
If the experiment is None, a new experiment will be created | [
"Initialize",
"a",
"new",
"experiment"
]
| python | train |
soravux/scoop | scoop/futures.py | https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/futures.py#L47-L69 | def _startup(rootFuture, *args, **kargs):
"""Initializes the SCOOP environment.
:param rootFuture: Any callable object (function or class object with *__call__*
method); this object will be called once and allows the use of parallel
calls inside this object.
:param args: A tuple of positional arguments that will be passed to the
callable object.
:param kargs: A dictionary of additional keyword arguments that will be
passed to the callable object.
:returns: The result of the root Future.
Be sure to launch your root Future using this method."""
import greenlet
global _controller
_controller = greenlet.greenlet(control.runController)
try:
result = _controller.switch(rootFuture, *args, **kargs)
except scoop._comm.Shutdown:
result = None
control.execQueue.shutdown()
return result | [
"def",
"_startup",
"(",
"rootFuture",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"import",
"greenlet",
"global",
"_controller",
"_controller",
"=",
"greenlet",
".",
"greenlet",
"(",
"control",
".",
"runController",
")",
"try",
":",
"result",
"=",
"_controller",
".",
"switch",
"(",
"rootFuture",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
"except",
"scoop",
".",
"_comm",
".",
"Shutdown",
":",
"result",
"=",
"None",
"control",
".",
"execQueue",
".",
"shutdown",
"(",
")",
"return",
"result"
]
| Initializes the SCOOP environment.
:param rootFuture: Any callable object (function or class object with *__call__*
method); this object will be called once and allows the use of parallel
calls inside this object.
:param args: A tuple of positional arguments that will be passed to the
callable object.
:param kargs: A dictionary of additional keyword arguments that will be
passed to the callable object.
:returns: The result of the root Future.
Be sure to launch your root Future using this method. | [
"Initializes",
"the",
"SCOOP",
"environment",
"."
]
| python | train |
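A minimal usage sketch to complement the record above: user code normally reaches _startup indirectly through scoop's public futures API, launched as `python -m scoop script.py`; the worker function here is illustrative, not part of the scoop API.
from scoop import futures

def square(x):
    return x * x

if __name__ == '__main__':
    # when launched via `python -m scoop`, the root program is wrapped in a
    # root Future and driven by the controller greenlet set up in _startup
    print(list(futures.map(square, range(8))))   # [0, 1, 4, 9, 16, 25, 36, 49]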
saltstack/salt | salt/modules/netbox.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netbox.py#L1070-L1113 | def create_circuit(name, provider_id, circuit_type, description=None):
'''
.. versionadded:: 2019.2.0
Create a new Netbox circuit
name
Name of the circuit
provider_id
The netbox id of the circuit provider
circuit_type
The name of the circuit type
description
The description of the circuit
CLI Example:
.. code-block:: bash
salt myminion netbox.create_circuit NEW_CIRCUIT_01 Telia Transit "New Telia circuit"
'''
nb_circuit_provider = get_('circuits', 'providers', provider_id)
nb_circuit_type = get_('circuits', 'circuit-types', slug=slugify(circuit_type))
if nb_circuit_provider and nb_circuit_type:
payload = {
'cid': name,
'provider': nb_circuit_provider['id'],
'type': nb_circuit_type['id']
}
if description:
payload['description'] = description
nb_circuit = get_('circuits', 'circuits', cid=name)
if nb_circuit:
return False
circuit = _add('circuits', 'circuits', payload)
if circuit:
return {'circuits': {'circuits': {circuit['id']: payload}}}
else:
return circuit
else:
return False | [
"def",
"create_circuit",
"(",
"name",
",",
"provider_id",
",",
"circuit_type",
",",
"description",
"=",
"None",
")",
":",
"nb_circuit_provider",
"=",
"get_",
"(",
"'circuits'",
",",
"'providers'",
",",
"provider_id",
")",
"nb_circuit_type",
"=",
"get_",
"(",
"'circuits'",
",",
"'circuit-types'",
",",
"slug",
"=",
"slugify",
"(",
"circuit_type",
")",
")",
"if",
"nb_circuit_provider",
"and",
"nb_circuit_type",
":",
"payload",
"=",
"{",
"'cid'",
":",
"name",
",",
"'provider'",
":",
"nb_circuit_provider",
"[",
"'id'",
"]",
",",
"'type'",
":",
"nb_circuit_type",
"[",
"'id'",
"]",
"}",
"if",
"description",
":",
"payload",
"[",
"'description'",
"]",
"=",
"description",
"nb_circuit",
"=",
"get_",
"(",
"'circuits'",
",",
"'circuits'",
",",
"cid",
"=",
"name",
")",
"if",
"nb_circuit",
":",
"return",
"False",
"circuit",
"=",
"_add",
"(",
"'circuits'",
",",
"'circuits'",
",",
"payload",
")",
"if",
"circuit",
":",
"return",
"{",
"'circuits'",
":",
"{",
"'circuits'",
":",
"{",
"circuit",
"[",
"'id'",
"]",
":",
"payload",
"}",
"}",
"}",
"else",
":",
"return",
"circuit",
"else",
":",
"return",
"False"
]
| .. versionadded:: 2019.2.0
Create a new Netbox circuit
name
Name of the circuit
provider_id
The netbox id of the circuit provider
circuit_type
The name of the circuit type
description
The description of the circuit
CLI Example:
.. code-block:: bash
salt myminion netbox.create_circuit NEW_CIRCUIT_01 Telia Transit "New Telia circuit"
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
]
| python | train |
lago-project/lago | lago/providers/libvirt/utils.py | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/providers/libvirt/utils.py#L145-L158 | def dict_to_xml(spec, full_document=False):
"""
Convert dict to XML
Args:
spec(dict): dict to convert
full_document(bool): whether to add XML headers
Returns:
lxml.etree.Element: XML tree
"""
middle = xmltodict.unparse(spec, full_document=full_document, pretty=True)
return lxml.etree.fromstring(middle) | [
"def",
"dict_to_xml",
"(",
"spec",
",",
"full_document",
"=",
"False",
")",
":",
"middle",
"=",
"xmltodict",
".",
"unparse",
"(",
"spec",
",",
"full_document",
"=",
"full_document",
",",
"pretty",
"=",
"True",
")",
"return",
"lxml",
".",
"etree",
".",
"fromstring",
"(",
"middle",
")"
]
| Convert dict to XML
Args:
spec(dict): dict to convert
full_document(bool): whether to add XML headers
Returns:
lxml.etree.Element: XML tree | [
"Convert",
"dict",
"to",
"XML"
]
| python | train |
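A short standalone sketch of the helper above, assuming xmltodict and lxml are installed; the interface spec is illustrative:
import lxml.etree
import xmltodict

# the outermost key becomes the root tag; '@'-prefixed keys become attributes
spec = {'interface': {'mac': {'@address': '52:54:00:00:00:01'}}}
xml_text = xmltodict.unparse(spec, full_document=False, pretty=True)
tree = lxml.etree.fromstring(xml_text)   # lxml.etree element, as dict_to_xml returns
print(lxml.etree.tostring(tree, pretty_print=True).decode())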
MonashBI/arcana | arcana/study/base.py | https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L669-L697 | def save_workflow_graph_for(self, spec_name, fname, full=False,
style='flat', **kwargs):
"""
Saves a graph of the workflow to generate the requested spec_name
Parameters
----------
spec_name : str
Name of the spec to generate the graph for
fname : str
The filename for the saved graph
style : str
The style of the graph, can be one of
'orig', 'flat', 'exec', 'hierarchical'
"""
pipeline = self.spec(spec_name).pipeline
if full:
workflow = pe.Workflow(name='{}_gen'.format(spec_name),
base_dir=self.processor.work_dir)
self.processor._connect_pipeline(
pipeline, workflow, **kwargs)
else:
workflow = pipeline._workflow
fname = op.expanduser(fname)
if not fname.endswith('.png'):
fname += '.png'
dotfilename = fname[:-4] + '.dot'
workflow.write_graph(graph2use=style,
dotfilename=dotfilename) | [
"def",
"save_workflow_graph_for",
"(",
"self",
",",
"spec_name",
",",
"fname",
",",
"full",
"=",
"False",
",",
"style",
"=",
"'flat'",
",",
"*",
"*",
"kwargs",
")",
":",
"pipeline",
"=",
"self",
".",
"spec",
"(",
"spec_name",
")",
".",
"pipeline",
"if",
"full",
":",
"workflow",
"=",
"pe",
".",
"Workflow",
"(",
"name",
"=",
"'{}_gen'",
".",
"format",
"(",
"spec_name",
")",
",",
"base_dir",
"=",
"self",
".",
"processor",
".",
"work_dir",
")",
"self",
".",
"processor",
".",
"_connect_pipeline",
"(",
"pipeline",
",",
"workflow",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"workflow",
"=",
"pipeline",
".",
"_workflow",
"fname",
"=",
"op",
".",
"expanduser",
"(",
"fname",
")",
"if",
"not",
"fname",
".",
"endswith",
"(",
"'.png'",
")",
":",
"fname",
"+=",
"'.png'",
"dotfilename",
"=",
"fname",
"[",
":",
"-",
"4",
"]",
"+",
"'.dot'",
"workflow",
".",
"write_graph",
"(",
"graph2use",
"=",
"style",
",",
"dotfilename",
"=",
"dotfilename",
")"
]
| Saves a graph of the workflow to generate the requested spec_name
Parameters
----------
spec_name : str
Name of the spec to generate the graph for
fname : str
The filename for the saved graph
style : str
The style of the graph, can be one of
'orig', 'flat', 'exec', 'hierarchical' | [
"Saves",
"a",
"graph",
"of",
"the",
"workflow",
"to",
"generate",
"the",
"requested",
"spec_name"
]
| python | train |
pypa/pipenv | pipenv/vendor/cerberus/validator.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/validator.py#L769-L773 | def _normalize_rename(self, mapping, schema, field):
""" {'type': 'hashable'} """
if 'rename' in schema[field]:
mapping[schema[field]['rename']] = mapping[field]
del mapping[field] | [
"def",
"_normalize_rename",
"(",
"self",
",",
"mapping",
",",
"schema",
",",
"field",
")",
":",
"if",
"'rename'",
"in",
"schema",
"[",
"field",
"]",
":",
"mapping",
"[",
"schema",
"[",
"field",
"]",
"[",
"'rename'",
"]",
"]",
"=",
"mapping",
"[",
"field",
"]",
"del",
"mapping",
"[",
"field",
"]"
]
| {'type': 'hashable'} | [
"{",
"type",
":",
"hashable",
"}"
]
| python | train |
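The 'rename' rule implemented above is exercised through cerberus' public normalization API; a minimal sketch (schema and document are illustrative):
from cerberus import Validator

v = Validator({'foo': {'rename': 'bar'}})
print(v.normalized({'foo': 0}))   # {'bar': 0} -- the key is moved, the value kept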
matthewdeanmartin/jiggle_version | jiggle_version/jiggle_class.py | https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/jiggle_class.py#L356-L389 | def jiggle_config_file(self): # type: () ->int
"""
Update ini, cfg, conf
"""
changed = 0
# setup.py related. setup.py itself should read __init__.py or __version__.py
other_files = ["setup.cfg"]
for file_name in other_files:
filepath = os.path.join(self.SRC, file_name)
# only create setup.cfg if we have setup.py
if (
self.create_configs
and not os.path.isfile(filepath)
and os.path.isfile("setup.py")
):
logger.info("Creating " + unicode(filepath))
self.file_maker.create_setup_cfg(filepath)
if os.path.isfile(filepath):
config = configparser.ConfigParser()
config.read(filepath)
try:
version = config["metadata"]["version"]
except KeyError:
version = ""
if version:
with io.open(filepath, "w") as configfile: # save
config["metadata"]["version"] = unicode(self.version_to_write())
config.write(configfile)
changed += 1
return changed | [
"def",
"jiggle_config_file",
"(",
"self",
")",
":",
"# type: () ->int",
"changed",
"=",
"0",
"# setup.py related. setup.py itself should read __init__.py or __version__.py",
"other_files",
"=",
"[",
"\"setup.cfg\"",
"]",
"for",
"file_name",
"in",
"other_files",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"SRC",
",",
"file_name",
")",
"# only create setup.cfg if we have setup.py",
"if",
"(",
"self",
".",
"create_configs",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"\"setup.py\"",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"Creating \"",
"+",
"unicode",
"(",
"filepath",
")",
")",
"self",
".",
"file_maker",
".",
"create_setup_cfg",
"(",
"filepath",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"filepath",
")",
"try",
":",
"version",
"=",
"config",
"[",
"\"metadata\"",
"]",
"[",
"\"version\"",
"]",
"except",
"KeyError",
":",
"version",
"=",
"\"\"",
"if",
"version",
":",
"with",
"io",
".",
"open",
"(",
"filepath",
",",
"\"w\"",
")",
"as",
"configfile",
":",
"# save",
"config",
"[",
"\"metadata\"",
"]",
"[",
"\"version\"",
"]",
"=",
"unicode",
"(",
"self",
".",
"version_to_write",
"(",
")",
")",
"config",
".",
"write",
"(",
"configfile",
")",
"changed",
"+=",
"1",
"return",
"changed"
]
| Update ini, cfg, conf | [
"Update",
"ini",
"cfg",
"conf"
]
| python | train |
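The read-modify-write step above uses only the standard library; a self-contained sketch with an in-memory setup.cfg (contents illustrative):
import configparser
import io

cfg_text = "[metadata]\nname = demo\nversion = 1.0.0\n"
config = configparser.ConfigParser()
config.read_string(cfg_text)
config["metadata"]["version"] = "1.0.1"   # the version bump jiggle_config_file writes
buf = io.StringIO()
config.write(buf)
print(buf.getvalue())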
KrzyHonk/bpmn-python | bpmn_python/bpmn_diagram_rep.py | https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_rep.py#L404-L414 | def add_event_definition_element(event_type, event_definitions):
"""
Helper function that creates event definition element (special type of event) from given parameters.
:param event_type: string object. Short name of required event definition,
:param event_definitions: dictionary of event definitions. Key is a short name of event definition,
value is a full name of event definition, as defined in BPMN 2.0 XML Schema.
"""
event_def_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
event_def = {consts.Consts.id: event_def_id, consts.Consts.definition_type: event_definitions[event_type]}
return event_def | [
"def",
"add_event_definition_element",
"(",
"event_type",
",",
"event_definitions",
")",
":",
"event_def_id",
"=",
"BpmnDiagramGraph",
".",
"id_prefix",
"+",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"event_def",
"=",
"{",
"consts",
".",
"Consts",
".",
"id",
":",
"event_def_id",
",",
"consts",
".",
"Consts",
".",
"definition_type",
":",
"event_definitions",
"[",
"event_type",
"]",
"}",
"return",
"event_def"
]
| Helper function that creates event definition element (special type of event) from given parameters.
:param event_type: string object. Short name of required event definition,
:param event_definitions: dictionary of event definitions. Key is a short name of event definition,
value is a full name of event definition, as defined in BPMN 2.0 XML Schema. | [
"Helper",
"function",
"that",
"creates",
"event",
"definition",
"element",
"(",
"special",
"type",
"of",
"event",
")",
"from",
"given",
"parameters",
"."
]
| python | train |
buildbot/buildbot | master/buildbot/process/builder.py | https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/process/builder.py#L144-L156 | def getOldestRequestTime(self):
"""Returns the submitted_at of the oldest unclaimed build request for
this builder, or None if there are no build requests.
@returns: datetime instance or None, via Deferred
"""
bldrid = yield self.getBuilderId()
unclaimed = yield self.master.data.get(
('builders', bldrid, 'buildrequests'),
[resultspec.Filter('claimed', 'eq', [False])],
order=['submitted_at'], limit=1)
if unclaimed:
return unclaimed[0]['submitted_at'] | [
"def",
"getOldestRequestTime",
"(",
"self",
")",
":",
"bldrid",
"=",
"yield",
"self",
".",
"getBuilderId",
"(",
")",
"unclaimed",
"=",
"yield",
"self",
".",
"master",
".",
"data",
".",
"get",
"(",
"(",
"'builders'",
",",
"bldrid",
",",
"'buildrequests'",
")",
",",
"[",
"resultspec",
".",
"Filter",
"(",
"'claimed'",
",",
"'eq'",
",",
"[",
"False",
"]",
")",
"]",
",",
"order",
"=",
"[",
"'submitted_at'",
"]",
",",
"limit",
"=",
"1",
")",
"if",
"unclaimed",
":",
"return",
"unclaimed",
"[",
"0",
"]",
"[",
"'submitted_at'",
"]"
]
| Returns the submitted_at of the oldest unclaimed build request for
this builder, or None if there are no build requests.
@returns: datetime instance or None, via Deferred | [
"Returns",
"the",
"submitted_at",
"of",
"the",
"oldest",
"unclaimed",
"build",
"request",
"for",
"this",
"builder",
"or",
"None",
"if",
"there",
"are",
"no",
"build",
"requests",
"."
]
| python | train |
econ-ark/HARK | HARK/ConsumptionSaving/ConsIndShockModel.py | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L977-L994 | def solve(self):
'''
Solves a one period consumption saving problem with risky income.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem.
'''
aNrm = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
solution = self.makeBasicSolution(EndOfPrdvP,aNrm,self.makeLinearcFunc)
solution = self.addMPCandHumanWealth(solution)
return solution | [
"def",
"solve",
"(",
"self",
")",
":",
"aNrm",
"=",
"self",
".",
"prepareToCalcEndOfPrdvP",
"(",
")",
"EndOfPrdvP",
"=",
"self",
".",
"calcEndOfPrdvP",
"(",
")",
"solution",
"=",
"self",
".",
"makeBasicSolution",
"(",
"EndOfPrdvP",
",",
"aNrm",
",",
"self",
".",
"makeLinearcFunc",
")",
"solution",
"=",
"self",
".",
"addMPCandHumanWealth",
"(",
"solution",
")",
"return",
"solution"
]
| Solves a one period consumption saving problem with risky income.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem. | [
"Solves",
"a",
"one",
"period",
"consumption",
"saving",
"problem",
"with",
"risky",
"income",
"."
]
| python | train |
aquatix/python-utilkit | utilkit/printutil.py | https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/printutil.py#L87-L110 | def merge_x_y(collection_x, collection_y, filter_none=False):
"""
Merge two lists, creating a dictionary with key `label` and a set x and y
"""
data = {}
for item in collection_x:
#print item[0:-1]
#print item[-1]
label = datetimeutil.tuple_to_string(item[0:-1])
if filter_none and label == 'None-None':
continue
data[label] = {'label': label, 'x': item[-1], 'y': 0}
for item in collection_y:
#print item
label = datetimeutil.tuple_to_string(item[0:-1])
if filter_none and label == 'None-None':
continue
try:
data[label]['y'] = item[-1]
except KeyError:
data[label] = {'label': label, 'x': 0, 'y': item[-1]}
# Keys are not sorted
return data | [
"def",
"merge_x_y",
"(",
"collection_x",
",",
"collection_y",
",",
"filter_none",
"=",
"False",
")",
":",
"data",
"=",
"{",
"}",
"for",
"item",
"in",
"collection_x",
":",
"#print item[0:-1]",
"#print item[-1]",
"label",
"=",
"datetimeutil",
".",
"tuple_to_string",
"(",
"item",
"[",
"0",
":",
"-",
"1",
"]",
")",
"if",
"filter_none",
"and",
"label",
"==",
"'None-None'",
":",
"continue",
"data",
"[",
"label",
"]",
"=",
"{",
"'label'",
":",
"label",
",",
"'x'",
":",
"item",
"[",
"-",
"1",
"]",
",",
"'y'",
":",
"0",
"}",
"for",
"item",
"in",
"collection_y",
":",
"#print item",
"label",
"=",
"datetimeutil",
".",
"tuple_to_string",
"(",
"item",
"[",
"0",
":",
"-",
"1",
"]",
")",
"if",
"filter_none",
"and",
"label",
"==",
"'None-None'",
":",
"continue",
"try",
":",
"data",
"[",
"label",
"]",
"[",
"'y'",
"]",
"=",
"item",
"[",
"-",
"1",
"]",
"except",
"KeyError",
":",
"data",
"[",
"label",
"]",
"=",
"{",
"'label'",
":",
"label",
",",
"'x'",
":",
"0",
",",
"'y'",
":",
"item",
"[",
"-",
"1",
"]",
"}",
"# Keys are not sorted",
"return",
"data"
]
| Merge two lists, creating a dictionary with key `label` and a set x and y | [
"Merge",
"two",
"lists",
"creating",
"a",
"dictionary",
"with",
"key",
"label",
"and",
"a",
"set",
"x",
"and",
"y"
]
| python | train |
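A self-contained sketch of the merge logic above; tuple_to_string below stands in for utilkit.datetimeutil.tuple_to_string and simply joins the label parts:
def tuple_to_string(parts):
    return '-'.join(str(p) for p in parts)

def merge_x_y_sketch(collection_x, collection_y):
    data = {}
    for item in collection_x:
        label = tuple_to_string(item[:-1])
        data[label] = {'label': label, 'x': item[-1], 'y': 0}
    for item in collection_y:
        label = tuple_to_string(item[:-1])
        data.setdefault(label, {'label': label, 'x': 0, 'y': 0})['y'] = item[-1]
    return data

print(merge_x_y_sketch([(2023, 1, 10)], [(2023, 1, 4), (2023, 2, 7)]))
# {'2023-1': {'label': '2023-1', 'x': 10, 'y': 4},
#  '2023-2': {'label': '2023-2', 'x': 0, 'y': 7}}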
ungarj/mapchete | mapchete/config.py | https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/config.py#L248-L263 | def effective_bounds(self):
"""
Effective process bounds required to initialize inputs.
Process bounds sometimes have to be larger, because all intersecting process
tiles have to be covered as well.
"""
return snap_bounds(
bounds=clip_bounds(bounds=self.init_bounds, clip=self.process_pyramid.bounds),
pyramid=self.process_pyramid,
zoom=min(
self.baselevels["zooms"]
) if self.baselevels else min(
self.init_zoom_levels
)
) | [
"def",
"effective_bounds",
"(",
"self",
")",
":",
"return",
"snap_bounds",
"(",
"bounds",
"=",
"clip_bounds",
"(",
"bounds",
"=",
"self",
".",
"init_bounds",
",",
"clip",
"=",
"self",
".",
"process_pyramid",
".",
"bounds",
")",
",",
"pyramid",
"=",
"self",
".",
"process_pyramid",
",",
"zoom",
"=",
"min",
"(",
"self",
".",
"baselevels",
"[",
"\"zooms\"",
"]",
")",
"if",
"self",
".",
"baselevels",
"else",
"min",
"(",
"self",
".",
"init_zoom_levels",
")",
")"
]
| Effective process bounds required to initialize inputs.
Process bounds sometimes have to be larger, because all intersecting process
tiles have to be covered as well. | [
"Effective",
"process",
"bounds",
"required",
"to",
"initialize",
"inputs",
"."
]
| python | valid |
saltstack/salt | salt/cloud/clouds/joyent.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/joyent.py#L917-L960 | def import_key(kwargs=None, call=None):
'''
Import an SSH key
CLI Example:
.. code-block:: bash
salt-cloud -f import_key joyent keyname=mykey keyfile=/tmp/mykey.pub
'''
if call != 'function':
log.error(
'The import_key function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
if 'keyfile' not in kwargs:
log.error('The location of the SSH keyfile is required.')
return False
if not os.path.isfile(kwargs['keyfile']):
log.error('The specified keyfile (%s) does not exist.', kwargs['keyfile'])
return False
with salt.utils.files.fopen(kwargs['keyfile'], 'r') as fp_:
kwargs['key'] = salt.utils.stringutils.to_unicode(fp_.read())
send_data = {'name': kwargs['keyname'], 'key': kwargs['key']}
kwargs['data'] = salt.utils.json.dumps(send_data)
rcode, data = query(
command='my/keys',
method='POST',
data=kwargs['data'],
)
log.debug(pprint.pformat(data))
return {'keys': {data['name']: data['key']}} | [
"def",
"import_key",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"log",
".",
"error",
"(",
"'The import_key function must be called with -f or --function.'",
")",
"return",
"False",
"if",
"not",
"kwargs",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'keyname'",
"not",
"in",
"kwargs",
":",
"log",
".",
"error",
"(",
"'A keyname is required.'",
")",
"return",
"False",
"if",
"'keyfile'",
"not",
"in",
"kwargs",
":",
"log",
".",
"error",
"(",
"'The location of the SSH keyfile is required.'",
")",
"return",
"False",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"kwargs",
"[",
"'keyfile'",
"]",
")",
":",
"log",
".",
"error",
"(",
"'The specified keyfile (%s) does not exist.'",
",",
"kwargs",
"[",
"'keyfile'",
"]",
")",
"return",
"False",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"kwargs",
"[",
"'keyfile'",
"]",
",",
"'r'",
")",
"as",
"fp_",
":",
"kwargs",
"[",
"'key'",
"]",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fp_",
".",
"read",
"(",
")",
")",
"send_data",
"=",
"{",
"'name'",
":",
"kwargs",
"[",
"'keyname'",
"]",
",",
"'key'",
":",
"kwargs",
"[",
"'key'",
"]",
"}",
"kwargs",
"[",
"'data'",
"]",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"send_data",
")",
"rcode",
",",
"data",
"=",
"query",
"(",
"command",
"=",
"'my/keys'",
",",
"method",
"=",
"'POST'",
",",
"data",
"=",
"kwargs",
"[",
"'data'",
"]",
",",
")",
"log",
".",
"debug",
"(",
"pprint",
".",
"pformat",
"(",
"data",
")",
")",
"return",
"{",
"'keys'",
":",
"{",
"data",
"[",
"'name'",
"]",
":",
"data",
"[",
"'key'",
"]",
"}",
"}"
]
| Import an SSH key
CLI Example:
.. code-block:: bash
salt-cloud -f import_key joyent keyname=mykey keyfile=/tmp/mykey.pub | [
"List",
"the",
"keys",
"available"
]
| python | train |
RockFeng0/rtsf | rtsf/p_executer.py | https://github.com/RockFeng0/rtsf/blob/fbc0d57edaeca86418af3942472fcc6d3e9ce591/rtsf/p_executer.py#L227-L243 | def init_runner(self, parser, tracers, projinfo):
''' initialize some instances for preparing to run test case
@note: should not override
@param parser: instance of TestCaseParser
@param tracers: dict type for the instance of Tracer. Such as {"":tracer_obj} or {"192.168.0.1:5555":tracer_obj1, "192.168.0.2:5555":tracer_obj2}
@param proj_info: dict type of test case. use like: self.proj_info["module"], self.proj_info["name"]
yaml case like:
- project:
name: xxx
module: xxxx
dict case like:
{"project": {"name": xxx, "module": xxxx}}
'''
self.parser = parser
self.tracers = tracers
self.proj_info = projinfo | [
"def",
"init_runner",
"(",
"self",
",",
"parser",
",",
"tracers",
",",
"projinfo",
")",
":",
"self",
".",
"parser",
"=",
"parser",
"self",
".",
"tracers",
"=",
"tracers",
"self",
".",
"proj_info",
"=",
"projinfo"
]
| initial some instances for preparing to run test case
@note: should not override
@param parser: instance of TestCaseParser
@param tracers: dict type for the instance of Tracer. Such as {"":tracer_obj} or {"192.168.0.1:5555":tracer_obj1, "192.168.0.2:5555":tracer_obj2}
@param proj_info: dict type of test case. use like: self.proj_info["module"], self.proj_info["name"]
yaml case like:
- project:
name: xxx
module: xxxx
dict case like:
{"project": {"name": xxx, "module": xxxx}} | [
"initial",
"some",
"instances",
"for",
"preparing",
"to",
"run",
"test",
"case"
]
| python | train |
spyder-ide/spyder | spyder/widgets/arraybuilder.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/arraybuilder.py#L297-L369 | def process_text(self, array=True):
"""
Construct the text based on the entered content in the widget.
"""
if array:
prefix = 'np.array([['
else:
prefix = 'np.matrix([['
suffix = ']])'
values = self._widget.text().strip()
if values != '':
# cleans repeated spaces
exp = r'(\s*)' + ROW_SEPARATOR + r'(\s*)'
values = re.sub(exp, ROW_SEPARATOR, values)
values = re.sub(r"\s+", " ", values)
values = re.sub(r"]$", "", values)
values = re.sub(r"^\[", "", values)
values = re.sub(ROW_SEPARATOR + r'*$', '', values)
# replaces spaces by commas
values = values.replace(' ', ELEMENT_SEPARATOR)
# iterate to find number of rows and columns
new_values = []
rows = values.split(ROW_SEPARATOR)
nrows = len(rows)
ncols = []
for row in rows:
new_row = []
elements = row.split(ELEMENT_SEPARATOR)
ncols.append(len(elements))
for e in elements:
num = e
# replaces not defined values
if num in NAN_VALUES:
num = 'np.nan'
# Convert numbers to floating point
if self._force_float:
try:
num = str(float(e))
except:
pass
new_row.append(num)
new_values.append(ELEMENT_SEPARATOR.join(new_row))
new_values = ROW_SEPARATOR.join(new_values)
values = new_values
# Check validity
if len(set(ncols)) == 1:
self._valid = True
else:
self._valid = False
# Single rows are parsed as 1D arrays/matrices
if nrows == 1:
prefix = prefix[:-1]
suffix = suffix.replace("]])", "])")
# Fix offset
offset = self._offset
braces = BRACES.replace(' ', '\n' + ' '*(offset + len(prefix) - 1))
values = values.replace(ROW_SEPARATOR, braces)
text = "{0}{1}{2}".format(prefix, values, suffix)
self._text = text
else:
self._text = ''
self.update_warning() | [
"def",
"process_text",
"(",
"self",
",",
"array",
"=",
"True",
")",
":",
"if",
"array",
":",
"prefix",
"=",
"'np.array([['",
"else",
":",
"prefix",
"=",
"'np.matrix([['",
"suffix",
"=",
"']])'",
"values",
"=",
"self",
".",
"_widget",
".",
"text",
"(",
")",
".",
"strip",
"(",
")",
"if",
"values",
"!=",
"''",
":",
"# cleans repeated spaces\r",
"exp",
"=",
"r'(\\s*)'",
"+",
"ROW_SEPARATOR",
"+",
"r'(\\s*)'",
"values",
"=",
"re",
".",
"sub",
"(",
"exp",
",",
"ROW_SEPARATOR",
",",
"values",
")",
"values",
"=",
"re",
".",
"sub",
"(",
"r\"\\s+\"",
",",
"\" \"",
",",
"values",
")",
"values",
"=",
"re",
".",
"sub",
"(",
"r\"]$\"",
",",
"\"\"",
",",
"values",
")",
"values",
"=",
"re",
".",
"sub",
"(",
"r\"^\\[\"",
",",
"\"\"",
",",
"values",
")",
"values",
"=",
"re",
".",
"sub",
"(",
"ROW_SEPARATOR",
"+",
"r'*$'",
",",
"''",
",",
"values",
")",
"# replaces spaces by commas\r",
"values",
"=",
"values",
".",
"replace",
"(",
"' '",
",",
"ELEMENT_SEPARATOR",
")",
"# iterate to find number of rows and columns\r",
"new_values",
"=",
"[",
"]",
"rows",
"=",
"values",
".",
"split",
"(",
"ROW_SEPARATOR",
")",
"nrows",
"=",
"len",
"(",
"rows",
")",
"ncols",
"=",
"[",
"]",
"for",
"row",
"in",
"rows",
":",
"new_row",
"=",
"[",
"]",
"elements",
"=",
"row",
".",
"split",
"(",
"ELEMENT_SEPARATOR",
")",
"ncols",
".",
"append",
"(",
"len",
"(",
"elements",
")",
")",
"for",
"e",
"in",
"elements",
":",
"num",
"=",
"e",
"# replaces not defined values\r",
"if",
"num",
"in",
"NAN_VALUES",
":",
"num",
"=",
"'np.nan'",
"# Convert numbers to floating point\r",
"if",
"self",
".",
"_force_float",
":",
"try",
":",
"num",
"=",
"str",
"(",
"float",
"(",
"e",
")",
")",
"except",
":",
"pass",
"new_row",
".",
"append",
"(",
"num",
")",
"new_values",
".",
"append",
"(",
"ELEMENT_SEPARATOR",
".",
"join",
"(",
"new_row",
")",
")",
"new_values",
"=",
"ROW_SEPARATOR",
".",
"join",
"(",
"new_values",
")",
"values",
"=",
"new_values",
"# Check validity\r",
"if",
"len",
"(",
"set",
"(",
"ncols",
")",
")",
"==",
"1",
":",
"self",
".",
"_valid",
"=",
"True",
"else",
":",
"self",
".",
"_valid",
"=",
"False",
"# Single rows are parsed as 1D arrays/matrices\r",
"if",
"nrows",
"==",
"1",
":",
"prefix",
"=",
"prefix",
"[",
":",
"-",
"1",
"]",
"suffix",
"=",
"suffix",
".",
"replace",
"(",
"\"]])\"",
",",
"\"])\"",
")",
"# Fix offset\r",
"offset",
"=",
"self",
".",
"_offset",
"braces",
"=",
"BRACES",
".",
"replace",
"(",
"' '",
",",
"'\\n'",
"+",
"' '",
"*",
"(",
"offset",
"+",
"len",
"(",
"prefix",
")",
"-",
"1",
")",
")",
"values",
"=",
"values",
".",
"replace",
"(",
"ROW_SEPARATOR",
",",
"braces",
")",
"text",
"=",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"prefix",
",",
"values",
",",
"suffix",
")",
"self",
".",
"_text",
"=",
"text",
"else",
":",
"self",
".",
"_text",
"=",
"''",
"self",
".",
"update_warning",
"(",
")"
]
| Construct the text based on the entered content in the widget. | [
"Construct",
"the",
"text",
"based",
"on",
"the",
"entered",
"content",
"in",
"the",
"widget",
"."
]
| python | train |
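A stripped-down, self-contained sketch of the normalisation above; it assumes ';' for ROW_SEPARATOR and ',' for ELEMENT_SEPARATOR (the real constants live elsewhere in the module) and skips the NaN/float handling:
import re

def to_array_text(values, row_sep=';', elem_sep=','):
    values = re.sub(r'\s*' + row_sep + r'\s*', row_sep, values.strip())
    values = re.sub(r'\s+', ' ', values).replace(' ', elem_sep)
    rows = values.split(row_sep)
    if len(rows) == 1:                       # single rows become 1D arrays
        return 'np.array([' + rows[0] + '])'
    return 'np.array([[' + '], ['.join(rows) + ']])'

print(to_array_text('1 2; 3 4'))   # np.array([[1,2], [3,4]])
print(to_array_text('1 2 3'))      # np.array([1,2,3])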
fr33jc/bang | bang/providers/openstack/__init__.py | https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/openstack/__init__.py#L189-L262 | def create_server(self, basename, disk_image_id, instance_type,
ssh_key_name, tags=None, availability_zone=None,
timeout_s=DEFAULT_TIMEOUT_S, floating_ip=True,
**kwargs):
"""
Creates a new server instance. This call blocks until the server is
created and available for normal use, or :attr:`timeout_s` has elapsed.
:param str basename: An identifier for the server. A random postfix
will be appended to this basename to work around OpenStack Nova
REST API limitations.
:param str disk_image_id: The identifier of the base disk image to use
as the rootfs.
:param str instance_type: The name of an OpenStack instance type, or
*flavor*. This is specific to the OpenStack provider installation.
:param str ssh_key_name: The name of the ssh key to inject into the
target server's ``authorized_keys`` file. The key must already
have been registered with the OpenStack Nova provider.
:param tags: Up to 5 key-value pairs of arbitrary strings to use as
*tags* for the server instance.
:type tags: :class:`Mapping`
:param str availability_zone: The name of the availability zone in
which to place the server.
:param float timeout_s: The number of seconds to poll for an active
server before failing. Defaults to ``0`` (i.e. Expect server to be
active immediately).
:param bool floating_ip: Allocate a floating IP (in
openstack 13.5 this doesn't happen automatically, so only
don't do it if you know what you're doing)
:rtype: :class:`dict`
"""
nova = self.nova
name = self.provider.gen_component_name(basename)
log.info('Launching server %s... this could take a while...' % name)
flavor = nova.flavors.find(name=instance_type)
server = nova.servers.create(
name,
disk_image_id,
flavor,
key_name=ssh_key_name,
meta=tags,
availability_zone=availability_zone,
**kwargs
)
def find_active():
s = nova.servers.get(server.id)
if s and s.status == 'ACTIVE':
return s
instance = poll_with_timeout(timeout_s, find_active, 5)
if not instance:
raise TimeoutError(
'Server %s failed to launch within allotted time.'
% server.id
)
if floating_ip:
log.info('Creating floating ip for %s', name)
floating_ip = nova.floating_ips.create()
server.add_floating_ip(floating_ip)
log.info('Created floating ip %s for %s', floating_ip.ip, name)
return server_to_dict(instance) | [
"def",
"create_server",
"(",
"self",
",",
"basename",
",",
"disk_image_id",
",",
"instance_type",
",",
"ssh_key_name",
",",
"tags",
"=",
"None",
",",
"availability_zone",
"=",
"None",
",",
"timeout_s",
"=",
"DEFAULT_TIMEOUT_S",
",",
"floating_ip",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"nova",
"=",
"self",
".",
"nova",
"name",
"=",
"self",
".",
"provider",
".",
"gen_component_name",
"(",
"basename",
")",
"log",
".",
"info",
"(",
"'Launching server %s... this could take a while...'",
"%",
"name",
")",
"flavor",
"=",
"nova",
".",
"flavors",
".",
"find",
"(",
"name",
"=",
"instance_type",
")",
"server",
"=",
"nova",
".",
"servers",
".",
"create",
"(",
"name",
",",
"disk_image_id",
",",
"flavor",
",",
"key_name",
"=",
"ssh_key_name",
",",
"meta",
"=",
"tags",
",",
"availability_zone",
"=",
"availability_zone",
",",
"*",
"*",
"kwargs",
")",
"def",
"find_active",
"(",
")",
":",
"s",
"=",
"nova",
".",
"servers",
".",
"get",
"(",
"server",
".",
"id",
")",
"if",
"s",
"and",
"s",
".",
"status",
"==",
"'ACTIVE'",
":",
"return",
"s",
"instance",
"=",
"poll_with_timeout",
"(",
"timeout_s",
",",
"find_active",
",",
"5",
")",
"if",
"not",
"instance",
":",
"raise",
"TimeoutError",
"(",
"'Server %s failed to launch within allotted time.'",
"%",
"server",
".",
"id",
")",
"if",
"floating_ip",
":",
"log",
".",
"info",
"(",
"'Creating floating ip for %s'",
",",
"name",
")",
"floating_ip",
"=",
"nova",
".",
"floating_ips",
".",
"create",
"(",
")",
"server",
".",
"add_floating_ip",
"(",
"floating_ip",
")",
"log",
".",
"info",
"(",
"'Created floating ip %s for %s'",
",",
"floating_ip",
".",
"ip",
",",
"name",
")",
"return",
"server_to_dict",
"(",
"instance",
")"
]
| Creates a new server instance. This call blocks until the server is
created and available for normal use, or :attr:`timeout_s` has elapsed.
:param str basename: An identifier for the server. A random postfix
will be appended to this basename to work around OpenStack Nova
REST API limitations.
:param str disk_image_id: The identifier of the base disk image to use
as the rootfs.
:param str instance_type: The name of an OpenStack instance type, or
*flavor*. This is specific to the OpenStack provider installation.
:param str ssh_key_name: The name of the ssh key to inject into the
target server's ``authorized_keys`` file. The key must already
have been registered with the OpenStack Nova provider.
:param tags: Up to 5 key-value pairs of arbitrary strings to use as
*tags* for the server instance.
:type tags: :class:`Mapping`
:param str availability_zone: The name of the availability zone in
which to place the server.
:param float timeout_s: The number of seconds to poll for an active
server before failing. Defaults to ``0`` (i.e. Expect server to be
active immediately).
:param bool floating_ip: Allocate a floating IP (in
openstack 13.5 this doesn't happen automatically, so only
don't do it if you know what you're doing)
:rtype: :class:`dict` | [
"Creates",
"a",
"new",
"server",
"instance",
".",
"This",
"call",
"blocks",
"until",
"the",
"server",
"is",
"created",
"and",
"available",
"for",
"normal",
"use",
"or",
":",
"attr",
":",
"timeout_s",
"has",
"elapsed",
"."
]
| python | train |
AshleySetter/optoanalysis | optoanalysis/optoanalysis/optoanalysis.py | https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2787-L2812 | def butterworth_filter(Signal, SampleFreq, lowerFreq, upperFreq):
"""
Filters data by constructing a 5th order butterworth
IIR filter and using scipy.signal.filtfilt, which does
phase correction after implementing the filter (as IIR
filters apply a phase change)
Parameters
----------
Signal : ndarray
Signal to be filtered
SampleFreq : float
Sample frequency of signal
lowerFreq : float
Lower frequency of bandpass to allow through filter
upperFreq : float
Upper frequency of bandpass to allow through filter
Returns
-------
FilteredData : ndarray
Array containing the filtered data
"""
b, a = make_butterworth_b_a(lowerFreq, upperFreq, SampleFreq)
FilteredSignal = scipy.signal.filtfilt(b, a, Signal)
return _np.real(FilteredSignal) | [
"def",
"butterworth_filter",
"(",
"Signal",
",",
"SampleFreq",
",",
"lowerFreq",
",",
"upperFreq",
")",
":",
"b",
",",
"a",
"=",
"make_butterworth_b_a",
"(",
"lowerFreq",
",",
"upperFreq",
",",
"SampleFreq",
")",
"FilteredSignal",
"=",
"scipy",
".",
"signal",
".",
"filtfilt",
"(",
"b",
",",
"a",
",",
"Signal",
")",
"return",
"_np",
".",
"real",
"(",
"FilteredSignal",
")"
]
| Filters data by constructing a 5th order butterworth
IIR filter and using scipy.signal.filtfilt, which does
phase correction after implementing the filter (as IIR
filters apply a phase change)
Parameters
----------
Signal : ndarray
Signal to be filtered
SampleFreq : float
Sample frequency of signal
lowerFreq : float
Lower frequency of bandpass to allow through filter
upperFreq : float
Upper frequency of bandpass to allow through filter
Returns
-------
FilteredData : ndarray
Array containing the filtered data | [
"Filters",
"data",
"using",
"by",
"constructing",
"a",
"5th",
"order",
"butterworth",
"IIR",
"filter",
"and",
"using",
"scipy",
".",
"signal",
".",
"filtfilt",
"which",
"does",
"phase",
"correction",
"after",
"implementing",
"the",
"filter",
"(",
"as",
"IIR",
"filter",
"apply",
"a",
"phase",
"change",
")"
]
| python | train |
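make_butterworth_b_a is project-specific; the sketch below builds an equivalent 5th-order bandpass directly with scipy.signal.butter (sample rate and test tones are illustrative):
import numpy as np
from scipy import signal

SampleFreq, lowerFreq, upperFreq = 1000.0, 50.0, 150.0
nyq = SampleFreq / 2.0
b, a = signal.butter(5, [lowerFreq / nyq, upperFreq / nyq], btype='band')

t = np.arange(0, 1, 1 / SampleFreq)
x = np.sin(2 * np.pi * 100 * t) + np.sin(2 * np.pi * 300 * t)
y = signal.filtfilt(b, a, x)   # zero-phase filtering: 100 Hz passes, 300 Hz is rejected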
openstack/proliantutils | proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/snmp/snmp_cpqdisk_sizes.py#L55-L100 | def _create_usm_user_obj(snmp_cred):
"""Creates the UsmUserData obj for the given credentials.
This method creates an instance of hlapi.UsmUserData.
The UsmUserData() allows the 'auth_protocol' and 'priv_protocol'
to be undefined by user if their pass phrases are provided.
:param snmp_cred: Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns UsmUserData object as per given credentials.
"""
auth_protocol = snmp_cred.get('auth_protocol')
priv_protocol = snmp_cred.get('priv_protocol')
auth_user = snmp_cred.get('auth_user')
auth_prot_pp = snmp_cred.get('auth_prot_pp')
auth_priv_pp = snmp_cred.get('auth_priv_pp')
if ((not auth_protocol) and priv_protocol):
priv_protocol = (
MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
privProtocol=priv_protocol)
elif ((not priv_protocol) and auth_protocol):
auth_protocol = (
MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
authProtocol=auth_protocol)
elif not all([priv_protocol and auth_protocol]):
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp)
else:
auth_protocol = (
MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])
priv_protocol = (
MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])
usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,
auth_priv_pp,
authProtocol=auth_protocol,
privProtocol=priv_protocol)
return usm_user_obj | [
"def",
"_create_usm_user_obj",
"(",
"snmp_cred",
")",
":",
"auth_protocol",
"=",
"snmp_cred",
".",
"get",
"(",
"'auth_protocol'",
")",
"priv_protocol",
"=",
"snmp_cred",
".",
"get",
"(",
"'priv_protocol'",
")",
"auth_user",
"=",
"snmp_cred",
".",
"get",
"(",
"'auth_user'",
")",
"auth_prot_pp",
"=",
"snmp_cred",
".",
"get",
"(",
"'auth_prot_pp'",
")",
"auth_priv_pp",
"=",
"snmp_cred",
".",
"get",
"(",
"'auth_priv_pp'",
")",
"if",
"(",
"(",
"not",
"auth_protocol",
")",
"and",
"priv_protocol",
")",
":",
"priv_protocol",
"=",
"(",
"MAPPED_SNMP_ATTRIBUTES",
"[",
"'privProtocol'",
"]",
"[",
"priv_protocol",
"]",
")",
"usm_user_obj",
"=",
"hlapi",
".",
"UsmUserData",
"(",
"auth_user",
",",
"auth_prot_pp",
",",
"auth_priv_pp",
",",
"privProtocol",
"=",
"priv_protocol",
")",
"elif",
"(",
"(",
"not",
"priv_protocol",
")",
"and",
"auth_protocol",
")",
":",
"auth_protocol",
"=",
"(",
"MAPPED_SNMP_ATTRIBUTES",
"[",
"'authProtocol'",
"]",
"[",
"auth_protocol",
"]",
")",
"usm_user_obj",
"=",
"hlapi",
".",
"UsmUserData",
"(",
"auth_user",
",",
"auth_prot_pp",
",",
"auth_priv_pp",
",",
"authProtocol",
"=",
"auth_protocol",
")",
"elif",
"not",
"all",
"(",
"[",
"priv_protocol",
"and",
"auth_protocol",
"]",
")",
":",
"usm_user_obj",
"=",
"hlapi",
".",
"UsmUserData",
"(",
"auth_user",
",",
"auth_prot_pp",
",",
"auth_priv_pp",
")",
"else",
":",
"auth_protocol",
"=",
"(",
"MAPPED_SNMP_ATTRIBUTES",
"[",
"'authProtocol'",
"]",
"[",
"auth_protocol",
"]",
")",
"priv_protocol",
"=",
"(",
"MAPPED_SNMP_ATTRIBUTES",
"[",
"'privProtocol'",
"]",
"[",
"priv_protocol",
"]",
")",
"usm_user_obj",
"=",
"hlapi",
".",
"UsmUserData",
"(",
"auth_user",
",",
"auth_prot_pp",
",",
"auth_priv_pp",
",",
"authProtocol",
"=",
"auth_protocol",
",",
"privProtocol",
"=",
"priv_protocol",
")",
"return",
"usm_user_obj"
]
| Creates the UsmUserData obj for the given credentials.
This method creates an instance of hlapi.UsmUserData.
The UsmUserData() allows the 'auth_protocol' and 'priv_protocol'
to be undefined by user if their pass phrases are provided.
:param snmp_cred: Dictionary of SNMP credentials.
auth_user: SNMP user
auth_protocol: Auth Protocol
auth_prot_pp: Pass phrase value for AuthProtocol.
priv_protocol:Privacy Protocol.
auth_priv_pp: Pass phrase value for Privacy Protocol.
:returns UsmUserData object as per given credentials. | [
"Creates",
"the",
"UsmUserData",
"obj",
"for",
"the",
"given",
"credentials",
"."
]
| python | train |
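A usage sketch for the helper above; the protocol names are assumed keys of MAPPED_SNMP_ATTRIBUTES and the credentials are illustrative:
snmp_cred = {
    'auth_user': 'snmp_user',
    'auth_protocol': 'SHA',            # assumed key of MAPPED_SNMP_ATTRIBUTES
    'priv_protocol': 'AES',            # assumed key of MAPPED_SNMP_ATTRIBUTES
    'auth_prot_pp': 'auth_pass_phrase',
    'auth_priv_pp': 'priv_pass_phrase',
}
usm_user = _create_usm_user_obj(snmp_cred)
# both protocols set, so the final branch maps them and passes both to hlapi.UsmUserData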
carsongee/flask-htpasswd | flask_htpasswd.py | https://github.com/carsongee/flask-htpasswd/blob/db6fe596dd167f33aeb3d77e975c861d0534cecf/flask_htpasswd.py#L194-L208 | def required(self, func):
"""
Decorator function with basic and token authentication handler
"""
@wraps(func)
def decorated(*args, **kwargs):
"""
Actual wrapper to run the auth checks.
"""
is_valid, user = self.authenticate()
if not is_valid:
return self.auth_failed()
kwargs['user'] = user
return func(*args, **kwargs)
return decorated | [
"def",
"required",
"(",
"self",
",",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Actual wrapper to run the auth checks.\n \"\"\"",
"is_valid",
",",
"user",
"=",
"self",
".",
"authenticate",
"(",
")",
"if",
"not",
"is_valid",
":",
"return",
"self",
".",
"auth_failed",
"(",
")",
"kwargs",
"[",
"'user'",
"]",
"=",
"user",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorated"
]
| Decorator function with basic and token authentication handler | [
"Decorator",
"function",
"with",
"basic",
"and",
"token",
"authentication",
"handler"
]
| python | train |
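Typical use of the decorator above, following the flask-htpasswd README (paths and secret are illustrative):
from flask import Flask
from flask_htpasswd import HtPasswdAuth

app = Flask(__name__)
app.config['FLASK_HTPASSWD_PATH'] = '/secure/.htpasswd'   # illustrative path
app.config['FLASK_SECRET'] = 'change-me'                  # used for token auth
htpasswd = HtPasswdAuth(app)

@app.route('/')
@htpasswd.required
def index(user):                  # the decorator injects the authenticated user
    return 'Hello {}'.format(user)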
pysal/spglm | spglm/links.py | https://github.com/pysal/spglm/blob/1339898adcb7e1638f1da83d57aa37392525f018/spglm/links.py#L623-L632 | def deriv2(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime
p = np.atleast_1d(p)
# Note: special function for norm.ppf does not support complex
return np.diag(approx_fprime(p, self.deriv, centered=True)) | [
"def",
"deriv2",
"(",
"self",
",",
"p",
")",
":",
"from",
"statsmodels",
".",
"tools",
".",
"numdiff",
"import",
"approx_fprime",
"p",
"=",
"np",
".",
"atleast_1d",
"(",
"p",
")",
"# Note: special function for norm.ppf does not support complex",
"return",
"np",
".",
"diag",
"(",
"approx_fprime",
"(",
"p",
",",
"self",
".",
"deriv",
",",
"centered",
"=",
"True",
")",
")"
]
| Second derivative of the link function g''(p)
implemented through numerical differentiation | [
"Second",
"derivative",
"of",
"the",
"link",
"function",
"g",
"(",
"p",
")"
]
| python | train |
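The same centred-difference trick in isolation, here for the logit link, whose derivatives are known in closed form (g'(p) = 1/(p(1-p)), g''(p) = (2p-1)/(p(1-p))**2):
import numpy as np
from statsmodels.tools.numdiff import approx_fprime

deriv = lambda p: 1.0 / (p * (1.0 - p))             # g'(p) for the logit link
p = np.array([0.25, 0.5, 0.75])
numeric = np.diag(approx_fprime(p, deriv, centered=True))
exact = (2.0 * p - 1.0) / (p * (1.0 - p)) ** 2
print(numeric, exact)                               # should agree to several decimals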
fprimex/zdesk | zdesk/zdesk_api.py | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2029-L2033 | def help_center_vote_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/votes#delete-vote"
api_path = "/api/v2/help_center/votes/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) | [
"def",
"help_center_vote_delete",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/help_center/votes/{id}.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"id",
"=",
"id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"DELETE\"",
",",
"*",
"*",
"kwargs",
")"
]
| https://developer.zendesk.com/rest_api/docs/help_center/votes#delete-vote | [
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"help_center",
"/",
"votes#delete",
"-",
"vote"
]
| python | train |
seung-lab/cloud-volume | cloudvolume/storage.py | https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/storage.py#L626-L641 | def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = os.path.join(layer_path, prefix)
for blob in self._bucket.list_blobs(prefix=path):
filename = blob.name.replace(layer_path, '')
if not flat and filename[-1] != '/':
yield filename
elif flat and '/' not in blob.name.replace(path, ''):
yield filename | [
"def",
"list_files",
"(",
"self",
",",
"prefix",
",",
"flat",
"=",
"False",
")",
":",
"layer_path",
"=",
"self",
".",
"get_path_to_file",
"(",
"\"\"",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"layer_path",
",",
"prefix",
")",
"for",
"blob",
"in",
"self",
".",
"_bucket",
".",
"list_blobs",
"(",
"prefix",
"=",
"path",
")",
":",
"filename",
"=",
"blob",
".",
"name",
".",
"replace",
"(",
"layer_path",
",",
"''",
")",
"if",
"not",
"flat",
"and",
"filename",
"[",
"-",
"1",
"]",
"!=",
"'/'",
":",
"yield",
"filename",
"elif",
"flat",
"and",
"'/'",
"not",
"in",
"blob",
".",
"name",
".",
"replace",
"(",
"path",
",",
"''",
")",
":",
"yield",
"filename"
]
| List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix. | [
"List",
"the",
"files",
"in",
"the",
"layer",
"with",
"the",
"given",
"prefix",
"."
]
| python | train |
caseyjlaw/rtpipe | rtpipe/parsecands.py | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/parsecands.py#L199-L208 | def cleanup(workdir, fileroot, scans=[]):
""" Cleanup up noise and cands files.
Finds all segments in each scan and merges them into single cand/noise file per scan.
"""
os.chdir(workdir)
# merge cands/noise files per scan
for scan in scans:
merge_segments(fileroot, scan, cleanup=True, sizelimit=2.) | [
"def",
"cleanup",
"(",
"workdir",
",",
"fileroot",
",",
"scans",
"=",
"[",
"]",
")",
":",
"os",
".",
"chdir",
"(",
"workdir",
")",
"# merge cands/noise files per scan",
"for",
"scan",
"in",
"scans",
":",
"merge_segments",
"(",
"fileroot",
",",
"scan",
",",
"cleanup",
"=",
"True",
",",
"sizelimit",
"=",
"2.",
")"
]
| Clean up noise and cands files.
Finds all segments in each scan and merges them into single cand/noise file per scan. | [
"Cleanup",
"up",
"noise",
"and",
"cands",
"files",
".",
"Finds",
"all",
"segments",
"in",
"each",
"scan",
"and",
"merges",
"them",
"into",
"single",
"cand",
"/",
"noise",
"file",
"per",
"scan",
"."
]
| python | train |
Parsl/parsl | parsl/monitoring/db_manager.py | https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/monitoring/db_manager.py#L374-L402 | def start_file_logger(filename, name='database_manager', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Parameters
----------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="database_manager"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
format_string: string
Format string to use.
Returns
-------
The configured logger.
"""
if format_string is None:
format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
global logger
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger | [
"def",
"start_file_logger",
"(",
"filename",
",",
"name",
"=",
"'database_manager'",
",",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format_string",
"=",
"None",
")",
":",
"if",
"format_string",
"is",
"None",
":",
"format_string",
"=",
"\"%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s\"",
"global",
"logger",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"filename",
")",
"handler",
".",
"setLevel",
"(",
"level",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"format_string",
",",
"datefmt",
"=",
"'%Y-%m-%d %H:%M:%S'",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
]
| Add a stream log handler.
Parameters
----------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="database_manager"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
format_string: string
Format string to use.
Returns
-------
The configured logger.
"Add",
"a",
"stream",
"log",
"handler",
".",
"Parameters",
"---------",
"filename",
":",
"string",
"Name",
"of",
"the",
"file",
"to",
"write",
"logs",
"to",
".",
"Required",
".",
"name",
":",
"string",
"Logger",
"name",
".",
"Default",
"=",
"parsl",
".",
"executors",
".",
"interchange",
"level",
":",
"logging",
".",
"LEVEL",
"Set",
"the",
"logging",
"level",
".",
"Default",
"=",
"logging",
".",
"DEBUG",
"-",
"format_string",
"(",
"string",
")",
":",
"Set",
"the",
"format",
"string",
"format_string",
":",
"string",
"Format",
"string",
"to",
"use",
".",
"Returns",
"-------",
"None",
"."
]
| python | valid |
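A short usage sketch of the factory above (the log path is illustrative):
import logging

log = start_file_logger('/tmp/db_manager.log', level=logging.INFO)
log.info('database manager started')   # appended to the file in the timestamped format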
jantman/awslimitchecker | awslimitchecker/services/route53.py | https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/route53.py#L127-L139 | def _get_hosted_zone_limit(self, limit_type, hosted_zone_id):
"""
Return a hosted zone limit [recordsets|vpc_associations]
:rtype: dict
"""
result = self.conn.get_hosted_zone_limit(
Type=limit_type,
HostedZoneId=hosted_zone_id
)
return result | [
"def",
"_get_hosted_zone_limit",
"(",
"self",
",",
"limit_type",
",",
"hosted_zone_id",
")",
":",
"result",
"=",
"self",
".",
"conn",
".",
"get_hosted_zone_limit",
"(",
"Type",
"=",
"limit_type",
",",
"HostedZoneId",
"=",
"hosted_zone_id",
")",
"return",
"result"
]
| Return a hosted zone limit [recordsets|vpc_associations]
:rtype: dict | [
"Return",
"a",
"hosted",
"zone",
"limit",
"[",
"recordsets|vpc_associations",
"]"
]
| python | train |
baguette-io/baguette-messaging | farine/execute/method.py | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/execute/method.py#L46-L54 | def start(self, *args, **kwargs):#pylint:disable=unused-argument
"""
Launch the method.
:param restart: Restart the method if it ends.
:type restart: bool
:rtype: None
"""
restart = kwargs.get('restart', True)
return self.run(restart) | [
"def",
"start",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#pylint:disable=unused-argument",
"restart",
"=",
"kwargs",
".",
"get",
"(",
"'restart'",
",",
"True",
")",
"return",
"self",
".",
"run",
"(",
"restart",
")"
]
| Launch the method.
:param restart: Restart the method if it ends.
:type restart: bool
:rtype: None | [
"Launch",
"the",
"method",
".",
":",
"param",
"restart",
":",
"Restart",
"the",
"method",
"if",
"it",
"ends",
".",
":",
"type",
"restart",
":",
"bool",
":",
"rtype",
":",
"None"
]
| python | train |
pandas-dev/pandas | pandas/core/arrays/categorical.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2635-L2670 | def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories, dtype=values.dtype)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories | [
"def",
"_factorize_from_iterable",
"(",
"values",
")",
":",
"from",
"pandas",
".",
"core",
".",
"indexes",
".",
"category",
"import",
"CategoricalIndex",
"if",
"not",
"is_list_like",
"(",
"values",
")",
":",
"raise",
"TypeError",
"(",
"\"Input must be list-like\"",
")",
"if",
"is_categorical",
"(",
"values",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"ABCCategoricalIndex",
",",
"ABCSeries",
")",
")",
":",
"values",
"=",
"values",
".",
"_values",
"categories",
"=",
"CategoricalIndex",
"(",
"values",
".",
"categories",
",",
"dtype",
"=",
"values",
".",
"dtype",
")",
"codes",
"=",
"values",
".",
"codes",
"else",
":",
"# The value of ordered is irrelevant since we don't use cat as such,",
"# but only the resulting categories, the order of which is independent",
"# from ordered. Set ordered to False as default. See GH #15457",
"cat",
"=",
"Categorical",
"(",
"values",
",",
"ordered",
"=",
"False",
")",
"categories",
"=",
"cat",
".",
"categories",
"codes",
"=",
"cat",
".",
"codes",
"return",
"codes",
",",
"categories"
]
| Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`. | [
"Factorize",
"an",
"input",
"values",
"into",
"categories",
"and",
"codes",
".",
"Preserves",
"categorical",
"dtype",
"in",
"categories",
"."
]
| python | train |
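A sketch of the internal helper above; the import path matches this record's file but, being private, may move between pandas versions:
from pandas.core.arrays.categorical import _factorize_from_iterable

codes, categories = _factorize_from_iterable(['b', 'a', 'b', 'c'])
print(codes)        # [1 0 1 2] -- positions into the sorted categories
print(categories)   # Index(['a', 'b', 'c'], dtype='object')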
persephone-tools/persephone | persephone/model.py | https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L258-L299 | def eval(self, restore_model_path: Optional[str]=None) -> None:
""" Evaluates the model on a test set."""
saver = tf.train.Saver()
with tf.Session(config=allow_growth_config) as sess:
if restore_model_path:
logger.info("restoring model from %s", restore_model_path)
saver.restore(sess, restore_model_path)
else:
assert self.saved_model_path, "{}".format(self.saved_model_path)
logger.info("restoring model from %s", self.saved_model_path)
saver.restore(sess, self.saved_model_path)
test_x, test_x_lens, test_y = self.corpus_reader.test_batch()
feed_dict = {self.batch_x: test_x,
self.batch_x_lens: test_x_lens,
self.batch_y: test_y}
test_ler, dense_decoded, dense_ref = sess.run(
[self.ler, self.dense_decoded, self.dense_ref],
feed_dict=feed_dict)
hyps, refs = self.corpus_reader.human_readable_hyp_ref(
dense_decoded, dense_ref)
# Log hypotheses
hyps_dir = os.path.join(self.exp_dir, "test")
if not os.path.isdir(hyps_dir):
os.mkdir(hyps_dir)
with open(os.path.join(hyps_dir, "hyps"), "w",
encoding=ENCODING) as hyps_f:
for hyp in hyps:
print(" ".join(hyp), file=hyps_f)
with open(os.path.join(hyps_dir, "refs"), "w",
encoding=ENCODING) as refs_f:
for ref in refs:
print(" ".join(ref), file=refs_f)
test_per = utils.batch_per(hyps, refs)
assert test_per == test_ler
with open(os.path.join(hyps_dir, "test_per"), "w",
encoding=ENCODING) as per_f:
print("LER: %f" % (test_ler), file=per_f) | [
"def",
"eval",
"(",
"self",
",",
"restore_model_path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
"config",
"=",
"allow_growth_config",
")",
"as",
"sess",
":",
"if",
"restore_model_path",
":",
"logger",
".",
"info",
"(",
"\"restoring model from %s\"",
",",
"restore_model_path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"restore_model_path",
")",
"else",
":",
"assert",
"self",
".",
"saved_model_path",
",",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"saved_model_path",
")",
"logger",
".",
"info",
"(",
"\"restoring model from %s\"",
",",
"self",
".",
"saved_model_path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"self",
".",
"saved_model_path",
")",
"test_x",
",",
"test_x_lens",
",",
"test_y",
"=",
"self",
".",
"corpus_reader",
".",
"test_batch",
"(",
")",
"feed_dict",
"=",
"{",
"self",
".",
"batch_x",
":",
"test_x",
",",
"self",
".",
"batch_x_lens",
":",
"test_x_lens",
",",
"self",
".",
"batch_y",
":",
"test_y",
"}",
"test_ler",
",",
"dense_decoded",
",",
"dense_ref",
"=",
"sess",
".",
"run",
"(",
"[",
"self",
".",
"ler",
",",
"self",
".",
"dense_decoded",
",",
"self",
".",
"dense_ref",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"hyps",
",",
"refs",
"=",
"self",
".",
"corpus_reader",
".",
"human_readable_hyp_ref",
"(",
"dense_decoded",
",",
"dense_ref",
")",
"# Log hypotheses",
"hyps_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"exp_dir",
",",
"\"test\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"hyps_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"hyps_dir",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hyps_dir",
",",
"\"hyps\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"hyps_f",
":",
"for",
"hyp",
"in",
"hyps",
":",
"print",
"(",
"\" \"",
".",
"join",
"(",
"hyp",
")",
",",
"file",
"=",
"hyps_f",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hyps_dir",
",",
"\"refs\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"refs_f",
":",
"for",
"ref",
"in",
"refs",
":",
"print",
"(",
"\" \"",
".",
"join",
"(",
"ref",
")",
",",
"file",
"=",
"refs_f",
")",
"test_per",
"=",
"utils",
".",
"batch_per",
"(",
"hyps",
",",
"refs",
")",
"assert",
"test_per",
"==",
"test_ler",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hyps_dir",
",",
"\"test_per\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"per_f",
":",
"print",
"(",
"\"LER: %f\"",
"%",
"(",
"test_ler",
")",
",",
"file",
"=",
"per_f",
")"
]
| Evaluates the model on a test set. | [
"Evaluates",
"the",
"model",
"on",
"a",
"test",
"set",
"."
]
| python | train |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15664-L15685 | def wnvald(insize, n, window):
"""
Form a valid double precision window from the contents
of a window array.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnvald_c.html
:param insize: Size of window.
:type insize: int
:param n: Original number of endpoints.
:type n: int
:param window: Input window.
:type window: spiceypy.utils.support_types.SpiceCell
:return: The union of the intervals in the input cell.
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(window, stypes.SpiceCell)
assert window.dtype == 1
insize = ctypes.c_int(insize)
n = ctypes.c_int(n)
libspice.wnvald_c(insize, n, ctypes.byref(window))
return window | [
"def",
"wnvald",
"(",
"insize",
",",
"n",
",",
"window",
")",
":",
"assert",
"isinstance",
"(",
"window",
",",
"stypes",
".",
"SpiceCell",
")",
"assert",
"window",
".",
"dtype",
"==",
"1",
"insize",
"=",
"ctypes",
".",
"c_int",
"(",
"insize",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"libspice",
".",
"wnvald_c",
"(",
"insize",
",",
"n",
",",
"ctypes",
".",
"byref",
"(",
"window",
")",
")",
"return",
"window"
]
| Form a valid double precision window from the contents
of a window array.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnvald_c.html
:param insize: Size of window.
:type insize: int
:param n: Original number of endpoints.
:type n: int
:param window: Input window.
:type window: spiceypy.utils.support_types.SpiceCell
:return: The union of the intervals in the input cell.
:rtype: spiceypy.utils.support_types.SpiceCell | [
"Form",
"a",
"valid",
"double",
"precision",
"window",
"from",
"the",
"contents",
"of",
"a",
"window",
"array",
"."
]
| python | train |
apache/spark | python/pyspark/cloudpickle.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L926-L936 | def dump(obj, file, protocol=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(file, protocol=protocol).dump(obj) | [
"def",
"dump",
"(",
"obj",
",",
"file",
",",
"protocol",
"=",
"None",
")",
":",
"CloudPickler",
"(",
"file",
",",
"protocol",
"=",
"protocol",
")",
".",
"dump",
"(",
"obj",
")"
]
| Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python. | [
"Serialize",
"obj",
"as",
"bytes",
"streamed",
"into",
"file"
]
| python | train |
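Illustrative note: a small usage sketch for `dump`; the lambda and file name are hypothetical. Objects serialized with cloudpickle can be loaded back with plain `pickle`.

    import pickle
    from pyspark import cloudpickle

    square = lambda x: x * x  # lambdas are not serializable with plain pickle

    with open("func.pkl", "wb") as f:
        cloudpickle.dump(square, f)  # protocol defaults to cloudpickle.DEFAULT_PROTOCOL

    with open("func.pkl", "rb") as f:
        restored = pickle.load(f)
    assert restored(3) == 9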
PmagPy/PmagPy | programs/plot_cdf.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plot_cdf.py#L12-L63 | def main():
"""
NAME
plot_cdf.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_cdf.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
-sav saves plot and quits
"""
fmt,plot='svg',0
title=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-sav' in sys.argv:plot=1
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
X=numpy.loadtxt(file)
# else:
# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)
else:
print('-f option required')
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-t' in sys.argv:
ind=sys.argv.index('-t')
title=sys.argv[ind+1]
CDF={'X':1}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_cdf(CDF['X'],X,title,'r','')
files={'X':'CDF_.'+fmt}
if plot==0:
pmagplotlib.draw_figs(CDF)
ans= input('S[a]ve plot, <Return> to quit ')
if ans=='a':
pmagplotlib.save_plots(CDF,files)
else:
pmagplotlib.save_plots(CDF,files) | [
"def",
"main",
"(",
")",
":",
"fmt",
",",
"plot",
"=",
"'svg'",
",",
"0",
"title",
"=",
"\"\"",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-sav'",
"in",
"sys",
".",
"argv",
":",
"plot",
"=",
"1",
"if",
"'-f'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-f'",
")",
"file",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"X",
"=",
"numpy",
".",
"loadtxt",
"(",
"file",
")",
"# else:",
"# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)",
"else",
":",
"print",
"(",
"'-f option required'",
")",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-fmt'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-fmt'",
")",
"fmt",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-t'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-t'",
")",
"title",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"CDF",
"=",
"{",
"'X'",
":",
"1",
"}",
"pmagplotlib",
".",
"plot_init",
"(",
"CDF",
"[",
"'X'",
"]",
",",
"5",
",",
"5",
")",
"pmagplotlib",
".",
"plot_cdf",
"(",
"CDF",
"[",
"'X'",
"]",
",",
"X",
",",
"title",
",",
"'r'",
",",
"''",
")",
"files",
"=",
"{",
"'X'",
":",
"'CDF_.'",
"+",
"fmt",
"}",
"if",
"plot",
"==",
"0",
":",
"pmagplotlib",
".",
"draw_figs",
"(",
"CDF",
")",
"ans",
"=",
"input",
"(",
"'S[a]ve plot, <Return> to quit '",
")",
"if",
"ans",
"==",
"'a'",
":",
"pmagplotlib",
".",
"save_plots",
"(",
"CDF",
",",
"files",
")",
"else",
":",
"pmagplotlib",
".",
"save_plots",
"(",
"CDF",
",",
"files",
")"
]
| NAME
plot_cdf.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_cdf.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
-sav saves plot and quits | [
"NAME",
"plot_cdf",
".",
"py"
]
| python | train |
iotile/coretools | iotileemulate/iotile/emulate/reference/controller_features/sensor_log.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/sensor_log.py#L164-L185 | def inspect_virtual(self, stream_id):
"""Inspect the last value written into a virtual stream.
Args:
stream_id (int): The virtual stream we want to inspect.
Returns:
(int, int): An error code and the stream value.
"""
stream = DataStream.FromEncoded(stream_id)
if stream.buffered:
return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]
try:
reading = self.storage.inspect_last(stream, only_allocated=True)
return [Error.NO_ERROR, reading.value]
except StreamEmptyError:
return [Error.NO_ERROR, 0]
except UnresolvedIdentifierError:
return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0] | [
"def",
"inspect_virtual",
"(",
"self",
",",
"stream_id",
")",
":",
"stream",
"=",
"DataStream",
".",
"FromEncoded",
"(",
"stream_id",
")",
"if",
"stream",
".",
"buffered",
":",
"return",
"[",
"pack_error",
"(",
"ControllerSubsystem",
".",
"SENSOR_LOG",
",",
"SensorLogError",
".",
"VIRTUAL_STREAM_NOT_FOUND",
")",
",",
"0",
"]",
"try",
":",
"reading",
"=",
"self",
".",
"storage",
".",
"inspect_last",
"(",
"stream",
",",
"only_allocated",
"=",
"True",
")",
"return",
"[",
"Error",
".",
"NO_ERROR",
",",
"reading",
".",
"value",
"]",
"except",
"StreamEmptyError",
":",
"return",
"[",
"Error",
".",
"NO_ERROR",
",",
"0",
"]",
"except",
"UnresolvedIdentifierError",
":",
"return",
"[",
"pack_error",
"(",
"ControllerSubsystem",
".",
"SENSOR_LOG",
",",
"SensorLogError",
".",
"VIRTUAL_STREAM_NOT_FOUND",
")",
",",
"0",
"]"
]
| Inspect the last value written into a virtual stream.
Args:
stream_id (int): The virtual stream we want to inspect.
Returns:
(int, int): An error code and the stream value. | [
"Inspect",
"the",
"last",
"value",
"written",
"into",
"a",
"virtual",
"stream",
"."
]
| python | train |
NarrativeScience/lsi | src/lsi/utils/table.py | https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/table.py#L146-L153 | def get_table_width(table):
"""
Gets the width of the table that would be printed.
:rtype: ``int``
"""
columns = transpose_table(prepare_rows(table))
widths = [max(len(cell) for cell in column) for column in columns]
return len('+' + '|'.join('-' * (w + 2) for w in widths) + '+') | [
"def",
"get_table_width",
"(",
"table",
")",
":",
"columns",
"=",
"transpose_table",
"(",
"prepare_rows",
"(",
"table",
")",
")",
"widths",
"=",
"[",
"max",
"(",
"len",
"(",
"cell",
")",
"for",
"cell",
"in",
"column",
")",
"for",
"column",
"in",
"columns",
"]",
"return",
"len",
"(",
"'+'",
"+",
"'|'",
".",
"join",
"(",
"'-'",
"*",
"(",
"w",
"+",
"2",
")",
"for",
"w",
"in",
"widths",
")",
"+",
"'+'",
")"
]
| Gets the width of the table that would be printed.
:rtype: ``int`` | [
"Gets",
"the",
"width",
"of",
"the",
"table",
"that",
"would",
"be",
"printed",
".",
":",
"rtype",
":",
"int"
]
| python | test |
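Illustrative note: an assumed call to `get_table_width`; the row data is made up, and `prepare_rows`/`transpose_table` are taken to stringify and pad cells as in the surrounding module.

    table = [["host", "ip"],
             ["web-1", "10.0.0.5"],
             ["db-1", "10.0.0.9"]]
    width = get_table_width(table)  # length of the '+----|----+' border line
    print(width)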
GPflow/GPflow | gpflow/likelihoods.py | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/likelihoods.py#L639-L665 | def predict_mean_and_var(self, Fmu, Fvar, epsilon=None):
r"""
Given a Normal distribution for the latent function,
return the mean of Y
if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive mean
\int\int y p(y|f)q(f) df dy
and the predictive variance
\int\int y^2 p(y|f)q(f) df dy - [ \int\int y p(y|f)q(f) df dy ]^2
Here, we implement a default Monte Carlo routine.
"""
integrand2 = lambda *X: self.conditional_variance(*X) + tf.square(self.conditional_mean(*X))
E_y, E_y2 = self._mc_quadrature([self.conditional_mean, integrand2],
Fmu, Fvar, epsilon=epsilon)
V_y = E_y2 - tf.square(E_y)
return E_y, V_y | [
"def",
"predict_mean_and_var",
"(",
"self",
",",
"Fmu",
",",
"Fvar",
",",
"epsilon",
"=",
"None",
")",
":",
"integrand2",
"=",
"lambda",
"*",
"X",
":",
"self",
".",
"conditional_variance",
"(",
"*",
"X",
")",
"+",
"tf",
".",
"square",
"(",
"self",
".",
"conditional_mean",
"(",
"*",
"X",
")",
")",
"E_y",
",",
"E_y2",
"=",
"self",
".",
"_mc_quadrature",
"(",
"[",
"self",
".",
"conditional_mean",
",",
"integrand2",
"]",
",",
"Fmu",
",",
"Fvar",
",",
"epsilon",
"=",
"epsilon",
")",
"V_y",
"=",
"E_y2",
"-",
"tf",
".",
"square",
"(",
"E_y",
")",
"return",
"E_y",
",",
"V_y"
]
| r"""
Given a Normal distribution for the latent function,
return the mean of Y
if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive mean
\int\int y p(y|f)q(f) df dy
and the predictive variance
\int\int y^2 p(y|f)q(f) df dy - [ \int\int y p(y|f)q(f) df dy ]^2
Here, we implement a default Monte Carlo routine. | [
"r",
"Given",
"a",
"Normal",
"distribution",
"for",
"the",
"latent",
"function",
"return",
"the",
"mean",
"of",
"Y"
]
| python | train |
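Illustrative note: a plain-numpy sketch of the estimator used above, V[y] = E[y^2] - E[y]^2, for a hypothetical Gaussian likelihood; this is not GPflow code, only the same Monte Carlo identity.

    import numpy as np

    rng = np.random.default_rng(0)
    Fmu, Fvar, noise = 1.0, 0.25, 0.1  # assumed q(f) moments and likelihood noise
    f = Fmu + np.sqrt(Fvar) * rng.standard_normal(100_000)  # samples from q(f)

    E_y = np.mean(f)              # conditional mean of y|f is f itself
    E_y2 = np.mean(noise + f**2)  # E[y^2|f] = Var(y|f) + E[y|f]^2
    V_y = E_y2 - E_y**2           # approaches Fvar + noise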
saltstack/salt | salt/returners/cassandra_cql_return.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/cassandra_cql_return.py#L410-L437 | def get_jids():
'''
Return a list of all job ids
'''
query = '''SELECT jid, load FROM {keyspace}.jids;'''.format(keyspace=_get_keyspace())
ret = {}
# cassandra_cql.cql_query may raise a CommandExecutionError
try:
data = __salt__['cassandra_cql.cql_query'](query)
if data:
for row in data:
jid = row.get('jid')
load = row.get('load')
if jid and load:
ret[jid] = salt.utils.jid.format_jid_instance(
jid,
salt.utils.json.loads(load))
except CommandExecutionError:
log.critical('Could not get a list of all job ids.')
raise
except Exception as e:
log.critical(
'Unexpected error while getting list of all job ids: %s', e)
raise
return ret | [
"def",
"get_jids",
"(",
")",
":",
"query",
"=",
"'''SELECT jid, load FROM {keyspace}.jids;'''",
".",
"format",
"(",
"keyspace",
"=",
"_get_keyspace",
"(",
")",
")",
"ret",
"=",
"{",
"}",
"# cassandra_cql.cql_query may raise a CommandExecutionError",
"try",
":",
"data",
"=",
"__salt__",
"[",
"'cassandra_cql.cql_query'",
"]",
"(",
"query",
")",
"if",
"data",
":",
"for",
"row",
"in",
"data",
":",
"jid",
"=",
"row",
".",
"get",
"(",
"'jid'",
")",
"load",
"=",
"row",
".",
"get",
"(",
"'load'",
")",
"if",
"jid",
"and",
"load",
":",
"ret",
"[",
"jid",
"]",
"=",
"salt",
".",
"utils",
".",
"jid",
".",
"format_jid_instance",
"(",
"jid",
",",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"load",
")",
")",
"except",
"CommandExecutionError",
":",
"log",
".",
"critical",
"(",
"'Could not get a list of all job ids.'",
")",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"critical",
"(",
"'Unexpected error while getting list of all job ids: %s'",
",",
"e",
")",
"raise",
"return",
"ret"
]
| Return a list of all job ids | [
"Return",
"a",
"list",
"of",
"all",
"job",
"ids"
]
| python | train |
pyGrowler/Growler | growler/core/router.py | https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/router.py#L137-L153 | def sinatra_path_to_regex(cls, path):
"""
Converts a sinatra-style path to a regex with named
parameters.
"""
# Return the path if already a (compiled) regex
if type(path) is cls.regex_type:
return path
# Build a regular expression string which is split on the '/' character
regex = [
"(?P<{}>\w+)".format(segment[1:])
if cls.sinatra_param_regex.match(segment)
else segment
for segment in path.split('/')
]
return re.compile('/'.join(regex)) | [
"def",
"sinatra_path_to_regex",
"(",
"cls",
",",
"path",
")",
":",
"# Return the path if already a (compiled) regex",
"if",
"type",
"(",
"path",
")",
"is",
"cls",
".",
"regex_type",
":",
"return",
"path",
"# Build a regular expression string which is split on the '/' character",
"regex",
"=",
"[",
"\"(?P<{}>\\w+)\"",
".",
"format",
"(",
"segment",
"[",
"1",
":",
"]",
")",
"if",
"cls",
".",
"sinatra_param_regex",
".",
"match",
"(",
"segment",
")",
"else",
"segment",
"for",
"segment",
"in",
"path",
".",
"split",
"(",
"'/'",
")",
"]",
"return",
"re",
".",
"compile",
"(",
"'/'",
".",
"join",
"(",
"regex",
")",
")"
]
| Converts a sinatra-style path to a regex with named
parameters. | [
"Converts",
"a",
"sinatra",
"-",
"style",
"path",
"to",
"a",
"regex",
"with",
"named",
"parameters",
"."
]
| python | train |
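Illustrative note: an assumed usage of the converter above, where path segments starting with ':' (the ones matched by `sinatra_param_regex`) become named groups; the owning class name and route string are hypothetical.

    regex = Router.sinatra_path_to_regex("/users/:id/posts/:post_id")
    match = regex.match("/users/42/posts/7")
    print(match.groupdict())  # {'id': '42', 'post_id': '7'}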
h2oai/typesentry | typesentry/signature.py | https://github.com/h2oai/typesentry/blob/0ca8ed0e62d15ffe430545e7648c9a9b2547b49c/typesentry/signature.py#L172-L186 | def _make_retval_checker(self):
"""Create a function that checks the return value of the function."""
rvchk = self.retval.checker
if rvchk:
def _checker(value):
if not rvchk.check(value):
raise self._type_error(
"Incorrect return type in %s: expected %s got %s" %
(self.name_bt, rvchk.name(),
checker_for_type(type(value)).name())
)
else:
def _checker(value):
pass
return _checker | [
"def",
"_make_retval_checker",
"(",
"self",
")",
":",
"rvchk",
"=",
"self",
".",
"retval",
".",
"checker",
"if",
"rvchk",
":",
"def",
"_checker",
"(",
"value",
")",
":",
"if",
"not",
"rvchk",
".",
"check",
"(",
"value",
")",
":",
"raise",
"self",
".",
"_type_error",
"(",
"\"Incorrect return type in %s: expected %s got %s\"",
"%",
"(",
"self",
".",
"name_bt",
",",
"rvchk",
".",
"name",
"(",
")",
",",
"checker_for_type",
"(",
"type",
"(",
"value",
")",
")",
".",
"name",
"(",
")",
")",
")",
"else",
":",
"def",
"_checker",
"(",
"value",
")",
":",
"pass",
"return",
"_checker"
]
| Create a function that checks the return value of the function. | [
"Create",
"a",
"function",
"that",
"checks",
"the",
"return",
"value",
"of",
"the",
"function",
"."
]
| python | train |
kxgames/vecrec | vecrec/shapes.py | https://github.com/kxgames/vecrec/blob/18b0841419de21a644b4511e2229af853ed09529/vecrec/shapes.py#L433-L438 | def get_components(self, other):
""" Break this vector into one vector that is perpendicular to the
given vector and another that is parallel to it. """
tangent = self.get_projection(other)
normal = self - tangent
return normal, tangent | [
"def",
"get_components",
"(",
"self",
",",
"other",
")",
":",
"tangent",
"=",
"self",
".",
"get_projection",
"(",
"other",
")",
"normal",
"=",
"self",
"-",
"tangent",
"return",
"normal",
",",
"tangent"
]
| Break this vector into one vector that is perpendicular to the
given vector and another that is parallel to it. | [
"Break",
"this",
"vector",
"into",
"one",
"vector",
"that",
"is",
"perpendicular",
"to",
"the",
"given",
"vector",
"and",
"another",
"that",
"is",
"parallel",
"to",
"it",
"."
]
| python | train |
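Illustrative note: a sanity check for `get_components`, under the assumption that vecrec's `Vector` supports construction from x/y, addition, and `dot`; the numbers are made up.

    from vecrec import Vector

    v = Vector(3, 4)
    axis = Vector(1, 0)
    normal, tangent = v.get_components(axis)
    # the parts recombine to the original, and the normal part is orthogonal to the axis
    assert (normal + tangent).x == v.x and (normal + tangent).y == v.y
    assert abs(normal.dot(axis)) < 1e-9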
gunthercox/ChatterBot | examples/tkinter_gui.py | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/examples/tkinter_gui.py#L51-L66 | def get_response(self):
"""
Get a response from the chatbot and display it.
"""
user_input = self.usr_input.get()
self.usr_input.delete(0, tk.END)
response = self.chatbot.get_response(user_input)
self.conversation['state'] = 'normal'
self.conversation.insert(
tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n"
)
self.conversation['state'] = 'disabled'
time.sleep(0.5) | [
"def",
"get_response",
"(",
"self",
")",
":",
"user_input",
"=",
"self",
".",
"usr_input",
".",
"get",
"(",
")",
"self",
".",
"usr_input",
".",
"delete",
"(",
"0",
",",
"tk",
".",
"END",
")",
"response",
"=",
"self",
".",
"chatbot",
".",
"get_response",
"(",
"user_input",
")",
"self",
".",
"conversation",
"[",
"'state'",
"]",
"=",
"'normal'",
"self",
".",
"conversation",
".",
"insert",
"(",
"tk",
".",
"END",
",",
"\"Human: \"",
"+",
"user_input",
"+",
"\"\\n\"",
"+",
"\"ChatBot: \"",
"+",
"str",
"(",
"response",
".",
"text",
")",
"+",
"\"\\n\"",
")",
"self",
".",
"conversation",
"[",
"'state'",
"]",
"=",
"'disabled'",
"time",
".",
"sleep",
"(",
"0.5",
")"
]
| Get a response from the chatbot and display it. | [
"Get",
"a",
"response",
"from",
"the",
"chatbot",
"and",
"display",
"it",
"."
]
| python | train |
DLR-RM/RAFCON | source/rafcon/core/states/container_state.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L2002-L2025 | def _check_transition_origin(self, transition):
"""Checks the validity of a transition origin
Checks whether the transition origin is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True when the transition is valid, False otherwise. message gives
more information, especially if the transition is not valid
"""
from_state_id = transition.from_state
from_outcome_id = transition.from_outcome
if from_state_id == self.state_id:
return False, "from_state_id of transition must not be the container state itself." \
" In the case of a start transition both the from state and the from_outcome are None."
if from_state_id != self.state_id and from_state_id not in self.states:
return False, "from_state not existing"
from_outcome = self.get_outcome(from_state_id, from_outcome_id)
if from_outcome is None:
return False, "from_outcome not existing in from_state"
return True, "valid" | [
"def",
"_check_transition_origin",
"(",
"self",
",",
"transition",
")",
":",
"from_state_id",
"=",
"transition",
".",
"from_state",
"from_outcome_id",
"=",
"transition",
".",
"from_outcome",
"if",
"from_state_id",
"==",
"self",
".",
"state_id",
":",
"return",
"False",
",",
"\"from_state_id of transition must not be the container state itself.\"",
"\" In the case of a start transition both the from state and the from_outcome are None.\"",
"if",
"from_state_id",
"!=",
"self",
".",
"state_id",
"and",
"from_state_id",
"not",
"in",
"self",
".",
"states",
":",
"return",
"False",
",",
"\"from_state not existing\"",
"from_outcome",
"=",
"self",
".",
"get_outcome",
"(",
"from_state_id",
",",
"from_outcome_id",
")",
"if",
"from_outcome",
"is",
"None",
":",
"return",
"False",
",",
"\"from_outcome not existing in from_state\"",
"return",
"True",
",",
"\"valid\""
]
| Checks the validity of a transition origin
Checks whether the transition origin is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True when the transition is valid, False otherwise. message gives
more information, especially if the transition is not valid
"Checks",
"the",
"validity",
"of",
"a",
"transition",
"origin"
]
| python | train |
blockstack/zone-file-py | blockstack_zones/record_processors.py | https://github.com/blockstack/zone-file-py/blob/c1078c8c3c28f0881bc9a3af53d4972c4a6862d0/blockstack_zones/record_processors.py#L15-L23 | def process_ttl(data, template):
"""
Replace {$ttl} in template with a serialized $TTL record
"""
record = ""
if data is not None:
record += "$TTL %s" % data
return template.replace("{$ttl}", record) | [
"def",
"process_ttl",
"(",
"data",
",",
"template",
")",
":",
"record",
"=",
"\"\"",
"if",
"data",
"is",
"not",
"None",
":",
"record",
"+=",
"\"$TTL %s\"",
"%",
"data",
"return",
"template",
".",
"replace",
"(",
"\"{$ttl}\"",
",",
"record",
")"
]
| Replace {$ttl} in template with a serialized $TTL record | [
"Replace",
"{",
"$ttl",
"}",
"in",
"template",
"with",
"a",
"serialized",
"$TTL",
"record"
]
| python | test |
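Illustrative note: two sample calls to `process_ttl`, showing the placeholder substitution with and without data; the template string is hypothetical.

    template = "{$ttl}\n{$origin}\n"
    print(process_ttl("3600", template))  # -> "$TTL 3600\n{$origin}\n"
    print(process_ttl(None, template))    # -> "\n{$origin}\n" (placeholder replaced by "")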
PGower/PyCanvas | pycanvas/apis/courses.py | https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/courses.py#L798-L868 | def update_course_settings(self, course_id, allow_student_discussion_editing=None, allow_student_discussion_topics=None, allow_student_forum_attachments=None, allow_student_organized_groups=None, hide_distribution_graphs=None, hide_final_grades=None, home_page_announcement_limit=None, lock_all_announcements=None, restrict_student_future_view=None, restrict_student_past_view=None, show_announcements_on_home_page=None):
"""
Update course settings.
Can update the following course settings:
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - allow_student_discussion_topics
"""Let students create discussion topics"""
if allow_student_discussion_topics is not None:
data["allow_student_discussion_topics"] = allow_student_discussion_topics
# OPTIONAL - allow_student_forum_attachments
"""Let students attach files to discussions"""
if allow_student_forum_attachments is not None:
data["allow_student_forum_attachments"] = allow_student_forum_attachments
# OPTIONAL - allow_student_discussion_editing
"""Let students edit or delete their own discussion posts"""
if allow_student_discussion_editing is not None:
data["allow_student_discussion_editing"] = allow_student_discussion_editing
# OPTIONAL - allow_student_organized_groups
"""Let students organize their own groups"""
if allow_student_organized_groups is not None:
data["allow_student_organized_groups"] = allow_student_organized_groups
# OPTIONAL - hide_final_grades
"""Hide totals in student grades summary"""
if hide_final_grades is not None:
data["hide_final_grades"] = hide_final_grades
# OPTIONAL - hide_distribution_graphs
"""Hide grade distribution graphs from students"""
if hide_distribution_graphs is not None:
data["hide_distribution_graphs"] = hide_distribution_graphs
# OPTIONAL - lock_all_announcements
"""Disable comments on announcements"""
if lock_all_announcements is not None:
data["lock_all_announcements"] = lock_all_announcements
# OPTIONAL - restrict_student_past_view
"""Restrict students from viewing courses after end date"""
if restrict_student_past_view is not None:
data["restrict_student_past_view"] = restrict_student_past_view
# OPTIONAL - restrict_student_future_view
"""Restrict students from viewing courses before start date"""
if restrict_student_future_view is not None:
data["restrict_student_future_view"] = restrict_student_future_view
# OPTIONAL - show_announcements_on_home_page
"""Show the most recent announcements on the Course home page (if a Wiki, defaults to five announcements, configurable via home_page_announcement_limit)"""
if show_announcements_on_home_page is not None:
data["show_announcements_on_home_page"] = show_announcements_on_home_page
# OPTIONAL - home_page_announcement_limit
"""Limit the number of announcements on the home page if enabled via show_announcements_on_home_page"""
if home_page_announcement_limit is not None:
data["home_page_announcement_limit"] = home_page_announcement_limit
self.logger.debug("PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/settings".format(**path), data=data, params=params, no_data=True) | [
"def",
"update_course_settings",
"(",
"self",
",",
"course_id",
",",
"allow_student_discussion_editing",
"=",
"None",
",",
"allow_student_discussion_topics",
"=",
"None",
",",
"allow_student_forum_attachments",
"=",
"None",
",",
"allow_student_organized_groups",
"=",
"None",
",",
"hide_distribution_graphs",
"=",
"None",
",",
"hide_final_grades",
"=",
"None",
",",
"home_page_announcement_limit",
"=",
"None",
",",
"lock_all_announcements",
"=",
"None",
",",
"restrict_student_future_view",
"=",
"None",
",",
"restrict_student_past_view",
"=",
"None",
",",
"show_announcements_on_home_page",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - course_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"course_id\"",
"]",
"=",
"course_id",
"# OPTIONAL - allow_student_discussion_topics\r",
"\"\"\"Let students create discussion topics\"\"\"",
"if",
"allow_student_discussion_topics",
"is",
"not",
"None",
":",
"data",
"[",
"\"allow_student_discussion_topics\"",
"]",
"=",
"allow_student_discussion_topics",
"# OPTIONAL - allow_student_forum_attachments\r",
"\"\"\"Let students attach files to discussions\"\"\"",
"if",
"allow_student_forum_attachments",
"is",
"not",
"None",
":",
"data",
"[",
"\"allow_student_forum_attachments\"",
"]",
"=",
"allow_student_forum_attachments",
"# OPTIONAL - allow_student_discussion_editing\r",
"\"\"\"Let students edit or delete their own discussion posts\"\"\"",
"if",
"allow_student_discussion_editing",
"is",
"not",
"None",
":",
"data",
"[",
"\"allow_student_discussion_editing\"",
"]",
"=",
"allow_student_discussion_editing",
"# OPTIONAL - allow_student_organized_groups\r",
"\"\"\"Let students organize their own groups\"\"\"",
"if",
"allow_student_organized_groups",
"is",
"not",
"None",
":",
"data",
"[",
"\"allow_student_organized_groups\"",
"]",
"=",
"allow_student_organized_groups",
"# OPTIONAL - hide_final_grades\r",
"\"\"\"Hide totals in student grades summary\"\"\"",
"if",
"hide_final_grades",
"is",
"not",
"None",
":",
"data",
"[",
"\"hide_final_grades\"",
"]",
"=",
"hide_final_grades",
"# OPTIONAL - hide_distribution_graphs\r",
"\"\"\"Hide grade distribution graphs from students\"\"\"",
"if",
"hide_distribution_graphs",
"is",
"not",
"None",
":",
"data",
"[",
"\"hide_distribution_graphs\"",
"]",
"=",
"hide_distribution_graphs",
"# OPTIONAL - lock_all_announcements\r",
"\"\"\"Disable comments on announcements\"\"\"",
"if",
"lock_all_announcements",
"is",
"not",
"None",
":",
"data",
"[",
"\"lock_all_announcements\"",
"]",
"=",
"lock_all_announcements",
"# OPTIONAL - restrict_student_past_view\r",
"\"\"\"Restrict students from viewing courses after end date\"\"\"",
"if",
"restrict_student_past_view",
"is",
"not",
"None",
":",
"data",
"[",
"\"restrict_student_past_view\"",
"]",
"=",
"restrict_student_past_view",
"# OPTIONAL - restrict_student_future_view\r",
"\"\"\"Restrict students from viewing courses before start date\"\"\"",
"if",
"restrict_student_future_view",
"is",
"not",
"None",
":",
"data",
"[",
"\"restrict_student_future_view\"",
"]",
"=",
"restrict_student_future_view",
"# OPTIONAL - show_announcements_on_home_page\r",
"\"\"\"Show the most recent announcements on the Course home page (if a Wiki, defaults to five announcements, configurable via home_page_announcement_limit)\"\"\"",
"if",
"show_announcements_on_home_page",
"is",
"not",
"None",
":",
"data",
"[",
"\"show_announcements_on_home_page\"",
"]",
"=",
"show_announcements_on_home_page",
"# OPTIONAL - home_page_announcement_limit\r",
"\"\"\"Limit the number of announcements on the home page if enabled via show_announcements_on_home_page\"\"\"",
"if",
"home_page_announcement_limit",
"is",
"not",
"None",
":",
"data",
"[",
"\"home_page_announcement_limit\"",
"]",
"=",
"home_page_announcement_limit",
"self",
".",
"logger",
".",
"debug",
"(",
"\"PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"PUT\"",
",",
"\"/api/v1/courses/{course_id}/settings\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"no_data",
"=",
"True",
")"
]
| Update course settings.
Can update the following course settings: | [
"Update",
"course",
"settings",
".",
"Can",
"update",
"the",
"following",
"course",
"settings",
":"
]
| python | train |
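Illustrative note: a hedged usage sketch for the settings call above; the client class name and its construction are assumptions inferred from the module path, not verified API.

    from pycanvas.apis.courses import CoursesAPI  # assumed entry point

    api = CoursesAPI("https://canvas.example.edu", "API_TOKEN")  # hypothetical
    api.update_course_settings(
        1234,
        hide_final_grades=True,
        lock_all_announcements=True,
        home_page_announcement_limit=5,
    )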
noirbizarre/bumpr | tasks.py | https://github.com/noirbizarre/bumpr/blob/221dbb3deaf1cae7922f6a477f3d29d6bf0c0035/tasks.py#L38-L41 | def header(text):
'''Display an header'''
print(' '.join((blue('>>'), cyan(text))))
sys.stdout.flush() | [
"def",
"header",
"(",
"text",
")",
":",
"print",
"(",
"' '",
".",
"join",
"(",
"(",
"blue",
"(",
"'>>'",
")",
",",
"cyan",
"(",
"text",
")",
")",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
]
| Display an header | [
"Display",
"an",
"header"
]
| python | train |
zyga/guacamole | guacamole/recipes/cmd.py | https://github.com/zyga/guacamole/blob/105c10a798144e3b89659b500d7c2b84b0c76546/guacamole/recipes/cmd.py#L387-L393 | def main(self, argv=None, exit=True):
"""
Shortcut for running a command.
See :meth:`guacamole.recipes.Recipe.main()` for details.
"""
return CommandRecipe(self).main(argv, exit) | [
"def",
"main",
"(",
"self",
",",
"argv",
"=",
"None",
",",
"exit",
"=",
"True",
")",
":",
"return",
"CommandRecipe",
"(",
"self",
")",
".",
"main",
"(",
"argv",
",",
"exit",
")"
]
| Shortcut for running a command.
See :meth:`guacamole.recipes.Recipe.main()` for details. | [
"Shortcut",
"for",
"running",
"a",
"command",
"."
]
| python | train |
GoogleCloudPlatform/datastore-ndb-python | ndb/eventloop.py | https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L138-L148 | def queue_call(self, delay, callback, *args, **kwds):
"""Schedule a function call at a specific time in the future."""
if delay is None:
self.current.append((callback, args, kwds))
return
if delay < 1e9:
when = delay + self.clock.now()
else:
# Times over a billion seconds are assumed to be absolute.
when = delay
self.insort_event_right((when, callback, args, kwds)) | [
"def",
"queue_call",
"(",
"self",
",",
"delay",
",",
"callback",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"delay",
"is",
"None",
":",
"self",
".",
"current",
".",
"append",
"(",
"(",
"callback",
",",
"args",
",",
"kwds",
")",
")",
"return",
"if",
"delay",
"<",
"1e9",
":",
"when",
"=",
"delay",
"+",
"self",
".",
"clock",
".",
"now",
"(",
")",
"else",
":",
"# Times over a billion seconds are assumed to be absolute.",
"when",
"=",
"delay",
"self",
".",
"insort_event_right",
"(",
"(",
"when",
",",
"callback",
",",
"args",
",",
"kwds",
")",
")"
]
| Schedule a function call at a specific time in the future. | [
"Schedule",
"a",
"function",
"call",
"at",
"a",
"specific",
"time",
"in",
"the",
"future",
"."
]
| python | train |
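Illustrative note: assumed calls to `queue_call` showing the three delay modes described above; the `EventLoop()` construction is hypothetical.

    loop = EventLoop()
    loop.queue_call(None, print, "runs with the current batch")
    loop.queue_call(0.5, print, "runs ~0.5 s from now (relative delay)")
    loop.queue_call(2_000_000_000, print, "runs at an absolute epoch time (>= 1e9)")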
quintusdias/glymur | glymur/config.py | https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/config.py#L24-L45 | def glymurrc_fname():
"""Return the path to the configuration file.
Search order:
1) current working directory
2) environ var XDG_CONFIG_HOME
3) $HOME/.config/glymur/glymurrc
"""
# Current directory.
fname = os.path.join(os.getcwd(), 'glymurrc')
if os.path.exists(fname):
return fname
confdir = get_configdir()
if confdir is not None:
fname = os.path.join(confdir, 'glymurrc')
if os.path.exists(fname):
return fname
# didn't find a configuration file.
return None | [
"def",
"glymurrc_fname",
"(",
")",
":",
"# Current directory.",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'glymurrc'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
":",
"return",
"fname",
"confdir",
"=",
"get_configdir",
"(",
")",
"if",
"confdir",
"is",
"not",
"None",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"confdir",
",",
"'glymurrc'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
":",
"return",
"fname",
"# didn't find a configuration file.",
"return",
"None"
]
| Return the path to the configuration file.
Search order:
1) current working directory
2) environ var XDG_CONFIG_HOME
3) $HOME/.config/glymur/glymurrc | [
"Return",
"the",
"path",
"to",
"the",
"configuration",
"file",
"."
]
| python | train |
jbasko/configmanager | configmanager/sections.py | https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/sections.py#L603-L616 | def create_section(self, *args, **kwargs):
"""
Internal factory method used to create an instance of configuration section.
Should only be used when extending or modifying configmanager's functionality.
Under normal circumstances you should let configmanager create sections
and items when parsing configuration schemas.
Do not override this method. To customise section creation,
write your own section factory and pass it to Config through
section_factory= keyword argument.
"""
kwargs.setdefault('section', self)
return self.settings.section_factory(*args, **kwargs) | [
"def",
"create_section",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'section'",
",",
"self",
")",
"return",
"self",
".",
"settings",
".",
"section_factory",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Internal factory method used to create an instance of configuration section.
Should only be used when extending or modifying configmanager's functionality.
Under normal circumstances you should let configmanager create sections
and items when parsing configuration schemas.
Do not override this method. To customise section creation,
write your own section factory and pass it to Config through
section_factory= keyword argument. | [
"Internal",
"factory",
"method",
"used",
"to",
"create",
"an",
"instance",
"of",
"configuration",
"section",
"."
]
| python | train |
maas/python-libmaas | maas/client/utils/__init__.py | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L342-L350 | def print(self, *args, **kwargs):
"""Print inside of the spinner context.
This must be used when inside of a spinner context to ensure that
the line printed doesn't overwrite an already existing spinner line.
"""
clear_len = max(len(self._prev_msg), len(self.msg)) + 4
self.spinner.stream.write("%s\r" % (' ' * clear_len))
print(*args, file=self.spinner.stream, flush=True, **kwargs) | [
"def",
"print",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"clear_len",
"=",
"max",
"(",
"len",
"(",
"self",
".",
"_prev_msg",
")",
",",
"len",
"(",
"self",
".",
"msg",
")",
")",
"+",
"4",
"self",
".",
"spinner",
".",
"stream",
".",
"write",
"(",
"\"%s\\r\"",
"%",
"(",
"' '",
"*",
"clear_len",
")",
")",
"print",
"(",
"*",
"args",
",",
"file",
"=",
"self",
".",
"spinner",
".",
"stream",
",",
"flush",
"=",
"True",
",",
"*",
"*",
"kwargs",
")"
]
| Print inside of the spinner context.
This must be used when inside of a spinner context to ensure that
the line printed doesn't overwrite an already existing spinner line. | [
"Print",
"inside",
"of",
"the",
"spinner",
"context",
"."
]
| python | train |
etcher-be/epab | epab/utils/_ensure_exe.py | https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_ensure_exe.py#L14-L26 | def ensure_exe(exe_name: str, *paths: str): # pragma: no cover
"""
Makes sure that an executable can be found on the system path.
Will exit the program if the executable cannot be found
Args:
exe_name: name of the executable
paths: optional path(s) to be searched; if not specified, search the whole system
"""
if not elib_run.find_executable(exe_name, *paths):
LOGGER.error('could not find "%s.exe" on this system', exe_name)
sys.exit(-1) | [
"def",
"ensure_exe",
"(",
"exe_name",
":",
"str",
",",
"*",
"paths",
":",
"str",
")",
":",
"# pragma: no cover",
"if",
"not",
"elib_run",
".",
"find_executable",
"(",
"exe_name",
",",
"*",
"paths",
")",
":",
"LOGGER",
".",
"error",
"(",
"'could not find \"%s.exe\" on this system'",
",",
"exe_name",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")"
]
| Makes sure that an executable can be found on the system path.
Will exit the program if the executable cannot be found
Args:
exe_name: name of the executable
paths: optional path(s) to be searched; if not specified, search the whole system | [
"Makes",
"sure",
"that",
"an",
"executable",
"can",
"be",
"found",
"on",
"the",
"system",
"path",
".",
"Will",
"exit",
"the",
"program",
"if",
"the",
"executable",
"cannot",
"be",
"found"
]
| python | train |
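Illustrative note: sample calls to `ensure_exe`; the tool names and path are made up.

    ensure_exe("git")                       # search the whole system
    ensure_exe("cl", r"C:\tools\msvc\bin")  # restrict the search to the given path(s)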
blockstack/virtualchain | virtualchain/lib/blockchain/bitcoin_blockchain/keys.py | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/keys.py#L137-L158 | def btc_script_to_hex(script):
""" Parse the string representation of a script and return the hex version.
Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG"
"""
hex_script = ''
parts = script.split(' ')
for part in parts:
if part[0:3] == 'OP_':
value = OPCODE_VALUES.get(part)
if not value:
raise ValueError("Unrecognized opcode {}".format(part))
hex_script += "%0.2x" % value
elif hashing.is_hex(part):
hex_script += '%0.2x' % hashing.count_bytes(part) + part
else:
raise Exception('Invalid script - only opcodes and hex characters allowed.')
return hex_script | [
"def",
"btc_script_to_hex",
"(",
"script",
")",
":",
"hex_script",
"=",
"''",
"parts",
"=",
"script",
".",
"split",
"(",
"' '",
")",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
"[",
"0",
":",
"3",
"]",
"==",
"'OP_'",
":",
"value",
"=",
"OPCODE_VALUES",
".",
"get",
"(",
"part",
")",
"if",
"not",
"value",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized opcode {}\"",
".",
"format",
"(",
"part",
")",
")",
"hex_script",
"+=",
"\"%0.2x\"",
"%",
"value",
"elif",
"hashing",
".",
"is_hex",
"(",
"part",
")",
":",
"hex_script",
"+=",
"'%0.2x'",
"%",
"hashing",
".",
"count_bytes",
"(",
"part",
")",
"+",
"part",
"else",
":",
"raise",
"Exception",
"(",
"'Invalid script - only opcodes and hex characters allowed.'",
")",
"return",
"hex_script"
]
| Parse the string representation of a script and return the hex version.
Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG" | [
"Parse",
"the",
"string",
"representation",
"of",
"a",
"script",
"and",
"return",
"the",
"hex",
"version",
".",
"Example",
":",
"OP_DUP",
"OP_HASH160",
"c629",
"...",
"a6db",
"OP_EQUALVERIFY",
"OP_CHECKSIG"
]
| python | train |
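Illustrative note: a sample call with a dummy 20-byte hash; the opcode bytes come from the module's `OPCODE_VALUES` table (0x76, 0xa9, 0x88, 0xac for the P2PKH opcodes used here), and the 0x14 length prefix is added via `count_bytes`.

    h160 = "00" * 20  # dummy hash160
    script = "OP_DUP OP_HASH160 {} OP_EQUALVERIFY OP_CHECKSIG".format(h160)
    print(btc_script_to_hex(script))  # -> '76a914' + '00' * 20 + '88ac'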
datastore/datastore | datastore/core/query.py | https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/query.py#L543-L549 | def apply_offset(self):
'''Naively apply query offset.'''
self._ensure_modification_is_safe()
if self.query.offset != 0:
self._iterable = \
offset_gen(self.query.offset, self._iterable, self._skipped_inc) | [
"def",
"apply_offset",
"(",
"self",
")",
":",
"self",
".",
"_ensure_modification_is_safe",
"(",
")",
"if",
"self",
".",
"query",
".",
"offset",
"!=",
"0",
":",
"self",
".",
"_iterable",
"=",
"offset_gen",
"(",
"self",
".",
"query",
".",
"offset",
",",
"self",
".",
"_iterable",
",",
"self",
".",
"_skipped_inc",
")"
]
| Naively apply query offset. | [
"Naively",
"apply",
"query",
"offset",
"."
]
| python | train |
Yubico/python-u2flib-server | release.py | https://github.com/Yubico/python-u2flib-server/blob/4b9d353a3ba82be40c6cf9bab39d34ea7e07cb5a/release.py#L45-L58 | def get_version(module_name_or_file=None):
"""Return the current version as defined by the given module/file."""
if module_name_or_file is None:
parts = base_module.split('.')
module_name_or_file = parts[0] if len(parts) > 1 else \
find_packages(exclude=['test', 'test.*'])[0]
if os.path.isdir(module_name_or_file):
module_name_or_file = os.path.join(module_name_or_file, '__init__.py')
with open(module_name_or_file, 'r') as f:
match = VERSION_PATTERN.search(f.read())
return match.group(1) | [
"def",
"get_version",
"(",
"module_name_or_file",
"=",
"None",
")",
":",
"if",
"module_name_or_file",
"is",
"None",
":",
"parts",
"=",
"base_module",
".",
"split",
"(",
"'.'",
")",
"module_name_or_file",
"=",
"parts",
"[",
"0",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
"else",
"find_packages",
"(",
"exclude",
"=",
"[",
"'test'",
",",
"'test.*'",
"]",
")",
"[",
"0",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"module_name_or_file",
")",
":",
"module_name_or_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"module_name_or_file",
",",
"'__init__.py'",
")",
"with",
"open",
"(",
"module_name_or_file",
",",
"'r'",
")",
"as",
"f",
":",
"match",
"=",
"VERSION_PATTERN",
".",
"search",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"match",
".",
"group",
"(",
"1",
")"
]
| Return the current version as defined by the given module/file. | [
"Return",
"the",
"current",
"version",
"as",
"defined",
"by",
"the",
"given",
"module",
"/",
"file",
"."
]
| python | train |
sdispater/orator | orator/query/builder.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L763-L785 | def having_raw(self, sql, bindings=None, boolean="and"):
"""
Add a raw having clause to the query
:param sql: The raw query
:type sql: str
:param bindings: The query bindings
:type bindings: list
:param boolean: Boolean joiner type
:type boolean: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
type = "raw"
self.havings.append({"type": type, "sql": sql, "boolean": boolean})
self.add_binding(bindings, "having")
return self | [
"def",
"having_raw",
"(",
"self",
",",
"sql",
",",
"bindings",
"=",
"None",
",",
"boolean",
"=",
"\"and\"",
")",
":",
"type",
"=",
"\"raw\"",
"self",
".",
"havings",
".",
"append",
"(",
"{",
"\"type\"",
":",
"type",
",",
"\"sql\"",
":",
"sql",
",",
"\"boolean\"",
":",
"boolean",
"}",
")",
"self",
".",
"add_binding",
"(",
"bindings",
",",
"\"having\"",
")",
"return",
"self"
]
| Add a raw having clause to the query
:param sql: The raw query
:type sql: str
:param bindings: The query bindings
:type bindings: list
:param boolean: Boolean joiner type
:type boolean: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder | [
"Add",
"a",
"raw",
"having",
"clause",
"to",
"the",
"query"
]
| python | train |
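Illustrative note: an assumed builder-chain usage for `having_raw`; the table and column names are invented.

    totals = (
        db.table("orders")  # hypothetical orator DatabaseManager entry point
        .select("customer_id")
        .group_by("customer_id")
        .having_raw("SUM(amount) > ?", [100])
        .get()
    )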
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L253-L338 | def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (let's use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf | [
"def",
"_get_response",
"(",
"self",
",",
"timeout",
"=",
"1.0",
",",
"eor",
"=",
"(",
"'\\n'",
",",
"'\\n- '",
")",
")",
":",
"# If no timeout is given or it is invalid and we are using '\\n'",
"# as the eor, use the wrapper to read a line with an infinite",
"# timeout. Otherwise, the reading and timeout must be",
"# implemented manually.",
"if",
"(",
"timeout",
"is",
"None",
"or",
"timeout",
"<",
"0",
")",
"and",
"eor",
"==",
"'\\n'",
":",
"return",
"self",
".",
"_sio",
".",
"readline",
"(",
")",
"else",
":",
"# A timer will be made that takes timeout to finish. Then,",
"# it is a matter of checking whether it is alive or not to",
"# know whether the timeout was exceeded or not. They need to",
"# be checked to make sure they are not too big, which is",
"# threading.TIMEOUT_MAX on Python 3.x and not specified on",
"# Python 2.x (lets use a week). Then, the timer is started.",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"maxtimeout",
"=",
"threading",
".",
"TIMEOUT_MAX",
"else",
":",
"maxtimeout",
"=",
"7",
"*",
"24",
"*",
"3600",
"timeout",
"=",
"min",
"(",
"timeout",
",",
"maxtimeout",
")",
"tm",
"=",
"threading",
".",
"Timer",
"(",
"timeout",
",",
"lambda",
":",
"None",
")",
"tm",
".",
"start",
"(",
")",
"# eor needs to be converted to bytes. If it is just an str,",
"# it needs to be wrapped in a tuple.",
"if",
"isinstance",
"(",
"eor",
",",
"str",
")",
":",
"eor",
"=",
"tuple",
"(",
"[",
"eor",
"]",
")",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"eor",
"=",
"[",
"s",
".",
"encode",
"(",
"encoding",
"=",
"'ASCII'",
")",
"for",
"s",
"in",
"eor",
"]",
"# Read from the serial port into buf until the EOR is found",
"# or the timer has stopped. A small pause is done each time",
"# so that this thread doesn't hog the CPU.",
"buf",
"=",
"b''",
"while",
"not",
"any",
"(",
"[",
"(",
"x",
"in",
"buf",
")",
"for",
"x",
"in",
"eor",
"]",
")",
"and",
"tm",
".",
"is_alive",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"0.001",
")",
"buf",
"+=",
"self",
".",
"_ser",
".",
"read",
"(",
"self",
".",
"_ser",
".",
"inWaiting",
"(",
")",
")",
"# Just in case the timer has not stopped (EOR was found),",
"# stop it.",
"tm",
".",
"cancel",
"(",
")",
"# Remove anything after the EOR if there is one. First, a",
"# set of matches (index, eor_str) for each string in eor",
"# needs to be constructed. Sorting the matches by their",
"# index puts all the ones that were not found (index of -1)",
"# at the front. Then a list of bools that are True for each",
"# index that isn't -1 is made, converted to a bytes (True",
"# goes to b'\\x01' and False goes to b'\\x00'), and then the",
"# index of the first True value found. If it is not -1, then",
"# there was a successful match and all the characters are",
"# dropped after that eor_str.",
"matches",
"=",
"[",
"(",
"buf",
".",
"find",
"(",
"x",
")",
",",
"x",
")",
"for",
"x",
"in",
"eor",
"]",
"matches",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"index",
"=",
"bytes",
"(",
"[",
"x",
"[",
"0",
"]",
"!=",
"-",
"1",
"for",
"x",
"in",
"matches",
"]",
")",
".",
"find",
"(",
"b'\\x01'",
")",
"if",
"index",
"!=",
"-",
"1",
":",
"buf",
"=",
"buf",
"[",
":",
"(",
"matches",
"[",
"index",
"]",
"[",
"0",
"]",
"+",
"len",
"(",
"matches",
"[",
"index",
"]",
"[",
"1",
"]",
")",
")",
"]",
"# Convert to an str before returning.",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"return",
"buf",
".",
"decode",
"(",
"errors",
"=",
"'replace'",
")",
"else",
":",
"return",
"buf"
]
| Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved. | [
"Reads",
"a",
"response",
"from",
"the",
"drive",
"."
]
| python | train |
barrust/pyspellchecker | spellchecker/spellchecker.py | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L157-L172 | def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
) | [
"def",
"known",
"(",
"self",
",",
"words",
")",
":",
"tmp",
"=",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"words",
"]",
"return",
"set",
"(",
"w",
"for",
"w",
"in",
"tmp",
"if",
"w",
"in",
"self",
".",
"_word_frequency",
".",
"dictionary",
"or",
"not",
"self",
".",
"_check_if_should_check",
"(",
"w",
")",
")"
]
| The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus | [
"The",
"subset",
"of",
"words",
"that",
"appear",
"in",
"the",
"dictionary",
"of",
"words"
]
| python | train |
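Illustrative note: a usage sketch for `known`, assuming the package's standard `SpellChecker` entry point; the word list is illustrative.

    from spellchecker import SpellChecker

    spell = SpellChecker()
    print(spell.known(["apple", "xzqv"]))  # likely {'apple'}; non-words drop out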
radjkarl/appBase | appbase/Session.py | https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L456-L459 | def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path) | [
"def",
"_saveState",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"addSession",
"(",
")",
"# next session\r",
"self",
".",
"_save",
"(",
"str",
"(",
"self",
".",
"n_sessions",
")",
",",
"path",
")"
]
| save current state and add a new state | [
"save",
"current",
"state",
"and",
"add",
"a",
"new",
"state"
]
| python | train |
joshspeagle/dynesty | dynesty/bounding.py | https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/bounding.py#L530-L634 | def update(self, points, pointvol=0., vol_dec=0.5, vol_check=2.,
rstate=None, bootstrap=0, pool=None, mc_integrate=False):
"""
Update the set of ellipsoids to bound the collection of points.
Parameters
----------
points : `~numpy.ndarray` with shape (npoints, ndim)
The set of points to bound.
pointvol : float, optional
The minimum volume associated with each point. Default is `0.`.
vol_dec : float, optional
The required fractional reduction in volume after splitting
an ellipsoid in order to to accept the split.
Default is `0.5`.
vol_check : float, optional
The factor used when checking if the volume of the original
bounding ellipsoid is large enough to warrant `> 2` splits
via `ell.vol > vol_check * nlive * pointvol`.
Default is `2.0`.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
bootstrap : int, optional
The number of bootstrapped realizations of the ellipsoids. The
maximum distance to the set of points "left out" during each
iteration is used to enlarge the resulting volumes.
Default is `0`.
pool : user-provided pool, optional
Use this pool of workers to execute operations in parallel.
mc_integrate : bool, optional
Whether to use Monte Carlo methods to compute the effective
volume and fractional overlap of the final union of ellipsoids
with the unit cube. Default is `False`.
"""
if rstate is None:
rstate = np.random
if not HAVE_KMEANS:
raise ValueError("scipy.cluster.vq.kmeans2 is required "
"to compute ellipsoid decompositions.")
npoints, ndim = points.shape
# Calculate the bounding ellipsoid for the points, possibly
# enlarged to a minimum volume.
firstell = bounding_ellipsoid(points, pointvol=pointvol)
# Recursively split the bounding ellipsoid using `vol_check`
# until the volume of each split no longer decreases by a
# factor of `vol_dec`.
ells = _bounding_ellipsoids(points, firstell, pointvol=pointvol,
vol_dec=vol_dec, vol_check=vol_check)
# Update the set of ellipsoids.
self.nells = len(ells)
self.ells = ells
self.ctrs = np.array([ell.ctr for ell in self.ells])
self.covs = np.array([ell.cov for ell in self.ells])
self.ams = np.array([ell.am for ell in self.ells])
self.vols = np.array([ell.vol for ell in self.ells])
self.vol_tot = sum(self.vols)
# Compute expansion factor.
expands = np.array([ell.expand for ell in self.ells])
vols_orig = self.vols / expands
vol_tot_orig = sum(vols_orig)
self.expand_tot = self.vol_tot / vol_tot_orig
# Use bootstrapping to determine the volume expansion factor.
if bootstrap > 0:
# If provided, compute bootstraps in parallel using a pool.
if pool is None:
M = map
else:
M = pool.map
ps = [points for it in range(bootstrap)]
pvs = [pointvol for it in range(bootstrap)]
vds = [vol_dec for it in range(bootstrap)]
vcs = [vol_check for it in range(bootstrap)]
args = zip(ps, pvs, vds, vcs)
expands = list(M(_ellipsoids_bootstrap_expand, args))
# Conservatively set the expansion factor to be the maximum
# factor derived from our set of bootstraps.
expand = max(expands)
# If our ellipsoids are overly constrained, expand them.
if expand > 1.:
vs = self.vols * expand**ndim
self.scale_to_vols(vs)
# Estimate the volume and fractional overlap with the unit cube
# using Monte Carlo integration.
if mc_integrate:
self.vol, self.funit = self.monte_carlo_vol(return_overlap=True) | [
"def",
"update",
"(",
"self",
",",
"points",
",",
"pointvol",
"=",
"0.",
",",
"vol_dec",
"=",
"0.5",
",",
"vol_check",
"=",
"2.",
",",
"rstate",
"=",
"None",
",",
"bootstrap",
"=",
"0",
",",
"pool",
"=",
"None",
",",
"mc_integrate",
"=",
"False",
")",
":",
"if",
"rstate",
"is",
"None",
":",
"rstate",
"=",
"np",
".",
"random",
"if",
"not",
"HAVE_KMEANS",
":",
"raise",
"ValueError",
"(",
"\"scipy.cluster.vq.kmeans2 is required \"",
"\"to compute ellipsoid decompositions.\"",
")",
"npoints",
",",
"ndim",
"=",
"points",
".",
"shape",
"# Calculate the bounding ellipsoid for the points, possibly",
"# enlarged to a minimum volume.",
"firstell",
"=",
"bounding_ellipsoid",
"(",
"points",
",",
"pointvol",
"=",
"pointvol",
")",
"# Recursively split the bounding ellipsoid using `vol_check`",
"# until the volume of each split no longer decreases by a",
"# factor of `vol_dec`.",
"ells",
"=",
"_bounding_ellipsoids",
"(",
"points",
",",
"firstell",
",",
"pointvol",
"=",
"pointvol",
",",
"vol_dec",
"=",
"vol_dec",
",",
"vol_check",
"=",
"vol_check",
")",
"# Update the set of ellipsoids.",
"self",
".",
"nells",
"=",
"len",
"(",
"ells",
")",
"self",
".",
"ells",
"=",
"ells",
"self",
".",
"ctrs",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"ctr",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"covs",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"cov",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"ams",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"am",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"vols",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"vol",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"vol_tot",
"=",
"sum",
"(",
"self",
".",
"vols",
")",
"# Compute expansion factor.",
"expands",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"expand",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"vols_orig",
"=",
"self",
".",
"vols",
"/",
"expands",
"vol_tot_orig",
"=",
"sum",
"(",
"vols_orig",
")",
"self",
".",
"expand_tot",
"=",
"self",
".",
"vol_tot",
"/",
"vol_tot_orig",
"# Use bootstrapping to determine the volume expansion factor.",
"if",
"bootstrap",
">",
"0",
":",
"# If provided, compute bootstraps in parallel using a pool.",
"if",
"pool",
"is",
"None",
":",
"M",
"=",
"map",
"else",
":",
"M",
"=",
"pool",
".",
"map",
"ps",
"=",
"[",
"points",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"pvs",
"=",
"[",
"pointvol",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"vds",
"=",
"[",
"vol_dec",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"vcs",
"=",
"[",
"vol_check",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"args",
"=",
"zip",
"(",
"ps",
",",
"pvs",
",",
"vds",
",",
"vcs",
")",
"expands",
"=",
"list",
"(",
"M",
"(",
"_ellipsoids_bootstrap_expand",
",",
"args",
")",
")",
"# Conservatively set the expansion factor to be the maximum",
"# factor derived from our set of bootstraps.",
"expand",
"=",
"max",
"(",
"expands",
")",
"# If our ellipsoids are overly constrained, expand them.",
"if",
"expand",
">",
"1.",
":",
"vs",
"=",
"self",
".",
"vols",
"*",
"expand",
"**",
"ndim",
"self",
".",
"scale_to_vols",
"(",
"vs",
")",
"# Estimate the volume and fractional overlap with the unit cube",
"# using Monte Carlo integration.",
"if",
"mc_integrate",
":",
"self",
".",
"vol",
",",
"self",
".",
"funit",
"=",
"self",
".",
"monte_carlo_vol",
"(",
"return_overlap",
"=",
"True",
")"
]
| Update the set of ellipsoids to bound the collection of points.
Parameters
----------
points : `~numpy.ndarray` with shape (npoints, ndim)
The set of points to bound.
pointvol : float, optional
The minimum volume associated with each point. Default is `0.`.
vol_dec : float, optional
The required fractional reduction in volume after splitting
an ellipsoid in order to accept the split.
Default is `0.5`.
vol_check : float, optional
The factor used when checking if the volume of the original
bounding ellipsoid is large enough to warrant `> 2` splits
via `ell.vol > vol_check * nlive * pointvol`.
Default is `2.0`.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
bootstrap : int, optional
The number of bootstrapped realizations of the ellipsoids. The
maximum distance to the set of points "left out" during each
iteration is used to enlarge the resulting volumes.
Default is `0`.
pool : user-provided pool, optional
Use this pool of workers to execute operations in parallel.
mc_integrate : bool, optional
Whether to use Monte Carlo methods to compute the effective
volume and fractional overlap of the final union of ellipsoids
with the unit cube. Default is `False`. | [
"Update",
"the",
"set",
"of",
"ellipsoids",
"to",
"bound",
"the",
"collection",
"of",
"points",
"."
]
| python | train |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L230-L236 | def makeServoMinLimitPacket(ID, angle):
"""
Sets the minimum servo angle (in the CW direction)
"""
angle = int(angle/300.0*1023)
pkt = makeWritePacket(ID, xl320.XL320_CW_ANGLE_LIMIT, le(angle))
return pkt | [
"def",
"makeServoMinLimitPacket",
"(",
"ID",
",",
"angle",
")",
":",
"angle",
"=",
"int",
"(",
"angle",
"/",
"300.0",
"*",
"1023",
")",
"pkt",
"=",
"makeWritePacket",
"(",
"ID",
",",
"xl320",
".",
"XL320_CW_ANGLE_LIMIT",
",",
"le",
"(",
"angle",
")",
")",
"return",
"pkt"
]
| Sets the minimum servo angle (in the CW direction) | [
"Sets",
"the",
"minimum",
"servo",
"angle",
"(",
"in",
"the",
"CW",
"direction",
")"
]
| python | train |
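A minimal usage sketch of the packet helper above; the servo ID and angle are illustrative, and the import path is assumed from the file layout shown:

from pyxl320 import Packet  # assumed importable per the path above

# 150 degrees maps to int(150 / 300.0 * 1023) = 511 register steps.
pkt = Packet.makeServoMinLimitPacket(1, 150)  # servo ID 1 (illustrative)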
gofed/gofedlib | gofedlib/providers/upstreamprovider.py | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/providers/upstreamprovider.py#L102-L115 | def parseGithubImportPath(self, path):
"""
Definition: github.com/<project>/<repo>
"""
parts = path.split("/")
if len(parts) < 3:
raise ValueError("Import path %s not in github.com/<project>/<repo> form" % path)
repo = {}
repo["prefix"] = "/".join(parts[:3])
repo["signature"] = {"provider": "github", "username": parts[1], "project": parts[2]}
return repo | [
"def",
"parseGithubImportPath",
"(",
"self",
",",
"path",
")",
":",
"parts",
"=",
"path",
".",
"split",
"(",
"\"/\"",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"3",
":",
"raise",
"ValueError",
"(",
"\"Import path %s not in github.com/<project>/<repo> form\"",
"%",
"path",
")",
"repo",
"=",
"{",
"}",
"repo",
"[",
"\"prefix\"",
"]",
"=",
"\"/\"",
".",
"join",
"(",
"parts",
"[",
":",
"3",
"]",
")",
"repo",
"[",
"\"signature\"",
"]",
"=",
"{",
"\"provider\"",
":",
"\"github\"",
",",
"\"username\"",
":",
"parts",
"[",
"1",
"]",
",",
"\"project\"",
":",
"parts",
"[",
"2",
"]",
"}",
"return",
"repo"
]
| Definition: github.com/<project>/<repo> | [
"Definition",
":",
"github",
".",
"com",
"/",
"<project",
">",
"/",
"<repo",
">"
]
| python | train |
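A worked example may clarify the parsing above; `provider` is a hypothetical instance of the class that defines this method:

# Hypothetical provider instance exposing parseGithubImportPath.
repo = provider.parseGithubImportPath("github.com/coreos/etcd/client")
# parts[:3] -> repo["prefix"] == "github.com/coreos/etcd"
# repo["signature"] == {"provider": "github",
#                       "username": "coreos", "project": "etcd"}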
Miserlou/Zappa | zappa/core.py | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L3010-L3049 | def fetch_logs(self, lambda_name, filter_pattern='', limit=10000, start_time=0):
"""
Fetch the CloudWatch logs for a given Lambda name.
"""
log_name = '/aws/lambda/' + lambda_name
streams = self.logs_client.describe_log_streams(
logGroupName=log_name,
descending=True,
orderBy='LastEventTime'
)
all_streams = streams['logStreams']
all_names = [stream['logStreamName'] for stream in all_streams]
events = []
response = {}
while not response or 'nextToken' in response:
extra_args = {}
if 'nextToken' in response:
extra_args['nextToken'] = response['nextToken']
# Amazon uses millisecond epoch for some reason.
# Thanks, Jeff.
start_time = start_time * 1000
end_time = int(time.time()) * 1000
response = self.logs_client.filter_log_events(
logGroupName=log_name,
logStreamNames=all_names,
startTime=start_time,
endTime=end_time,
filterPattern=filter_pattern,
limit=limit,
interleaved=True, # Does this actually improve performance?
**extra_args
)
if response and 'events' in response:
events += response['events']
return sorted(events, key=lambda k: k['timestamp']) | [
"def",
"fetch_logs",
"(",
"self",
",",
"lambda_name",
",",
"filter_pattern",
"=",
"''",
",",
"limit",
"=",
"10000",
",",
"start_time",
"=",
"0",
")",
":",
"log_name",
"=",
"'/aws/lambda/'",
"+",
"lambda_name",
"streams",
"=",
"self",
".",
"logs_client",
".",
"describe_log_streams",
"(",
"logGroupName",
"=",
"log_name",
",",
"descending",
"=",
"True",
",",
"orderBy",
"=",
"'LastEventTime'",
")",
"all_streams",
"=",
"streams",
"[",
"'logStreams'",
"]",
"all_names",
"=",
"[",
"stream",
"[",
"'logStreamName'",
"]",
"for",
"stream",
"in",
"all_streams",
"]",
"events",
"=",
"[",
"]",
"response",
"=",
"{",
"}",
"while",
"not",
"response",
"or",
"'nextToken'",
"in",
"response",
":",
"extra_args",
"=",
"{",
"}",
"if",
"'nextToken'",
"in",
"response",
":",
"extra_args",
"[",
"'nextToken'",
"]",
"=",
"response",
"[",
"'nextToken'",
"]",
"# Amazon uses millisecond epoch for some reason.",
"# Thanks, Jeff.",
"start_time",
"=",
"start_time",
"*",
"1000",
"end_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"*",
"1000",
"response",
"=",
"self",
".",
"logs_client",
".",
"filter_log_events",
"(",
"logGroupName",
"=",
"log_name",
",",
"logStreamNames",
"=",
"all_names",
",",
"startTime",
"=",
"start_time",
",",
"endTime",
"=",
"end_time",
",",
"filterPattern",
"=",
"filter_pattern",
",",
"limit",
"=",
"limit",
",",
"interleaved",
"=",
"True",
",",
"# Does this actually improve performance?",
"*",
"*",
"extra_args",
")",
"if",
"response",
"and",
"'events'",
"in",
"response",
":",
"events",
"+=",
"response",
"[",
"'events'",
"]",
"return",
"sorted",
"(",
"events",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"'timestamp'",
"]",
")"
]
| Fetch the CloudWatch logs for a given Lambda name. | [
"Fetch",
"the",
"CloudWatch",
"logs",
"for",
"a",
"given",
"Lambda",
"name",
"."
]
| python | train |
gwastro/pycbc | pycbc/workflow/core.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/core.py#L1170-L1186 | def _filename(self, ifo, description, extension, segment):
"""
Construct the standard output filename. Should only be used internally
of the File class.
"""
if extension.startswith('.'):
extension = extension[1:]
# Follow the frame convention of using integer filenames,
# but stretching to cover partially covered seconds.
start = int(segment[0])
end = int(math.ceil(segment[1]))
duration = str(end-start)
start = str(start)
return "%s-%s-%s-%s.%s" % (ifo, description.upper(), start,
duration, extension) | [
"def",
"_filename",
"(",
"self",
",",
"ifo",
",",
"description",
",",
"extension",
",",
"segment",
")",
":",
"if",
"extension",
".",
"startswith",
"(",
"'.'",
")",
":",
"extension",
"=",
"extension",
"[",
"1",
":",
"]",
"# Follow the frame convention of using integer filenames,",
"# but stretching to cover partially covered seconds.",
"start",
"=",
"int",
"(",
"segment",
"[",
"0",
"]",
")",
"end",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"segment",
"[",
"1",
"]",
")",
")",
"duration",
"=",
"str",
"(",
"end",
"-",
"start",
")",
"start",
"=",
"str",
"(",
"start",
")",
"return",
"\"%s-%s-%s-%s.%s\"",
"%",
"(",
"ifo",
",",
"description",
".",
"upper",
"(",
")",
",",
"start",
",",
"duration",
",",
"extension",
")"
]
| Construct the standard output filename. Should only be used internally
of the File class. | [
"Construct",
"the",
"standard",
"output",
"filename",
".",
"Should",
"only",
"be",
"used",
"internally",
"of",
"the",
"File",
"class",
"."
]
| python | train |
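To make the integer-stretching convention above concrete, the arithmetic can be replayed standalone; the detector/description/segment values are illustrative:

import math

# Mirrors _filename("H1", "inspiral", ".xml", (1000000000, 1000000099.5)).
start = int(1000000000)
end = int(math.ceil(1000000099.5))  # partially covered second rounds up
name = "%s-%s-%s-%s.%s" % ("H1", "INSPIRAL", start, end - start, "xml")
print(name)  # -> H1-INSPIRAL-1000000000-100.xml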
LEMS/pylems | lems/model/structure.py | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/structure.py#L263-L273 | def add(self, child):
"""
Adds a typed child object to the structure object.
@param child: Child object to be added.
"""
if isinstance(child, Assign):
self.add_assign(child)
else:
raise ModelError('Unsupported child element') | [
"def",
"add",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"Assign",
")",
":",
"self",
".",
"add_assign",
"(",
"child",
")",
"else",
":",
"raise",
"ModelError",
"(",
"'Unsupported child element'",
")"
]
| Adds a typed child object to the structure object.
@param child: Child object to be added. | [
"Adds",
"a",
"typed",
"child",
"object",
"to",
"the",
"structure",
"object",
"."
]
| python | train |
tensorflow/tensor2tensor | tensor2tensor/models/research/glow_ops.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L429-L461 | def time_pad(x, filter_size, dilations):
"""Pad left across time and pad valid across the spatial components.
Also concats a binary feature that indicates if a feature is padded or not.
Args:
x: 5-D Tensor, (NTHWC)
filter_size: list of ints
dilations: list of ints, dilations - 1 specifies the number of holes
between two filter elements.
Returns:
x_pad: 5-D Tensor.
"""
x_shape = common_layers.shape_list(x)
if filter_size == [1, 1, 1]:
return x
_, h, w = filter_size
eff_h = h + (h - 1)*(dilations[2] - 1)
eff_w = w + (w - 1)*(dilations[3] - 1)
a = (eff_h - 1) // 2 # vertical padding size
b = (eff_w - 1) // 2 # horizontal padding size
c = filter_size[0] - 1
# pad across edges.
padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]]
# concat a binary feature across channels to indicate a padding.
# 1 indicates that the feature is a padding.
x_bias = tf.zeros(x_shape[:-1] + [1])
x_bias = tf.pad(x_bias, padding, constant_values=1)
x_pad = tf.pad(x, padding)
x_pad = tf.concat((x_bias, x_pad), axis=-1)
return x_pad | [
"def",
"time_pad",
"(",
"x",
",",
"filter_size",
",",
"dilations",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"filter_size",
"==",
"[",
"1",
",",
"1",
",",
"1",
"]",
":",
"return",
"x",
"_",
",",
"h",
",",
"w",
"=",
"filter_size",
"eff_h",
"=",
"h",
"+",
"(",
"h",
"-",
"1",
")",
"*",
"(",
"dilations",
"[",
"2",
"]",
"-",
"1",
")",
"eff_w",
"=",
"w",
"+",
"(",
"w",
"-",
"1",
")",
"*",
"(",
"dilations",
"[",
"3",
"]",
"-",
"1",
")",
"a",
"=",
"(",
"eff_h",
"-",
"1",
")",
"//",
"2",
"# vertical padding size",
"b",
"=",
"(",
"eff_w",
"-",
"1",
")",
"//",
"2",
"# horizontal padding size",
"c",
"=",
"filter_size",
"[",
"0",
"]",
"-",
"1",
"# pad across edges.",
"padding",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"c",
",",
"0",
"]",
",",
"[",
"a",
",",
"a",
"]",
",",
"[",
"b",
",",
"b",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"# concat a binary feature across channels to indicate a padding.",
"# 1 indicates that the feature is a padding.",
"x_bias",
"=",
"tf",
".",
"zeros",
"(",
"x_shape",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"1",
"]",
")",
"x_bias",
"=",
"tf",
".",
"pad",
"(",
"x_bias",
",",
"padding",
",",
"constant_values",
"=",
"1",
")",
"x_pad",
"=",
"tf",
".",
"pad",
"(",
"x",
",",
"padding",
")",
"x_pad",
"=",
"tf",
".",
"concat",
"(",
"(",
"x_bias",
",",
"x_pad",
")",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"x_pad"
]
| Pad left across time and pad valid across the spatial components.
Also concats a binary feature that indicates if a feature is padded or not.
Args:
x: 5-D Tensor, (NTHWC)
filter_size: list of ints
dilations: list of ints, dilations - 1 specifies the number of holes
between two filter elements.
Returns:
x_pad: 5-D Tensor. | [
"Pad",
"left",
"across",
"time",
"and",
"pad",
"valid",
"across",
"the",
"spatial",
"components",
"."
]
| python | train |
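The padding arithmetic in the entry above can be checked standalone; the values here are illustrative (filter_size = [2, 3, 3], spatial dilations of 2):

h = w = 3
dil_h = dil_w = 2
eff_h = h + (h - 1) * (dil_h - 1)          # 5
eff_w = w + (w - 1) * (dil_w - 1)          # 5
a, b = (eff_h - 1) // 2, (eff_w - 1) // 2  # 2, 2
c = 2 - 1                                  # causal time padding of 1
print(a, b, c)  # -> 2 2 1, i.e. padding [[0,0],[1,0],[2,2],[2,2],[0,0]]
# An NTHWC input of shape (4, 5, 8, 8, 16) would therefore pad to
# (4, 6, 12, 12, 17); the extra channel is the binary padding mask.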
sibirrer/lenstronomy | lenstronomy/LensModel/Profiles/nfw.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/nfw.py#L383-L390 | def _alpha2rho0(self, theta_Rs, Rs):
"""
convert angle at Rs into rho0
"""
rho0 = theta_Rs / (4. * Rs ** 2 * (1. + np.log(1. / 2.)))
return rho0 | [
"def",
"_alpha2rho0",
"(",
"self",
",",
"theta_Rs",
",",
"Rs",
")",
":",
"rho0",
"=",
"theta_Rs",
"/",
"(",
"4.",
"*",
"Rs",
"**",
"2",
"*",
"(",
"1.",
"+",
"np",
".",
"log",
"(",
"1.",
"/",
"2.",
")",
")",
")",
"return",
"rho0"
]
| convert angle at Rs into rho0 | [
"convert",
"angle",
"at",
"Rs",
"into",
"rho0"
]
| python | train |
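A quick numeric check of the conversion above (unit inputs are illustrative):

import numpy as np

theta_Rs, Rs = 1.0, 1.0  # illustrative values
rho0 = theta_Rs / (4. * Rs ** 2 * (1. + np.log(1. / 2.)))
print(rho0)  # ~0.8147, since 1 + ln(0.5) is about 0.3069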
praekelt/django-analytics | analytics/views.py | https://github.com/praekelt/django-analytics/blob/29c22d03374ccc0ec451650e2c2886d324f6e5c6/analytics/views.py#L22-L31 | def get_active_stats(self):
"""
Returns all of the active statistics for the gadgets currently registered.
"""
stats = []
for gadget in self._registry.values():
for s in gadget.stats:
if s not in stats:
stats.append(s)
return stats | [
"def",
"get_active_stats",
"(",
"self",
")",
":",
"stats",
"=",
"[",
"]",
"for",
"gadget",
"in",
"self",
".",
"_registry",
".",
"values",
"(",
")",
":",
"for",
"s",
"in",
"gadget",
".",
"stats",
":",
"if",
"s",
"not",
"in",
"stats",
":",
"stats",
".",
"append",
"(",
"s",
")",
"return",
"stats"
]
| Returns all of the active statistics for the gadgets currently registered. | [
"Returns",
"all",
"of",
"the",
"active",
"statistics",
"for",
"the",
"gadgets",
"currently",
"registered",
"."
]
| python | test |
iotile/typedargs | typedargs/annotate.py | https://github.com/iotile/typedargs/blob/0a5091a664b9b4d836e091e9ba583e944f438fd8/typedargs/annotate.py#L192-L209 | def context(name=None):
"""Declare that a class defines a context.
Contexts are for use with HierarchicalShell for discovering
and using functionality from the command line.
Args:
name (str): Optional name for this context if you don't want
to just use the class name.
"""
def _context(cls):
annotated(cls, name)
cls.context = True
return cls
return _context | [
"def",
"context",
"(",
"name",
"=",
"None",
")",
":",
"def",
"_context",
"(",
"cls",
")",
":",
"annotated",
"(",
"cls",
",",
"name",
")",
"cls",
".",
"context",
"=",
"True",
"return",
"cls",
"return",
"_context"
]
| Declare that a class defines a context.
Contexts are for use with HierarchicalShell for discovering
and using functionality from the command line.
Args:
name (str): Optional name for this context if you don't want
to just use the class name. | [
"Declare",
"that",
"a",
"class",
"defines",
"a",
"context",
"."
]
| python | test |
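A minimal usage sketch of the decorator above, assuming typedargs is importable; the class and context name are hypothetical:

from typedargs.annotate import context

@context("DemoTools")  # hypothetical context name
class DemoTools(object):
    """Utilities exposed to a HierarchicalShell."""

assert DemoTools.context is True  # attribute set by the decorator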
jbloomlab/phydms | phydmslib/models.py | https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L507-L543 | def updateParams(self, newvalues, update_all=False):
"""See docs for `Model` abstract base class."""
assert all(map(lambda x: x in self.freeparams, newvalues.keys())),\
"Invalid entry in newvalues: {0}\nfreeparams: {1}".format(
', '.join(newvalues.keys()), ', '.join(self.freeparams))
changed = set([]) # contains string names of changed params
for (name, value) in newvalues.items():
_checkParam(name, value, self.PARAMLIMITS, self.PARAMTYPES)
if isinstance(value, scipy.ndarray):
if (value != getattr(self, name)).any():
changed.add(name)
setattr(self, name, value.copy())
else:
if value != getattr(self, name):
changed.add(name)
setattr(self, name, copy.copy(value))
if update_all or changed:
self._cached = {}
# The order of the updating below is important.
# If you change it, you may break either this class
# **or** classes that inherit from it.
# Note also that not all attributes need to be updated
# for all possible parameter changes, but just doing it
# this way is much simpler and adds negligible cost.
if update_all or (changed and changed != set(['mu'])):
self._update_pi_vars()
self._update_phi()
self._update_prx()
self._update_dprx()
self._update_Qxy()
self._update_Frxy()
self._update_Prxy()
self._update_Prxy_diag()
self._update_dPrxy()
self._update_B() | [
"def",
"updateParams",
"(",
"self",
",",
"newvalues",
",",
"update_all",
"=",
"False",
")",
":",
"assert",
"all",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
"in",
"self",
".",
"freeparams",
",",
"newvalues",
".",
"keys",
"(",
")",
")",
")",
",",
"\"Invalid entry in newvalues: {0}\\nfreeparams: {1}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"newvalues",
".",
"keys",
"(",
")",
")",
",",
"', '",
".",
"join",
"(",
"self",
".",
"freeparams",
")",
")",
"changed",
"=",
"set",
"(",
"[",
"]",
")",
"# contains string names of changed params",
"for",
"(",
"name",
",",
"value",
")",
"in",
"newvalues",
".",
"items",
"(",
")",
":",
"_checkParam",
"(",
"name",
",",
"value",
",",
"self",
".",
"PARAMLIMITS",
",",
"self",
".",
"PARAMTYPES",
")",
"if",
"isinstance",
"(",
"value",
",",
"scipy",
".",
"ndarray",
")",
":",
"if",
"(",
"value",
"!=",
"getattr",
"(",
"self",
",",
"name",
")",
")",
".",
"any",
"(",
")",
":",
"changed",
".",
"add",
"(",
"name",
")",
"setattr",
"(",
"self",
",",
"name",
",",
"value",
".",
"copy",
"(",
")",
")",
"else",
":",
"if",
"value",
"!=",
"getattr",
"(",
"self",
",",
"name",
")",
":",
"changed",
".",
"add",
"(",
"name",
")",
"setattr",
"(",
"self",
",",
"name",
",",
"copy",
".",
"copy",
"(",
"value",
")",
")",
"if",
"update_all",
"or",
"changed",
":",
"self",
".",
"_cached",
"=",
"{",
"}",
"# The order of the updating below is important.",
"# If you change it, you may break either this class",
"# **or** classes that inherit from it.",
"# Note also that not all attributes need to be updated",
"# for all possible parameter changes, but just doing it",
"# this way is much simpler and adds negligible cost.",
"if",
"update_all",
"or",
"(",
"changed",
"and",
"changed",
"!=",
"set",
"(",
"[",
"'mu'",
"]",
")",
")",
":",
"self",
".",
"_update_pi_vars",
"(",
")",
"self",
".",
"_update_phi",
"(",
")",
"self",
".",
"_update_prx",
"(",
")",
"self",
".",
"_update_dprx",
"(",
")",
"self",
".",
"_update_Qxy",
"(",
")",
"self",
".",
"_update_Frxy",
"(",
")",
"self",
".",
"_update_Prxy",
"(",
")",
"self",
".",
"_update_Prxy_diag",
"(",
")",
"self",
".",
"_update_dPrxy",
"(",
")",
"self",
".",
"_update_B",
"(",
")"
]
| See docs for `Model` abstract base class. | [
"See",
"docs",
"for",
"Model",
"abstract",
"base",
"class",
"."
]
| python | train |
wglass/lighthouse | lighthouse/zookeeper.py | https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L128-L189 | def start_watching(self, cluster, callback):
"""
Initiates the "watching" of a cluster's associated znode.
This is done via kazoo's ChildrenWatch object. When a cluster's
znode's child nodes are updated, a callback is fired and we update
the cluster's `nodes` attribute based on the existing child znodes
and fire a passed-in callback with no arguments once done.
If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL`
seconds before trying again as long as no ChildrenWatch exists for
the given cluster yet and we are not in the process of shutting down.
"""
logger.debug("starting to watch cluster %s", cluster.name)
wait_on_any(self.connected, self.shutdown)
logger.debug("done waiting on (connected, shutdown)")
znode_path = "/".join([self.base_path, cluster.name])
self.stop_events[znode_path] = threading.Event()
def should_stop():
return (
znode_path not in self.stop_events or
self.stop_events[znode_path].is_set() or
self.shutdown.is_set()
)
while not should_stop():
try:
if self.client.exists(znode_path):
break
except exceptions.ConnectionClosedError:
break
wait_on_any(
self.stop_events[znode_path], self.shutdown,
timeout=NO_NODE_INTERVAL
)
logger.debug("setting up ChildrenWatch for %s", znode_path)
@self.client.ChildrenWatch(znode_path)
def watch(children):
if should_stop():
return False
logger.debug("znode children changed! (%s)", znode_path)
new_nodes = []
for child in children:
child_path = "/".join([znode_path, child])
try:
new_nodes.append(
Node.deserialize(self.client.get(child_path)[0])
)
except ValueError:
logger.exception("Invalid node at path '%s'", child)
continue
cluster.nodes = new_nodes
callback() | [
"def",
"start_watching",
"(",
"self",
",",
"cluster",
",",
"callback",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting to watch cluster %s\"",
",",
"cluster",
".",
"name",
")",
"wait_on_any",
"(",
"self",
".",
"connected",
",",
"self",
".",
"shutdown",
")",
"logger",
".",
"debug",
"(",
"\"done waiting on (connected, shutdown)\"",
")",
"znode_path",
"=",
"\"/\"",
".",
"join",
"(",
"[",
"self",
".",
"base_path",
",",
"cluster",
".",
"name",
"]",
")",
"self",
".",
"stop_events",
"[",
"znode_path",
"]",
"=",
"threading",
".",
"Event",
"(",
")",
"def",
"should_stop",
"(",
")",
":",
"return",
"(",
"znode_path",
"not",
"in",
"self",
".",
"stop_events",
"or",
"self",
".",
"stop_events",
"[",
"znode_path",
"]",
".",
"is_set",
"(",
")",
"or",
"self",
".",
"shutdown",
".",
"is_set",
"(",
")",
")",
"while",
"not",
"should_stop",
"(",
")",
":",
"try",
":",
"if",
"self",
".",
"client",
".",
"exists",
"(",
"znode_path",
")",
":",
"break",
"except",
"exceptions",
".",
"ConnectionClosedError",
":",
"break",
"wait_on_any",
"(",
"self",
".",
"stop_events",
"[",
"znode_path",
"]",
",",
"self",
".",
"shutdown",
",",
"timeout",
"=",
"NO_NODE_INTERVAL",
")",
"logger",
".",
"debug",
"(",
"\"setting up ChildrenWatch for %s\"",
",",
"znode_path",
")",
"@",
"self",
".",
"client",
".",
"ChildrenWatch",
"(",
"znode_path",
")",
"def",
"watch",
"(",
"children",
")",
":",
"if",
"should_stop",
"(",
")",
":",
"return",
"False",
"logger",
".",
"debug",
"(",
"\"znode children changed! (%s)\"",
",",
"znode_path",
")",
"new_nodes",
"=",
"[",
"]",
"for",
"child",
"in",
"children",
":",
"child_path",
"=",
"\"/\"",
".",
"join",
"(",
"[",
"znode_path",
",",
"child",
"]",
")",
"try",
":",
"new_nodes",
".",
"append",
"(",
"Node",
".",
"deserialize",
"(",
"self",
".",
"client",
".",
"get",
"(",
"child_path",
")",
"[",
"0",
"]",
")",
")",
"except",
"ValueError",
":",
"logger",
".",
"exception",
"(",
"\"Invalid node at path '%s'\"",
",",
"child",
")",
"continue",
"cluster",
".",
"nodes",
"=",
"new_nodes",
"callback",
"(",
")"
]
| Initiates the "watching" of a cluster's associated znode.
This is done via kazoo's ChildrenWatch object. When a cluster's
znode's child nodes are updated, a callback is fired and we update
the cluster's `nodes` attribute based on the existing child znodes
and fire a passed-in callback with no arguments once done.
If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL`
seconds before trying again as long as no ChildrenWatch exists for
the given cluster yet and we are not in the process of shutting down. | [
"Initiates",
"the",
"watching",
"of",
"a",
"cluster",
"s",
"associated",
"znode",
"."
]
| python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py#L760-L774 | def write_connection_file(self):
"""write connection info to JSON dict in self.connection_file"""
if self._connection_file_written:
return
self.connection_file,cfg = write_connection_file(self.connection_file,
ip=self.ip, key=self.session.key,
stdin_port=self.stdin_port, iopub_port=self.iopub_port,
shell_port=self.shell_port, hb_port=self.hb_port)
# write_connection_file also sets default ports:
self.shell_port = cfg['shell_port']
self.stdin_port = cfg['stdin_port']
self.iopub_port = cfg['iopub_port']
self.hb_port = cfg['hb_port']
self._connection_file_written = True | [
"def",
"write_connection_file",
"(",
"self",
")",
":",
"if",
"self",
".",
"_connection_file_written",
":",
"return",
"self",
".",
"connection_file",
",",
"cfg",
"=",
"write_connection_file",
"(",
"self",
".",
"connection_file",
",",
"ip",
"=",
"self",
".",
"ip",
",",
"key",
"=",
"self",
".",
"session",
".",
"key",
",",
"stdin_port",
"=",
"self",
".",
"stdin_port",
",",
"iopub_port",
"=",
"self",
".",
"iopub_port",
",",
"shell_port",
"=",
"self",
".",
"shell_port",
",",
"hb_port",
"=",
"self",
".",
"hb_port",
")",
"# write_connection_file also sets default ports:",
"self",
".",
"shell_port",
"=",
"cfg",
"[",
"'shell_port'",
"]",
"self",
".",
"stdin_port",
"=",
"cfg",
"[",
"'stdin_port'",
"]",
"self",
".",
"iopub_port",
"=",
"cfg",
"[",
"'iopub_port'",
"]",
"self",
".",
"hb_port",
"=",
"cfg",
"[",
"'hb_port'",
"]",
"self",
".",
"_connection_file_written",
"=",
"True"
]
| write connection info to JSON dict in self.connection_file | [
"write",
"connection",
"info",
"to",
"JSON",
"dict",
"in",
"self",
".",
"connection_file"
]
| python | test |
HazyResearch/metal | metal/analysis.py | https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/analysis.py#L271-L277 | def add(self, gold, pred):
"""
Args:
gold: a np.ndarray of gold labels (ints)
pred: a np.ndarray of predictions (ints)
"""
self.counter.update(zip(gold, pred)) | [
"def",
"add",
"(",
"self",
",",
"gold",
",",
"pred",
")",
":",
"self",
".",
"counter",
".",
"update",
"(",
"zip",
"(",
"gold",
",",
"pred",
")",
")"
]
| Args:
gold: a np.ndarray of gold labels (ints)
pred: a np.ndarray of predictions (ints) | [
"Args",
":",
"gold",
":",
"a",
"np",
".",
"ndarray",
"of",
"gold",
"labels",
"(",
"ints",
")",
"pred",
":",
"a",
"np",
".",
"ndarray",
"of",
"predictions",
"(",
"ints",
")"
]
| python | train |
scivision/gridaurora | gridaurora/calcemissions.py | https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L62-L76 | def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br) | [
"def",
"getMetastable",
"(",
"rates",
",",
"ver",
":",
"np",
".",
"ndarray",
",",
"lamb",
",",
"br",
",",
"reactfn",
":",
"Path",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"reactfn",
",",
"'r'",
")",
"as",
"f",
":",
"A",
"=",
"f",
"[",
"'/metastable/A'",
"]",
"[",
":",
"]",
"lambnew",
"=",
"f",
"[",
"'/metastable/lambda'",
"]",
".",
"value",
".",
"ravel",
"(",
"order",
"=",
"'F'",
")",
"# some are not 1-D!",
"vnew",
"=",
"np",
".",
"concatenate",
"(",
"(",
"A",
"[",
":",
"2",
"]",
"*",
"rates",
".",
"loc",
"[",
"...",
",",
"'no1s'",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
",",
"A",
"[",
"2",
":",
"4",
"]",
"*",
"rates",
".",
"loc",
"[",
"...",
",",
"'no1d'",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
",",
"A",
"[",
"4",
":",
"]",
"*",
"rates",
".",
"loc",
"[",
"...",
",",
"'noii2p'",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
")",
",",
"axis",
"=",
"-",
"1",
")",
"assert",
"vnew",
".",
"shape",
"==",
"(",
"rates",
".",
"shape",
"[",
"0",
"]",
",",
"A",
".",
"size",
")",
"return",
"catvl",
"(",
"rates",
".",
"alt_km",
",",
"ver",
",",
"vnew",
",",
"lamb",
",",
"lambnew",
",",
"br",
")"
]
| concatenate along the reaction dimension, axis=-1 | [
"concatenate",
"along",
"the",
"reaction",
"dimension",
"axis",
"=",
"-",
"1"
]
| python | train |
saltstack/salt | salt/modules/boto_asg.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_asg.py#L895-L918 | def exit_standby(name, instance_ids, should_decrement_desired_capacity=False,
region=None, key=None, keyid=None, profile=None):
'''
Exit desired instances from StandBy mode
.. versionadded:: 2016.11.0
CLI example::
salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxxx"]'
'''
conn = _get_conn_autoscaling_boto3(
region=region, key=key, keyid=keyid, profile=profile)
try:
response = conn.exit_standby(
InstanceIds=instance_ids,
AutoScalingGroupName=name)
except ClientError as e:
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': err}
return all(activity['StatusCode'] != 'Failed' for activity in response['Activities']) | [
"def",
"exit_standby",
"(",
"name",
",",
"instance_ids",
",",
"should_decrement_desired_capacity",
"=",
"False",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn_autoscaling_boto3",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"response",
"=",
"conn",
".",
"exit_standby",
"(",
"InstanceIds",
"=",
"instance_ids",
",",
"AutoScalingGroupName",
"=",
"name",
")",
"except",
"ClientError",
"as",
"e",
":",
"err",
"=",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"if",
"e",
".",
"response",
".",
"get",
"(",
"'Error'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'Code'",
")",
"==",
"'ResourceNotFoundException'",
":",
"return",
"{",
"'exists'",
":",
"False",
"}",
"return",
"{",
"'error'",
":",
"err",
"}",
"return",
"all",
"(",
"activity",
"[",
"'StatusCode'",
"]",
"!=",
"'Failed'",
"for",
"activity",
"in",
"response",
"[",
"'Activities'",
"]",
")"
]
| Exit desired instances from StandBy mode
.. versionadded:: 2016.11.0
CLI example::
salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxxx"]' | [
"Exit",
"desired",
"instances",
"from",
"StandBy",
"mode"
]
| python | train |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/__init__.py | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L50-L72 | def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0):
'''
Wrapper for the scipy.signal.savgol_filter function that handles NaN values.
See: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/3
Returns
-------
y : ndarray, same shape as `x`
The filtered data.
'''
# linearly interpolate missing values before filtering
x = np.ma.masked_invalid(pd.Series(x).interpolate())
try:
# start filtering from the first non-zero value since these won't be addressed by
# the interpolation above
ind = np.isfinite(x).nonzero()[0][0]
x[ind:] = signal.savgol_filter(x[ind:], window_length, polyorder, deriv,
delta, axis, mode, cval)
except IndexError:
pass
return np.ma.masked_invalid(x) | [
"def",
"savgol_filter",
"(",
"x",
",",
"window_length",
",",
"polyorder",
",",
"deriv",
"=",
"0",
",",
"delta",
"=",
"1.0",
",",
"axis",
"=",
"-",
"1",
",",
"mode",
"=",
"'interp'",
",",
"cval",
"=",
"0.0",
")",
":",
"# linearly interpolate missing values before filtering",
"x",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"pd",
".",
"Series",
"(",
"x",
")",
".",
"interpolate",
"(",
")",
")",
"try",
":",
"# start filtering from the first non-zero value since these won't be addressed by",
"# the interpolation above",
"ind",
"=",
"np",
".",
"isfinite",
"(",
"x",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"x",
"[",
"ind",
":",
"]",
"=",
"signal",
".",
"savgol_filter",
"(",
"x",
"[",
"ind",
":",
"]",
",",
"window_length",
",",
"polyorder",
",",
"deriv",
",",
"delta",
",",
"axis",
",",
"mode",
",",
"cval",
")",
"except",
"IndexError",
":",
"pass",
"return",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"x",
")"
]
| Wrapper for the scipy.signal.savgol_filter function that handles Nan values.
See: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/3
Returns
-------
y : ndarray, same shape as `x`
The filtered data. | [
"Wrapper",
"for",
"the",
"scipy",
".",
"signal",
".",
"savgol_filter",
"function",
"that",
"handles",
"Nan",
"values",
"."
]
| python | train |
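A hedged usage sketch of the NaN-aware wrapper above, assuming it is importable along with numpy, pandas, and scipy; the input and window settings are illustrative:

import numpy as np

x = np.array([np.nan, 1.0, 2.0, np.nan, 4.0, 5.0, 6.0, 7.0])
y = savgol_filter(x, window_length=5, polyorder=2)
# The interior NaN is linearly interpolated before filtering; the
# leading NaN (nothing to interpolate from) stays masked in y.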
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/ext/fontconfig.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/fontconfig.py#L85-L111 | def find_font(face, bold, italic):
"""Find font"""
bold = FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR
italic = FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN
face = face.encode('utf8')
fontconfig.FcInit()
pattern = fontconfig.FcPatternCreate()
fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold)
fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic)
fontconfig.FcPatternAddString(pattern, FC_FAMILY, face)
fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern)
fontconfig.FcDefaultSubstitute(pattern)
result = FcType()
match = fontconfig.FcFontMatch(0, pattern, byref(result))
fontconfig.FcPatternDestroy(pattern)
if not match:
raise RuntimeError('Could not match font "%s"' % face)
value = FcValue()
fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value))
if(value.u.s != face):
warnings.warn('Could not find face match "%s", falling back to "%s"'
% (face, value.u.s))
result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value))
if result != 0:
raise RuntimeError('No filename or FT face for "%s"' % face)
fname = value.u.s
return fname.decode('utf-8') | [
"def",
"find_font",
"(",
"face",
",",
"bold",
",",
"italic",
")",
":",
"bold",
"=",
"FC_WEIGHT_BOLD",
"if",
"bold",
"else",
"FC_WEIGHT_REGULAR",
"italic",
"=",
"FC_SLANT_ITALIC",
"if",
"italic",
"else",
"FC_SLANT_ROMAN",
"face",
"=",
"face",
".",
"encode",
"(",
"'utf8'",
")",
"fontconfig",
".",
"FcInit",
"(",
")",
"pattern",
"=",
"fontconfig",
".",
"FcPatternCreate",
"(",
")",
"fontconfig",
".",
"FcPatternAddInteger",
"(",
"pattern",
",",
"FC_WEIGHT",
",",
"bold",
")",
"fontconfig",
".",
"FcPatternAddInteger",
"(",
"pattern",
",",
"FC_SLANT",
",",
"italic",
")",
"fontconfig",
".",
"FcPatternAddString",
"(",
"pattern",
",",
"FC_FAMILY",
",",
"face",
")",
"fontconfig",
".",
"FcConfigSubstitute",
"(",
"0",
",",
"pattern",
",",
"FcMatchPattern",
")",
"fontconfig",
".",
"FcDefaultSubstitute",
"(",
"pattern",
")",
"result",
"=",
"FcType",
"(",
")",
"match",
"=",
"fontconfig",
".",
"FcFontMatch",
"(",
"0",
",",
"pattern",
",",
"byref",
"(",
"result",
")",
")",
"fontconfig",
".",
"FcPatternDestroy",
"(",
"pattern",
")",
"if",
"not",
"match",
":",
"raise",
"RuntimeError",
"(",
"'Could not match font \"%s\"'",
"%",
"face",
")",
"value",
"=",
"FcValue",
"(",
")",
"fontconfig",
".",
"FcPatternGet",
"(",
"match",
",",
"FC_FAMILY",
",",
"0",
",",
"byref",
"(",
"value",
")",
")",
"if",
"(",
"value",
".",
"u",
".",
"s",
"!=",
"face",
")",
":",
"warnings",
".",
"warn",
"(",
"'Could not find face match \"%s\", falling back to \"%s\"'",
"%",
"(",
"face",
",",
"value",
".",
"u",
".",
"s",
")",
")",
"result",
"=",
"fontconfig",
".",
"FcPatternGet",
"(",
"match",
",",
"FC_FILE",
",",
"0",
",",
"byref",
"(",
"value",
")",
")",
"if",
"result",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"'No filename or FT face for \"%s\"'",
"%",
"face",
")",
"fname",
"=",
"value",
".",
"u",
".",
"s",
"return",
"fname",
".",
"decode",
"(",
"'utf-8'",
")"
]
| Find font | [
"Find",
"font"
]
| python | train |
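An illustrative call to the helper above; it requires a loadable system fontconfig library, and the face name and output path are examples only:

fname = find_font('DejaVu Sans', bold=False, italic=False)
print(fname)  # e.g. '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'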
MartinThoma/hwrt | hwrt/segmentation/segmentation.py | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/segmentation/segmentation.py#L784-L832 | def break_mst(mst, i):
"""
Break mst into multiple MSTs by removing one node i.
Parameters
----------
mst : symmetrical square matrix
i : index of the mst where to break
Returns
-------
list of dictionaries ('mst' and 'strokes' are the keys)
"""
for j in range(len(mst['mst'])):
mst['mst'][i][j] = 0
mst['mst'][j][i] = 0
_, components = scipy.sparse.csgraph.connected_components(mst['mst'])
comp_indices = {}
for el in set(components):
comp_indices[el] = {'strokes': [], 'strokes_i': []}
for i, comp_nr in enumerate(components):
comp_indices[comp_nr]['strokes'].append(mst['strokes'][i])
comp_indices[comp_nr]['strokes_i'].append(i)
mst_wood = []
for key in comp_indices:
matrix = []
for i, line in enumerate(mst['mst']):
line_add = []
if i not in comp_indices[key]['strokes_i']:
continue
for j, el in enumerate(line):
if j in comp_indices[key]['strokes_i']:
line_add.append(el)
matrix.append(line_add)
assert len(matrix) > 0, \
("len(matrix) == 0 (strokes: %s, mst=%s, i=%i)" %
(comp_indices[key]['strokes'], mst, i))
assert len(matrix) == len(matrix[0]), \
("matrix was %i x %i, but should be square" %
(len(matrix), len(matrix[0])))
assert len(matrix) == len(comp_indices[key]['strokes']), \
(("stroke length was not equal to matrix length "
"(strokes=%s, len(matrix)=%i)") %
(comp_indices[key]['strokes'], len(matrix)))
mst_wood.append({'mst': matrix,
'strokes': comp_indices[key]['strokes']})
return mst_wood | [
"def",
"break_mst",
"(",
"mst",
",",
"i",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"mst",
"[",
"'mst'",
"]",
")",
")",
":",
"mst",
"[",
"'mst'",
"]",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"0",
"mst",
"[",
"'mst'",
"]",
"[",
"j",
"]",
"[",
"i",
"]",
"=",
"0",
"_",
",",
"components",
"=",
"scipy",
".",
"sparse",
".",
"csgraph",
".",
"connected_components",
"(",
"mst",
"[",
"'mst'",
"]",
")",
"comp_indices",
"=",
"{",
"}",
"for",
"el",
"in",
"set",
"(",
"components",
")",
":",
"comp_indices",
"[",
"el",
"]",
"=",
"{",
"'strokes'",
":",
"[",
"]",
",",
"'strokes_i'",
":",
"[",
"]",
"}",
"for",
"i",
",",
"comp_nr",
"in",
"enumerate",
"(",
"components",
")",
":",
"comp_indices",
"[",
"comp_nr",
"]",
"[",
"'strokes'",
"]",
".",
"append",
"(",
"mst",
"[",
"'strokes'",
"]",
"[",
"i",
"]",
")",
"comp_indices",
"[",
"comp_nr",
"]",
"[",
"'strokes_i'",
"]",
".",
"append",
"(",
"i",
")",
"mst_wood",
"=",
"[",
"]",
"for",
"key",
"in",
"comp_indices",
":",
"matrix",
"=",
"[",
"]",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"mst",
"[",
"'mst'",
"]",
")",
":",
"line_add",
"=",
"[",
"]",
"if",
"i",
"not",
"in",
"comp_indices",
"[",
"key",
"]",
"[",
"'strokes_i'",
"]",
":",
"continue",
"for",
"j",
",",
"el",
"in",
"enumerate",
"(",
"line",
")",
":",
"if",
"j",
"in",
"comp_indices",
"[",
"key",
"]",
"[",
"'strokes_i'",
"]",
":",
"line_add",
".",
"append",
"(",
"el",
")",
"matrix",
".",
"append",
"(",
"line_add",
")",
"assert",
"len",
"(",
"matrix",
")",
">",
"0",
",",
"(",
"\"len(matrix) == 0 (strokes: %s, mst=%s, i=%i)\"",
"%",
"(",
"comp_indices",
"[",
"key",
"]",
"[",
"'strokes'",
"]",
",",
"mst",
",",
"i",
")",
")",
"assert",
"len",
"(",
"matrix",
")",
"==",
"len",
"(",
"matrix",
"[",
"0",
"]",
")",
",",
"(",
"\"matrix was %i x %i, but should be square\"",
"%",
"(",
"len",
"(",
"matrix",
")",
",",
"len",
"(",
"matrix",
"[",
"0",
"]",
")",
")",
")",
"assert",
"len",
"(",
"matrix",
")",
"==",
"len",
"(",
"comp_indices",
"[",
"key",
"]",
"[",
"'strokes'",
"]",
")",
",",
"(",
"(",
"\"stroke length was not equal to matrix length \"",
"\"(strokes=%s, len(matrix)=%i)\"",
")",
"%",
"(",
"comp_indices",
"[",
"key",
"]",
"[",
"'strokes'",
"]",
",",
"len",
"(",
"matrix",
")",
")",
")",
"mst_wood",
".",
"append",
"(",
"{",
"'mst'",
":",
"matrix",
",",
"'strokes'",
":",
"comp_indices",
"[",
"key",
"]",
"[",
"'strokes'",
"]",
"}",
")",
"return",
"mst_wood"
]
| Break mst into multiple MSTs by removing one node i.
Parameters
----------
mst : symmetrical square matrix
i : index of the mst where to break
Returns
-------
list of dictionaries ('mst' and 'strokes' are the keys) | [
"Break",
"mst",
"into",
"multiple",
"MSTs",
"by",
"removing",
"one",
"node",
"i",
"."
]
| python | train |
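A small worked example for the record above (stroke labels are illustrative, and scipy is assumed available): removing node 1 from a 4-stroke chain should leave the singletons {0} and {1} plus the pair {2, 3}:

mst = {'mst': [[0, 1, 0, 0],
               [1, 0, 1, 0],
               [0, 1, 0, 1],
               [0, 0, 1, 0]],
       'strokes': ['s0', 's1', 's2', 's3']}
wood = break_mst(mst, 1)
# Expected: len(wood) == 3, with stroke groups
# [['s0'], ['s1'], ['s2', 's3']] (component ordering may vary).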
ultrabug/py3status | py3status/modules/dpms.py | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/dpms.py#L62-L79 | def dpms(self):
"""
Display a colorful state of DPMS.
"""
if "DPMS is Enabled" in self.py3.command_output("xset -q"):
_format = self.icon_on
color = self.color_on
else:
_format = self.icon_off
color = self.color_off
icon = self.py3.safe_format(_format)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, {"icon": icon}),
"color": color,
} | [
"def",
"dpms",
"(",
"self",
")",
":",
"if",
"\"DPMS is Enabled\"",
"in",
"self",
".",
"py3",
".",
"command_output",
"(",
"\"xset -q\"",
")",
":",
"_format",
"=",
"self",
".",
"icon_on",
"color",
"=",
"self",
".",
"color_on",
"else",
":",
"_format",
"=",
"self",
".",
"icon_off",
"color",
"=",
"self",
".",
"color_off",
"icon",
"=",
"self",
".",
"py3",
".",
"safe_format",
"(",
"_format",
")",
"return",
"{",
"\"cached_until\"",
":",
"self",
".",
"py3",
".",
"time_in",
"(",
"self",
".",
"cache_timeout",
")",
",",
"\"full_text\"",
":",
"self",
".",
"py3",
".",
"safe_format",
"(",
"self",
".",
"format",
",",
"{",
"\"icon\"",
":",
"icon",
"}",
")",
",",
"\"color\"",
":",
"color",
",",
"}"
]
| Display a colorful state of DPMS. | [
"Display",
"a",
"colorful",
"state",
"of",
"DPMS",
"."
]
| python | train |