repo (string, 7-55 chars) | path (string, 4-223 chars) | url (string, 87-315 chars) | code (string, 75-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---|
tanghaibao/jcvi
|
jcvi/formats/vcf.py
|
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L199-L233
|
def sample(args):
"""
%prog sample vcffile 0.9
Sample subset of vcf file.
"""
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld))
|
[
"def",
"sample",
"(",
"args",
")",
":",
"from",
"random",
"import",
"random",
"p",
"=",
"OptionParser",
"(",
"sample",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"vcffile",
",",
"ratio",
"=",
"args",
"ratio",
"=",
"float",
"(",
"ratio",
")",
"fp",
"=",
"open",
"(",
"vcffile",
")",
"pf",
"=",
"vcffile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"kept",
"=",
"pf",
"+",
"\".kept.vcf\"",
"withheld",
"=",
"pf",
"+",
"\".withheld.vcf\"",
"fwk",
"=",
"open",
"(",
"kept",
",",
"\"w\"",
")",
"fww",
"=",
"open",
"(",
"withheld",
",",
"\"w\"",
")",
"nkept",
"=",
"nwithheld",
"=",
"0",
"for",
"row",
"in",
"fp",
":",
"if",
"row",
"[",
"0",
"]",
"==",
"'#'",
":",
"print",
"(",
"row",
".",
"strip",
"(",
")",
",",
"file",
"=",
"fwk",
")",
"continue",
"if",
"random",
"(",
")",
"<",
"ratio",
":",
"nkept",
"+=",
"1",
"print",
"(",
"row",
".",
"strip",
"(",
")",
",",
"file",
"=",
"fwk",
")",
"else",
":",
"nwithheld",
"+=",
"1",
"print",
"(",
"row",
".",
"strip",
"(",
")",
",",
"file",
"=",
"fww",
")",
"logging",
".",
"debug",
"(",
"\"{0} records kept to `{1}`\"",
".",
"format",
"(",
"nkept",
",",
"kept",
")",
")",
"logging",
".",
"debug",
"(",
"\"{0} records withheld to `{1}`\"",
".",
"format",
"(",
"nwithheld",
",",
"withheld",
")",
")"
] |
%prog sample vcffile 0.9
Sample subset of vcf file.
|
[
"%prog",
"sample",
"vcffile",
"0",
".",
"9"
] |
python
|
train
| 25.971429 |
bovee/Aston
|
aston/peak/integrators.py
|
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/peak/integrators.py#L223-L233
|
def _integrate_mpwrap(ts_and_pks, integrate, fopts):
"""
Take a zipped timeseries and peaks found in it
and integrate it to return peaks. Used to allow
multiprocessing support.
"""
ts, tpks = ts_and_pks
pks = integrate(ts, tpks, **fopts)
# for p in pks:
# p.info['mz'] = str(ts.name)
return pks
|
[
"def",
"_integrate_mpwrap",
"(",
"ts_and_pks",
",",
"integrate",
",",
"fopts",
")",
":",
"ts",
",",
"tpks",
"=",
"ts_and_pks",
"pks",
"=",
"integrate",
"(",
"ts",
",",
"tpks",
",",
"*",
"*",
"fopts",
")",
"# for p in pks:",
"# p.info['mz'] = str(ts.name)",
"return",
"pks"
] |
Take a zipped timeseries and peaks found in it
and integrate it to return peaks. Used to allow
multiprocessing support.
|
[
"Take",
"a",
"zipped",
"timeseries",
"and",
"peaks",
"found",
"in",
"it",
"and",
"integrate",
"it",
"to",
"return",
"peaks",
".",
"Used",
"to",
"allow",
"multiprocessing",
"support",
"."
] |
python
|
train
| 29.818182 |
coleifer/walrus
|
walrus/database.py
|
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/database.py#L190-L199
|
def get_key(self, key):
"""
Return a rich object for the given key. For instance, if
a hash key is requested, then a :py:class:`Hash` will be
returned.
:param str key: Key to retrieve.
:returns: A hash, set, list, zset or array.
"""
return self.__mapping.get(self.type(key), self.__getitem__)(key)
|
[
"def",
"get_key",
"(",
"self",
",",
"key",
")",
":",
"return",
"self",
".",
"__mapping",
".",
"get",
"(",
"self",
".",
"type",
"(",
"key",
")",
",",
"self",
".",
"__getitem__",
")",
"(",
"key",
")"
] |
Return a rich object for the given key. For instance, if
a hash key is requested, then a :py:class:`Hash` will be
returned.
:param str key: Key to retrieve.
:returns: A hash, set, list, zset or array.
|
[
"Return",
"a",
"rich",
"object",
"for",
"the",
"given",
"key",
".",
"For",
"instance",
"if",
"a",
"hash",
"key",
"is",
"requested",
"then",
"a",
":",
"py",
":",
"class",
":",
"Hash",
"will",
"be",
"returned",
"."
] |
python
|
train
| 35.3 |
EUDAT-B2SAFE/B2HANDLE
|
b2handle/handlesystemconnector.py
|
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handlesystemconnector.py#L252-L292
|
def send_handle_get_request(self, handle, indices=None):
'''
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to delete. Defaults to
None (i.e. the entire handle is deleted.). The list can contain
integers or strings.
:return: The server's response.
'''
# Assemble required info:
url = self.make_handle_URL(handle, indices)
LOGGER.debug('GET Request to '+url)
head = self.__get_headers('GET')
veri = self.__HTTPS_verify
# Send the request
if self.__cert_needed_for_get_request():
# If this is the first request and the connector uses client cert authentication, we need to send the cert along
# in the first request that builds the session.
resp = self.__session.get(url, headers=head, verify=veri, cert=self.__cert_object)
else:
# Normal case:
resp = self.__session.get(url, headers=head, verify=veri)
# Log and return
self.__log_request_response_to_file(
logger=REQUESTLOGGER,
op='GET',
handle=handle,
url=url,
headers=head,
verify=veri,
resp=resp
)
self.__first_request = False
return resp
|
[
"def",
"send_handle_get_request",
"(",
"self",
",",
"handle",
",",
"indices",
"=",
"None",
")",
":",
"# Assemble required info:",
"url",
"=",
"self",
".",
"make_handle_URL",
"(",
"handle",
",",
"indices",
")",
"LOGGER",
".",
"debug",
"(",
"'GET Request to '",
"+",
"url",
")",
"head",
"=",
"self",
".",
"__get_headers",
"(",
"'GET'",
")",
"veri",
"=",
"self",
".",
"__HTTPS_verify",
"# Send the request",
"if",
"self",
".",
"__cert_needed_for_get_request",
"(",
")",
":",
"# If this is the first request and the connector uses client cert authentication, we need to send the cert along",
"# in the first request that builds the session.",
"resp",
"=",
"self",
".",
"__session",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"head",
",",
"verify",
"=",
"veri",
",",
"cert",
"=",
"self",
".",
"__cert_object",
")",
"else",
":",
"# Normal case:",
"resp",
"=",
"self",
".",
"__session",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"head",
",",
"verify",
"=",
"veri",
")",
"# Log and return",
"self",
".",
"__log_request_response_to_file",
"(",
"logger",
"=",
"REQUESTLOGGER",
",",
"op",
"=",
"'GET'",
",",
"handle",
"=",
"handle",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"head",
",",
"verify",
"=",
"veri",
",",
"resp",
"=",
"resp",
")",
"self",
".",
"__first_request",
"=",
"False",
"return",
"resp"
] |
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to delete. Defaults to
None (i.e. the entire handle is deleted.). The list can contain
integers or strings.
:return: The server's response.
|
[
"Send",
"a",
"HTTP",
"GET",
"request",
"to",
"the",
"handle",
"server",
"to",
"read",
"either",
"an",
"entire",
"handle",
"or",
"to",
"some",
"specified",
"values",
"from",
"a",
"handle",
"record",
"using",
"the",
"requests",
"module",
"."
] |
python
|
train
| 36.219512 |
rackerlabs/simpl
|
simpl/config.py
|
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/config.py#L378-L382
|
def init(cls, *args, **kwargs):
"""Initialize the config like as you would a regular dict."""
instance = cls()
instance._values.update(dict(*args, **kwargs))
return instance
|
[
"def",
"init",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"instance",
"=",
"cls",
"(",
")",
"instance",
".",
"_values",
".",
"update",
"(",
"dict",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"instance"
] |
Initialize the config like as you would a regular dict.
|
[
"Initialize",
"the",
"config",
"like",
"as",
"you",
"would",
"a",
"regular",
"dict",
"."
] |
python
|
train
| 40.2 |
chrisspen/burlap
|
burlap/deb.py
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/deb.py#L45-L56
|
def is_installed(pkg_name):
"""
Check if a package is installed.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("dpkg -s %(pkg_name)s" % locals())
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(' '):
return True
return False
|
[
"def",
"is_installed",
"(",
"pkg_name",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
",",
"'warnings'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"res",
"=",
"run",
"(",
"\"dpkg -s %(pkg_name)s\"",
"%",
"locals",
"(",
")",
")",
"for",
"line",
"in",
"res",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"Status: \"",
")",
":",
"status",
"=",
"line",
"[",
"8",
":",
"]",
"if",
"\"installed\"",
"in",
"status",
".",
"split",
"(",
"' '",
")",
":",
"return",
"True",
"return",
"False"
] |
Check if a package is installed.
|
[
"Check",
"if",
"a",
"package",
"is",
"installed",
"."
] |
python
|
valid
| 35.666667 |
taskcluster/taskcluster-client.py
|
taskcluster/aio/ec2manager.py
|
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L149-L158
|
async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs)
|
[
"async",
"def",
"terminateInstance",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"terminateInstance\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
|
[
"Terminate",
"an",
"instance"
] |
python
|
train
| 28.3 |
bpython/curtsies
|
curtsies/input.py
|
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/input.py#L267-L274
|
def event_trigger(self, event_type):
"""Returns a callback that creates events.
Returned callback function will add an event of type event_type
to a queue which will be checked the next time an event is requested."""
def callback(**kwargs):
self.queued_events.append(event_type(**kwargs))
return callback
|
[
"def",
"event_trigger",
"(",
"self",
",",
"event_type",
")",
":",
"def",
"callback",
"(",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"queued_events",
".",
"append",
"(",
"event_type",
"(",
"*",
"*",
"kwargs",
")",
")",
"return",
"callback"
] |
Returns a callback that creates events.
Returned callback function will add an event of type event_type
to a queue which will be checked the next time an event is requested.
|
[
"Returns",
"a",
"callback",
"that",
"creates",
"events",
"."
] |
python
|
train
| 43.75 |
amaas-fintech/amaas-core-sdk-python
|
amaascore/transactions/interface.py
|
https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/transactions/interface.py#L763-L780
|
def clear(self, asset_manager_id, book_ids=None):
""" This method deletes all the data for an asset_manager_id
and option book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. """
self.logger.info('Clear Transactions & Positions - Asset Manager: %s', asset_manager_id)
url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
params = {'asset_manager_ids': ','.join(book_ids)} if book_ids else {}
response = self.session.delete(url, params=params)
if response.ok:
tran_count = response.json().get('transaction_count', 'Unknown')
self.logger.info('Deleted %s Transactions.', tran_count)
pos_count = response.json().get('position_count', 'Unknown')
self.logger.info('Deleted %s Positions.', pos_count)
return response.json()
else:
self.logger.error(response.text)
response.raise_for_status()
|
[
"def",
"clear",
"(",
"self",
",",
"asset_manager_id",
",",
"book_ids",
"=",
"None",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Clear Transactions & Positions - Asset Manager: %s'",
",",
"asset_manager_id",
")",
"url",
"=",
"'%s/clear/%s'",
"%",
"(",
"self",
".",
"endpoint",
",",
"asset_manager_id",
")",
"params",
"=",
"{",
"'asset_manager_ids'",
":",
"','",
".",
"join",
"(",
"book_ids",
")",
"}",
"if",
"book_ids",
"else",
"{",
"}",
"response",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"url",
",",
"params",
"=",
"params",
")",
"if",
"response",
".",
"ok",
":",
"tran_count",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"'transaction_count'",
",",
"'Unknown'",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Deleted %s Transactions.'",
",",
"tran_count",
")",
"pos_count",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"'position_count'",
",",
"'Unknown'",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Deleted %s Positions.'",
",",
"pos_count",
")",
"return",
"response",
".",
"json",
"(",
")",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"response",
".",
"text",
")",
"response",
".",
"raise_for_status",
"(",
")"
] |
This method deletes all the data for an asset_manager_id
and option book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete.
|
[
"This",
"method",
"deletes",
"all",
"the",
"data",
"for",
"an",
"asset_manager_id",
"and",
"option",
"book_ids",
".",
"It",
"should",
"be",
"used",
"with",
"extreme",
"caution",
".",
"In",
"production",
"it",
"is",
"almost",
"always",
"better",
"to",
"Inactivate",
"rather",
"than",
"delete",
"."
] |
python
|
train
| 56.611111 |
persephone-tools/persephone
|
persephone/results.py
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L57-L96
|
def fmt_latex_output(hyps: Sequence[Sequence[str]],
refs: Sequence[Sequence[str]],
prefixes: Sequence[str],
out_fn: Path,
) -> None:
""" Output the hypotheses and references to a LaTeX source file for
pretty printing.
"""
alignments_ = [min_edit_distance_align(ref, hyp)
for hyp, ref in zip(hyps, refs)]
with out_fn.open("w") as out_f:
print(latex_header(), file=out_f)
print("\\begin{document}\n"
"\\begin{longtable}{ll}", file=out_f)
print(r"\toprule", file=out_f)
for sent in zip(prefixes, alignments_):
prefix = sent[0]
alignments = sent[1:]
print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), r"\\", file=out_f)
for i, alignment in enumerate(alignments):
ref_list = []
hyp_list = []
for arrow in alignment:
if arrow[0] == arrow[1]:
# Then don't highlight it; it's correct.
ref_list.append(arrow[0])
hyp_list.append(arrow[1])
else:
# Then highlight the errors.
ref_list.append("\\hl{%s}" % arrow[0])
hyp_list.append("\\hl{%s}" % arrow[1])
print("Ref: &", "".join(ref_list), r"\\", file=out_f)
print("Hyp: &", "".join(hyp_list), r"\\", file=out_f)
print(r"\midrule", file=out_f)
print(r"\end{longtable}", file=out_f)
print(r"\end{document}", file=out_f)
|
[
"def",
"fmt_latex_output",
"(",
"hyps",
":",
"Sequence",
"[",
"Sequence",
"[",
"str",
"]",
"]",
",",
"refs",
":",
"Sequence",
"[",
"Sequence",
"[",
"str",
"]",
"]",
",",
"prefixes",
":",
"Sequence",
"[",
"str",
"]",
",",
"out_fn",
":",
"Path",
",",
")",
"->",
"None",
":",
"alignments_",
"=",
"[",
"min_edit_distance_align",
"(",
"ref",
",",
"hyp",
")",
"for",
"hyp",
",",
"ref",
"in",
"zip",
"(",
"hyps",
",",
"refs",
")",
"]",
"with",
"out_fn",
".",
"open",
"(",
"\"w\"",
")",
"as",
"out_f",
":",
"print",
"(",
"latex_header",
"(",
")",
",",
"file",
"=",
"out_f",
")",
"print",
"(",
"\"\\\\begin{document}\\n\"",
"\"\\\\begin{longtable}{ll}\"",
",",
"file",
"=",
"out_f",
")",
"print",
"(",
"r\"\\toprule\"",
",",
"file",
"=",
"out_f",
")",
"for",
"sent",
"in",
"zip",
"(",
"prefixes",
",",
"alignments_",
")",
":",
"prefix",
"=",
"sent",
"[",
"0",
"]",
"alignments",
"=",
"sent",
"[",
"1",
":",
"]",
"print",
"(",
"\"Utterance ID: &\"",
",",
"prefix",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"r\"_\"",
",",
"r\"\\_\"",
")",
",",
"r\"\\\\\"",
",",
"file",
"=",
"out_f",
")",
"for",
"i",
",",
"alignment",
"in",
"enumerate",
"(",
"alignments",
")",
":",
"ref_list",
"=",
"[",
"]",
"hyp_list",
"=",
"[",
"]",
"for",
"arrow",
"in",
"alignment",
":",
"if",
"arrow",
"[",
"0",
"]",
"==",
"arrow",
"[",
"1",
"]",
":",
"# Then don't highlight it; it's correct.",
"ref_list",
".",
"append",
"(",
"arrow",
"[",
"0",
"]",
")",
"hyp_list",
".",
"append",
"(",
"arrow",
"[",
"1",
"]",
")",
"else",
":",
"# Then highlight the errors.",
"ref_list",
".",
"append",
"(",
"\"\\\\hl{%s}\"",
"%",
"arrow",
"[",
"0",
"]",
")",
"hyp_list",
".",
"append",
"(",
"\"\\\\hl{%s}\"",
"%",
"arrow",
"[",
"1",
"]",
")",
"print",
"(",
"\"Ref: &\"",
",",
"\"\"",
".",
"join",
"(",
"ref_list",
")",
",",
"r\"\\\\\"",
",",
"file",
"=",
"out_f",
")",
"print",
"(",
"\"Hyp: &\"",
",",
"\"\"",
".",
"join",
"(",
"hyp_list",
")",
",",
"r\"\\\\\"",
",",
"file",
"=",
"out_f",
")",
"print",
"(",
"r\"\\midrule\"",
",",
"file",
"=",
"out_f",
")",
"print",
"(",
"r\"\\end{longtable}\"",
",",
"file",
"=",
"out_f",
")",
"print",
"(",
"r\"\\end{document}\"",
",",
"file",
"=",
"out_f",
")"
] |
Output the hypotheses and references to a LaTeX source file for
pretty printing.
|
[
"Output",
"the",
"hypotheses",
"and",
"references",
"to",
"a",
"LaTeX",
"source",
"file",
"for",
"pretty",
"printing",
"."
] |
python
|
train
| 40.95 |
bird-house/twitcher
|
twitcher/store/mongodb.py
|
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/store/mongodb.py#L102-L109
|
def fetch_by_url(self, url):
"""
Gets service for given ``url`` from mongodb storage.
"""
service = self.collection.find_one({'url': url})
if not service:
raise ServiceNotFound
return Service(service)
|
[
"def",
"fetch_by_url",
"(",
"self",
",",
"url",
")",
":",
"service",
"=",
"self",
".",
"collection",
".",
"find_one",
"(",
"{",
"'url'",
":",
"url",
"}",
")",
"if",
"not",
"service",
":",
"raise",
"ServiceNotFound",
"return",
"Service",
"(",
"service",
")"
] |
Gets service for given ``url`` from mongodb storage.
|
[
"Gets",
"service",
"for",
"given",
"url",
"from",
"mongodb",
"storage",
"."
] |
python
|
valid
| 31.625 |
gem/oq-engine
|
openquake/hmtk/faults/mfd/anderson_luco_area_mmax.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/mfd/anderson_luco_area_mmax.py#L155-L175
|
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = mmax - mag_value
a_3 = self._get_a3_value(bbar, dbar, slip / 10., beta, mmax)
central_term = np.exp(bbar * delta_m) - 1.0 - (bbar * delta_m)
return a_3 * central_term * (delta_m > 0.0)
|
[
"def",
"cumulative_value",
"(",
"self",
",",
"slip",
",",
"mmax",
",",
"mag_value",
",",
"bbar",
",",
"dbar",
",",
"beta",
")",
":",
"delta_m",
"=",
"mmax",
"-",
"mag_value",
"a_3",
"=",
"self",
".",
"_get_a3_value",
"(",
"bbar",
",",
"dbar",
",",
"slip",
"/",
"10.",
",",
"beta",
",",
"mmax",
")",
"central_term",
"=",
"np",
".",
"exp",
"(",
"bbar",
"*",
"delta_m",
")",
"-",
"1.0",
"-",
"(",
"bbar",
"*",
"delta_m",
")",
"return",
"a_3",
"*",
"central_term",
"*",
"(",
"delta_m",
">",
"0.0",
")"
] |
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
|
[
"Returns",
"the",
"rate",
"of",
"events",
"with",
"M",
">",
"mag_value"
] |
python
|
train
| 37 |
skyfielders/python-skyfield
|
skyfield/precessionlib.py
|
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/precessionlib.py#L5-L69
|
def compute_precession(jd_tdb):
"""Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input.
"""
eps0 = 84381.406
# 't' is time in TDB centuries.
t = (jd_tdb - T0) / 36525.0
# Numerical coefficients of psi_a, omega_a, and chi_a, along with
# epsilon_0, the obliquity at J2000.0, are 4-angle formulation from
# Capitaine et al. (2003), eqs. (4), (37), & (39).
psia = ((((- 0.0000000951 * t
+ 0.000132851 ) * t
- 0.00114045 ) * t
- 1.0790069 ) * t
+ 5038.481507 ) * t
omegaa = ((((+ 0.0000003337 * t
- 0.000000467 ) * t
- 0.00772503 ) * t
+ 0.0512623 ) * t
- 0.025754 ) * t + eps0
chia = ((((- 0.0000000560 * t
+ 0.000170663 ) * t
- 0.00121197 ) * t
- 2.3814292 ) * t
+ 10.556403 ) * t
eps0 = eps0 * ASEC2RAD
psia = psia * ASEC2RAD
omegaa = omegaa * ASEC2RAD
chia = chia * ASEC2RAD
sa = sin(eps0)
ca = cos(eps0)
sb = sin(-psia)
cb = cos(-psia)
sc = sin(-omegaa)
cc = cos(-omegaa)
sd = sin(chia)
cd = cos(chia)
# Compute elements of precession rotation matrix equivalent to
# R3(chi_a) R1(-omega_a) R3(-psi_a) R1(epsilon_0).
rot3 = array(((cd * cb - sb * sd * cc,
cd * sb * ca + sd * cc * cb * ca - sa * sd * sc,
cd * sb * sa + sd * cc * cb * sa + ca * sd * sc),
(-sd * cb - sb * cd * cc,
-sd * sb * ca + cd * cc * cb * ca - sa * cd * sc,
-sd * sb * sa + cd * cc * cb * sa + ca * cd * sc),
(sb * sc,
-sc * cb * ca - sa * cc,
-sc * cb * sa + cc * ca)))
return rot3
|
[
"def",
"compute_precession",
"(",
"jd_tdb",
")",
":",
"eps0",
"=",
"84381.406",
"# 't' is time in TDB centuries.",
"t",
"=",
"(",
"jd_tdb",
"-",
"T0",
")",
"/",
"36525.0",
"# Numerical coefficients of psi_a, omega_a, and chi_a, along with",
"# epsilon_0, the obliquity at J2000.0, are 4-angle formulation from",
"# Capitaine et al. (2003), eqs. (4), (37), & (39).",
"psia",
"=",
"(",
"(",
"(",
"(",
"-",
"0.0000000951",
"*",
"t",
"+",
"0.000132851",
")",
"*",
"t",
"-",
"0.00114045",
")",
"*",
"t",
"-",
"1.0790069",
")",
"*",
"t",
"+",
"5038.481507",
")",
"*",
"t",
"omegaa",
"=",
"(",
"(",
"(",
"(",
"+",
"0.0000003337",
"*",
"t",
"-",
"0.000000467",
")",
"*",
"t",
"-",
"0.00772503",
")",
"*",
"t",
"+",
"0.0512623",
")",
"*",
"t",
"-",
"0.025754",
")",
"*",
"t",
"+",
"eps0",
"chia",
"=",
"(",
"(",
"(",
"(",
"-",
"0.0000000560",
"*",
"t",
"+",
"0.000170663",
")",
"*",
"t",
"-",
"0.00121197",
")",
"*",
"t",
"-",
"2.3814292",
")",
"*",
"t",
"+",
"10.556403",
")",
"*",
"t",
"eps0",
"=",
"eps0",
"*",
"ASEC2RAD",
"psia",
"=",
"psia",
"*",
"ASEC2RAD",
"omegaa",
"=",
"omegaa",
"*",
"ASEC2RAD",
"chia",
"=",
"chia",
"*",
"ASEC2RAD",
"sa",
"=",
"sin",
"(",
"eps0",
")",
"ca",
"=",
"cos",
"(",
"eps0",
")",
"sb",
"=",
"sin",
"(",
"-",
"psia",
")",
"cb",
"=",
"cos",
"(",
"-",
"psia",
")",
"sc",
"=",
"sin",
"(",
"-",
"omegaa",
")",
"cc",
"=",
"cos",
"(",
"-",
"omegaa",
")",
"sd",
"=",
"sin",
"(",
"chia",
")",
"cd",
"=",
"cos",
"(",
"chia",
")",
"# Compute elements of precession rotation matrix equivalent to",
"# R3(chi_a) R1(-omega_a) R3(-psi_a) R1(epsilon_0).",
"rot3",
"=",
"array",
"(",
"(",
"(",
"cd",
"*",
"cb",
"-",
"sb",
"*",
"sd",
"*",
"cc",
",",
"cd",
"*",
"sb",
"*",
"ca",
"+",
"sd",
"*",
"cc",
"*",
"cb",
"*",
"ca",
"-",
"sa",
"*",
"sd",
"*",
"sc",
",",
"cd",
"*",
"sb",
"*",
"sa",
"+",
"sd",
"*",
"cc",
"*",
"cb",
"*",
"sa",
"+",
"ca",
"*",
"sd",
"*",
"sc",
")",
",",
"(",
"-",
"sd",
"*",
"cb",
"-",
"sb",
"*",
"cd",
"*",
"cc",
",",
"-",
"sd",
"*",
"sb",
"*",
"ca",
"+",
"cd",
"*",
"cc",
"*",
"cb",
"*",
"ca",
"-",
"sa",
"*",
"cd",
"*",
"sc",
",",
"-",
"sd",
"*",
"sb",
"*",
"sa",
"+",
"cd",
"*",
"cc",
"*",
"cb",
"*",
"sa",
"+",
"ca",
"*",
"cd",
"*",
"sc",
")",
",",
"(",
"sb",
"*",
"sc",
",",
"-",
"sc",
"*",
"cb",
"*",
"ca",
"-",
"sa",
"*",
"cc",
",",
"-",
"sc",
"*",
"cb",
"*",
"sa",
"+",
"cc",
"*",
"ca",
")",
")",
")",
"return",
"rot3"
] |
Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input.
|
[
"Return",
"the",
"rotation",
"matrices",
"for",
"precessing",
"to",
"an",
"array",
"of",
"epochs",
"."
] |
python
|
train
| 30.892308 |
CalebBell/thermo
|
thermo/chemical.py
|
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1799-L1816
|
def rhol(self):
r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097
'''
Vml = self.Vml
if Vml:
return Vm_to_rho(Vml, self.MW)
return None
|
[
"def",
"rhol",
"(",
"self",
")",
":",
"Vml",
"=",
"self",
".",
"Vml",
"if",
"Vml",
":",
"return",
"Vm_to_rho",
"(",
"Vml",
",",
"self",
".",
"MW",
")",
"return",
"None"
] |
r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097
|
[
"r",
"Liquid",
"-",
"phase",
"mass",
"density",
"of",
"the",
"chemical",
"at",
"its",
"current",
"temperature",
"and",
"pressure",
"in",
"units",
"of",
"[",
"kg",
"/",
"m^3",
"]",
".",
"For",
"calculation",
"of",
"this",
"property",
"at",
"other",
"temperatures",
"and",
"pressures",
"or",
"specifying",
"manually",
"the",
"method",
"used",
"to",
"calculate",
"it",
"and",
"more",
"-",
"see",
"the",
"object",
"oriented",
"interface",
":",
"obj",
":",
"thermo",
".",
"volume",
".",
"VolumeLiquid",
";",
"each",
"Chemical",
"instance",
"creates",
"one",
"to",
"actually",
"perform",
"the",
"calculations",
".",
"Note",
"that",
"that",
"interface",
"provides",
"output",
"in",
"molar",
"units",
"."
] |
python
|
valid
| 39.944444 |
proteanhq/protean
|
src/protean/impl/repository/dict_repo.py
|
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/impl/repository/dict_repo.py#L160-L188
|
def _filter(self, criteria: Q, db):
"""Recursive function to filter items from dictionary"""
# Filter the dictionary objects based on the filters
negated = criteria.negated
input_db = None
if criteria.connector == criteria.AND:
# Trim database records over successive iterations
# Whatever is left at the end satisfy all criteria (AND)
input_db = db
for child in criteria.children:
if isinstance(child, Q):
input_db = self._filter(child, input_db)
else:
input_db = self.provider._evaluate_lookup(child[0], child[1],
negated, input_db)
else:
# Grow database records over successive iterations
# Whatever is left at the end satisfy any criteria (OR)
input_db = {}
for child in criteria.children:
if isinstance(child, Q):
results = self._filter(child, db)
else:
results = self.provider._evaluate_lookup(child[0], child[1], negated, db)
input_db = {**input_db, **results}
return input_db
|
[
"def",
"_filter",
"(",
"self",
",",
"criteria",
":",
"Q",
",",
"db",
")",
":",
"# Filter the dictionary objects based on the filters",
"negated",
"=",
"criteria",
".",
"negated",
"input_db",
"=",
"None",
"if",
"criteria",
".",
"connector",
"==",
"criteria",
".",
"AND",
":",
"# Trim database records over successive iterations",
"# Whatever is left at the end satisfy all criteria (AND)",
"input_db",
"=",
"db",
"for",
"child",
"in",
"criteria",
".",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"Q",
")",
":",
"input_db",
"=",
"self",
".",
"_filter",
"(",
"child",
",",
"input_db",
")",
"else",
":",
"input_db",
"=",
"self",
".",
"provider",
".",
"_evaluate_lookup",
"(",
"child",
"[",
"0",
"]",
",",
"child",
"[",
"1",
"]",
",",
"negated",
",",
"input_db",
")",
"else",
":",
"# Grow database records over successive iterations",
"# Whatever is left at the end satisfy any criteria (OR)",
"input_db",
"=",
"{",
"}",
"for",
"child",
"in",
"criteria",
".",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"Q",
")",
":",
"results",
"=",
"self",
".",
"_filter",
"(",
"child",
",",
"db",
")",
"else",
":",
"results",
"=",
"self",
".",
"provider",
".",
"_evaluate_lookup",
"(",
"child",
"[",
"0",
"]",
",",
"child",
"[",
"1",
"]",
",",
"negated",
",",
"db",
")",
"input_db",
"=",
"{",
"*",
"*",
"input_db",
",",
"*",
"*",
"results",
"}",
"return",
"input_db"
] |
Recursive function to filter items from dictionary
|
[
"Recursive",
"function",
"to",
"filter",
"items",
"from",
"dictionary"
] |
python
|
train
| 42.62069 |
UDST/urbansim
|
urbansim/models/regression.py
|
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L909-L931
|
def to_dict(self):
"""
Returns a dict representation of this instance suitable for
conversion to YAML.
"""
return {
'model_type': 'segmented_regression',
'name': self.name,
'segmentation_col': self.segmentation_col,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'min_segment_size': self.min_segment_size,
'default_config': {
'model_expression': self.default_model_expr,
'ytransform': YTRANSFORM_MAPPING[self.default_ytransform]
},
'fitted': self.fitted,
'models': {
yamlio.to_scalar_safe(name):
self._process_model_dict(m.to_dict())
for name, m in self._group.models.items()}
}
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"'model_type'",
":",
"'segmented_regression'",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'segmentation_col'",
":",
"self",
".",
"segmentation_col",
",",
"'fit_filters'",
":",
"self",
".",
"fit_filters",
",",
"'predict_filters'",
":",
"self",
".",
"predict_filters",
",",
"'min_segment_size'",
":",
"self",
".",
"min_segment_size",
",",
"'default_config'",
":",
"{",
"'model_expression'",
":",
"self",
".",
"default_model_expr",
",",
"'ytransform'",
":",
"YTRANSFORM_MAPPING",
"[",
"self",
".",
"default_ytransform",
"]",
"}",
",",
"'fitted'",
":",
"self",
".",
"fitted",
",",
"'models'",
":",
"{",
"yamlio",
".",
"to_scalar_safe",
"(",
"name",
")",
":",
"self",
".",
"_process_model_dict",
"(",
"m",
".",
"to_dict",
"(",
")",
")",
"for",
"name",
",",
"m",
"in",
"self",
".",
"_group",
".",
"models",
".",
"items",
"(",
")",
"}",
"}"
] |
Returns a dict representation of this instance suitable for
conversion to YAML.
|
[
"Returns",
"a",
"dict",
"representation",
"of",
"this",
"instance",
"suitable",
"for",
"conversion",
"to",
"YAML",
"."
] |
python
|
train
| 36.347826 |
mamrhein/identifiers
|
identifiers/gs1.py
|
https://github.com/mamrhein/identifiers/blob/93ab2609e461faff245d1f582411bf831b428eef/identifiers/gs1.py#L71-L74
|
def company_prefix(self):
"""Return the identifier's company prefix part."""
offset = self.EXTRA_DIGITS
return self._id[offset:self._ref_idx]
|
[
"def",
"company_prefix",
"(",
"self",
")",
":",
"offset",
"=",
"self",
".",
"EXTRA_DIGITS",
"return",
"self",
".",
"_id",
"[",
"offset",
":",
"self",
".",
"_ref_idx",
"]"
] |
Return the identifier's company prefix part.
|
[
"Return",
"the",
"identifier",
"s",
"company",
"prefix",
"part",
"."
] |
python
|
train
| 40.5 |
django-salesforce/django-salesforce
|
salesforce/backend/utils.py
|
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L453-L455
|
def str_dict(some_dict):
"""Convert dict of ascii str/unicode to dict of str, if necessary"""
return {str(k): str(v) for k, v in some_dict.items()}
|
[
"def",
"str_dict",
"(",
"some_dict",
")",
":",
"return",
"{",
"str",
"(",
"k",
")",
":",
"str",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"some_dict",
".",
"items",
"(",
")",
"}"
] |
Convert dict of ascii str/unicode to dict of str, if necessary
|
[
"Convert",
"dict",
"of",
"ascii",
"str",
"/",
"unicode",
"to",
"dict",
"of",
"str",
"if",
"necessary"
] |
python
|
train
| 51 |
exa-analytics/exa
|
exa/core/composer.py
|
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/composer.py#L116-L118
|
def get_kwargs(self):
"""Return kwargs from attached attributes."""
return {k: v for k, v in vars(self).items() if k not in self._ignored}
|
[
"def",
"get_kwargs",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"vars",
"(",
"self",
")",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"self",
".",
"_ignored",
"}"
] |
Return kwargs from attached attributes.
|
[
"Return",
"kwargs",
"from",
"attached",
"attributes",
"."
] |
python
|
train
| 51.333333 |
saltstack/salt
|
salt/states/gnomedesktop.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/gnomedesktop.py#L95-L142
|
def wm_preferences(name,
user=None,
action_double_click_titlebar=None,
action_middle_click_titlebar=None,
action_right_click_titlebar=None,
application_based=None,
audible_bell=None,
auto_raise=None,
auto_raise_delay=None,
button_layout=None,
disable_workarounds=None,
focus_mode=None,
focus_new_windows=None,
mouse_button_modifier=None,
num_workspaces=None,
raise_on_click=None,
resize_with_right_button=None,
theme=None,
titlebar_font=None,
titlebar_uses_system_font=None,
visual_bell=None,
visual_bell_type=None,
workspace_names=None,
**kwargs):
'''
wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema
'''
gnome_kwargs = {
'user': user,
'schema': 'org.gnome.desktop.wm.preferences'
}
preferences = ['action_double_click_titlebar',
'action_middle_click_titlebar', 'action_right_click_titlebar',
'application_based', 'audible_bell', 'auto_raise',
'auto_raise_delay', 'button_layout', 'disable_workarounds',
'focus_mode', 'focus_new_windows', 'mouse_button_modifier',
'num_workspaces', 'raise_on_click', 'resize_with_right_button',
'theme', 'titlebar_font', 'titlebar_uses_system_font',
'visual_bell', 'visual_bell_type', 'workspace_names']
preferences_hash = {}
for pref in preferences:
if pref in locals() and locals()[pref] is not None:
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash)
|
[
"def",
"wm_preferences",
"(",
"name",
",",
"user",
"=",
"None",
",",
"action_double_click_titlebar",
"=",
"None",
",",
"action_middle_click_titlebar",
"=",
"None",
",",
"action_right_click_titlebar",
"=",
"None",
",",
"application_based",
"=",
"None",
",",
"audible_bell",
"=",
"None",
",",
"auto_raise",
"=",
"None",
",",
"auto_raise_delay",
"=",
"None",
",",
"button_layout",
"=",
"None",
",",
"disable_workarounds",
"=",
"None",
",",
"focus_mode",
"=",
"None",
",",
"focus_new_windows",
"=",
"None",
",",
"mouse_button_modifier",
"=",
"None",
",",
"num_workspaces",
"=",
"None",
",",
"raise_on_click",
"=",
"None",
",",
"resize_with_right_button",
"=",
"None",
",",
"theme",
"=",
"None",
",",
"titlebar_font",
"=",
"None",
",",
"titlebar_uses_system_font",
"=",
"None",
",",
"visual_bell",
"=",
"None",
",",
"visual_bell_type",
"=",
"None",
",",
"workspace_names",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"gnome_kwargs",
"=",
"{",
"'user'",
":",
"user",
",",
"'schema'",
":",
"'org.gnome.desktop.wm.preferences'",
"}",
"preferences",
"=",
"[",
"'action_double_click_titlebar'",
",",
"'action_middle_click_titlebar'",
",",
"'action_right_click_titlebar'",
",",
"'application_based'",
",",
"'audible_bell'",
",",
"'auto_raise'",
",",
"'auto_raise_delay'",
",",
"'button_layout'",
",",
"'disable_workarounds'",
",",
"'focus_mode'",
",",
"'focus_new_windows'",
",",
"'mouse_button_modifier'",
",",
"'num_workspaces'",
",",
"'raise_on_click'",
",",
"'resize_with_right_button'",
",",
"'theme'",
",",
"'titlebar_font'",
",",
"'titlebar_uses_system_font'",
",",
"'visual_bell'",
",",
"'visual_bell_type'",
",",
"'workspace_names'",
"]",
"preferences_hash",
"=",
"{",
"}",
"for",
"pref",
"in",
"preferences",
":",
"if",
"pref",
"in",
"locals",
"(",
")",
"and",
"locals",
"(",
")",
"[",
"pref",
"]",
"is",
"not",
"None",
":",
"key",
"=",
"re",
".",
"sub",
"(",
"'_'",
",",
"'-'",
",",
"pref",
")",
"preferences_hash",
"[",
"key",
"]",
"=",
"locals",
"(",
")",
"[",
"pref",
"]",
"return",
"_do",
"(",
"name",
",",
"gnome_kwargs",
",",
"preferences_hash",
")"
] |
wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema
|
[
"wm_preferences",
":",
"sets",
"values",
"in",
"the",
"org",
".",
"gnome",
".",
"desktop",
".",
"wm",
".",
"preferences",
"schema"
] |
python
|
train
| 40.416667 |
choderalab/pymbar
|
pymbar/old_mbar.py
|
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L2300-L2428
|
def _adaptive(self, gamma=1.0, relative_tolerance=1.0e-8, maximum_iterations=1000, verbose=True, print_warning=True):
"""
Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
Is slower than NR (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-6)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are include in the estimation procedure.
REFERENCES
See Appendix C.2 of [1].
"""
if verbose:
print("Determining dimensionless free energies by Newton-Raphson iteration.")
# keep track of Newton-Raphson and self-consistent iterations
nr_iter = 0
sci_iter = 0
N_k = self.N_k[self.states_with_samples]
K = len(N_k)
f_k_sci = np.zeros([K], dtype=np.float64)
f_k_new = np.zeros([K], dtype=np.float64)
# Perform Newton-Raphson iterations (with sci computed on the way)
for iteration in range(0, maximum_iterations):
# Store for new estimate of dimensionless relative free energies.
f_k = self.f_k[self.states_with_samples].copy()
# compute weights for gradients: the denominators and free energies are from the previous
# iteration in most cases.
(W_nk, f_k_sci) = self._computeWeights(
recalc_denom=(iteration == 0), return_f_k = True)
# Compute gradient and Hessian of last (K-1) states.
#
# gradient (defined by Eq. C6 of [1])
# g_i(theta) = N_i - \sum_n N_i W_ni
#
# Hessian (defined by Eq. C9 of [1])
# H_ii(theta) = - \sum_n N_i W_ni (1 - N_i W_ni)
# H_ij(theta) = \sum_n N_i W_ni N_j W_nj
#
"""
g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient
H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian
for i in range(1,K):
g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()
H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum()
for j in range(1,i):
H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()
H[j-1,i-1] = H[i-1,j-1]
# Update the free energy estimate (Eq. C11 of [1]).
Hinvg = linalg.lstsq(H,g)[0] #
# Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.
for k in range(0,K-1):
f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]
"""
g = N_k - N_k * W_nk.sum(axis=0)
NW = N_k * W_nk
H = np.dot(NW.T, NW)
H += (g.T - N_k) * np.eye(K)
# Update the free energy estimate (Eq. C11 of [1]).
# will always have lower rank the way it is set up
Hinvg = linalg.lstsq(H, g)[0]
Hinvg -= Hinvg[0]
f_k_new = f_k - gamma * Hinvg
# self-consistent iteration gradient norm and saved log sums.
g_sci = self._gradientF(f_k_sci)
gnorm_sci = np.dot(g_sci, g_sci)
# save this so we can switch it back in if g_sci is lower.
log_weight_denom = self.log_weight_denom.copy()
# newton raphson gradient norm and saved log sums.
g_nr = self._gradientF(f_k_new)
gnorm_nr = np.dot(g_nr, g_nr)
# we could save the gradient, too, but it's not too expensive to
# compute since we are doing the Hessian anyway.
if verbose:
print("self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g" % (gnorm_sci, gnorm_nr))
# decide which directon to go depending on size of gradient norm
if (gnorm_sci < gnorm_nr or sci_iter < 2):
sci_iter += 1
self.log_weight_denom = log_weight_denom.copy()
if verbose:
if sci_iter < 2:
print("Choosing self-consistent iteration on iteration %d" % iteration)
else:
print("Choosing self-consistent iteration for lower gradient on iteration %d" % iteration)
f_k_new = f_k_sci.copy()
else:
nr_iter += 1
if verbose:
print("Newton-Raphson used on iteration %d" % iteration)
# get rid of big matrices that are not used.
del(log_weight_denom, NW, W_nk)
# have to set the free energies back in self, since the gradient
# routine changes them.
self.f_k[self.states_with_samples] = f_k
if (self._amIdoneIterating(f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose)):
if verbose:
print('Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations' % (iteration + 1, nr_iter, sci_iter))
break
return
|
[
"def",
"_adaptive",
"(",
"self",
",",
"gamma",
"=",
"1.0",
",",
"relative_tolerance",
"=",
"1.0e-8",
",",
"maximum_iterations",
"=",
"1000",
",",
"verbose",
"=",
"True",
",",
"print_warning",
"=",
"True",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Determining dimensionless free energies by Newton-Raphson iteration.\"",
")",
"# keep track of Newton-Raphson and self-consistent iterations",
"nr_iter",
"=",
"0",
"sci_iter",
"=",
"0",
"N_k",
"=",
"self",
".",
"N_k",
"[",
"self",
".",
"states_with_samples",
"]",
"K",
"=",
"len",
"(",
"N_k",
")",
"f_k_sci",
"=",
"np",
".",
"zeros",
"(",
"[",
"K",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"f_k_new",
"=",
"np",
".",
"zeros",
"(",
"[",
"K",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"# Perform Newton-Raphson iterations (with sci computed on the way)",
"for",
"iteration",
"in",
"range",
"(",
"0",
",",
"maximum_iterations",
")",
":",
"# Store for new estimate of dimensionless relative free energies.",
"f_k",
"=",
"self",
".",
"f_k",
"[",
"self",
".",
"states_with_samples",
"]",
".",
"copy",
"(",
")",
"# compute weights for gradients: the denominators and free energies are from the previous",
"# iteration in most cases.",
"(",
"W_nk",
",",
"f_k_sci",
")",
"=",
"self",
".",
"_computeWeights",
"(",
"recalc_denom",
"=",
"(",
"iteration",
"==",
"0",
")",
",",
"return_f_k",
"=",
"True",
")",
"# Compute gradient and Hessian of last (K-1) states.",
"#",
"# gradient (defined by Eq. C6 of [1])",
"# g_i(theta) = N_i - \\sum_n N_i W_ni",
"#",
"# Hessian (defined by Eq. C9 of [1])",
"# H_ii(theta) = - \\sum_n N_i W_ni (1 - N_i W_ni)",
"# H_ij(theta) = \\sum_n N_i W_ni N_j W_nj",
"#",
"\"\"\"\n g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient\n H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian\n for i in range(1,K):\n g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()\n H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum() \n for j in range(1,i):\n H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()\n H[j-1,i-1] = H[i-1,j-1]\n\n # Update the free energy estimate (Eq. C11 of [1]).\n Hinvg = linalg.lstsq(H,g)[0] #\n # Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.\n for k in range(0,K-1):\n f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]\n\n \"\"\"",
"g",
"=",
"N_k",
"-",
"N_k",
"*",
"W_nk",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"NW",
"=",
"N_k",
"*",
"W_nk",
"H",
"=",
"np",
".",
"dot",
"(",
"NW",
".",
"T",
",",
"NW",
")",
"H",
"+=",
"(",
"g",
".",
"T",
"-",
"N_k",
")",
"*",
"np",
".",
"eye",
"(",
"K",
")",
"# Update the free energy estimate (Eq. C11 of [1]).",
"# will always have lower rank the way it is set up",
"Hinvg",
"=",
"linalg",
".",
"lstsq",
"(",
"H",
",",
"g",
")",
"[",
"0",
"]",
"Hinvg",
"-=",
"Hinvg",
"[",
"0",
"]",
"f_k_new",
"=",
"f_k",
"-",
"gamma",
"*",
"Hinvg",
"# self-consistent iteration gradient norm and saved log sums.",
"g_sci",
"=",
"self",
".",
"_gradientF",
"(",
"f_k_sci",
")",
"gnorm_sci",
"=",
"np",
".",
"dot",
"(",
"g_sci",
",",
"g_sci",
")",
"# save this so we can switch it back in if g_sci is lower.",
"log_weight_denom",
"=",
"self",
".",
"log_weight_denom",
".",
"copy",
"(",
")",
"# newton raphson gradient norm and saved log sums.",
"g_nr",
"=",
"self",
".",
"_gradientF",
"(",
"f_k_new",
")",
"gnorm_nr",
"=",
"np",
".",
"dot",
"(",
"g_nr",
",",
"g_nr",
")",
"# we could save the gradient, too, but it's not too expensive to",
"# compute since we are doing the Hessian anyway.",
"if",
"verbose",
":",
"print",
"(",
"\"self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g\"",
"%",
"(",
"gnorm_sci",
",",
"gnorm_nr",
")",
")",
"# decide which directon to go depending on size of gradient norm",
"if",
"(",
"gnorm_sci",
"<",
"gnorm_nr",
"or",
"sci_iter",
"<",
"2",
")",
":",
"sci_iter",
"+=",
"1",
"self",
".",
"log_weight_denom",
"=",
"log_weight_denom",
".",
"copy",
"(",
")",
"if",
"verbose",
":",
"if",
"sci_iter",
"<",
"2",
":",
"print",
"(",
"\"Choosing self-consistent iteration on iteration %d\"",
"%",
"iteration",
")",
"else",
":",
"print",
"(",
"\"Choosing self-consistent iteration for lower gradient on iteration %d\"",
"%",
"iteration",
")",
"f_k_new",
"=",
"f_k_sci",
".",
"copy",
"(",
")",
"else",
":",
"nr_iter",
"+=",
"1",
"if",
"verbose",
":",
"print",
"(",
"\"Newton-Raphson used on iteration %d\"",
"%",
"iteration",
")",
"# get rid of big matrices that are not used.",
"del",
"(",
"log_weight_denom",
",",
"NW",
",",
"W_nk",
")",
"# have to set the free energies back in self, since the gradient",
"# routine changes them.",
"self",
".",
"f_k",
"[",
"self",
".",
"states_with_samples",
"]",
"=",
"f_k",
"if",
"(",
"self",
".",
"_amIdoneIterating",
"(",
"f_k_new",
",",
"relative_tolerance",
",",
"iteration",
",",
"maximum_iterations",
",",
"print_warning",
",",
"verbose",
")",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations'",
"%",
"(",
"iteration",
"+",
"1",
",",
"nr_iter",
",",
"sci_iter",
")",
")",
"break",
"return"
] |
Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
Is slower than NR (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-6)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are include in the estimation procedure.
REFERENCES
See Appendix C.2 of [1].
|
[
"Determine",
"dimensionless",
"free",
"energies",
"by",
"a",
"combination",
"of",
"Newton",
"-",
"Raphson",
"iteration",
"and",
"self",
"-",
"consistent",
"iteration",
".",
"Picks",
"whichever",
"method",
"gives",
"the",
"lowest",
"gradient",
".",
"Is",
"slower",
"than",
"NR",
"(",
"approximated",
"not",
"calculated",
")",
"since",
"it",
"calculates",
"the",
"log",
"norms",
"twice",
"each",
"iteration",
"."
] |
python
|
train
| 45.069767 |
watson-developer-cloud/python-sdk
|
ibm_watson/compare_comply_v1.py
|
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L3709-L3716
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict
|
[
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'types'",
")",
"and",
"self",
".",
"types",
"is",
"not",
"None",
":",
"_dict",
"[",
"'types'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"types",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'categories'",
")",
"and",
"self",
".",
"categories",
"is",
"not",
"None",
":",
"_dict",
"[",
"'categories'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"categories",
"]",
"return",
"_dict"
] |
Return a json dictionary representing this model.
|
[
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] |
python
|
train
| 48.5 |
spacetelescope/synphot_refactor
|
synphot/reddening.py
|
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/reddening.py#L131-L163
|
def from_file(cls, filename, **kwargs):
"""Create a reddening law from file.
If filename has 'fits' or 'fit' suffix, it is read as FITS.
Otherwise, it is read as ASCII.
Parameters
----------
filename : str
Reddening law filename.
kwargs : dict
Keywords acceptable by
:func:`~synphot.specio.read_fits_spec` (if FITS) or
:func:`~synphot.specio.read_ascii_spec` (if ASCII).
Returns
-------
redlaw : `ReddeningLaw`
Empirical reddening law.
"""
if 'flux_unit' not in kwargs:
kwargs['flux_unit'] = cls._internal_flux_unit
if ((filename.endswith('fits') or filename.endswith('fit')) and
'flux_col' not in kwargs):
kwargs['flux_col'] = 'Av/E(B-V)'
header, wavelengths, rvs = specio.read_spec(filename, **kwargs)
return cls(Empirical1D, points=wavelengths, lookup_table=rvs,
meta={'header': header})
|
[
"def",
"from_file",
"(",
"cls",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'flux_unit'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'flux_unit'",
"]",
"=",
"cls",
".",
"_internal_flux_unit",
"if",
"(",
"(",
"filename",
".",
"endswith",
"(",
"'fits'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'fit'",
")",
")",
"and",
"'flux_col'",
"not",
"in",
"kwargs",
")",
":",
"kwargs",
"[",
"'flux_col'",
"]",
"=",
"'Av/E(B-V)'",
"header",
",",
"wavelengths",
",",
"rvs",
"=",
"specio",
".",
"read_spec",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
"return",
"cls",
"(",
"Empirical1D",
",",
"points",
"=",
"wavelengths",
",",
"lookup_table",
"=",
"rvs",
",",
"meta",
"=",
"{",
"'header'",
":",
"header",
"}",
")"
] |
Create a reddening law from file.
If filename has 'fits' or 'fit' suffix, it is read as FITS.
Otherwise, it is read as ASCII.
Parameters
----------
filename : str
Reddening law filename.
kwargs : dict
Keywords acceptable by
:func:`~synphot.specio.read_fits_spec` (if FITS) or
:func:`~synphot.specio.read_ascii_spec` (if ASCII).
Returns
-------
redlaw : `ReddeningLaw`
Empirical reddening law.
|
[
"Create",
"a",
"reddening",
"law",
"from",
"file",
"."
] |
python
|
train
| 30.454545 |
aio-libs/janus
|
janus/__init__.py
|
https://github.com/aio-libs/janus/blob/8dc80530db1144fbd1dba75d4a1c1a54bb520c21/janus/__init__.py#L187-L210
|
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
self._parent._check_closing()
with self._parent._all_tasks_done:
unfinished = self._parent._unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self._parent._all_tasks_done.notify_all()
self._parent._loop.call_soon_threadsafe(
self._parent._finished.set)
self._parent._unfinished_tasks = unfinished
|
[
"def",
"task_done",
"(",
"self",
")",
":",
"self",
".",
"_parent",
".",
"_check_closing",
"(",
")",
"with",
"self",
".",
"_parent",
".",
"_all_tasks_done",
":",
"unfinished",
"=",
"self",
".",
"_parent",
".",
"_unfinished_tasks",
"-",
"1",
"if",
"unfinished",
"<=",
"0",
":",
"if",
"unfinished",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'task_done() called too many times'",
")",
"self",
".",
"_parent",
".",
"_all_tasks_done",
".",
"notify_all",
"(",
")",
"self",
".",
"_parent",
".",
"_loop",
".",
"call_soon_threadsafe",
"(",
"self",
".",
"_parent",
".",
"_finished",
".",
"set",
")",
"self",
".",
"_parent",
".",
"_unfinished_tasks",
"=",
"unfinished"
] |
Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
|
[
"Indicate",
"that",
"a",
"formerly",
"enqueued",
"task",
"is",
"complete",
"."
] |
python
|
train
| 44.625 |
toumorokoshi/transmute-core
|
transmute_core/function/parameters.py
|
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/function/parameters.py#L84-L99
|
def _extract_path_parameters_from_paths(paths):
"""
from a list of paths, return back a list of the
arguments present in those paths.
the arguments available in all of the paths must match: if not,
an exception will be raised.
"""
params = set()
for path in paths:
parts = PART_REGEX.split(path)
for p in parts:
match = PARAM_REGEX.match(p)
if match:
params.add(match.group("name"))
return params
|
[
"def",
"_extract_path_parameters_from_paths",
"(",
"paths",
")",
":",
"params",
"=",
"set",
"(",
")",
"for",
"path",
"in",
"paths",
":",
"parts",
"=",
"PART_REGEX",
".",
"split",
"(",
"path",
")",
"for",
"p",
"in",
"parts",
":",
"match",
"=",
"PARAM_REGEX",
".",
"match",
"(",
"p",
")",
"if",
"match",
":",
"params",
".",
"add",
"(",
"match",
".",
"group",
"(",
"\"name\"",
")",
")",
"return",
"params"
] |
from a list of paths, return back a list of the
arguments present in those paths.
the arguments available in all of the paths must match: if not,
an exception will be raised.
|
[
"from",
"a",
"list",
"of",
"paths",
"return",
"back",
"a",
"list",
"of",
"the",
"arguments",
"present",
"in",
"those",
"paths",
"."
] |
python
|
train
| 29.625 |
glitchassassin/lackey
|
lackey/InputEmulation.py
|
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L59-L73
|
def moveSpeed(self, location, seconds=0.3):
""" Moves cursor to specified ``Location`` over ``seconds``.
If ``seconds`` is 0, moves the cursor immediately. Used for smooth
somewhat-human-like motion.
"""
self._lock.acquire()
original_location = mouse.get_position()
mouse.move(location.x, location.y, duration=seconds)
if mouse.get_position() == original_location and original_location != location.getTuple():
raise IOError("""
Unable to move mouse cursor. This may happen if you're trying to automate a
program running as Administrator with a script running as a non-elevated user.
""")
self._lock.release()
|
[
"def",
"moveSpeed",
"(",
"self",
",",
"location",
",",
"seconds",
"=",
"0.3",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"original_location",
"=",
"mouse",
".",
"get_position",
"(",
")",
"mouse",
".",
"move",
"(",
"location",
".",
"x",
",",
"location",
".",
"y",
",",
"duration",
"=",
"seconds",
")",
"if",
"mouse",
".",
"get_position",
"(",
")",
"==",
"original_location",
"and",
"original_location",
"!=",
"location",
".",
"getTuple",
"(",
")",
":",
"raise",
"IOError",
"(",
"\"\"\"\n Unable to move mouse cursor. This may happen if you're trying to automate a \n program running as Administrator with a script running as a non-elevated user.\n \"\"\"",
")",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] |
Moves cursor to specified ``Location`` over ``seconds``.
If ``seconds`` is 0, moves the cursor immediately. Used for smooth
somewhat-human-like motion.
|
[
"Moves",
"cursor",
"to",
"specified",
"Location",
"over",
"seconds",
"."
] |
python
|
train
| 48.266667 |
cloudera/cm_api
|
python/src/cm_api/endpoints/clusters.py
|
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L100-L104
|
def _put_cluster(self, dic, params=None):
"""Change cluster attributes"""
cluster = self._put('', ApiCluster, data=dic, params=params)
self._update(cluster)
return self
|
[
"def",
"_put_cluster",
"(",
"self",
",",
"dic",
",",
"params",
"=",
"None",
")",
":",
"cluster",
"=",
"self",
".",
"_put",
"(",
"''",
",",
"ApiCluster",
",",
"data",
"=",
"dic",
",",
"params",
"=",
"params",
")",
"self",
".",
"_update",
"(",
"cluster",
")",
"return",
"self"
] |
Change cluster attributes
|
[
"Change",
"cluster",
"attributes"
] |
python
|
train
| 36 |
saltstack/salt
|
salt/modules/firewalld.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/firewalld.py#L48-L55
|
def __mgmt(name, _type, action):
'''
Perform zone management
'''
# It's permanent because the 4 concerned functions need the permanent option, it's wrong without
cmd = '--{0}-{1}={2} --permanent'.format(action, _type, name)
return __firewall_cmd(cmd)
|
[
"def",
"__mgmt",
"(",
"name",
",",
"_type",
",",
"action",
")",
":",
"# It's permanent because the 4 concerned functions need the permanent option, it's wrong without",
"cmd",
"=",
"'--{0}-{1}={2} --permanent'",
".",
"format",
"(",
"action",
",",
"_type",
",",
"name",
")",
"return",
"__firewall_cmd",
"(",
"cmd",
")"
] |
Perform zone management
|
[
"Perform",
"zone",
"management"
] |
python
|
train
| 33.5 |
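The helper above only builds a firewall-cmd argument string; a quick sketch of the string it produces (nothing is executed here) is:

    def build_mgmt_cmd(name, _type, action):
        # Mirrors the format string used above.
        return '--{0}-{1}={2} --permanent'.format(action, _type, name)

    print(build_mgmt_cmd('public', 'zone', 'new'))
    # --new-zone=public --permanent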
PythonCharmers/python-future
|
src/future/types/newrange.py
|
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newrange.py#L133-L141
|
def __getitem_slice(self, slce):
"""Return a range which represents the requested slce
of the sequence represented by this range.
"""
scaled_indices = (self._step * n for n in slce.indices(self._len))
start_offset, stop_offset, new_step = scaled_indices
return newrange(self._start + start_offset,
self._start + stop_offset,
new_step)
|
[
"def",
"__getitem_slice",
"(",
"self",
",",
"slce",
")",
":",
"scaled_indices",
"=",
"(",
"self",
".",
"_step",
"*",
"n",
"for",
"n",
"in",
"slce",
".",
"indices",
"(",
"self",
".",
"_len",
")",
")",
"start_offset",
",",
"stop_offset",
",",
"new_step",
"=",
"scaled_indices",
"return",
"newrange",
"(",
"self",
".",
"_start",
"+",
"start_offset",
",",
"self",
".",
"_start",
"+",
"stop_offset",
",",
"new_step",
")"
] |
Return a range which represents the requested slce
of the sequence represented by this range.
|
[
"Return",
"a",
"range",
"which",
"represents",
"the",
"requested",
"slce",
"of",
"the",
"sequence",
"represented",
"by",
"this",
"range",
"."
] |
python
|
train
| 46.888889 |
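The same start/stop/step arithmetic can be checked against the builtin range; a small sketch, assuming a plain (start, stop, step) triple instead of the class attributes used above:

    def slice_range(start, stop, step, slce):
        """Compute range(start, stop, step)[slce] via scaled slice indices."""
        length = len(range(start, stop, step))
        start_off, stop_off, new_step = (step * n for n in slce.indices(length))
        return range(start + start_off, start + stop_off, new_step)

    r = range(10, 30, 2)
    s = slice(1, 8, 2)
    assert list(slice_range(10, 30, 2, s)) == list(r[s])  # [12, 16, 20, 24]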
bitesofcode/projexui
|
projexui/widgets/xoverlaywidget.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xoverlaywidget.py#L97-L106
|
def keyPressEvent(self, event):
"""
Exits the modal window on an escape press.
:param event | <QtCore.QKeyPressEvent>
"""
if event.key() == QtCore.Qt.Key_Escape:
self.reject()
super(XOverlayWidget, self).keyPressEvent(event)
|
[
"def",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"key",
"(",
")",
"==",
"QtCore",
".",
"Qt",
".",
"Key_Escape",
":",
"self",
".",
"reject",
"(",
")",
"super",
"(",
"XOverlayWidget",
",",
"self",
")",
".",
"keyPressEvent",
"(",
"event",
")"
] |
Exits the modal window on an escape press.
:param event | <QtCore.QKeyPressEvent>
|
[
"Exits",
"the",
"modal",
"window",
"on",
"an",
"escape",
"press",
"."
] |
python
|
train
| 28.2 |
jason-weirather/py-seq-tools
|
seqtools/graph/__init__.py
|
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/graph/__init__.py#L176-L182
|
def find_cycle(self):
"""greedy search for a cycle"""
for node in self.nodes:
cyc = self._follow_children(node)
if len(cyc) > 0:
return [self._nodes[x] for x in cyc]
return None
|
[
"def",
"find_cycle",
"(",
"self",
")",
":",
"for",
"node",
"in",
"self",
".",
"nodes",
":",
"cyc",
"=",
"self",
".",
"_follow_children",
"(",
"node",
")",
"if",
"len",
"(",
"cyc",
")",
">",
"0",
":",
"return",
"[",
"self",
".",
"_nodes",
"[",
"x",
"]",
"for",
"x",
"in",
"cyc",
"]",
"return",
"None"
] |
greedy search for a cycle
|
[
"greedy",
"search",
"for",
"a",
"cycle"
] |
python
|
train
| 31.285714 |
peterbrittain/asciimatics
|
asciimatics/widgets.py
|
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/widgets.py#L3680-L3688
|
def is_mouse_over(self, event):
"""
Check whether a MouseEvent is over thus scroll bar.
:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar.
"""
return event.x == self._x and self._y <= event.y < self._y + self._height
|
[
"def",
"is_mouse_over",
"(",
"self",
",",
"event",
")",
":",
"return",
"event",
".",
"x",
"==",
"self",
".",
"_x",
"and",
"self",
".",
"_y",
"<=",
"event",
".",
"y",
"<",
"self",
".",
"_y",
"+",
"self",
".",
"_height"
] |
Check whether a MouseEvent is over thus scroll bar.
:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar.
|
[
"Check",
"whether",
"a",
"MouseEvent",
"is",
"over",
"thus",
"scroll",
"bar",
"."
] |
python
|
train
| 33.777778 |
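The hit test is a one-line bounds check; as a standalone sketch over plain coordinates (names here are illustrative, not the widget's own attributes):

    def is_over_vertical_bar(event_x, event_y, bar_x, bar_y, bar_height):
        """True when the point lies on a 1-column bar spanning bar_y..bar_y+bar_height-1."""
        return event_x == bar_x and bar_y <= event_y < bar_y + bar_height

    print(is_over_vertical_bar(5, 3, 5, 0, 10))  # True
    print(is_over_vertical_bar(4, 3, 5, 0, 10))  # False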
hadrianl/huobi
|
huobitrade/service.py
|
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L1269-L1288
|
def margin_to_exchange(self, symbol, currency, amount):
"""
借贷账户划出至现货账户
:param amount:
:param currency:
:param symbol:
:return:
"""
params = {'symbol': symbol, 'currency': currency, 'amount': amount}
path = '/v1/dw/transfer-out/margin'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper
|
[
"def",
"margin_to_exchange",
"(",
"self",
",",
"symbol",
",",
"currency",
",",
"amount",
")",
":",
"params",
"=",
"{",
"'symbol'",
":",
"symbol",
",",
"'currency'",
":",
"currency",
",",
"'amount'",
":",
"amount",
"}",
"path",
"=",
"'/v1/dw/transfer-out/margin'",
"def",
"_wrapper",
"(",
"_func",
")",
":",
"@",
"wraps",
"(",
"_func",
")",
"def",
"handle",
"(",
")",
":",
"_func",
"(",
"api_key_post",
"(",
"params",
",",
"path",
")",
")",
"return",
"handle",
"return",
"_wrapper"
] |
借贷账户划出至现货账户
:param amount:
:param currency:
:param symbol:
:return:
|
[
"借贷账户划出至现货账户",
":",
"param",
"amount",
":",
":",
"param",
"currency",
":",
":",
"param",
"symbol",
":",
":",
"return",
":"
] |
python
|
train
| 23.65 |
swisscom/cleanerversion
|
versions/models.py
|
https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/models.py#L298-L323
|
def as_sql(self, qn, connection):
"""
This method identifies joined table aliases in order for
VersionedExtraWhere.as_sql() to be able to add time restrictions for
those tables based on the VersionedQuery's querytime value.
:param qn: In Django 1.7 & 1.8 this is a compiler
:param connection: A DB connection
:return: A tuple consisting of (sql_string, result_params)
"""
# self.children is an array of VersionedExtraWhere-objects
from versions.fields import VersionedExtraWhere
for child in self.children:
if isinstance(child, VersionedExtraWhere) and not child.params:
_query = qn.query
query_time = _query.querytime.time
apply_query_time = _query.querytime.active
alias_map = _query.alias_map
self._set_child_joined_alias(child, alias_map)
if apply_query_time:
# Add query parameters that have not been added till now
child.set_as_of(query_time)
else:
# Remove the restriction if it's not required
child.sqls = []
return super(VersionedWhereNode, self).as_sql(qn, connection)
|
[
"def",
"as_sql",
"(",
"self",
",",
"qn",
",",
"connection",
")",
":",
"# self.children is an array of VersionedExtraWhere-objects",
"from",
"versions",
".",
"fields",
"import",
"VersionedExtraWhere",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"VersionedExtraWhere",
")",
"and",
"not",
"child",
".",
"params",
":",
"_query",
"=",
"qn",
".",
"query",
"query_time",
"=",
"_query",
".",
"querytime",
".",
"time",
"apply_query_time",
"=",
"_query",
".",
"querytime",
".",
"active",
"alias_map",
"=",
"_query",
".",
"alias_map",
"self",
".",
"_set_child_joined_alias",
"(",
"child",
",",
"alias_map",
")",
"if",
"apply_query_time",
":",
"# Add query parameters that have not been added till now",
"child",
".",
"set_as_of",
"(",
"query_time",
")",
"else",
":",
"# Remove the restriction if it's not required",
"child",
".",
"sqls",
"=",
"[",
"]",
"return",
"super",
"(",
"VersionedWhereNode",
",",
"self",
")",
".",
"as_sql",
"(",
"qn",
",",
"connection",
")"
] |
This method identifies joined table aliases in order for
VersionedExtraWhere.as_sql() to be able to add time restrictions for
those tables based on the VersionedQuery's querytime value.
:param qn: In Django 1.7 & 1.8 this is a compiler
:param connection: A DB connection
:return: A tuple consisting of (sql_string, result_params)
|
[
"This",
"method",
"identifies",
"joined",
"table",
"aliases",
"in",
"order",
"for",
"VersionedExtraWhere",
".",
"as_sql",
"()",
"to",
"be",
"able",
"to",
"add",
"time",
"restrictions",
"for",
"those",
"tables",
"based",
"on",
"the",
"VersionedQuery",
"s",
"querytime",
"value",
"."
] |
python
|
train
| 48.230769 |
shoebot/shoebot
|
lib/colors/__init__.py
|
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/colors/__init__.py#L805-L813
|
def blend(self, clr, factor=0.5):
"""
Returns a mix of two colors.
"""
r = self.r * (1 - factor) + clr.r * factor
g = self.g * (1 - factor) + clr.g * factor
b = self.b * (1 - factor) + clr.b * factor
a = self.a * (1 - factor) + clr.a * factor
return Color(r, g, b, a, mode="rgb")
|
[
"def",
"blend",
"(",
"self",
",",
"clr",
",",
"factor",
"=",
"0.5",
")",
":",
"r",
"=",
"self",
".",
"r",
"*",
"(",
"1",
"-",
"factor",
")",
"+",
"clr",
".",
"r",
"*",
"factor",
"g",
"=",
"self",
".",
"g",
"*",
"(",
"1",
"-",
"factor",
")",
"+",
"clr",
".",
"g",
"*",
"factor",
"b",
"=",
"self",
".",
"b",
"*",
"(",
"1",
"-",
"factor",
")",
"+",
"clr",
".",
"b",
"*",
"factor",
"a",
"=",
"self",
".",
"a",
"*",
"(",
"1",
"-",
"factor",
")",
"+",
"clr",
".",
"a",
"*",
"factor",
"return",
"Color",
"(",
"r",
",",
"g",
",",
"b",
",",
"a",
",",
"mode",
"=",
"\"rgb\"",
")"
] |
Returns a mix of two colors.
|
[
"Returns",
"a",
"mix",
"of",
"two",
"colors",
"."
] |
python
|
valid
| 37.222222 |
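The blend is a plain per-channel linear interpolation; a standalone sketch over RGBA tuples (instead of the library's Color class) is:

    def blend(c1, c2, factor=0.5):
        """Linearly interpolate two RGBA tuples; factor=0 gives c1, factor=1 gives c2."""
        return tuple(a * (1 - factor) + b * factor for a, b in zip(c1, c2))

    red = (1.0, 0.0, 0.0, 1.0)
    blue = (0.0, 0.0, 1.0, 1.0)
    print(blend(red, blue))        # (0.5, 0.0, 0.5, 1.0)
    print(blend(red, blue, 0.25))  # (0.75, 0.0, 0.25, 1.0)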
vladsaveliev/TargQC
|
targqc/utilz/jsontemplate/_jsontemplate.py
|
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/utilz/jsontemplate/_jsontemplate.py#L1797-L1816
|
def expand_with_style(template, style, data, body_subtree='body'):
"""Expand a data dictionary with a template AND a style.
DEPRECATED -- Remove this entire function in favor of expand(d, style=style)
A style is a Template instance that factors out the common strings in several
"body" templates.
Args:
template: Template instance for the inner "page content"
style: Template instance for the outer "page style"
data: Data dictionary, with a 'body' key (or body_subtree
"""
if template.has_defines:
return template.expand(data, style=style)
else:
tokens = []
execute_with_style_LEGACY(template, style, data, tokens.append,
body_subtree=body_subtree)
return JoinTokens(tokens)
|
[
"def",
"expand_with_style",
"(",
"template",
",",
"style",
",",
"data",
",",
"body_subtree",
"=",
"'body'",
")",
":",
"if",
"template",
".",
"has_defines",
":",
"return",
"template",
".",
"expand",
"(",
"data",
",",
"style",
"=",
"style",
")",
"else",
":",
"tokens",
"=",
"[",
"]",
"execute_with_style_LEGACY",
"(",
"template",
",",
"style",
",",
"data",
",",
"tokens",
".",
"append",
",",
"body_subtree",
"=",
"body_subtree",
")",
"return",
"JoinTokens",
"(",
"tokens",
")"
] |
Expand a data dictionary with a template AND a style.
DEPRECATED -- Remove this entire function in favor of expand(d, style=style)
A style is a Template instance that factors out the common strings in several
"body" templates.
Args:
template: Template instance for the inner "page content"
style: Template instance for the outer "page style"
data: Data dictionary, with a 'body' key (or body_subtree
|
[
"Expand",
"a",
"data",
"dictionary",
"with",
"a",
"template",
"AND",
"a",
"style",
"."
] |
python
|
train
| 38.05 |
lesscpy/lesscpy
|
lesscpy/lessc/lexer.py
|
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/lexer.py#L340-L346
|
def t_t_eopen(self, t):
r'~"|~\''
if t.value[1] == '"':
t.lexer.push_state('escapequotes')
elif t.value[1] == '\'':
t.lexer.push_state('escapeapostrophe')
return t
|
[
"def",
"t_t_eopen",
"(",
"self",
",",
"t",
")",
":",
"if",
"t",
".",
"value",
"[",
"1",
"]",
"==",
"'\"'",
":",
"t",
".",
"lexer",
".",
"push_state",
"(",
"'escapequotes'",
")",
"elif",
"t",
".",
"value",
"[",
"1",
"]",
"==",
"'\\''",
":",
"t",
".",
"lexer",
".",
"push_state",
"(",
"'escapeapostrophe'",
")",
"return",
"t"
] |
r'~"|~\
|
[
"r",
"~",
"|~",
"\\"
] |
python
|
valid
| 30.428571 |
openvax/pyensembl
|
pyensembl/database.py
|
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/database.py#L111-L151
|
def _all_possible_indices(self, column_names):
"""
Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key.
"""
candidate_column_groups = [
['seqname', 'start', 'end'],
['gene_name'],
['gene_id'],
['transcript_id'],
['transcript_name'],
['exon_id'],
['protein_id'],
['ccds_id'],
]
indices = []
column_set = set(column_names)
# Since queries are often restricted by feature type
# we should include that column in combination with all
# other indices we anticipate might improve performance
for column_group in candidate_column_groups:
skip = False
for column_name in column_group:
# some columns, such as 'exon_id',
# are not available in all releases of Ensembl (or
# other GTFs)
if column_name not in column_set:
logger.info(
"Skipping database index for {%s}",
", ".join(column_group))
skip = True
if skip:
continue
indices.append(column_group)
return indices
|
[
"def",
"_all_possible_indices",
"(",
"self",
",",
"column_names",
")",
":",
"candidate_column_groups",
"=",
"[",
"[",
"'seqname'",
",",
"'start'",
",",
"'end'",
"]",
",",
"[",
"'gene_name'",
"]",
",",
"[",
"'gene_id'",
"]",
",",
"[",
"'transcript_id'",
"]",
",",
"[",
"'transcript_name'",
"]",
",",
"[",
"'exon_id'",
"]",
",",
"[",
"'protein_id'",
"]",
",",
"[",
"'ccds_id'",
"]",
",",
"]",
"indices",
"=",
"[",
"]",
"column_set",
"=",
"set",
"(",
"column_names",
")",
"# Since queries are often restricted by feature type",
"# we should include that column in combination with all",
"# other indices we anticipate might improve performance",
"for",
"column_group",
"in",
"candidate_column_groups",
":",
"skip",
"=",
"False",
"for",
"column_name",
"in",
"column_group",
":",
"# some columns, such as 'exon_id',",
"# are not available in all releases of Ensembl (or",
"# other GTFs)",
"if",
"column_name",
"not",
"in",
"column_set",
":",
"logger",
".",
"info",
"(",
"\"Skipping database index for {%s}\"",
",",
"\", \"",
".",
"join",
"(",
"column_group",
")",
")",
"skip",
"=",
"True",
"if",
"skip",
":",
"continue",
"indices",
".",
"append",
"(",
"column_group",
")",
"return",
"indices"
] |
Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key.
|
[
"Create",
"list",
"of",
"tuples",
"containing",
"all",
"possible",
"index",
"groups",
"we",
"might",
"want",
"to",
"create",
"over",
"tables",
"in",
"this",
"database",
"."
] |
python
|
train
| 38.926829 |
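The core of the method is filtering candidate index groups down to those whose columns all exist in the table; a reduced sketch of that filtering step:

    def possible_indices(column_names, candidate_groups):
        """Keep only the index groups fully covered by column_names."""
        column_set = set(column_names)
        return [group for group in candidate_groups
                if all(col in column_set for col in group)]

    candidates = [['seqname', 'start', 'end'], ['gene_id'], ['exon_id']]
    print(possible_indices(['seqname', 'start', 'end', 'gene_id'], candidates))
    # [['seqname', 'start', 'end'], ['gene_id']]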
boakley/robotframework-lint
|
rflint/parser/tables.py
|
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/tables.py#L35-L71
|
def statements(self):
'''Return a list of statements
This is done by joining together any rows that
have continuations
'''
# FIXME: no need to do this every time; we should cache the
# result
if len(self.rows) == 0:
return []
current_statement = Statement(self.rows[0])
current_statement.startline = self.rows[0].linenumber
current_statement.endline = self.rows[0].linenumber
statements = []
for row in self.rows[1:]:
if len(row) > 0 and row[0] == "...":
# we found a continuation
current_statement += row[1:]
current_statement.endline = row.linenumber
else:
if len(current_statement) > 0:
# append current statement to the list of statements...
statements.append(current_statement)
# start a new statement
current_statement = Statement(row)
current_statement.startline = row.linenumber
current_statement.endline = row.linenumber
if len(current_statement) > 0:
statements.append(current_statement)
# trim trailing blank statements
while (len(statements[-1]) == 0 or
((len(statements[-1]) == 1) and len(statements[-1][0]) == 0)):
statements.pop()
return statements
|
[
"def",
"statements",
"(",
"self",
")",
":",
"# FIXME: no need to do this every time; we should cache the",
"# result",
"if",
"len",
"(",
"self",
".",
"rows",
")",
"==",
"0",
":",
"return",
"[",
"]",
"current_statement",
"=",
"Statement",
"(",
"self",
".",
"rows",
"[",
"0",
"]",
")",
"current_statement",
".",
"startline",
"=",
"self",
".",
"rows",
"[",
"0",
"]",
".",
"linenumber",
"current_statement",
".",
"endline",
"=",
"self",
".",
"rows",
"[",
"0",
"]",
".",
"linenumber",
"statements",
"=",
"[",
"]",
"for",
"row",
"in",
"self",
".",
"rows",
"[",
"1",
":",
"]",
":",
"if",
"len",
"(",
"row",
")",
">",
"0",
"and",
"row",
"[",
"0",
"]",
"==",
"\"...\"",
":",
"# we found a continuation",
"current_statement",
"+=",
"row",
"[",
"1",
":",
"]",
"current_statement",
".",
"endline",
"=",
"row",
".",
"linenumber",
"else",
":",
"if",
"len",
"(",
"current_statement",
")",
">",
"0",
":",
"# append current statement to the list of statements...",
"statements",
".",
"append",
"(",
"current_statement",
")",
"# start a new statement",
"current_statement",
"=",
"Statement",
"(",
"row",
")",
"current_statement",
".",
"startline",
"=",
"row",
".",
"linenumber",
"current_statement",
".",
"endline",
"=",
"row",
".",
"linenumber",
"if",
"len",
"(",
"current_statement",
")",
">",
"0",
":",
"statements",
".",
"append",
"(",
"current_statement",
")",
"# trim trailing blank statements",
"while",
"(",
"len",
"(",
"statements",
"[",
"-",
"1",
"]",
")",
"==",
"0",
"or",
"(",
"(",
"len",
"(",
"statements",
"[",
"-",
"1",
"]",
")",
"==",
"1",
")",
"and",
"len",
"(",
"statements",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
")",
"==",
"0",
")",
")",
":",
"statements",
".",
"pop",
"(",
")",
"return",
"statements"
] |
Return a list of statements
This is done by joining together any rows that
have continuations
|
[
"Return",
"a",
"list",
"of",
"statements"
] |
python
|
valid
| 37.918919 |
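The continuation-joining logic can be illustrated with plain lists of cells, where a leading "..." marks a continuation of the previous row (this sketch omits the line-number tracking and trailing-blank trimming done above):

    def join_statements(rows):
        """Join rows whose first cell is '...' onto the preceding statement."""
        statements = []
        for row in rows:
            if row and row[0] == "..." and statements:
                statements[-1].extend(row[1:])  # continuation of the previous row
            else:
                statements.append(list(row))
        return statements

    rows = [["Log", "hello"], ["...", "world"], ["Sleep", "1s"]]
    print(join_statements(rows))
    # [['Log', 'hello', 'world'], ['Sleep', '1s']]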
bpsmith/tia
|
tia/analysis/ta.py
|
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/ta.py#L198-L277
|
def cross_signal(s1, s2, continuous=0):
""" return a signal with the following
1 : when all values of s1 cross all values of s2
-1 : when all values of s2 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continous: bool, if true then once the signal starts it is always 1 or -1
"""
def _convert(src, other):
if isinstance(src, pd.DataFrame):
return src.min(axis=1, skipna=0), src.max(axis=1, skipna=0)
elif isinstance(src, pd.Series):
return src, src
elif isinstance(src, (int, float)):
s = pd.Series(src, index=other.index)
return s, s
elif isinstance(src, (tuple, list)):
l, u = min(src), max(src)
assert l <= u, 'lower bound must be less than upper bound'
lower, upper = pd.Series(l, index=other.index), pd.Series(u, index=other.index)
return lower, upper
else:
raise Exception('unable to handle type %s' % type(src))
lower1, upper1 = _convert(s1, s2)
lower2, upper2 = _convert(s2, s1)
df = pd.DataFrame({'upper1': upper1, 'lower1': lower1, 'upper2': upper2, 'lower2': lower2})
df.ffill(inplace=True)
signal = pd.Series(np.nan, index=df.index)
signal[df.upper1 > df.upper2] = 1
signal[df.lower1 < df.lower2] = -1
if continuous:
# Just roll with 1, -1
signal = signal.fillna(method='ffill')
m1, m2 = df.upper1.first_valid_index(), df.upper2.first_valid_index()
if m1 is not None or m2 is not None:
m1 = m2 if m1 is None else m1
m2 = m1 if m2 is None else m2
fv = max(m1, m2)
if np.isnan(signal[fv]):
signal[fv] = 0
signal.ffill(inplace=1)
else:
signal[(df.upper1 < df.upper2) & (df.lower1 > df.lower2)] = 0
# special handling when equal, determine where it previously was
eq = (df.upper1 == df.upper2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == 1.: # Line coming from above upper bound if ps == 1
signal[i] = ps
else:
signal[i] = 0
eq = (df.lower1 == df.lower2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == -1.: # Line coming from below lower bound if ps == -1
signal[i] = ps
else:
signal[i] = 0
return signal
|
[
"def",
"cross_signal",
"(",
"s1",
",",
"s2",
",",
"continuous",
"=",
"0",
")",
":",
"def",
"_convert",
"(",
"src",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"src",
",",
"pd",
".",
"DataFrame",
")",
":",
"return",
"src",
".",
"min",
"(",
"axis",
"=",
"1",
",",
"skipna",
"=",
"0",
")",
",",
"src",
".",
"max",
"(",
"axis",
"=",
"1",
",",
"skipna",
"=",
"0",
")",
"elif",
"isinstance",
"(",
"src",
",",
"pd",
".",
"Series",
")",
":",
"return",
"src",
",",
"src",
"elif",
"isinstance",
"(",
"src",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"s",
"=",
"pd",
".",
"Series",
"(",
"src",
",",
"index",
"=",
"other",
".",
"index",
")",
"return",
"s",
",",
"s",
"elif",
"isinstance",
"(",
"src",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"l",
",",
"u",
"=",
"min",
"(",
"src",
")",
",",
"max",
"(",
"src",
")",
"assert",
"l",
"<=",
"u",
",",
"'lower bound must be less than upper bound'",
"lower",
",",
"upper",
"=",
"pd",
".",
"Series",
"(",
"l",
",",
"index",
"=",
"other",
".",
"index",
")",
",",
"pd",
".",
"Series",
"(",
"u",
",",
"index",
"=",
"other",
".",
"index",
")",
"return",
"lower",
",",
"upper",
"else",
":",
"raise",
"Exception",
"(",
"'unable to handle type %s'",
"%",
"type",
"(",
"src",
")",
")",
"lower1",
",",
"upper1",
"=",
"_convert",
"(",
"s1",
",",
"s2",
")",
"lower2",
",",
"upper2",
"=",
"_convert",
"(",
"s2",
",",
"s1",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'upper1'",
":",
"upper1",
",",
"'lower1'",
":",
"lower1",
",",
"'upper2'",
":",
"upper2",
",",
"'lower2'",
":",
"lower2",
"}",
")",
"df",
".",
"ffill",
"(",
"inplace",
"=",
"True",
")",
"signal",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"nan",
",",
"index",
"=",
"df",
".",
"index",
")",
"signal",
"[",
"df",
".",
"upper1",
">",
"df",
".",
"upper2",
"]",
"=",
"1",
"signal",
"[",
"df",
".",
"lower1",
"<",
"df",
".",
"lower2",
"]",
"=",
"-",
"1",
"if",
"continuous",
":",
"# Just roll with 1, -1",
"signal",
"=",
"signal",
".",
"fillna",
"(",
"method",
"=",
"'ffill'",
")",
"m1",
",",
"m2",
"=",
"df",
".",
"upper1",
".",
"first_valid_index",
"(",
")",
",",
"df",
".",
"upper2",
".",
"first_valid_index",
"(",
")",
"if",
"m1",
"is",
"not",
"None",
"or",
"m2",
"is",
"not",
"None",
":",
"m1",
"=",
"m2",
"if",
"m1",
"is",
"None",
"else",
"m1",
"m2",
"=",
"m1",
"if",
"m2",
"is",
"None",
"else",
"m2",
"fv",
"=",
"max",
"(",
"m1",
",",
"m2",
")",
"if",
"np",
".",
"isnan",
"(",
"signal",
"[",
"fv",
"]",
")",
":",
"signal",
"[",
"fv",
"]",
"=",
"0",
"signal",
".",
"ffill",
"(",
"inplace",
"=",
"1",
")",
"else",
":",
"signal",
"[",
"(",
"df",
".",
"upper1",
"<",
"df",
".",
"upper2",
")",
"&",
"(",
"df",
".",
"lower1",
">",
"df",
".",
"lower2",
")",
"]",
"=",
"0",
"# special handling when equal, determine where it previously was",
"eq",
"=",
"(",
"df",
".",
"upper1",
"==",
"df",
".",
"upper2",
")",
"if",
"eq",
".",
"any",
"(",
")",
":",
"# Set to prior value",
"tmp",
"=",
"signal",
"[",
"eq",
"]",
"for",
"i",
"in",
"tmp",
".",
"index",
":",
"loc",
"=",
"signal",
".",
"index",
".",
"get_loc",
"(",
"i",
")",
"if",
"loc",
"!=",
"0",
":",
"u",
",",
"l",
"=",
"df",
".",
"upper2",
".",
"iloc",
"[",
"loc",
"]",
",",
"df",
".",
"lower2",
".",
"iloc",
"[",
"loc",
"]",
"ps",
"=",
"signal",
".",
"iloc",
"[",
"loc",
"-",
"1",
"]",
"if",
"u",
"==",
"l",
"or",
"ps",
"==",
"1.",
":",
"# Line coming from above upper bound if ps == 1",
"signal",
"[",
"i",
"]",
"=",
"ps",
"else",
":",
"signal",
"[",
"i",
"]",
"=",
"0",
"eq",
"=",
"(",
"df",
".",
"lower1",
"==",
"df",
".",
"lower2",
")",
"if",
"eq",
".",
"any",
"(",
")",
":",
"# Set to prior value",
"tmp",
"=",
"signal",
"[",
"eq",
"]",
"for",
"i",
"in",
"tmp",
".",
"index",
":",
"loc",
"=",
"signal",
".",
"index",
".",
"get_loc",
"(",
"i",
")",
"if",
"loc",
"!=",
"0",
":",
"u",
",",
"l",
"=",
"df",
".",
"upper2",
".",
"iloc",
"[",
"loc",
"]",
",",
"df",
".",
"lower2",
".",
"iloc",
"[",
"loc",
"]",
"ps",
"=",
"signal",
".",
"iloc",
"[",
"loc",
"-",
"1",
"]",
"if",
"u",
"==",
"l",
"or",
"ps",
"==",
"-",
"1.",
":",
"# Line coming from below lower bound if ps == -1",
"signal",
"[",
"i",
"]",
"=",
"ps",
"else",
":",
"signal",
"[",
"i",
"]",
"=",
"0",
"return",
"signal"
] |
return a signal with the following
1 : when all values of s1 cross all values of s2
-1 : when all values of s2 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continous: bool, if true then once the signal starts it is always 1 or -1
|
[
"return",
"a",
"signal",
"with",
"the",
"following",
"1",
":",
"when",
"all",
"values",
"of",
"s1",
"cross",
"all",
"values",
"of",
"s2",
"-",
"1",
":",
"when",
"all",
"values",
"of",
"s2",
"cross",
"below",
"all",
"values",
"of",
"s2",
"0",
":",
"if",
"s1",
"<",
"max",
"(",
"s2",
")",
"and",
"s1",
">",
"min",
"(",
"s2",
")",
"np",
".",
"nan",
":",
"if",
"s1",
"or",
"s2",
"contains",
"np",
".",
"nan",
"at",
"position"
] |
python
|
train
| 38.7875 |
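A much-reduced version of the same idea, for two plain pandas Series and ignoring the bound handling, continuity option, and equality special cases above (assumes pandas is available):

    import pandas as pd

    def simple_cross_signal(s1, s2):
        """1 where s1 is above s2, -1 where below, 0 where equal."""
        sig = pd.Series(0, index=s1.index)
        sig[s1 > s2] = 1
        sig[s1 < s2] = -1
        return sig

    s1 = pd.Series([1.0, 2.0, 3.0, 2.0])
    s2 = pd.Series([2.0, 2.0, 2.0, 2.0])
    print(simple_cross_signal(s1, s2).tolist())  # [-1, 0, 1, 0]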
AtmaHou/atma
|
tool.py
|
https://github.com/AtmaHou/atma/blob/41cd8ea9443a9c3b2dd71432f46f44a0f83093c7/tool.py#L39-L64
|
def wrap_star_digger(item, type_str, data_name='Value'):
"""
code used to extract data from Bing's wrap star
:param item: wrap star obj
:param type_str: target type string
:param data_name: target data label, might be "Entities", "Properties", 'Value'
:return: list of all matched target, arranged in occurance
"""
ret = []
if type(item) == dict:
if 'Type' in item and item['Type'] == type_str and data_name in item: # 'Business.Consumer_Product.Description'
if len(item[data_name]) > 1:
# print 'length error!!!!!!!!!!!'
pass
return item[data_name]
else:
for k in item:
sub_ret = wrap_star_digger(item[k], type_str, data_name)
if sub_ret:
ret.extend(sub_ret)
elif type(item) == list:
for i in item:
sub_ret = wrap_star_digger(i, type_str, data_name)
if sub_ret:
ret.extend(sub_ret)
return ret
|
[
"def",
"wrap_star_digger",
"(",
"item",
",",
"type_str",
",",
"data_name",
"=",
"'Value'",
")",
":",
"ret",
"=",
"[",
"]",
"if",
"type",
"(",
"item",
")",
"==",
"dict",
":",
"if",
"'Type'",
"in",
"item",
"and",
"item",
"[",
"'Type'",
"]",
"==",
"type_str",
"and",
"data_name",
"in",
"item",
":",
"# 'Business.Consumer_Product.Description'",
"if",
"len",
"(",
"item",
"[",
"data_name",
"]",
")",
">",
"1",
":",
"# print 'length error!!!!!!!!!!!'",
"pass",
"return",
"item",
"[",
"data_name",
"]",
"else",
":",
"for",
"k",
"in",
"item",
":",
"sub_ret",
"=",
"wrap_star_digger",
"(",
"item",
"[",
"k",
"]",
",",
"type_str",
",",
"data_name",
")",
"if",
"sub_ret",
":",
"ret",
".",
"extend",
"(",
"sub_ret",
")",
"elif",
"type",
"(",
"item",
")",
"==",
"list",
":",
"for",
"i",
"in",
"item",
":",
"sub_ret",
"=",
"wrap_star_digger",
"(",
"i",
",",
"type_str",
",",
"data_name",
")",
"if",
"sub_ret",
":",
"ret",
".",
"extend",
"(",
"sub_ret",
")",
"return",
"ret"
] |
code used to extract data from Bing's wrap star
:param item: wrap star obj
:param type_str: target type string
:param data_name: target data label, might be "Entities", "Properties", 'Value'
:return: list of all matched target, arranged in occurance
|
[
"code",
"used",
"to",
"extract",
"data",
"from",
"Bing",
"s",
"wrap",
"star",
":",
"param",
"item",
":",
"wrap",
"star",
"obj",
":",
"param",
"type_str",
":",
"target",
"type",
"string",
":",
"param",
"data_name",
":",
"target",
"data",
"label",
"might",
"be",
"Entities",
"Properties",
"Value",
":",
"return",
":",
"list",
"of",
"all",
"matched",
"target",
"arranged",
"in",
"occurance"
] |
python
|
train
| 38.346154 |
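A trimmed-down recursive extractor in the same style, walking nested dicts/lists and collecting values from entries whose 'Type' matches (the sample document below is made up for illustration):

    def dig(item, type_str, data_name='Value'):
        """Collect data_name entries from every dict whose 'Type' equals type_str."""
        found = []
        if isinstance(item, dict):
            if item.get('Type') == type_str and data_name in item:
                return list(item[data_name])
            for value in item.values():
                found.extend(dig(value, type_str, data_name))
        elif isinstance(item, list):
            for element in item:
                found.extend(dig(element, type_str, data_name))
        return found

    doc = {'Results': [{'Type': 'Product.Description', 'Value': ['A phone']},
                       {'Type': 'Other', 'Value': ['ignored']}]}
    print(dig(doc, 'Product.Description'))  # ['A phone']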
saltstack/salt
|
salt/runners/reactor.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/reactor.py#L36-L59
|
def list_(saltenv='base', test=None):
'''
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
'''
sevent = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
master_key = salt.utils.master.get_master_key('root', __opts__)
__jid_event__.fire_event({'key': master_key}, 'salt/reactors/manage/list')
results = sevent.get_event(wait=30, tag='salt/reactors/manage/list-results')
reactors = results['reactors']
return reactors
|
[
"def",
"list_",
"(",
"saltenv",
"=",
"'base'",
",",
"test",
"=",
"None",
")",
":",
"sevent",
"=",
"salt",
".",
"utils",
".",
"event",
".",
"get_event",
"(",
"'master'",
",",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"__opts__",
"[",
"'transport'",
"]",
",",
"opts",
"=",
"__opts__",
",",
"listen",
"=",
"True",
")",
"master_key",
"=",
"salt",
".",
"utils",
".",
"master",
".",
"get_master_key",
"(",
"'root'",
",",
"__opts__",
")",
"__jid_event__",
".",
"fire_event",
"(",
"{",
"'key'",
":",
"master_key",
"}",
",",
"'salt/reactors/manage/list'",
")",
"results",
"=",
"sevent",
".",
"get_event",
"(",
"wait",
"=",
"30",
",",
"tag",
"=",
"'salt/reactors/manage/list-results'",
")",
"reactors",
"=",
"results",
"[",
"'reactors'",
"]",
"return",
"reactors"
] |
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
|
[
"List",
"currently",
"configured",
"reactors"
] |
python
|
train
| 25.583333 |
secdev/scapy
|
scapy/layers/tls/keyexchange_tls13.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/keyexchange_tls13.py#L59-L89
|
def create_privkey(self):
"""
This is called by post_build() for key creation.
"""
if self.group in _tls_named_ffdh_groups:
params = _ffdh_groups[_tls_named_ffdh_groups[self.group]][0]
privkey = params.generate_private_key()
self.privkey = privkey
pubkey = privkey.public_key()
self.key_exchange = pubkey.public_numbers().y
elif self.group in _tls_named_curves:
if _tls_named_curves[self.group] == "x25519":
if conf.crypto_valid_advanced:
privkey = x25519.X25519PrivateKey.generate()
self.privkey = privkey
pubkey = privkey.public_key()
self.key_exchange = pubkey.public_bytes()
elif _tls_named_curves[self.group] != "x448":
curve = ec._CURVE_TYPES[_tls_named_curves[self.group]]()
privkey = ec.generate_private_key(curve, default_backend())
self.privkey = privkey
pubkey = privkey.public_key()
try:
# cryptography >= 2.5
self.key_exchange = pubkey.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint
)
except TypeError:
# older versions
self.key_exchange = pubkey.public_numbers().encode_point()
|
[
"def",
"create_privkey",
"(",
"self",
")",
":",
"if",
"self",
".",
"group",
"in",
"_tls_named_ffdh_groups",
":",
"params",
"=",
"_ffdh_groups",
"[",
"_tls_named_ffdh_groups",
"[",
"self",
".",
"group",
"]",
"]",
"[",
"0",
"]",
"privkey",
"=",
"params",
".",
"generate_private_key",
"(",
")",
"self",
".",
"privkey",
"=",
"privkey",
"pubkey",
"=",
"privkey",
".",
"public_key",
"(",
")",
"self",
".",
"key_exchange",
"=",
"pubkey",
".",
"public_numbers",
"(",
")",
".",
"y",
"elif",
"self",
".",
"group",
"in",
"_tls_named_curves",
":",
"if",
"_tls_named_curves",
"[",
"self",
".",
"group",
"]",
"==",
"\"x25519\"",
":",
"if",
"conf",
".",
"crypto_valid_advanced",
":",
"privkey",
"=",
"x25519",
".",
"X25519PrivateKey",
".",
"generate",
"(",
")",
"self",
".",
"privkey",
"=",
"privkey",
"pubkey",
"=",
"privkey",
".",
"public_key",
"(",
")",
"self",
".",
"key_exchange",
"=",
"pubkey",
".",
"public_bytes",
"(",
")",
"elif",
"_tls_named_curves",
"[",
"self",
".",
"group",
"]",
"!=",
"\"x448\"",
":",
"curve",
"=",
"ec",
".",
"_CURVE_TYPES",
"[",
"_tls_named_curves",
"[",
"self",
".",
"group",
"]",
"]",
"(",
")",
"privkey",
"=",
"ec",
".",
"generate_private_key",
"(",
"curve",
",",
"default_backend",
"(",
")",
")",
"self",
".",
"privkey",
"=",
"privkey",
"pubkey",
"=",
"privkey",
".",
"public_key",
"(",
")",
"try",
":",
"# cryptography >= 2.5",
"self",
".",
"key_exchange",
"=",
"pubkey",
".",
"public_bytes",
"(",
"serialization",
".",
"Encoding",
".",
"X962",
",",
"serialization",
".",
"PublicFormat",
".",
"UncompressedPoint",
")",
"except",
"TypeError",
":",
"# older versions",
"self",
".",
"key_exchange",
"=",
"pubkey",
".",
"public_numbers",
"(",
")",
".",
"encode_point",
"(",
")"
] |
This is called by post_build() for key creation.
|
[
"This",
"is",
"called",
"by",
"post_build",
"()",
"for",
"key",
"creation",
"."
] |
python
|
train
| 47.290323 |
rkcosmos/deepcut
|
deepcut/train.py
|
https://github.com/rkcosmos/deepcut/blob/9a2729071d01972af805acede85d7aa9e7a6da30/deepcut/train.py#L107-L142
|
def prepare_feature(best_processed_path, option='train'):
"""
Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test'
"""
# padding for training and testing set
n_pad = 21
n_pad_2 = int((n_pad - 1)/2)
pad = [{'char': ' ', 'type': 'p', 'target': True}]
df_pad = pd.DataFrame(pad * n_pad_2)
df = []
for article_type in article_types:
df.append(pd.read_csv(os.path.join(best_processed_path, option, 'df_best_{}_{}.csv'.format(article_type, option))))
df = pd.concat(df)
df = pd.concat((df_pad, df, df_pad)) # pad with empty string feature
df['char'] = df['char'].map(lambda x: CHARS_MAP.get(x, 80))
df['type'] = df['type'].map(lambda x: CHAR_TYPES_MAP.get(x, 4))
df_pad = create_n_gram_df(df, n_pad=n_pad)
char_row = ['char' + str(i + 1) for i in range(n_pad_2)] + \
['char-' + str(i + 1) for i in range(n_pad_2)] + ['char']
type_row = ['type' + str(i + 1) for i in range(n_pad_2)] + \
['type-' + str(i + 1) for i in range(n_pad_2)] + ['type']
x_char = df_pad[char_row].as_matrix()
x_type = df_pad[type_row].as_matrix()
y = df_pad['target'].astype(int).as_matrix()
return x_char, x_type, y
|
[
"def",
"prepare_feature",
"(",
"best_processed_path",
",",
"option",
"=",
"'train'",
")",
":",
"# padding for training and testing set",
"n_pad",
"=",
"21",
"n_pad_2",
"=",
"int",
"(",
"(",
"n_pad",
"-",
"1",
")",
"/",
"2",
")",
"pad",
"=",
"[",
"{",
"'char'",
":",
"' '",
",",
"'type'",
":",
"'p'",
",",
"'target'",
":",
"True",
"}",
"]",
"df_pad",
"=",
"pd",
".",
"DataFrame",
"(",
"pad",
"*",
"n_pad_2",
")",
"df",
"=",
"[",
"]",
"for",
"article_type",
"in",
"article_types",
":",
"df",
".",
"append",
"(",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"best_processed_path",
",",
"option",
",",
"'df_best_{}_{}.csv'",
".",
"format",
"(",
"article_type",
",",
"option",
")",
")",
")",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"df",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"(",
"df_pad",
",",
"df",
",",
"df_pad",
")",
")",
"# pad with empty string feature",
"df",
"[",
"'char'",
"]",
"=",
"df",
"[",
"'char'",
"]",
".",
"map",
"(",
"lambda",
"x",
":",
"CHARS_MAP",
".",
"get",
"(",
"x",
",",
"80",
")",
")",
"df",
"[",
"'type'",
"]",
"=",
"df",
"[",
"'type'",
"]",
".",
"map",
"(",
"lambda",
"x",
":",
"CHAR_TYPES_MAP",
".",
"get",
"(",
"x",
",",
"4",
")",
")",
"df_pad",
"=",
"create_n_gram_df",
"(",
"df",
",",
"n_pad",
"=",
"n_pad",
")",
"char_row",
"=",
"[",
"'char'",
"+",
"str",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"n_pad_2",
")",
"]",
"+",
"[",
"'char-'",
"+",
"str",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"n_pad_2",
")",
"]",
"+",
"[",
"'char'",
"]",
"type_row",
"=",
"[",
"'type'",
"+",
"str",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"n_pad_2",
")",
"]",
"+",
"[",
"'type-'",
"+",
"str",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"n_pad_2",
")",
"]",
"+",
"[",
"'type'",
"]",
"x_char",
"=",
"df_pad",
"[",
"char_row",
"]",
".",
"as_matrix",
"(",
")",
"x_type",
"=",
"df_pad",
"[",
"type_row",
"]",
".",
"as_matrix",
"(",
")",
"y",
"=",
"df_pad",
"[",
"'target'",
"]",
".",
"astype",
"(",
"int",
")",
".",
"as_matrix",
"(",
")",
"return",
"x_char",
",",
"x_type",
",",
"y"
] |
Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test'
|
[
"Transform",
"processed",
"path",
"into",
"feature",
"matrix",
"and",
"output",
"array"
] |
python
|
valid
| 36.166667 |
saltstack/salt
|
salt/master.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L876-L937
|
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
# Wait for kill should be less then parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run()
|
[
"def",
"__bind",
"(",
"self",
")",
":",
"if",
"self",
".",
"log_queue",
"is",
"not",
"None",
":",
"salt",
".",
"log",
".",
"setup",
".",
"set_multiprocessing_logging_queue",
"(",
"self",
".",
"log_queue",
")",
"if",
"self",
".",
"log_queue_level",
"is",
"not",
"None",
":",
"salt",
".",
"log",
".",
"setup",
".",
"set_multiprocessing_logging_level",
"(",
"self",
".",
"log_queue_level",
")",
"salt",
".",
"log",
".",
"setup",
".",
"setup_multiprocessing_logging",
"(",
"self",
".",
"log_queue",
")",
"if",
"self",
".",
"secrets",
"is",
"not",
"None",
":",
"SMaster",
".",
"secrets",
"=",
"self",
".",
"secrets",
"dfn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"opts",
"[",
"'cachedir'",
"]",
",",
"'.dfn'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"dfn",
")",
":",
"try",
":",
"if",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
"and",
"not",
"os",
".",
"access",
"(",
"dfn",
",",
"os",
".",
"W_OK",
")",
":",
"# Cannot delete read-only files on Windows.",
"os",
".",
"chmod",
"(",
"dfn",
",",
"stat",
".",
"S_IRUSR",
"|",
"stat",
".",
"S_IWUSR",
")",
"os",
".",
"remove",
"(",
"dfn",
")",
"except",
"os",
".",
"error",
":",
"pass",
"# Wait for kill should be less then parent's ProcessManager.",
"self",
".",
"process_manager",
"=",
"salt",
".",
"utils",
".",
"process",
".",
"ProcessManager",
"(",
"name",
"=",
"'ReqServer_ProcessManager'",
",",
"wait_for_kill",
"=",
"1",
")",
"req_channels",
"=",
"[",
"]",
"tcp_only",
"=",
"True",
"for",
"transport",
",",
"opts",
"in",
"iter_transport_opts",
"(",
"self",
".",
"opts",
")",
":",
"chan",
"=",
"salt",
".",
"transport",
".",
"server",
".",
"ReqServerChannel",
".",
"factory",
"(",
"opts",
")",
"chan",
".",
"pre_fork",
"(",
"self",
".",
"process_manager",
")",
"req_channels",
".",
"append",
"(",
"chan",
")",
"if",
"transport",
"!=",
"'tcp'",
":",
"tcp_only",
"=",
"False",
"kwargs",
"=",
"{",
"}",
"if",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"kwargs",
"[",
"'log_queue'",
"]",
"=",
"self",
".",
"log_queue",
"kwargs",
"[",
"'log_queue_level'",
"]",
"=",
"self",
".",
"log_queue_level",
"# Use one worker thread if only the TCP transport is set up on",
"# Windows and we are using Python 2. There is load balancer",
"# support on Windows for the TCP transport when using Python 3.",
"if",
"tcp_only",
"and",
"six",
".",
"PY2",
"and",
"int",
"(",
"self",
".",
"opts",
"[",
"'worker_threads'",
"]",
")",
"!=",
"1",
":",
"log",
".",
"warning",
"(",
"'TCP transport supports only 1 worker on Windows '",
"'when using Python 2.'",
")",
"self",
".",
"opts",
"[",
"'worker_threads'",
"]",
"=",
"1",
"# Reset signals to default ones before adding processes to the process",
"# manager. We don't want the processes being started to inherit those",
"# signal handlers",
"with",
"salt",
".",
"utils",
".",
"process",
".",
"default_signals",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIGTERM",
")",
":",
"for",
"ind",
"in",
"range",
"(",
"int",
"(",
"self",
".",
"opts",
"[",
"'worker_threads'",
"]",
")",
")",
":",
"name",
"=",
"'MWorker-{0}'",
".",
"format",
"(",
"ind",
")",
"self",
".",
"process_manager",
".",
"add_process",
"(",
"MWorker",
",",
"args",
"=",
"(",
"self",
".",
"opts",
",",
"self",
".",
"master_key",
",",
"self",
".",
"key",
",",
"req_channels",
",",
"name",
")",
",",
"kwargs",
"=",
"kwargs",
",",
"name",
"=",
"name",
")",
"self",
".",
"process_manager",
".",
"run",
"(",
")"
] |
Binds the reply server
|
[
"Binds",
"the",
"reply",
"server"
] |
python
|
train
| 48.064516 |
numenta/nupic
|
src/nupic/algorithms/fdrutilities.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L319-L411
|
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
nL1SimpleSequences=50, nL1HubSequences=50,
l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0,
patternLen=500, patternActivity=50):
"""
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TM. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHystereisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
"""
# First, generate the L1 sequences
l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
nSeq=nL1SimpleSequences) + \
generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
seqLength=l1SeqLength, nSeq=nL1HubSequences)
# Generate the L2 SP output from those
spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
poolingTimeBelow=l1Pooling, outputWidth=patternLen,
activity=patternActivity, perfectStability=perfectStability,
spHysteresisFactor=spHysteresisFactor)
# Map the spOutput patterns into indices into a pattern matrix which we
# generate now.
outSeq = None
outSeqList = []
outPatterns = SM32(0, patternLen)
for pattern in spOutput:
# If we have a reset vector start a new sequence
if pattern.sum() == 0:
if outSeq is not None:
outSeqList.append(outSeq)
outSeq = []
continue
# See if this vector matches a pattern we've already seen before
patternIdx = None
if outPatterns.nRows() > 0:
# Find most matching 1's.
matches = outPatterns.rightVecSumAtNZ(pattern)
outCoinc = matches.argmax().astype('uint32')
# See if its number of 1's is the same in the pattern and in the
# coincidence row. If so, it is an exact match
numOnes = pattern.sum()
if matches[outCoinc] == numOnes \
and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
patternIdx = outCoinc
# If no match, add this pattern to our matrix
if patternIdx is None:
outPatterns.addRow(pattern)
patternIdx = outPatterns.nRows() - 1
# Store the pattern index into the sequence
outSeq.append(patternIdx)
# Put in last finished sequence
if outSeq is not None:
outSeqList.append(outSeq)
# Return with the seqList and patterns matrix
return (outSeqList, outPatterns)
|
[
"def",
"generateL2Sequences",
"(",
"nL1Patterns",
"=",
"10",
",",
"l1Hubs",
"=",
"[",
"2",
",",
"6",
"]",
",",
"l1SeqLength",
"=",
"[",
"5",
",",
"6",
",",
"7",
"]",
",",
"nL1SimpleSequences",
"=",
"50",
",",
"nL1HubSequences",
"=",
"50",
",",
"l1Pooling",
"=",
"4",
",",
"perfectStability",
"=",
"False",
",",
"spHysteresisFactor",
"=",
"1.0",
",",
"patternLen",
"=",
"500",
",",
"patternActivity",
"=",
"50",
")",
":",
"# First, generate the L1 sequences",
"l1SeqList",
"=",
"generateSimpleSequences",
"(",
"nCoinc",
"=",
"nL1Patterns",
",",
"seqLength",
"=",
"l1SeqLength",
",",
"nSeq",
"=",
"nL1SimpleSequences",
")",
"+",
"generateHubSequences",
"(",
"nCoinc",
"=",
"nL1Patterns",
",",
"hubs",
"=",
"l1Hubs",
",",
"seqLength",
"=",
"l1SeqLength",
",",
"nSeq",
"=",
"nL1HubSequences",
")",
"# Generate the L2 SP output from those",
"spOutput",
"=",
"generateSlowSPOutput",
"(",
"seqListBelow",
"=",
"l1SeqList",
",",
"poolingTimeBelow",
"=",
"l1Pooling",
",",
"outputWidth",
"=",
"patternLen",
",",
"activity",
"=",
"patternActivity",
",",
"perfectStability",
"=",
"perfectStability",
",",
"spHysteresisFactor",
"=",
"spHysteresisFactor",
")",
"# Map the spOutput patterns into indices into a pattern matrix which we",
"# generate now.",
"outSeq",
"=",
"None",
"outSeqList",
"=",
"[",
"]",
"outPatterns",
"=",
"SM32",
"(",
"0",
",",
"patternLen",
")",
"for",
"pattern",
"in",
"spOutput",
":",
"# If we have a reset vector start a new sequence",
"if",
"pattern",
".",
"sum",
"(",
")",
"==",
"0",
":",
"if",
"outSeq",
"is",
"not",
"None",
":",
"outSeqList",
".",
"append",
"(",
"outSeq",
")",
"outSeq",
"=",
"[",
"]",
"continue",
"# See if this vector matches a pattern we've already seen before",
"patternIdx",
"=",
"None",
"if",
"outPatterns",
".",
"nRows",
"(",
")",
">",
"0",
":",
"# Find most matching 1's.",
"matches",
"=",
"outPatterns",
".",
"rightVecSumAtNZ",
"(",
"pattern",
")",
"outCoinc",
"=",
"matches",
".",
"argmax",
"(",
")",
".",
"astype",
"(",
"'uint32'",
")",
"# See if its number of 1's is the same in the pattern and in the",
"# coincidence row. If so, it is an exact match",
"numOnes",
"=",
"pattern",
".",
"sum",
"(",
")",
"if",
"matches",
"[",
"outCoinc",
"]",
"==",
"numOnes",
"and",
"outPatterns",
".",
"getRow",
"(",
"int",
"(",
"outCoinc",
")",
")",
".",
"sum",
"(",
")",
"==",
"numOnes",
":",
"patternIdx",
"=",
"outCoinc",
"# If no match, add this pattern to our matrix",
"if",
"patternIdx",
"is",
"None",
":",
"outPatterns",
".",
"addRow",
"(",
"pattern",
")",
"patternIdx",
"=",
"outPatterns",
".",
"nRows",
"(",
")",
"-",
"1",
"# Store the pattern index into the sequence",
"outSeq",
".",
"append",
"(",
"patternIdx",
")",
"# Put in last finished sequence",
"if",
"outSeq",
"is",
"not",
"None",
":",
"outSeqList",
".",
"append",
"(",
"outSeq",
")",
"# Return with the seqList and patterns matrix",
"return",
"(",
"outSeqList",
",",
"outPatterns",
")"
] |
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TM. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHystereisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
|
[
"Generate",
"the",
"simulated",
"output",
"from",
"a",
"spatial",
"pooler",
"that",
"s",
"sitting",
"on",
"top",
"of",
"another",
"spatial",
"pooler",
"/",
"temporal",
"memory",
"pair",
".",
"The",
"average",
"on",
"-",
"time",
"of",
"the",
"outputs",
"from",
"the",
"simulated",
"TM",
"is",
"given",
"by",
"the",
"l1Pooling",
"argument",
"."
] |
python
|
valid
| 45.043011 |
libyal/dtfabric
|
dtfabric/registry.py
|
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/registry.py#L61-L92
|
def RegisterDefinition(self, data_type_definition):
"""Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
"""
name_lower = data_type_definition.name.lower()
if name_lower in self._definitions:
raise KeyError('Definition already set for name: {0:s}.'.format(
data_type_definition.name))
if data_type_definition.name in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(
data_type_definition.name))
for alias in data_type_definition.aliases:
if alias in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(alias))
self._definitions[name_lower] = data_type_definition
for alias in data_type_definition.aliases:
self._aliases[alias] = name_lower
if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
self._format_definitions.append(name_lower)
|
[
"def",
"RegisterDefinition",
"(",
"self",
",",
"data_type_definition",
")",
":",
"name_lower",
"=",
"data_type_definition",
".",
"name",
".",
"lower",
"(",
")",
"if",
"name_lower",
"in",
"self",
".",
"_definitions",
":",
"raise",
"KeyError",
"(",
"'Definition already set for name: {0:s}.'",
".",
"format",
"(",
"data_type_definition",
".",
"name",
")",
")",
"if",
"data_type_definition",
".",
"name",
"in",
"self",
".",
"_aliases",
":",
"raise",
"KeyError",
"(",
"'Alias already set for name: {0:s}.'",
".",
"format",
"(",
"data_type_definition",
".",
"name",
")",
")",
"for",
"alias",
"in",
"data_type_definition",
".",
"aliases",
":",
"if",
"alias",
"in",
"self",
".",
"_aliases",
":",
"raise",
"KeyError",
"(",
"'Alias already set for name: {0:s}.'",
".",
"format",
"(",
"alias",
")",
")",
"self",
".",
"_definitions",
"[",
"name_lower",
"]",
"=",
"data_type_definition",
"for",
"alias",
"in",
"data_type_definition",
".",
"aliases",
":",
"self",
".",
"_aliases",
"[",
"alias",
"]",
"=",
"name_lower",
"if",
"data_type_definition",
".",
"TYPE_INDICATOR",
"==",
"definitions",
".",
"TYPE_INDICATOR_FORMAT",
":",
"self",
".",
"_format_definitions",
".",
"append",
"(",
"name_lower",
")"
] |
Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
|
[
"Registers",
"a",
"data",
"type",
"definition",
"."
] |
python
|
train
| 35.1875 |
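The registration rules (lower-cased primary names, aliases mapping back to them, duplicates rejected) can be sketched with a small standalone registry; class and method names here are illustrative, not dtfabric's API:

    class Registry(object):
        """Minimal name/alias registry mirroring the duplicate checks above."""

        def __init__(self):
            self._definitions = {}
            self._aliases = {}

        def register(self, name, definition, aliases=()):
            key = name.lower()
            if key in self._definitions:
                raise KeyError('Definition already set for name: {0:s}.'.format(name))
            for alias in aliases:
                if alias in self._aliases:
                    raise KeyError('Alias already set for name: {0:s}.'.format(alias))
            self._definitions[key] = definition
            for alias in aliases:
                self._aliases[alias] = key

    registry = Registry()
    registry.register('Uint32', object(), aliases=['dword'])
    print('uint32' in registry._definitions, registry._aliases['dword'])  # True uint32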
kshlm/gant
|
gant/utils/ssh.py
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/ssh.py#L10-L20
|
def launch_shell(username, hostname, password, port=22):
"""
Launches an ssh shell
"""
if not username or not hostname or not password:
return False
with tempfile.NamedTemporaryFile() as tmpFile:
os.system(sshCmdLine.format(password, tmpFile.name, username, hostname,
port))
return True
|
[
"def",
"launch_shell",
"(",
"username",
",",
"hostname",
",",
"password",
",",
"port",
"=",
"22",
")",
":",
"if",
"not",
"username",
"or",
"not",
"hostname",
"or",
"not",
"password",
":",
"return",
"False",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"tmpFile",
":",
"os",
".",
"system",
"(",
"sshCmdLine",
".",
"format",
"(",
"password",
",",
"tmpFile",
".",
"name",
",",
"username",
",",
"hostname",
",",
"port",
")",
")",
"return",
"True"
] |
Launches an ssh shell
|
[
"Launches",
"an",
"ssh",
"shell"
] |
python
|
train
| 32.090909 |
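A minimal call sketch for the helper above; the host and credentials are made up, and the import path is inferred from the record's module path:

from gant.utils.ssh import launch_shell

# Returns False straight away if any argument is empty; otherwise it writes the
# password to a temporary file and hands control to the interactive ssh command.
ok = launch_shell('root', '192.0.2.10', 's3cret', port=2222)
print(ok)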
ciena/afkak
|
afkak/consumer.py
|
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L236-L283
|
def start(self, start_offset):
"""
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
A :class:`~twisted.internet.defer.Deferred` which will resolve
successfully when the consumer is cleanly stopped, or with
a failure if the :class:`Consumer` encounters an error from which
it is unable to recover.
:raises: :exc:`RestartError` if already running.
"""
# Have we been started already, and not stopped?
if self._start_d is not None:
raise RestartError("Start called on already-started consumer")
# Keep track of state for debugging
self._state = '[started]'
# Create and return a deferred for alerting on errors/stoppage
start_d = self._start_d = Deferred()
# Start a new fetch request, possibly just for the starting offset
self._fetch_offset = start_offset
self._do_fetch()
# Set up the auto-commit timer, if needed
if self.consumer_group and self.auto_commit_every_s:
self._commit_looper = LoopingCall(self._auto_commit)
self._commit_looper.clock = self.client.reactor
self._commit_looper_d = self._commit_looper.start(
self.auto_commit_every_s, now=False)
self._commit_looper_d.addCallbacks(self._commit_timer_stopped,
self._commit_timer_failed)
return start_d
|
[
"def",
"start",
"(",
"self",
",",
"start_offset",
")",
":",
"# Have we been started already, and not stopped?",
"if",
"self",
".",
"_start_d",
"is",
"not",
"None",
":",
"raise",
"RestartError",
"(",
"\"Start called on already-started consumer\"",
")",
"# Keep track of state for debugging",
"self",
".",
"_state",
"=",
"'[started]'",
"# Create and return a deferred for alerting on errors/stoppage",
"start_d",
"=",
"self",
".",
"_start_d",
"=",
"Deferred",
"(",
")",
"# Start a new fetch request, possibly just for the starting offset",
"self",
".",
"_fetch_offset",
"=",
"start_offset",
"self",
".",
"_do_fetch",
"(",
")",
"# Set up the auto-commit timer, if needed",
"if",
"self",
".",
"consumer_group",
"and",
"self",
".",
"auto_commit_every_s",
":",
"self",
".",
"_commit_looper",
"=",
"LoopingCall",
"(",
"self",
".",
"_auto_commit",
")",
"self",
".",
"_commit_looper",
".",
"clock",
"=",
"self",
".",
"client",
".",
"reactor",
"self",
".",
"_commit_looper_d",
"=",
"self",
".",
"_commit_looper",
".",
"start",
"(",
"self",
".",
"auto_commit_every_s",
",",
"now",
"=",
"False",
")",
"self",
".",
"_commit_looper_d",
".",
"addCallbacks",
"(",
"self",
".",
"_commit_timer_stopped",
",",
"self",
".",
"_commit_timer_failed",
")",
"return",
"start_d"
] |
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
A :class:`~twisted.internet.defer.Deferred` which will resolve
successfully when the consumer is cleanly stopped, or with
a failure if the :class:`Consumer` encounters an error from which
it is unable to recover.
:raises: :exc:`RestartError` if already running.
|
[
"Starts",
"fetching",
"messages",
"from",
"Kafka",
"and",
"delivering",
"them",
"to",
"the",
":",
"attr",
":",
".",
"processor",
"function",
"."
] |
python
|
train
| 45.6875 |
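A hedged usage sketch for start(): `consumer` stands for an already-constructed afkak Consumer (construction omitted), and OFFSET_EARLIEST is assumed to be re-exported at package level.

from afkak import OFFSET_EARLIEST

d = consumer.start(OFFSET_EARLIEST)

def on_stop(result):
    print('consumer stopped cleanly')

def on_error(failure):
    print('consumer failed:', failure)

# The deferred only fires when the consumer stops or hits an unrecoverable error.
d.addCallbacks(on_stop, on_error)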
bcbio/bcbio-nextgen
|
bcbio/structural/pindel.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/pindel.py#L20-L44
|
def _pindel_options(items, config, out_file, region, tmp_path):
"""parse pindel options. Add region to cmd.
:param items: (dict) information from yaml
:param config: (dict) information from yaml (items[0]['config'])
    :param region: (str or tuple) region to analyze
:param tmp_path: (str) temporal folder
:returns: (list) options for pindel
"""
variant_regions = utils.get_in(config, ("algorithm", "variant_regions"))
target = subset_variant_regions(variant_regions, region, out_file, items)
opts = ""
if target:
if isinstance(target, six.string_types) and os.path.isfile(target):
target_bed = target
else:
target_bed = os.path.join(tmp_path, "tmp.bed")
with file_transaction(config, target_bed) as tx_tmp_bed:
if not isinstance(region, (list, tuple)):
message = ("Region must be a tuple - something odd just happened")
raise ValueError(message)
chrom, start, end = region
with open(tx_tmp_bed, "w") as out_handle:
print("%s\t%s\t%s" % (chrom, start, end), file=out_handle)
opts = "-j " + remove_lcr_regions(target_bed, items)
return opts
|
[
"def",
"_pindel_options",
"(",
"items",
",",
"config",
",",
"out_file",
",",
"region",
",",
"tmp_path",
")",
":",
"variant_regions",
"=",
"utils",
".",
"get_in",
"(",
"config",
",",
"(",
"\"algorithm\"",
",",
"\"variant_regions\"",
")",
")",
"target",
"=",
"subset_variant_regions",
"(",
"variant_regions",
",",
"region",
",",
"out_file",
",",
"items",
")",
"opts",
"=",
"\"\"",
"if",
"target",
":",
"if",
"isinstance",
"(",
"target",
",",
"six",
".",
"string_types",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"target",
")",
":",
"target_bed",
"=",
"target",
"else",
":",
"target_bed",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_path",
",",
"\"tmp.bed\"",
")",
"with",
"file_transaction",
"(",
"config",
",",
"target_bed",
")",
"as",
"tx_tmp_bed",
":",
"if",
"not",
"isinstance",
"(",
"region",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"message",
"=",
"(",
"\"Region must be a tuple - something odd just happened\"",
")",
"raise",
"ValueError",
"(",
"message",
")",
"chrom",
",",
"start",
",",
"end",
"=",
"region",
"with",
"open",
"(",
"tx_tmp_bed",
",",
"\"w\"",
")",
"as",
"out_handle",
":",
"print",
"(",
"\"%s\\t%s\\t%s\"",
"%",
"(",
"chrom",
",",
"start",
",",
"end",
")",
",",
"file",
"=",
"out_handle",
")",
"opts",
"=",
"\"-j \"",
"+",
"remove_lcr_regions",
"(",
"target_bed",
",",
"items",
")",
"return",
"opts"
] |
parse pindel options. Add region to cmd.
:param items: (dict) information from yaml
:param config: (dict) information from yaml (items[0]['config'])
    :param region: (str or tuple) region to analyze
:param tmp_path: (str) temporal folder
:returns: (list) options for pindel
|
[
"parse",
"pindel",
"options",
".",
"Add",
"region",
"to",
"cmd",
".",
":",
"param",
"items",
":",
"(",
"dict",
")",
"information",
"from",
"yaml",
":",
"param",
"config",
":",
"(",
"dict",
")",
"information",
"from",
"yaml",
"(",
"items",
"[",
"0",
"]",
"[",
"config",
"]",
")",
":",
"param",
"region",
":",
"(",
"str",
"or",
"tupple",
")",
"region",
"to",
"analyze",
":",
"param",
"tmp_path",
":",
"(",
"str",
")",
"temporal",
"folder",
":",
"returns",
":",
"(",
"list",
")",
"options",
"for",
"pindel"
] |
python
|
train
| 49.16 |
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/multi_problem_v2.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L397-L410
|
def decode_schedule(string):
"""Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details.
"""
splits = string.split()
steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']
pmfs = np.reshape(
[float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])
return splits[0], tuplize(steps), tuplize(pmfs)
|
[
"def",
"decode_schedule",
"(",
"string",
")",
":",
"splits",
"=",
"string",
".",
"split",
"(",
")",
"steps",
"=",
"[",
"int",
"(",
"x",
"[",
"1",
":",
"]",
")",
"for",
"x",
"in",
"splits",
"[",
"1",
":",
"]",
"if",
"x",
"[",
"0",
"]",
"==",
"'@'",
"]",
"pmfs",
"=",
"np",
".",
"reshape",
"(",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"splits",
"[",
"1",
":",
"]",
"if",
"x",
"[",
"0",
"]",
"!=",
"'@'",
"]",
",",
"[",
"len",
"(",
"steps",
")",
",",
"-",
"1",
"]",
")",
"return",
"splits",
"[",
"0",
"]",
",",
"tuplize",
"(",
"steps",
")",
",",
"tuplize",
"(",
"pmfs",
")"
] |
Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details.
|
[
"Decodes",
"a",
"string",
"into",
"a",
"schedule",
"tuple",
"."
] |
python
|
train
| 30.071429 |
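Reading the parser backwards, the encoded string is an interpolation word followed by alternating '@step' markers and pmf values. A worked call with made-up numbers, assuming the module import path from the record and that tuplize converts the arrays into nested tuples:

from tensor2tensor.data_generators import multi_problem_v2 as mpv2

sched = mpv2.decode_schedule('step @0 0.7 0.3 @100 0.5 0.5')
# -> ('step', (0, 100), ((0.7, 0.3), (0.5, 0.5)))
# The leading word is not validated by the decoder; the steps come from the '@'
# tokens and the remaining floats are reshaped to one pmf row per step.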
vatlab/SoS
|
src/sos/task_engines.py
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/task_engines.py#L651-L702
|
def _submit_task_with_template(self, task_ids):
'''Submit tasks by interpolating a shell script defined in job_template'''
runtime = self.config
runtime.update({
'workdir': os.getcwd(),
'cur_dir': os.getcwd(), # for backward compatibility
'verbosity': env.verbosity,
'sig_mode': env.config.get('sig_mode', 'default'),
'run_mode': env.config.get('run_mode', 'run'),
'home_dir': os.path.expanduser('~')
})
if '_runtime' in env.sos_dict:
runtime.update({
x: env.sos_dict['_runtime'][x]
for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
if x in env.sos_dict['_runtime']
})
if 'nodes' not in runtime:
runtime['nodes'] = 1
if 'cores' not in runtime:
runtime['cores'] = 1
# let us first prepare a task file
job_text = ''
for task_id in task_ids:
runtime['task'] = task_id
try:
job_text += cfg_interpolate(self.job_template, runtime)
job_text += '\n'
except Exception as e:
raise ValueError(
f'Failed to generate job file for task {task_id}: {e}')
filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else
f'-{task_ids[-1]}.sh')
# now we need to write a job file
job_file = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', filename)
# do not translate newline under windows because the script will be executed
# under linux/mac
with open(job_file, 'w', newline='') as job:
job.write(job_text)
# then copy the job file to remote host if necessary
self.agent.send_task_file(job_file)
try:
cmd = f'bash ~/.sos/tasks/{filename}'
self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
except Exception as e:
raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
return True
|
[
"def",
"_submit_task_with_template",
"(",
"self",
",",
"task_ids",
")",
":",
"runtime",
"=",
"self",
".",
"config",
"runtime",
".",
"update",
"(",
"{",
"'workdir'",
":",
"os",
".",
"getcwd",
"(",
")",
",",
"'cur_dir'",
":",
"os",
".",
"getcwd",
"(",
")",
",",
"# for backward compatibility",
"'verbosity'",
":",
"env",
".",
"verbosity",
",",
"'sig_mode'",
":",
"env",
".",
"config",
".",
"get",
"(",
"'sig_mode'",
",",
"'default'",
")",
",",
"'run_mode'",
":",
"env",
".",
"config",
".",
"get",
"(",
"'run_mode'",
",",
"'run'",
")",
",",
"'home_dir'",
":",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"}",
")",
"if",
"'_runtime'",
"in",
"env",
".",
"sos_dict",
":",
"runtime",
".",
"update",
"(",
"{",
"x",
":",
"env",
".",
"sos_dict",
"[",
"'_runtime'",
"]",
"[",
"x",
"]",
"for",
"x",
"in",
"(",
"'nodes'",
",",
"'cores'",
",",
"'workdir'",
",",
"'mem'",
",",
"'walltime'",
")",
"if",
"x",
"in",
"env",
".",
"sos_dict",
"[",
"'_runtime'",
"]",
"}",
")",
"if",
"'nodes'",
"not",
"in",
"runtime",
":",
"runtime",
"[",
"'nodes'",
"]",
"=",
"1",
"if",
"'cores'",
"not",
"in",
"runtime",
":",
"runtime",
"[",
"'cores'",
"]",
"=",
"1",
"# let us first prepare a task file",
"job_text",
"=",
"''",
"for",
"task_id",
"in",
"task_ids",
":",
"runtime",
"[",
"'task'",
"]",
"=",
"task_id",
"try",
":",
"job_text",
"+=",
"cfg_interpolate",
"(",
"self",
".",
"job_template",
",",
"runtime",
")",
"job_text",
"+=",
"'\\n'",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to generate job file for task {task_id}: {e}'",
")",
"filename",
"=",
"task_ids",
"[",
"0",
"]",
"+",
"(",
"'.sh'",
"if",
"len",
"(",
"task_ids",
")",
"==",
"1",
"else",
"f'-{task_ids[-1]}.sh'",
")",
"# now we need to write a job file",
"job_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.sos'",
",",
"'tasks'",
",",
"filename",
")",
"# do not translate newline under windows because the script will be executed",
"# under linux/mac",
"with",
"open",
"(",
"job_file",
",",
"'w'",
",",
"newline",
"=",
"''",
")",
"as",
"job",
":",
"job",
".",
"write",
"(",
"job_text",
")",
"# then copy the job file to remote host if necessary",
"self",
".",
"agent",
".",
"send_task_file",
"(",
"job_file",
")",
"try",
":",
"cmd",
"=",
"f'bash ~/.sos/tasks/{filename}'",
"self",
".",
"agent",
".",
"run_command",
"(",
"cmd",
",",
"wait_for_task",
"=",
"self",
".",
"wait_for_task",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"RuntimeError",
"(",
"f'Failed to submit task {task_ids}: {e}'",
")",
"return",
"True"
] |
Submit tasks by interpolating a shell script defined in job_template
|
[
"Submit",
"tasks",
"by",
"interpolating",
"a",
"shell",
"script",
"defined",
"in",
"job_template"
] |
python
|
train
| 39.961538 |
spencerahill/aospy
|
aospy/data_loader.py
|
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/data_loader.py#L463-L471
|
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in))
|
[
"def",
"_generate_file_set",
"(",
"self",
",",
"var",
"=",
"None",
",",
"start_date",
"=",
"None",
",",
"end_date",
"=",
"None",
",",
"domain",
"=",
"None",
",",
"intvl_in",
"=",
"None",
",",
"dtype_in_vert",
"=",
"None",
",",
"dtype_in_time",
"=",
"None",
",",
"intvl_out",
"=",
"None",
")",
":",
"try",
":",
"return",
"self",
".",
"file_map",
"[",
"intvl_in",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"'File set does not exist for the specified'",
"' intvl_in {0}'",
".",
"format",
"(",
"intvl_in",
")",
")"
] |
Returns the file_set for the given interval in.
|
[
"Returns",
"the",
"file_set",
"for",
"the",
"given",
"interval",
"in",
"."
] |
python
|
train
| 52.888889 |
fxsjy/jieba
|
jieba/posseg/__init__.py
|
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/posseg/__init__.py#L272-L291
|
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w
|
[
"def",
"cut",
"(",
"sentence",
",",
"HMM",
"=",
"True",
")",
":",
"global",
"dt",
"if",
"jieba",
".",
"pool",
"is",
"None",
":",
"for",
"w",
"in",
"dt",
".",
"cut",
"(",
"sentence",
",",
"HMM",
"=",
"HMM",
")",
":",
"yield",
"w",
"else",
":",
"parts",
"=",
"strdecode",
"(",
"sentence",
")",
".",
"splitlines",
"(",
"True",
")",
"if",
"HMM",
":",
"result",
"=",
"jieba",
".",
"pool",
".",
"map",
"(",
"_lcut_internal",
",",
"parts",
")",
"else",
":",
"result",
"=",
"jieba",
".",
"pool",
".",
"map",
"(",
"_lcut_internal_no_hmm",
",",
"parts",
")",
"for",
"r",
"in",
"result",
":",
"for",
"w",
"in",
"r",
":",
"yield",
"w"
] |
Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
instances are not supported.
|
[
"Global",
"cut",
"function",
"that",
"supports",
"parallel",
"processing",
"."
] |
python
|
train
| 28.8 |
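A usage sketch; the sample sentence is arbitrary and each yielded item is a word/flag pair carrying the token and its part-of-speech tag:

import jieba.posseg as pseg

for pair in pseg.cut('我来到北京清华大学'):
    print(pair.word, pair.flag)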
wbond/asn1crypto
|
asn1crypto/keys.py
|
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/keys.py#L753-L761
|
def algorithm(self):
"""
:return:
A unicode string of "rsa", "dsa" or "ec"
"""
if self._algorithm is None:
self._algorithm = self['private_key_algorithm']['algorithm'].native
return self._algorithm
|
[
"def",
"algorithm",
"(",
"self",
")",
":",
"if",
"self",
".",
"_algorithm",
"is",
"None",
":",
"self",
".",
"_algorithm",
"=",
"self",
"[",
"'private_key_algorithm'",
"]",
"[",
"'algorithm'",
"]",
".",
"native",
"return",
"self",
".",
"_algorithm"
] |
:return:
A unicode string of "rsa", "dsa" or "ec"
|
[
":",
"return",
":",
"A",
"unicode",
"string",
"of",
"rsa",
"dsa",
"or",
"ec"
] |
python
|
train
| 28.222222 |
limpyd/redis-limpyd
|
limpyd/database.py
|
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/database.py#L171-L200
|
def call_script(self, script_dict, keys=None, args=None):
"""Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
Anything that will be returned by the script
"""
if keys is None:
keys = []
if args is None:
args = []
if 'script_object' not in script_dict:
script_dict['script_object'] = self.connection.register_script(script_dict['lua'])
return script_dict['script_object'](keys=keys, args=args, client=self.connection)
|
[
"def",
"call_script",
"(",
"self",
",",
"script_dict",
",",
"keys",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"[",
"]",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"[",
"]",
"if",
"'script_object'",
"not",
"in",
"script_dict",
":",
"script_dict",
"[",
"'script_object'",
"]",
"=",
"self",
".",
"connection",
".",
"register_script",
"(",
"script_dict",
"[",
"'lua'",
"]",
")",
"return",
"script_dict",
"[",
"'script_object'",
"]",
"(",
"keys",
"=",
"keys",
",",
"args",
"=",
"args",
",",
"client",
"=",
"self",
".",
"connection",
")"
] |
Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
Anything that will be returned by the script
|
[
"Call",
"a",
"redis",
"script",
"with",
"keys",
"and",
"args"
] |
python
|
train
| 39.233333 |
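A hedged example: the Lua snippet and key name are made up, and `database` stands for a connected limpyd database object exposing the method above.

script = {'lua': "return redis.call('GET', KEYS[1])"}

value = database.call_script(script, keys=['some:key'])
# After the first call the dict also holds 'script_object', so later calls reuse
# the registered script instead of calling register_script again.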
tomokinakamaru/mapletree
|
mapletree/defaults/request/validators.py
|
https://github.com/tomokinakamaru/mapletree/blob/19ec68769ef2c1cd2e4164ed8623e0c4280279bb/mapletree/defaults/request/validators.py#L258-L268
|
def int_option(string, options):
""" Requires values (int) to be in `args`
:param string: Value to validate
:type string: str
"""
i = int(string)
if i in options:
return i
raise ValueError('Not in allowed options')
|
[
"def",
"int_option",
"(",
"string",
",",
"options",
")",
":",
"i",
"=",
"int",
"(",
"string",
")",
"if",
"i",
"in",
"options",
":",
"return",
"i",
"raise",
"ValueError",
"(",
"'Not in allowed options'",
")"
] |
Requires values (int) to be in `args`
:param string: Value to validate
:type string: str
|
[
"Requires",
"values",
"(",
"int",
")",
"to",
"be",
"in",
"args"
] |
python
|
train
| 22 |
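Behaviour sketch with made-up values, assuming the import path from the record:

from mapletree.defaults.request.validators import int_option

int_option('2', (1, 2, 3))    # -> 2
int_option('9', (1, 2, 3))    # raises ValueError: Not in allowed options
int_option('abc', (1, 2, 3))  # int('abc') raises ValueError before the membership check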
Gorialis/jishaku
|
jishaku/cog.py
|
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/cog.py#L175-L184
|
async def jsk_hide(self, ctx: commands.Context):
"""
Hides Jishaku from the help command.
"""
if self.jsk.hidden:
return await ctx.send("Jishaku is already hidden.")
self.jsk.hidden = True
await ctx.send("Jishaku is now hidden.")
|
[
"async",
"def",
"jsk_hide",
"(",
"self",
",",
"ctx",
":",
"commands",
".",
"Context",
")",
":",
"if",
"self",
".",
"jsk",
".",
"hidden",
":",
"return",
"await",
"ctx",
".",
"send",
"(",
"\"Jishaku is already hidden.\"",
")",
"self",
".",
"jsk",
".",
"hidden",
"=",
"True",
"await",
"ctx",
".",
"send",
"(",
"\"Jishaku is now hidden.\"",
")"
] |
Hides Jishaku from the help command.
|
[
"Hides",
"Jishaku",
"from",
"the",
"help",
"command",
"."
] |
python
|
train
| 28.2 |
ml4ai/delphi
|
delphi/GrFN/networks.py
|
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/GrFN/networks.py#L126-L152
|
def to_CAG(self):
""" Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes."""
G = nx.DiGraph()
for (name, attrs) in self.nodes(data=True):
if attrs["type"] == "variable":
for pred_fn in self.predecessors(name):
if not any(
fn_type in pred_fn
for fn_type in ("condition", "decision")
):
for pred_var in self.predecessors(pred_fn):
G.add_node(
self.nodes[pred_var]["basename"],
**self.nodes[pred_var],
)
G.add_node(attrs["basename"], **attrs)
G.add_edge(
self.nodes[pred_var]["basename"],
attrs["basename"],
)
if attrs["is_loop_index"]:
G.add_edge(attrs["basename"], attrs["basename"])
return G
|
[
"def",
"to_CAG",
"(",
"self",
")",
":",
"G",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"for",
"(",
"name",
",",
"attrs",
")",
"in",
"self",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"if",
"attrs",
"[",
"\"type\"",
"]",
"==",
"\"variable\"",
":",
"for",
"pred_fn",
"in",
"self",
".",
"predecessors",
"(",
"name",
")",
":",
"if",
"not",
"any",
"(",
"fn_type",
"in",
"pred_fn",
"for",
"fn_type",
"in",
"(",
"\"condition\"",
",",
"\"decision\"",
")",
")",
":",
"for",
"pred_var",
"in",
"self",
".",
"predecessors",
"(",
"pred_fn",
")",
":",
"G",
".",
"add_node",
"(",
"self",
".",
"nodes",
"[",
"pred_var",
"]",
"[",
"\"basename\"",
"]",
",",
"*",
"*",
"self",
".",
"nodes",
"[",
"pred_var",
"]",
",",
")",
"G",
".",
"add_node",
"(",
"attrs",
"[",
"\"basename\"",
"]",
",",
"*",
"*",
"attrs",
")",
"G",
".",
"add_edge",
"(",
"self",
".",
"nodes",
"[",
"pred_var",
"]",
"[",
"\"basename\"",
"]",
",",
"attrs",
"[",
"\"basename\"",
"]",
",",
")",
"if",
"attrs",
"[",
"\"is_loop_index\"",
"]",
":",
"G",
".",
"add_edge",
"(",
"attrs",
"[",
"\"basename\"",
"]",
",",
"attrs",
"[",
"\"basename\"",
"]",
")",
"return",
"G"
] |
Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes.
|
[
"Export",
"to",
"a",
"Causal",
"Analysis",
"Graph",
"(",
"CAG",
")",
"PyGraphviz",
"AGraph",
"object",
".",
"The",
"CAG",
"shows",
"the",
"influence",
"relationships",
"between",
"the",
"variables",
"and",
"elides",
"the",
"function",
"nodes",
"."
] |
python
|
train
| 43.259259 |
asmodehn/filefinder2
|
filefinder2/_fileloader2.py
|
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/_fileloader2.py#L154-L160
|
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = os.path.split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
|
[
"def",
"is_package",
"(",
"self",
",",
"fullname",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"get_filename",
"(",
"fullname",
")",
")",
"[",
"1",
"]",
"filename_base",
"=",
"filename",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"tail_name",
"=",
"fullname",
".",
"rpartition",
"(",
"'.'",
")",
"[",
"2",
"]",
"return",
"filename_base",
"==",
"'__init__'",
"and",
"tail_name",
"!=",
"'__init__'"
] |
Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'.
|
[
"Concrete",
"implementation",
"of",
"InspectLoader",
".",
"is_package",
"by",
"checking",
"if",
"the",
"path",
"returned",
"by",
"get_filename",
"has",
"a",
"filename",
"of",
"__init__",
".",
"py",
"."
] |
python
|
train
| 59.428571 |
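The check reduces to plain string handling on the module name and file name; a standalone sketch of the same test with illustrative paths:

import os

def looks_like_package(fullname, filename):
    # Same test as above, with get_filename replaced by an explicit argument.
    base = os.path.split(filename)[1].rsplit('.', 1)[0]
    tail = fullname.rpartition('.')[2]
    return base == '__init__' and tail != '__init__'

print(looks_like_package('mypkg', '/site-packages/mypkg/__init__.py'))   # True
print(looks_like_package('mypkg.mod', '/site-packages/mypkg/mod.py'))    # False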
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L918-L1000
|
def list_images(self, tag_values=None):
'''
a method to retrieve the list of images of account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
'''
title = '%s.list_images' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = { 'Owners': [ self.iam.owner_id ] }
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request image details from AWS
self.iam.printer('Querying AWS region %s for images%s.' % (self.iam.region_name, tag_text))
image_list = []
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
# repeat request
if not response_list:
from time import sleep
from timeit import default_timer as timer
self.iam.printer('No images found initially. Checking again', flush=True)
state_timeout = 0
delay = 3
while not response_list and state_timeout < 12:
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
self.iam.printer(' done.')
# wait until all images are no longer pending
for image in response_list:
image_list.append(image['ImageId'])
# report outcome and return results
if image_list:
print_out = 'Found image'
if len(image_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(image_list)
self.iam.printer(print_out)
else:
self.iam.printer('No images found.')
return image_list
|
[
"def",
"list_images",
"(",
"self",
",",
"tag_values",
"=",
"None",
")",
":",
"title",
"=",
"'%s.list_images'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'tag_values'",
":",
"tag_values",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# add tags to method arguments",
"kw_args",
"=",
"{",
"'Owners'",
":",
"[",
"self",
".",
"iam",
".",
"owner_id",
"]",
"}",
"tag_text",
"=",
"''",
"if",
"tag_values",
":",
"kw_args",
"=",
"{",
"'Filters'",
":",
"[",
"{",
"'Name'",
":",
"'tag-value'",
",",
"'Values'",
":",
"tag_values",
"}",
"]",
"}",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"plural_value",
"=",
"''",
"if",
"len",
"(",
"tag_values",
")",
">",
"1",
":",
"plural_value",
"=",
"'s'",
"tag_text",
"=",
"' with tag value%s %s'",
"%",
"(",
"plural_value",
",",
"join_words",
"(",
"tag_values",
")",
")",
"# request image details from AWS",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for images%s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"tag_text",
")",
")",
"image_list",
"=",
"[",
"]",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_images",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"response",
"[",
"'Images'",
"]",
"# repeat request",
"if",
"not",
"response_list",
":",
"from",
"time",
"import",
"sleep",
"from",
"timeit",
"import",
"default_timer",
"as",
"timer",
"self",
".",
"iam",
".",
"printer",
"(",
"'No images found initially. Checking again'",
",",
"flush",
"=",
"True",
")",
"state_timeout",
"=",
"0",
"delay",
"=",
"3",
"while",
"not",
"response_list",
"and",
"state_timeout",
"<",
"12",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'.'",
",",
"flush",
"=",
"True",
")",
"sleep",
"(",
"delay",
")",
"t3",
"=",
"timer",
"(",
")",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_images",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"response",
"[",
"'Images'",
"]",
"t4",
"=",
"timer",
"(",
")",
"state_timeout",
"+=",
"1",
"response_time",
"=",
"t4",
"-",
"t3",
"if",
"3",
"-",
"response_time",
">",
"0",
":",
"delay",
"=",
"3",
"-",
"response_time",
"else",
":",
"delay",
"=",
"0",
"self",
".",
"iam",
".",
"printer",
"(",
"' done.'",
")",
"# wait until all images are no longer pending",
"for",
"image",
"in",
"response_list",
":",
"image_list",
".",
"append",
"(",
"image",
"[",
"'ImageId'",
"]",
")",
"# report outcome and return results",
"if",
"image_list",
":",
"print_out",
"=",
"'Found image'",
"if",
"len",
"(",
"image_list",
")",
">",
"1",
":",
"print_out",
"+=",
"'s'",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"print_out",
"+=",
"' %s.'",
"%",
"join_words",
"(",
"image_list",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"print_out",
")",
"else",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'No images found.'",
")",
"return",
"image_list"
] |
a method to retrieve the list of images of account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
|
[
"a",
"method",
"to",
"retrieve",
"the",
"list",
"of",
"images",
"of",
"account",
"on",
"AWS",
"EC2"
] |
python
|
train
| 34.795181 |
glitchassassin/lackey
|
lackey/RegionMatching.py
|
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1226-L1229
|
def setCenter(self, loc):
""" Move this region so it is centered on ``loc`` """
offset = self.getCenter().getOffset(loc) # Calculate offset from current center
return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
|
[
"def",
"setCenter",
"(",
"self",
",",
"loc",
")",
":",
"offset",
"=",
"self",
".",
"getCenter",
"(",
")",
".",
"getOffset",
"(",
"loc",
")",
"# Calculate offset from current center",
"return",
"self",
".",
"setLocation",
"(",
"self",
".",
"getTopLeft",
"(",
")",
".",
"offset",
"(",
"offset",
")",
")",
"# Move top left corner by the same offset"
] |
Move this region so it is centered on ``loc``
|
[
"Move",
"this",
"region",
"so",
"it",
"is",
"centered",
"on",
"loc"
] |
python
|
train
| 70 |
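Usage sketch with arbitrary coordinates; Region and Location are assumed to be package-level exports:

from lackey import Region, Location

reg = Region(100, 100, 300, 200)            # x, y, w, h
reg = reg.setCenter(Location(640, 360))     # region moved so its center sits on (640, 360)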
tcalmant/ipopo
|
pelix/ipopo/waiting.py
|
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/waiting.py#L93-L121
|
def _try_instantiate(self, ipopo, factory, component):
# type: (Any, str, str) -> None
"""
Tries to instantiate a component from the queue. Hides all exceptions.
:param ipopo: The iPOPO service
:param factory: Component factory
:param component: Component name
"""
try:
# Get component properties
with self.__lock:
properties = self.__queue[factory][component]
except KeyError:
# Component not in queue
return
else:
try:
# Try instantiation
ipopo.instantiate(factory, component, properties)
except TypeError:
# Unknown factory: try later
pass
except ValueError as ex:
# Already known component
_logger.error("Component already running: %s", ex)
except Exception as ex:
# Other error
_logger.exception("Error instantiating component: %s", ex)
|
[
"def",
"_try_instantiate",
"(",
"self",
",",
"ipopo",
",",
"factory",
",",
"component",
")",
":",
"# type: (Any, str, str) -> None",
"try",
":",
"# Get component properties",
"with",
"self",
".",
"__lock",
":",
"properties",
"=",
"self",
".",
"__queue",
"[",
"factory",
"]",
"[",
"component",
"]",
"except",
"KeyError",
":",
"# Component not in queue",
"return",
"else",
":",
"try",
":",
"# Try instantiation",
"ipopo",
".",
"instantiate",
"(",
"factory",
",",
"component",
",",
"properties",
")",
"except",
"TypeError",
":",
"# Unknown factory: try later",
"pass",
"except",
"ValueError",
"as",
"ex",
":",
"# Already known component",
"_logger",
".",
"error",
"(",
"\"Component already running: %s\"",
",",
"ex",
")",
"except",
"Exception",
"as",
"ex",
":",
"# Other error",
"_logger",
".",
"exception",
"(",
"\"Error instantiating component: %s\"",
",",
"ex",
")"
] |
Tries to instantiate a component from the queue. Hides all exceptions.
:param ipopo: The iPOPO service
:param factory: Component factory
:param component: Component name
|
[
"Tries",
"to",
"instantiate",
"a",
"component",
"from",
"the",
"queue",
".",
"Hides",
"all",
"exceptions",
"."
] |
python
|
train
| 35.655172 |
FutunnOpen/futuquant
|
futuquant/examples/learn/make_order_and_cancel.py
|
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/learn/make_order_and_cancel.py#L25-L136
|
def make_order_and_cancel(api_svr_ip, api_svr_port, unlock_password, test_code, trade_env, acc_id):
"""
使用请先配置正确参数:
:param api_svr_ip: (string) ip
:param api_svr_port: (string) ip
:param unlock_password: (string) 交易解锁密码, 必需修改!
:param test_code: (string) 股票
:param trade_env: 参见 ft.TrdEnv的定义
:param acc_id: 交易子账号id
"""
if unlock_password == "":
raise Exception("请先配置交易解锁密码!")
quote_ctx = ft.OpenQuoteContext(host=api_svr_ip, port=api_svr_port) # 创建行情api
quote_ctx.subscribe(test_code, ft.SubType.ORDER_BOOK) # 定阅摆盘
# 创建交易api
is_hk_trade = 'HK.' in test_code
if is_hk_trade:
trade_ctx = ft.OpenHKTradeContext(host=api_svr_ip, port=api_svr_port)
else:
trade_ctx = ft.OpenUSTradeContext(host=api_svr_ip, port=api_svr_port)
# 每手股数
lot_size = 0
is_unlock_trade = False
is_fire_trade = False
while not is_fire_trade:
sleep(2)
# 解锁交易
if not is_unlock_trade and trade_env == ft.TrdEnv.REAL:
print("unlocking trade...")
ret_code, ret_data = trade_ctx.unlock_trade(unlock_password)
is_unlock_trade = (ret_code == ft.RET_OK)
if not is_unlock_trade:
print("请求交易解锁失败:{}".format(ret_data))
break
if lot_size == 0:
print("get lotsize...")
ret, data = quote_ctx.get_market_snapshot(test_code)
lot_size = data.iloc[0]['lot_size'] if ret == ft.RET_OK else 0
if ret != ft.RET_OK:
print("取不到每手信息,重试中: {}".format(data))
continue
elif lot_size <= 0:
raise BaseException("该股票每手信息错误,可能不支持交易 code ={}".format(test_code))
print("get order book...")
ret, data = quote_ctx.get_order_book(test_code) # 得到第十档数据
if ret != ft.RET_OK:
continue
# 计算交易价格
bid_order_arr = data['Bid']
if is_hk_trade:
if len(bid_order_arr) != 10:
continue
# 港股下单: 价格定为第十档
price, _, _ = bid_order_arr[9]
else:
if len(bid_order_arr) == 0:
continue
# 美股下单: 价格定为一档降10%
price, _, _ = bid_order_arr[0]
price = round(price * 0.9, 2)
qty = lot_size
# 价格和数量判断
if qty == 0 or price == 0.0:
continue
# 下单
order_id = 0
print("place order : price={} qty={} code={}".format(price, qty, test_code))
ret_code, ret_data = trade_ctx.place_order(price=price, qty=qty, code=test_code, trd_side=ft.TrdSide.BUY,
order_type=ft.OrderType.NORMAL, trd_env=trade_env, acc_id=acc_id)
is_fire_trade = True
print('下单ret={} data={}'.format(ret_code, ret_data))
if ret_code == ft.RET_OK:
row = ret_data.iloc[0]
order_id = row['order_id']
# 循环撤单
sleep(2)
if order_id:
while True:
ret_code, ret_data = trade_ctx.order_list_query(order_id=order_id, status_filter_list=[], code='',
start='', end='', trd_env=trade_env, acc_id=acc_id)
if ret_code != ft.RET_OK:
sleep(2)
continue
order_status = ret_data.iloc[0]['order_status']
if order_status in [ft.OrderStatus.SUBMIT_FAILED, ft.OrderStatus.TIMEOUT, ft.OrderStatus.FILLED_ALL,
ft.OrderStatus.FAILED, ft.OrderStatus.DELETED]:
break
print("cancel order...")
ret_code, ret_data = trade_ctx.modify_order(modify_order_op=ft.ModifyOrderOp.CANCEL, order_id=order_id,
price=price, qty=qty, adjust_limit=0, trd_env=trade_env, acc_id=acc_id)
print("撤单ret={} data={}".format(ret_code, ret_data))
if ret_code == ft.RET_OK:
break
else:
sleep(2)
# destroy object
quote_ctx.close()
trade_ctx.close()
|
[
"def",
"make_order_and_cancel",
"(",
"api_svr_ip",
",",
"api_svr_port",
",",
"unlock_password",
",",
"test_code",
",",
"trade_env",
",",
"acc_id",
")",
":",
"if",
"unlock_password",
"==",
"\"\"",
":",
"raise",
"Exception",
"(",
"\"请先配置交易解锁密码!\")",
"",
"quote_ctx",
"=",
"ft",
".",
"OpenQuoteContext",
"(",
"host",
"=",
"api_svr_ip",
",",
"port",
"=",
"api_svr_port",
")",
"# 创建行情api",
"quote_ctx",
".",
"subscribe",
"(",
"test_code",
",",
"ft",
".",
"SubType",
".",
"ORDER_BOOK",
")",
"# 定阅摆盘",
"# 创建交易api",
"is_hk_trade",
"=",
"'HK.'",
"in",
"test_code",
"if",
"is_hk_trade",
":",
"trade_ctx",
"=",
"ft",
".",
"OpenHKTradeContext",
"(",
"host",
"=",
"api_svr_ip",
",",
"port",
"=",
"api_svr_port",
")",
"else",
":",
"trade_ctx",
"=",
"ft",
".",
"OpenUSTradeContext",
"(",
"host",
"=",
"api_svr_ip",
",",
"port",
"=",
"api_svr_port",
")",
"# 每手股数",
"lot_size",
"=",
"0",
"is_unlock_trade",
"=",
"False",
"is_fire_trade",
"=",
"False",
"while",
"not",
"is_fire_trade",
":",
"sleep",
"(",
"2",
")",
"# 解锁交易",
"if",
"not",
"is_unlock_trade",
"and",
"trade_env",
"==",
"ft",
".",
"TrdEnv",
".",
"REAL",
":",
"print",
"(",
"\"unlocking trade...\"",
")",
"ret_code",
",",
"ret_data",
"=",
"trade_ctx",
".",
"unlock_trade",
"(",
"unlock_password",
")",
"is_unlock_trade",
"=",
"(",
"ret_code",
"==",
"ft",
".",
"RET_OK",
")",
"if",
"not",
"is_unlock_trade",
":",
"print",
"(",
"\"请求交易解锁失败:{}\".format(ret_data))",
"",
"",
"",
"",
"",
"",
"break",
"if",
"lot_size",
"==",
"0",
":",
"print",
"(",
"\"get lotsize...\"",
")",
"ret",
",",
"data",
"=",
"quote_ctx",
".",
"get_market_snapshot",
"(",
"test_code",
")",
"lot_size",
"=",
"data",
".",
"iloc",
"[",
"0",
"]",
"[",
"'lot_size'",
"]",
"if",
"ret",
"==",
"ft",
".",
"RET_OK",
"else",
"0",
"if",
"ret",
"!=",
"ft",
".",
"RET_OK",
":",
"print",
"(",
"\"取不到每手信息,重试中: {}\".format(data))",
"",
"",
"",
"",
"",
"",
"continue",
"elif",
"lot_size",
"<=",
"0",
":",
"raise",
"BaseException",
"(",
"\"该股票每手信息错误,可能不支持交易 code ={}\".format(test_code))",
"",
"",
"",
"",
"",
"",
"print",
"(",
"\"get order book...\"",
")",
"ret",
",",
"data",
"=",
"quote_ctx",
".",
"get_order_book",
"(",
"test_code",
")",
"# 得到第十档数据",
"if",
"ret",
"!=",
"ft",
".",
"RET_OK",
":",
"continue",
"# 计算交易价格",
"bid_order_arr",
"=",
"data",
"[",
"'Bid'",
"]",
"if",
"is_hk_trade",
":",
"if",
"len",
"(",
"bid_order_arr",
")",
"!=",
"10",
":",
"continue",
"# 港股下单: 价格定为第十档",
"price",
",",
"_",
",",
"_",
"=",
"bid_order_arr",
"[",
"9",
"]",
"else",
":",
"if",
"len",
"(",
"bid_order_arr",
")",
"==",
"0",
":",
"continue",
"# 美股下单: 价格定为一档降10%",
"price",
",",
"_",
",",
"_",
"=",
"bid_order_arr",
"[",
"0",
"]",
"price",
"=",
"round",
"(",
"price",
"*",
"0.9",
",",
"2",
")",
"qty",
"=",
"lot_size",
"# 价格和数量判断",
"if",
"qty",
"==",
"0",
"or",
"price",
"==",
"0.0",
":",
"continue",
"# 下单",
"order_id",
"=",
"0",
"print",
"(",
"\"place order : price={} qty={} code={}\"",
".",
"format",
"(",
"price",
",",
"qty",
",",
"test_code",
")",
")",
"ret_code",
",",
"ret_data",
"=",
"trade_ctx",
".",
"place_order",
"(",
"price",
"=",
"price",
",",
"qty",
"=",
"qty",
",",
"code",
"=",
"test_code",
",",
"trd_side",
"=",
"ft",
".",
"TrdSide",
".",
"BUY",
",",
"order_type",
"=",
"ft",
".",
"OrderType",
".",
"NORMAL",
",",
"trd_env",
"=",
"trade_env",
",",
"acc_id",
"=",
"acc_id",
")",
"is_fire_trade",
"=",
"True",
"print",
"(",
"'下单ret={} data={}'.for",
"m",
"at(ret",
"_",
"code, re",
"t",
"data))",
"",
"",
"if",
"ret_code",
"==",
"ft",
".",
"RET_OK",
":",
"row",
"=",
"ret_data",
".",
"iloc",
"[",
"0",
"]",
"order_id",
"=",
"row",
"[",
"'order_id'",
"]",
"# 循环撤单",
"sleep",
"(",
"2",
")",
"if",
"order_id",
":",
"while",
"True",
":",
"ret_code",
",",
"ret_data",
"=",
"trade_ctx",
".",
"order_list_query",
"(",
"order_id",
"=",
"order_id",
",",
"status_filter_list",
"=",
"[",
"]",
",",
"code",
"=",
"''",
",",
"start",
"=",
"''",
",",
"end",
"=",
"''",
",",
"trd_env",
"=",
"trade_env",
",",
"acc_id",
"=",
"acc_id",
")",
"if",
"ret_code",
"!=",
"ft",
".",
"RET_OK",
":",
"sleep",
"(",
"2",
")",
"continue",
"order_status",
"=",
"ret_data",
".",
"iloc",
"[",
"0",
"]",
"[",
"'order_status'",
"]",
"if",
"order_status",
"in",
"[",
"ft",
".",
"OrderStatus",
".",
"SUBMIT_FAILED",
",",
"ft",
".",
"OrderStatus",
".",
"TIMEOUT",
",",
"ft",
".",
"OrderStatus",
".",
"FILLED_ALL",
",",
"ft",
".",
"OrderStatus",
".",
"FAILED",
",",
"ft",
".",
"OrderStatus",
".",
"DELETED",
"]",
":",
"break",
"print",
"(",
"\"cancel order...\"",
")",
"ret_code",
",",
"ret_data",
"=",
"trade_ctx",
".",
"modify_order",
"(",
"modify_order_op",
"=",
"ft",
".",
"ModifyOrderOp",
".",
"CANCEL",
",",
"order_id",
"=",
"order_id",
",",
"price",
"=",
"price",
",",
"qty",
"=",
"qty",
",",
"adjust_limit",
"=",
"0",
",",
"trd_env",
"=",
"trade_env",
",",
"acc_id",
"=",
"acc_id",
")",
"print",
"(",
"\"撤单ret={} data={}\".for",
"m",
"at(ret",
"_",
"code, re",
"t",
"data))",
"",
"",
"if",
"ret_code",
"==",
"ft",
".",
"RET_OK",
":",
"break",
"else",
":",
"sleep",
"(",
"2",
")",
"# destroy object",
"quote_ctx",
".",
"close",
"(",
")",
"trade_ctx",
".",
"close",
"(",
")"
] |
使用请先配置正确参数:
:param api_svr_ip: (string) ip
:param api_svr_port: (string) ip
:param unlock_password: (string) 交易解锁密码, 必需修改!
:param test_code: (string) 股票
:param trade_env: 参见 ft.TrdEnv的定义
:param acc_id: 交易子账号id
|
[
"使用请先配置正确参数",
":",
":",
"param",
"api_svr_ip",
":",
"(",
"string",
")",
"ip",
":",
"param",
"api_svr_port",
":",
"(",
"string",
")",
"ip",
":",
"param",
"unlock_password",
":",
"(",
"string",
")",
"交易解锁密码",
"必需修改!",
":",
"param",
"test_code",
":",
"(",
"string",
")",
"股票",
":",
"param",
"trade_env",
":",
"参见",
"ft",
".",
"TrdEnv的定义",
":",
"param",
"acc_id",
":",
"交易子账号id"
] |
python
|
train
| 36.1875 |
DataONEorg/d1_python
|
client_cli/src/d1_cli/impl/command_parser.py
|
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L565-L587
|
def _print_help(self):
"""Custom help message to group commands by functionality."""
msg = """Commands (type help <command> for details)
CLI: help history exit quit
Session, General: set load save reset
Session, Access Control: allowaccess denyaccess clearaccess
Session, Replication: allowrep denyrep preferrep blockrep
removerep numberrep clearrep
Read Operations: get meta list log resolve
Write Operations: update create package archive
updateaccess updatereplication
Utilities: listformats listnodes search ping
Write Operation Queue: queue run edit clearqueue
Command History: Arrow Up, Arrow Down
Command Editing: Arrow Left, Arrow Right, Delete
"""
if platform.system() != "Windows":
msg += """Command Completion: Single Tab: Complete unique command
Double Tab: Display possible commands
"""
d1_cli.impl.util.print_info(msg)
|
[
"def",
"_print_help",
"(",
"self",
")",
":",
"msg",
"=",
"\"\"\"Commands (type help <command> for details)\n\nCLI: help history exit quit\nSession, General: set load save reset\nSession, Access Control: allowaccess denyaccess clearaccess\nSession, Replication: allowrep denyrep preferrep blockrep\n removerep numberrep clearrep\nRead Operations: get meta list log resolve\nWrite Operations: update create package archive\n updateaccess updatereplication\nUtilities: listformats listnodes search ping\nWrite Operation Queue: queue run edit clearqueue\n\nCommand History: Arrow Up, Arrow Down\nCommand Editing: Arrow Left, Arrow Right, Delete\n \"\"\"",
"if",
"platform",
".",
"system",
"(",
")",
"!=",
"\"Windows\"",
":",
"msg",
"+=",
"\"\"\"Command Completion: Single Tab: Complete unique command\n Double Tab: Display possible commands\n \"\"\"",
"d1_cli",
".",
"impl",
".",
"util",
".",
"print_info",
"(",
"msg",
")"
] |
Custom help message to group commands by functionality.
|
[
"Custom",
"help",
"message",
"to",
"group",
"commands",
"by",
"functionality",
"."
] |
python
|
train
| 44.913043 |
ARMmbed/mbed-cloud-sdk-python
|
src/mbed_cloud/connect/notifications.py
|
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/connect/notifications.py#L107-L123
|
def check_error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
:rtype: tuple
"""
if not self.is_done:
raise CloudUnhandledError("Need to check if request is done, before checking for error")
response = self.db[self.async_id]
error_msg = response["error"]
status_code = int(response["status_code"])
payload = response["payload"]
return status_code, error_msg, payload
|
[
"def",
"check_error",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_done",
":",
"raise",
"CloudUnhandledError",
"(",
"\"Need to check if request is done, before checking for error\"",
")",
"response",
"=",
"self",
".",
"db",
"[",
"self",
".",
"async_id",
"]",
"error_msg",
"=",
"response",
"[",
"\"error\"",
"]",
"status_code",
"=",
"int",
"(",
"response",
"[",
"\"status_code\"",
"]",
")",
"payload",
"=",
"response",
"[",
"\"payload\"",
"]",
"return",
"status_code",
",",
"error_msg",
",",
"payload"
] |
Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
:rtype: tuple
|
[
"Check",
"if",
"the",
"async",
"response",
"is",
"an",
"error",
"."
] |
python
|
train
| 40.470588 |
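Call-order sketch following the docstring's requirement to confirm is_done first; `async_resp` stands for the async-response wrapper that carries these members.

import time

while not async_resp.is_done:
    time.sleep(0.1)

status_code, error_msg, payload = async_resp.check_error()
if error_msg:
    print('request failed with status %s: %s' % (status_code, error_msg))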
satellogic/telluric
|
telluric/georaster.py
|
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L736-L740
|
def band_names(self):
"""Raster affine."""
if self._band_names is None:
self._populate_from_rasterio_object(read_image=False)
return self._band_names
|
[
"def",
"band_names",
"(",
"self",
")",
":",
"if",
"self",
".",
"_band_names",
"is",
"None",
":",
"self",
".",
"_populate_from_rasterio_object",
"(",
"read_image",
"=",
"False",
")",
"return",
"self",
".",
"_band_names"
] |
Raster affine.
|
[
"Raster",
"affine",
"."
] |
python
|
train
| 36.2 |
IndicoDataSolutions/IndicoIo-python
|
indicoio/custom/custom.py
|
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L81-L117
|
def visualize_explanation(explanation, label=None):
"""
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
"""
if not sys.version_info[:2] >= (3, 5):
raise IndicoError("Python >= 3.5+ is required for explanation visualization")
try:
from colr import Colr as C
except ImportError:
raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.")
cursor = 0
text = explanation['text']
for token in explanation.get('token_predictions'):
try:
class_confidence = token.get('prediction')[label]
except KeyError:
raise IndicoError("Invalid label: {}".format(label))
if class_confidence > 0.5:
fg_color = (255, 255, 255)
else:
fg_color = (0, 0, 0)
rg_value = 255 - int(class_confidence * 255)
token_end = token.get('token').get('end')
token_text = text[cursor:token_end]
cursor = token_end
sys.stdout.write(
str(C().b_rgb(
rg_value, rg_value, 255
).rgb(
fg_color[0], fg_color[1], fg_color[2], token_text
))
)
sys.stdout.write("\n")
sys.stdout.flush()
|
[
"def",
"visualize_explanation",
"(",
"explanation",
",",
"label",
"=",
"None",
")",
":",
"if",
"not",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
">=",
"(",
"3",
",",
"5",
")",
":",
"raise",
"IndicoError",
"(",
"\"Python >= 3.5+ is required for explanation visualization\"",
")",
"try",
":",
"from",
"colr",
"import",
"Colr",
"as",
"C",
"except",
"ImportError",
":",
"raise",
"IndicoError",
"(",
"\"Package colr >= 0.8.1 is required for explanation visualization.\"",
")",
"cursor",
"=",
"0",
"text",
"=",
"explanation",
"[",
"'text'",
"]",
"for",
"token",
"in",
"explanation",
".",
"get",
"(",
"'token_predictions'",
")",
":",
"try",
":",
"class_confidence",
"=",
"token",
".",
"get",
"(",
"'prediction'",
")",
"[",
"label",
"]",
"except",
"KeyError",
":",
"raise",
"IndicoError",
"(",
"\"Invalid label: {}\"",
".",
"format",
"(",
"label",
")",
")",
"if",
"class_confidence",
">",
"0.5",
":",
"fg_color",
"=",
"(",
"255",
",",
"255",
",",
"255",
")",
"else",
":",
"fg_color",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
"rg_value",
"=",
"255",
"-",
"int",
"(",
"class_confidence",
"*",
"255",
")",
"token_end",
"=",
"token",
".",
"get",
"(",
"'token'",
")",
".",
"get",
"(",
"'end'",
")",
"token_text",
"=",
"text",
"[",
"cursor",
":",
"token_end",
"]",
"cursor",
"=",
"token_end",
"sys",
".",
"stdout",
".",
"write",
"(",
"str",
"(",
"C",
"(",
")",
".",
"b_rgb",
"(",
"rg_value",
",",
"rg_value",
",",
"255",
")",
".",
"rgb",
"(",
"fg_color",
"[",
"0",
"]",
",",
"fg_color",
"[",
"1",
"]",
",",
"fg_color",
"[",
"2",
"]",
",",
"token_text",
")",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
|
[
"Given",
"the",
"output",
"of",
"the",
"explain",
"()",
"endpoint",
"produces",
"a",
"terminal",
"visual",
"that",
"plots",
"response",
"strength",
"over",
"a",
"sequence"
] |
python
|
train
| 33.486486 |
rosenbrockc/fortpy
|
fortpy/isense/classes.py
|
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/classes.py#L62-L82
|
def description(self):
"""Returns the full docstring information for the element suggested
as a completion."""
result = ""
if isinstance(self._element, ValueElement):
if self._element.kind is not None:
result = "{}({}) | {}".format(self._element.dtype, self._element.kind,
self._element.summary)
else:
result = "{} | {}".format(self._element.dtype,
self._element.summary)
elif isinstance(self._element, Executable):
result = "({})".format(self._element.parameters_as_string())
elif isinstance(self._element, str):
result = "Intrinsic Fortran Symbol"
elif isinstance(self._element, TypeExecutable):
result = self._type_description()
#Clean off any line breaks from the XML and excessive whitespace.
cleaned = re.sub("\s+", " ", result.replace("\n", " "))
return cleaned
|
[
"def",
"description",
"(",
"self",
")",
":",
"result",
"=",
"\"\"",
"if",
"isinstance",
"(",
"self",
".",
"_element",
",",
"ValueElement",
")",
":",
"if",
"self",
".",
"_element",
".",
"kind",
"is",
"not",
"None",
":",
"result",
"=",
"\"{}({}) | {}\"",
".",
"format",
"(",
"self",
".",
"_element",
".",
"dtype",
",",
"self",
".",
"_element",
".",
"kind",
",",
"self",
".",
"_element",
".",
"summary",
")",
"else",
":",
"result",
"=",
"\"{} | {}\"",
".",
"format",
"(",
"self",
".",
"_element",
".",
"dtype",
",",
"self",
".",
"_element",
".",
"summary",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_element",
",",
"Executable",
")",
":",
"result",
"=",
"\"({})\"",
".",
"format",
"(",
"self",
".",
"_element",
".",
"parameters_as_string",
"(",
")",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_element",
",",
"str",
")",
":",
"result",
"=",
"\"Intrinsic Fortran Symbol\"",
"elif",
"isinstance",
"(",
"self",
".",
"_element",
",",
"TypeExecutable",
")",
":",
"result",
"=",
"self",
".",
"_type_description",
"(",
")",
"#Clean off any line breaks from the XML and excessive whitespace.",
"cleaned",
"=",
"re",
".",
"sub",
"(",
"\"\\s+\"",
",",
"\" \"",
",",
"result",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
")",
"return",
"cleaned"
] |
Returns the full docstring information for the element suggested
as a completion.
|
[
"Returns",
"the",
"full",
"docstring",
"information",
"for",
"the",
"element",
"suggested",
"as",
"a",
"completion",
"."
] |
python
|
train
| 47.904762 |
pyviz/holoviews
|
holoviews/core/dimension.py
|
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L97-L129
|
def process_dimensions(kdims, vdims):
"""Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]
"""
dimensions = {}
for group, dims in [('kdims', kdims), ('vdims', vdims)]:
if dims is None:
continue
elif isinstance(dims, (tuple, basestring, Dimension, dict)):
dims = [dims]
elif not isinstance(dims, list):
raise ValueError("%s argument expects a Dimension or list of dimensions, "
"specified as tuples, strings, dictionaries or Dimension "
"instances, not a %s type. Ensure you passed the data as the "
"first argument." % (group, type(dims).__name__))
for dim in dims:
if not isinstance(dim, (tuple, basestring, Dimension, dict)):
raise ValueError('Dimensions must be defined as a tuple, '
'string, dictionary or Dimension instance, '
'found a %s type.' % type(dim).__name__)
dimensions[group] = [asdim(d) for d in dims]
return dimensions
|
[
"def",
"process_dimensions",
"(",
"kdims",
",",
"vdims",
")",
":",
"dimensions",
"=",
"{",
"}",
"for",
"group",
",",
"dims",
"in",
"[",
"(",
"'kdims'",
",",
"kdims",
")",
",",
"(",
"'vdims'",
",",
"vdims",
")",
"]",
":",
"if",
"dims",
"is",
"None",
":",
"continue",
"elif",
"isinstance",
"(",
"dims",
",",
"(",
"tuple",
",",
"basestring",
",",
"Dimension",
",",
"dict",
")",
")",
":",
"dims",
"=",
"[",
"dims",
"]",
"elif",
"not",
"isinstance",
"(",
"dims",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"%s argument expects a Dimension or list of dimensions, \"",
"\"specified as tuples, strings, dictionaries or Dimension \"",
"\"instances, not a %s type. Ensure you passed the data as the \"",
"\"first argument.\"",
"%",
"(",
"group",
",",
"type",
"(",
"dims",
")",
".",
"__name__",
")",
")",
"for",
"dim",
"in",
"dims",
":",
"if",
"not",
"isinstance",
"(",
"dim",
",",
"(",
"tuple",
",",
"basestring",
",",
"Dimension",
",",
"dict",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Dimensions must be defined as a tuple, '",
"'string, dictionary or Dimension instance, '",
"'found a %s type.'",
"%",
"type",
"(",
"dim",
")",
".",
"__name__",
")",
"dimensions",
"[",
"group",
"]",
"=",
"[",
"asdim",
"(",
"d",
")",
"for",
"d",
"in",
"dims",
"]",
"return",
"dimensions"
] |
Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]
|
[
"Converts",
"kdims",
"and",
"vdims",
"to",
"Dimension",
"objects",
"."
] |
python
|
train
| 45 |
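A minimal usage sketch for the `process_dimensions` helper in the record above. It assumes HoloViews is installed and that the function is importable from the module path shown; the dimension names are illustrative.

from holoviews.core.dimension import process_dimensions

# Strings, tuples and Dimension instances are all normalised to Dimension objects.
dims = process_dimensions(kdims="x", vdims=["y", ("z", "Height")])
print(dims["kdims"])  # [Dimension('x')]
print(dims["vdims"])  # [Dimension('y'), Dimension('z')]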
ocadotechnology/django-closuretree
|
closuretree/models.py
|
https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L259-L266
|
def get_root(self):
"""Return the furthest ancestor of this node."""
if self.is_root_node():
return self
return self.get_ancestors().order_by(
"-%s__depth" % self._closure_parentref()
)[0]
|
[
"def",
"get_root",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_root_node",
"(",
")",
":",
"return",
"self",
"return",
"self",
".",
"get_ancestors",
"(",
")",
".",
"order_by",
"(",
"\"-%s__depth\"",
"%",
"self",
".",
"_closure_parentref",
"(",
")",
")",
"[",
"0",
"]"
] |
Return the furthest ancestor of this node.
|
[
"Return",
"the",
"furthest",
"ancestor",
"of",
"this",
"node",
"."
] |
python
|
train
| 29.75 |
dead-beef/markovchain
|
markovchain/image/type.py
|
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/image/type.py#L77-L97
|
def merge(self, imgs):
"""Merge image channels.
Parameters
----------
imgs : `list` of `PIL.Image.Image`
Returns
-------
`PIL.Image.Image`
Raises
------
ValueError
If image channel list is empty.
"""
if not imgs:
raise ValueError('empty channel list')
if len(imgs) == 1:
return imgs[0]
return Image.merge(self.mode, imgs)
|
[
"def",
"merge",
"(",
"self",
",",
"imgs",
")",
":",
"if",
"not",
"imgs",
":",
"raise",
"ValueError",
"(",
"'empty channel list'",
")",
"if",
"len",
"(",
"imgs",
")",
"==",
"1",
":",
"return",
"imgs",
"[",
"0",
"]",
"return",
"Image",
".",
"merge",
"(",
"self",
".",
"mode",
",",
"imgs",
")"
] |
Merge image channels.
Parameters
----------
imgs : `list` of `PIL.Image.Image`
Returns
-------
`PIL.Image.Image`
Raises
------
ValueError
If image channel list is empty.
|
[
"Merge",
"image",
"channels",
"."
] |
python
|
train
| 21.52381 |
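A short sketch of what the `merge` method in the record above does for the multi-channel case, approximated here with PIL directly since the record only shows the method body; the image contents and mode are illustrative.

from PIL import Image

# Split a small RGB image into its three bands, then recombine them,
# which is what merge() does when more than one channel is passed.
img = Image.new("RGB", (4, 4), color=(10, 20, 30))
channels = list(img.split())
merged = Image.merge("RGB", channels)
assert merged.getpixel((0, 0)) == (10, 20, 30)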
NuGrid/NuGridPy
|
nugridpy/data_plot.py
|
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/data_plot.py#L4594-L4656
|
def density_profile(self,ixaxis='mass',ifig=None,colour=None,label=None,fname=None):
'''
    Plot density as a function of either mass coordinate or radius.
Parameters
----------
ixaxis : string
'mass' or 'radius'
The default value is 'mass'
ifig : integer or string
The figure label
The default value is None
colour : string
What colour the line should be
The default value is None
label : string
Label for the line
The default value is None
fname : integer
What cycle to plot from (if SE output)
The default value is None
'''
pT=self._classTest()
# Class-specific things:
if pT is 'mesa_profile':
x = self.get(ixaxis)
if ixaxis is 'radius':
x = x*ast.rsun_cm
y = self.get('logRho')
elif pT is 'se':
if fname is None:
raise IOError("Please provide the cycle number fname")
x = self.se.get(fname,ixaxis)
y = np.log10(self.se.get(fname,'rho'))
else:
raise IOError("Sorry. the density_profile method is not available \
for this class")
# Plot-specific things:
if ixaxis is 'radius':
x = np.log10(x)
xlab='$\log_{10}(r\,/\,{\\rm cm})$'
else:
xlab='${\\rm Mass}\,/\,M_\odot$'
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
pl.plot(x,y,color=colour,label=label)
else:
pl.plot(x,y,label=label)
pl.legend(loc='best').draw_frame(False)
else:
if colour is not None:
pl.plot(x,y,color=colour)
else:
pl.plot(x,y)
pl.xlabel(xlab)
pl.ylabel('$\log_{10}(\\rho\,/\,{\\rm g\,cm}^{-3})$')
|
[
"def",
"density_profile",
"(",
"self",
",",
"ixaxis",
"=",
"'mass'",
",",
"ifig",
"=",
"None",
",",
"colour",
"=",
"None",
",",
"label",
"=",
"None",
",",
"fname",
"=",
"None",
")",
":",
"pT",
"=",
"self",
".",
"_classTest",
"(",
")",
"# Class-specific things:",
"if",
"pT",
"is",
"'mesa_profile'",
":",
"x",
"=",
"self",
".",
"get",
"(",
"ixaxis",
")",
"if",
"ixaxis",
"is",
"'radius'",
":",
"x",
"=",
"x",
"*",
"ast",
".",
"rsun_cm",
"y",
"=",
"self",
".",
"get",
"(",
"'logRho'",
")",
"elif",
"pT",
"is",
"'se'",
":",
"if",
"fname",
"is",
"None",
":",
"raise",
"IOError",
"(",
"\"Please provide the cycle number fname\"",
")",
"x",
"=",
"self",
".",
"se",
".",
"get",
"(",
"fname",
",",
"ixaxis",
")",
"y",
"=",
"np",
".",
"log10",
"(",
"self",
".",
"se",
".",
"get",
"(",
"fname",
",",
"'rho'",
")",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Sorry. the density_profile method is not available \\\n for this class\"",
")",
"# Plot-specific things:",
"if",
"ixaxis",
"is",
"'radius'",
":",
"x",
"=",
"np",
".",
"log10",
"(",
"x",
")",
"xlab",
"=",
"'$\\log_{10}(r\\,/\\,{\\\\rm cm})$'",
"else",
":",
"xlab",
"=",
"'",
"\\\\",
"\\,",
"\\,",
"o",
"dot",
"$",
"'",
"if",
"ifig",
"is",
"not",
"None",
":",
"pl",
".",
"figure",
"(",
"ifig",
")",
"if",
"label",
"is",
"not",
"None",
":",
"if",
"colour",
"is",
"not",
"None",
":",
"pl",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"colour",
",",
"label",
"=",
"label",
")",
"else",
":",
"pl",
".",
"plot",
"(",
"x",
",",
"y",
",",
"label",
"=",
"label",
")",
"pl",
".",
"legend",
"(",
"loc",
"=",
"'best'",
")",
".",
"draw_frame",
"(",
"False",
")",
"else",
":",
"if",
"colour",
"is",
"not",
"None",
":",
"pl",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"colour",
")",
"else",
":",
"pl",
".",
"plot",
"(",
"x",
",",
"y",
")",
"pl",
".",
"xlabel",
"(",
"xlab",
")",
"pl",
".",
"ylabel",
"(",
"'$\\log_{10}(\\\\rho\\,/\\,{\\\\rm g\\,cm}^{-3})$'",
")"
] |
Plot density as a function of either mass coordinate or radius.
Parameters
----------
ixaxis : string
'mass' or 'radius'
The default value is 'mass'
ifig : integer or string
The figure label
The default value is None
colour : string
What colour the line should be
The default value is None
label : string
Label for the line
The default value is None
fname : integer
What cycle to plot from (if SE output)
The default value is None
|
[
"Plot",
"density",
"as",
"a",
"function",
"of",
"either",
"mass",
"coordiate",
"or",
"radius",
"."
] |
python
|
train
| 31.15873 |
RI-imaging/qpsphere
|
qpsphere/edgefit.py
|
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/edgefit.py#L174-L200
|
def circle_radii(params, xedge, yedge):
"""Compute the distance to the center from cartesian coordinates
This method is used for fitting a circle to a set of contour
points.
Parameters
----------
params: lmfit.Parameters
Must contain the keys:
- "cx": origin of x coordinate [px]
- "cy": origin of y coordinate [px]
xedge: 1D np.ndarray
Edge coordinates x [px]
yedge: 1D np.ndarray
Edge coordinates y [px]
Returns
-------
radii: 1D np.ndarray
Radii corresponding to edge coordinates relative to origin
"""
cx = params["cx"].value
cy = params["cy"].value
radii = np.sqrt((cx - xedge)**2 + (cy - yedge)**2)
return radii
|
[
"def",
"circle_radii",
"(",
"params",
",",
"xedge",
",",
"yedge",
")",
":",
"cx",
"=",
"params",
"[",
"\"cx\"",
"]",
".",
"value",
"cy",
"=",
"params",
"[",
"\"cy\"",
"]",
".",
"value",
"radii",
"=",
"np",
".",
"sqrt",
"(",
"(",
"cx",
"-",
"xedge",
")",
"**",
"2",
"+",
"(",
"cy",
"-",
"yedge",
")",
"**",
"2",
")",
"return",
"radii"
] |
Compute the distance to the center from cartesian coordinates
This method is used for fitting a circle to a set of contour
points.
Parameters
----------
params: lmfit.Parameters
Must contain the keys:
- "cx": origin of x coordinate [px]
- "cy": origin of y coordinate [px]
xedge: 1D np.ndarray
Edge coordinates x [px]
yedge: 1D np.ndarray
Edge coordinates y [px]
Returns
-------
radii: 1D np.ndarray
Radii corresponding to edge coordinates relative to origin
|
[
"Compute",
"the",
"distance",
"to",
"the",
"center",
"from",
"cartesian",
"coordinates"
] |
python
|
train
| 26.185185 |
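A small sketch of how `circle_radii` from the record above can be used together with lmfit, assuming the function is in scope (it lives in qpsphere.edgefit); the contour points and the centre guess are synthetic, made up for illustration.

import numpy as np
import lmfit

# Synthetic contour points roughly on a circle of radius 5 around (10, 10).
theta = np.linspace(0, 2 * np.pi, 50)
xedge = 10 + 5 * np.cos(theta)
yedge = 10 + 5 * np.sin(theta)

params = lmfit.Parameters()
params.add("cx", value=9.0)   # initial guess for the centre x
params.add("cy", value=11.0)  # initial guess for the centre y

radii = circle_radii(params, xedge, yedge)
# For a circle fit, the residual handed to lmfit.minimize would typically be
# radii - radii.mean(), driving all radii towards a common value.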
ArchiveTeam/wpull
|
wpull/converter.py
|
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/converter.py#L51-L57
|
def convert_all(self):
'''Convert all links in URL table.'''
for url_record in self._url_table.get_all():
if url_record.status != Status.done:
continue
self.convert_by_record(url_record)
|
[
"def",
"convert_all",
"(",
"self",
")",
":",
"for",
"url_record",
"in",
"self",
".",
"_url_table",
".",
"get_all",
"(",
")",
":",
"if",
"url_record",
".",
"status",
"!=",
"Status",
".",
"done",
":",
"continue",
"self",
".",
"convert_by_record",
"(",
"url_record",
")"
] |
Convert all links in URL table.
|
[
"Convert",
"all",
"links",
"in",
"URL",
"table",
"."
] |
python
|
train
| 33.857143 |
inveniosoftware/invenio-migrator
|
invenio_migrator/records.py
|
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/records.py#L260-L268
|
def missing_pids(self):
"""Filter persistent identifiers."""
missing = []
for p in self.pids:
try:
PersistentIdentifier.get(p.pid_type, p.pid_value)
except PIDDoesNotExistError:
missing.append(p)
return missing
|
[
"def",
"missing_pids",
"(",
"self",
")",
":",
"missing",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"pids",
":",
"try",
":",
"PersistentIdentifier",
".",
"get",
"(",
"p",
".",
"pid_type",
",",
"p",
".",
"pid_value",
")",
"except",
"PIDDoesNotExistError",
":",
"missing",
".",
"append",
"(",
"p",
")",
"return",
"missing"
] |
Filter persistent identifiers.
|
[
"Filter",
"persistent",
"identifiers",
"."
] |
python
|
test
| 32.222222 |
vsoch/helpme
|
helpme/utils/terminal.py
|
https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/utils/terminal.py#L91-L104
|
def which(software, strip_newline=True):
'''get_install will return the path to where an executable is installed.
'''
if software is None:
software = "singularity"
cmd = ['which', software ]
try:
result = run_command(cmd)
if strip_newline is True:
result['message'] = result['message'].strip('\n')
return result
except: # FileNotFoundError
return None
|
[
"def",
"which",
"(",
"software",
",",
"strip_newline",
"=",
"True",
")",
":",
"if",
"software",
"is",
"None",
":",
"software",
"=",
"\"singularity\"",
"cmd",
"=",
"[",
"'which'",
",",
"software",
"]",
"try",
":",
"result",
"=",
"run_command",
"(",
"cmd",
")",
"if",
"strip_newline",
"is",
"True",
":",
"result",
"[",
"'message'",
"]",
"=",
"result",
"[",
"'message'",
"]",
".",
"strip",
"(",
"'\\n'",
")",
"return",
"result",
"except",
":",
"# FileNotFoundError",
"return",
"None"
] |
get_install will return the path to where an executable is installed.
|
[
"get_install",
"will",
"return",
"the",
"path",
"to",
"where",
"an",
"executable",
"is",
"installed",
"."
] |
python
|
train
| 29.642857 |
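A quick usage sketch for the `which` helper in the record above, assuming the import path mirrors the file path shown and that `run_command` returns a dict with a 'message' key, as the code implies.

from helpme.utils.terminal import which

result = which("python")
if result is not None:
    print(result["message"])  # e.g. /usr/bin/python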
aio-libs/aioredis
|
aioredis/commands/generic.py
|
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/generic.py#L88-L120
|
def migrate_keys(self, host, port, keys, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
"""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not isinstance(keys, (list, tuple)):
raise TypeError("keys argument must be list or tuple")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater equal 0")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
if not keys:
raise ValueError("keys must not be empty")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
flags.append(b'KEYS')
flags.extend(keys)
fut = self.execute(b'MIGRATE', host, port,
"", dest_db, timeout, *flags)
return wait_ok(fut)
|
[
"def",
"migrate_keys",
"(",
"self",
",",
"host",
",",
"port",
",",
"keys",
",",
"dest_db",
",",
"timeout",
",",
"*",
",",
"copy",
"=",
"False",
",",
"replace",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"host",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"host argument must be str\"",
")",
"if",
"not",
"isinstance",
"(",
"timeout",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"timeout argument must be int\"",
")",
"if",
"not",
"isinstance",
"(",
"dest_db",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"dest_db argument must be int\"",
")",
"if",
"not",
"isinstance",
"(",
"keys",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"keys argument must be list or tuple\"",
")",
"if",
"not",
"host",
":",
"raise",
"ValueError",
"(",
"\"Got empty host\"",
")",
"if",
"dest_db",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"dest_db must be greater equal 0\"",
")",
"if",
"timeout",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"timeout must be greater equal 0\"",
")",
"if",
"not",
"keys",
":",
"raise",
"ValueError",
"(",
"\"keys must not be empty\"",
")",
"flags",
"=",
"[",
"]",
"if",
"copy",
":",
"flags",
".",
"append",
"(",
"b'COPY'",
")",
"if",
"replace",
":",
"flags",
".",
"append",
"(",
"b'REPLACE'",
")",
"flags",
".",
"append",
"(",
"b'KEYS'",
")",
"flags",
".",
"extend",
"(",
"keys",
")",
"fut",
"=",
"self",
".",
"execute",
"(",
"b'MIGRATE'",
",",
"host",
",",
"port",
",",
"\"\"",
",",
"dest_db",
",",
"timeout",
",",
"*",
"flags",
")",
"return",
"wait_ok",
"(",
"fut",
")"
] |
Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
|
[
"Atomically",
"transfer",
"keys",
"from",
"one",
"Redis",
"instance",
"to",
"another",
"one",
"."
] |
python
|
train
| 38.818182 |
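A hedged sketch of calling `migrate_keys` from the record above with an aioredis 1.x client; the destination host, ports and key names are placeholders, and both Redis servers must actually be reachable for MIGRATE to succeed.

import asyncio
import aioredis

async def main():
    # Source instance; the destination below is a placeholder.
    redis = await aioredis.create_redis_pool("redis://localhost:6379")
    ok = await redis.migrate_keys(
        "dest.example.com", 6379,
        keys=["user:1", "user:2"],
        dest_db=0, timeout=1000,
        copy=True, replace=False,
    )
    print(ok)  # True when the server answers OK
    redis.close()
    await redis.wait_closed()

asyncio.run(main())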
ayust/kitnirc
|
kitnirc/contrib/admintools.py
|
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/contrib/admintools.py#L11-L30
|
def is_admin(controller, client, actor):
"""Used to determine whether someone issuing a command is an admin.
By default, checks to see if there's a line of the type nick=host that
matches the command's actor in the [admins] section of the config file,
or a key that matches the entire mask (e.g. "foo@bar" or "foo@bar=1").
"""
config = controller.config
if not config.has_section("admins"):
logging.debug("Ignoring is_admin check - no [admins] config found.")
return False
for key,val in config.items("admins"):
if actor == User(key):
logging.debug("is_admin: %r matches admin %r", actor, key)
return True
if actor.nick.lower() == key.lower() and actor.host.lower() == val.lower():
logging.debug("is_admin: %r matches admin %r=%r", actor, key, val)
return True
logging.debug("is_admin: %r is not an admin.", actor)
return False
|
[
"def",
"is_admin",
"(",
"controller",
",",
"client",
",",
"actor",
")",
":",
"config",
"=",
"controller",
".",
"config",
"if",
"not",
"config",
".",
"has_section",
"(",
"\"admins\"",
")",
":",
"logging",
".",
"debug",
"(",
"\"Ignoring is_admin check - no [admins] config found.\"",
")",
"return",
"False",
"for",
"key",
",",
"val",
"in",
"config",
".",
"items",
"(",
"\"admins\"",
")",
":",
"if",
"actor",
"==",
"User",
"(",
"key",
")",
":",
"logging",
".",
"debug",
"(",
"\"is_admin: %r matches admin %r\"",
",",
"actor",
",",
"key",
")",
"return",
"True",
"if",
"actor",
".",
"nick",
".",
"lower",
"(",
")",
"==",
"key",
".",
"lower",
"(",
")",
"and",
"actor",
".",
"host",
".",
"lower",
"(",
")",
"==",
"val",
".",
"lower",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"\"is_admin: %r matches admin %r=%r\"",
",",
"actor",
",",
"key",
",",
"val",
")",
"return",
"True",
"logging",
".",
"debug",
"(",
"\"is_admin: %r is not an admin.\"",
",",
"actor",
")",
"return",
"False"
] |
Used to determine whether someone issuing a command is an admin.
By default, checks to see if there's a line of the type nick=host that
matches the command's actor in the [admins] section of the config file,
or a key that matches the entire mask (e.g. "foo@bar" or "foo@bar=1").
|
[
"Used",
"to",
"determine",
"whether",
"someone",
"issuing",
"a",
"command",
"is",
"an",
"admin",
"."
] |
python
|
train
| 46.45 |
ccubed/PyMoe
|
Pymoe/Kitsu/auth.py
|
https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/auth.py#L69-L88
|
def get(self, username):
"""
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
"""
if not self.remember:
raise NotSaving
if username not in self.token_storage:
raise UserNotFound
if self.token_storage[username]['expiration'] < time.time():
new_token = self.refresh(self.token_storage[username]['refresh'])
self.token_storage[username]['token'] = new_token[0]
self.token_storage[username]['expiration'] = new_token[1]
return new_token[0]
else:
return self.token_storage[username]['token']
|
[
"def",
"get",
"(",
"self",
",",
"username",
")",
":",
"if",
"not",
"self",
".",
"remember",
":",
"raise",
"NotSaving",
"if",
"username",
"not",
"in",
"self",
".",
"token_storage",
":",
"raise",
"UserNotFound",
"if",
"self",
".",
"token_storage",
"[",
"username",
"]",
"[",
"'expiration'",
"]",
"<",
"time",
".",
"time",
"(",
")",
":",
"new_token",
"=",
"self",
".",
"refresh",
"(",
"self",
".",
"token_storage",
"[",
"username",
"]",
"[",
"'refresh'",
"]",
")",
"self",
".",
"token_storage",
"[",
"username",
"]",
"[",
"'token'",
"]",
"=",
"new_token",
"[",
"0",
"]",
"self",
".",
"token_storage",
"[",
"username",
"]",
"[",
"'expiration'",
"]",
"=",
"new_token",
"[",
"1",
"]",
"return",
"new_token",
"[",
"0",
"]",
"else",
":",
"return",
"self",
".",
"token_storage",
"[",
"username",
"]",
"[",
"'token'",
"]"
] |
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
|
[
"If",
"using",
"the",
"remember",
"option",
"and",
"KitsuAuth",
"is",
"storing",
"your",
"tokens",
"this",
"function",
"will",
"retrieve",
"one",
"."
] |
python
|
train
| 39.15 |
SecurityInnovation/PGPy
|
pgpy/pgp.py
|
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L609-L636
|
def new(cls, pn, comment="", email=""):
"""
Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
:returns: :py:obj:`PGPUID`
"""
uid = PGPUID()
if isinstance(pn, bytearray):
uid._uid = UserAttribute()
uid._uid.image.image = pn
uid._uid.image.iencoding = ImageEncoding.encodingof(pn)
uid._uid.update_hlen()
else:
uid._uid = UserID()
uid._uid.name = pn
uid._uid.comment = comment
uid._uid.email = email
uid._uid.update_hlen()
return uid
|
[
"def",
"new",
"(",
"cls",
",",
"pn",
",",
"comment",
"=",
"\"\"",
",",
"email",
"=",
"\"\"",
")",
":",
"uid",
"=",
"PGPUID",
"(",
")",
"if",
"isinstance",
"(",
"pn",
",",
"bytearray",
")",
":",
"uid",
".",
"_uid",
"=",
"UserAttribute",
"(",
")",
"uid",
".",
"_uid",
".",
"image",
".",
"image",
"=",
"pn",
"uid",
".",
"_uid",
".",
"image",
".",
"iencoding",
"=",
"ImageEncoding",
".",
"encodingof",
"(",
"pn",
")",
"uid",
".",
"_uid",
".",
"update_hlen",
"(",
")",
"else",
":",
"uid",
".",
"_uid",
"=",
"UserID",
"(",
")",
"uid",
".",
"_uid",
".",
"name",
"=",
"pn",
"uid",
".",
"_uid",
".",
"comment",
"=",
"comment",
"uid",
".",
"_uid",
".",
"email",
"=",
"email",
"uid",
".",
"_uid",
".",
"update_hlen",
"(",
")",
"return",
"uid"
] |
Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
:returns: :py:obj:`PGPUID`
|
[
"Create",
"a",
"new",
"User",
"ID",
"or",
"photo",
"."
] |
python
|
train
| 37.607143 |
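A minimal sketch of `PGPUID.new` from the record above; the name, comment, email and image path are placeholders, and the photo variant simply needs the image bytes passed as a bytearray.

from pgpy import PGPUID

# Textual User ID
uid = PGPUID.new("Example User", comment="demo key", email="user@example.com")

# Photo User ID (path is illustrative)
with open("face.jpg", "rb") as fp:
    photo_uid = PGPUID.new(bytearray(fp.read()))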
google/grr
|
grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py#L279-L293
|
def ExpandPath(path, opts=None):
"""Applies all expansion mechanisms to the given path.
Args:
path: A path to expand.
opts: A `PathOpts` object.
Yields:
All paths possible to obtain from a given path by performing expansions.
"""
precondition.AssertType(path, Text)
for grouped_path in ExpandGroups(path):
for globbed_path in ExpandGlobs(grouped_path, opts):
yield globbed_path
|
[
"def",
"ExpandPath",
"(",
"path",
",",
"opts",
"=",
"None",
")",
":",
"precondition",
".",
"AssertType",
"(",
"path",
",",
"Text",
")",
"for",
"grouped_path",
"in",
"ExpandGroups",
"(",
"path",
")",
":",
"for",
"globbed_path",
"in",
"ExpandGlobs",
"(",
"grouped_path",
",",
"opts",
")",
":",
"yield",
"globbed_path"
] |
Applies all expansion mechanisms to the given path.
Args:
path: A path to expand.
opts: A `PathOpts` object.
Yields:
All paths possible to obtain from a given path by performing expansions.
|
[
"Applies",
"all",
"expansion",
"mechanisms",
"to",
"the",
"given",
"path",
"."
] |
python
|
train
| 26.666667 |
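A short sketch of `ExpandPath` from the record above; group expansion works purely on the string, while glob expansion consults the filesystem, so the path below is a placeholder and the results depend on what exists locally.

from grr_response_client.client_actions.file_finder_utils import globbing

# Expands the {...} group first, then any glob wildcards against the filesystem.
for path in globbing.ExpandPath("/var/log/{syslog,messages}*"):
    print(path)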
StackStorm/pybind
|
pybind/nos/v6_0_2f/rbridge_id/crypto/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/crypto/__init__.py#L127-L148
|
def _set_ca(self, v, load=False):
"""
Setter method for ca, mapped from YANG variable /rbridge_id/crypto/ca (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ca is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ca() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("trustpoint",ca.ca, yang_name="ca", rest_name="ca", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name="ca", rest_name="ca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ca must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("trustpoint",ca.ca, yang_name="ca", rest_name="ca", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name="ca", rest_name="ca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)""",
})
self.__ca = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_ca",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"trustpoint\"",
",",
"ca",
".",
"ca",
",",
"yang_name",
"=",
"\"ca\"",
",",
"rest_name",
"=",
"\"ca\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'trustpoint'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure TrustpointCA'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'crypto_ca_cp'",
",",
"u'cli-full-command'",
":",
"None",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"ca\"",
",",
"rest_name",
"=",
"\"ca\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure TrustpointCA'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'crypto_ca_cp'",
",",
"u'cli-full-command'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-crypto'",
",",
"defining_module",
"=",
"'brocade-crypto'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"ca must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"trustpoint\",ca.ca, yang_name=\"ca\", rest_name=\"ca\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name=\"ca\", rest_name=\"ca\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__ca",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for ca, mapped from YANG variable /rbridge_id/crypto/ca (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ca is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ca() directly.
|
[
"Setter",
"method",
"for",
"ca",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"crypto",
"/",
"ca",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_ca",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_ca",
"()",
"directly",
"."
] |
python
|
train
| 101.681818 |
wikimedia/editquality
|
editquality/codegen/generate.py
|
https://github.com/wikimedia/editquality/blob/73bab7bdd0ef3dba9a000f91f2fd810b1772d1f0/editquality/codegen/generate.py#L4-L33
|
def generate(variables, templates_path, main_template):
"""
:Parameters:
variables : dict
Template parameters, passed through.
templates_path : str
Root directory for transclusions.
main_template : str
Contents of the main template.
Returns the rendered output.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_path),
lstrip_blocks=True,
trim_blocks=True
)
def norm_alg_filename(alg_name):
if alg_name in variables['globals']['algorithm_filename_parts']:
return variables['globals']['algorithm_filename_parts'][alg_name]
else:
raise KeyError("{0} not found in globals.algorithm_filename_parts"
.format(alg_name))
env.globals.update(norm_alg_filename=norm_alg_filename)
template = env.from_string(main_template)
return template.render(variables) + "\n"
|
[
"def",
"generate",
"(",
"variables",
",",
"templates_path",
",",
"main_template",
")",
":",
"env",
"=",
"jinja2",
".",
"Environment",
"(",
"loader",
"=",
"jinja2",
".",
"FileSystemLoader",
"(",
"templates_path",
")",
",",
"lstrip_blocks",
"=",
"True",
",",
"trim_blocks",
"=",
"True",
")",
"def",
"norm_alg_filename",
"(",
"alg_name",
")",
":",
"if",
"alg_name",
"in",
"variables",
"[",
"'globals'",
"]",
"[",
"'algorithm_filename_parts'",
"]",
":",
"return",
"variables",
"[",
"'globals'",
"]",
"[",
"'algorithm_filename_parts'",
"]",
"[",
"alg_name",
"]",
"else",
":",
"raise",
"KeyError",
"(",
"\"{0} not found in globals.algorithm_filename_parts\"",
".",
"format",
"(",
"alg_name",
")",
")",
"env",
".",
"globals",
".",
"update",
"(",
"norm_alg_filename",
"=",
"norm_alg_filename",
")",
"template",
"=",
"env",
".",
"from_string",
"(",
"main_template",
")",
"return",
"template",
".",
"render",
"(",
"variables",
")",
"+",
"\"\\n\""
] |
:Parameters:
variables : dict
Template parameters, passed through.
templates_path : str
Root directory for transclusions.
main_template : str
Contents of the main template.
Returns the rendered output.
|
[
":",
"Parameters",
":",
"variables",
":",
"dict",
"Template",
"parameters",
"passed",
"through",
".",
"templates_path",
":",
"str",
"Root",
"directory",
"for",
"transclusions",
".",
"main_template",
":",
"str",
"Contents",
"of",
"the",
"main",
"template",
"."
] |
python
|
train
| 31.333333 |
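A small sketch of the `generate` helper from the record above; the variables, template string and algorithm name are invented for illustration, and `templates_path` only matters when the main template transcludes other files.

from editquality.codegen.generate import generate  # import path mirrors the record

variables = {
    "globals": {"algorithm_filename_parts": {"GradientBoosting": "gradient_boosting"}},
    "target": "enwiki",
}
main_template = "{{ target }} uses {{ norm_alg_filename('GradientBoosting') }}"

rendered = generate(variables, templates_path=".", main_template=main_template)
print(rendered)  # "enwiki uses gradient_boosting" plus a trailing newline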
IRC-SPHERE/HyperStream
|
hyperstream/tool/base_tool.py
|
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L131-L147
|
def parameters_from_model(parameters_model):
"""
Get the tool parameters model from dictionaries
:param parameters_model: The parameters as a mongoengine model
:return: The tool parameters as a dictionary
"""
parameters = {}
for p in parameters_model:
if p.is_function:
code, defaults, closure = pickle.loads(p.value)
parameters[p.key] = func_load(code, defaults, closure, globs=globals())
elif p.is_set:
parameters[p.key] = set(p.value)
else:
parameters[p.key] = p.value
return parameters
|
[
"def",
"parameters_from_model",
"(",
"parameters_model",
")",
":",
"parameters",
"=",
"{",
"}",
"for",
"p",
"in",
"parameters_model",
":",
"if",
"p",
".",
"is_function",
":",
"code",
",",
"defaults",
",",
"closure",
"=",
"pickle",
".",
"loads",
"(",
"p",
".",
"value",
")",
"parameters",
"[",
"p",
".",
"key",
"]",
"=",
"func_load",
"(",
"code",
",",
"defaults",
",",
"closure",
",",
"globs",
"=",
"globals",
"(",
")",
")",
"elif",
"p",
".",
"is_set",
":",
"parameters",
"[",
"p",
".",
"key",
"]",
"=",
"set",
"(",
"p",
".",
"value",
")",
"else",
":",
"parameters",
"[",
"p",
".",
"key",
"]",
"=",
"p",
".",
"value",
"return",
"parameters"
] |
Get the tool parameters model from dictionaries
:param parameters_model: The parameters as a mongoengine model
:return: The tool parameters as a dictionary
|
[
"Get",
"the",
"tool",
"parameters",
"model",
"from",
"dictionaries"
] |
python
|
train
| 37.529412 |

finklabs/metrics
|
metrics/position.py
|
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L95-L110
|
def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
"""we identified a scope and add it to positions."""
if self._curr is not None:
self._curr['end'] = scope_start - 1 # close last scope
self._curr = {
'type': scope_type, 'name': scope_name,
'start': scope_start, 'end': scope_start
}
if is_method and self._positions:
last = self._positions[-1]
if not 'methods' in last:
last['methods'] = []
last['methods'].append(self._curr)
else:
self._positions.append(self._curr)
|
[
"def",
"add_scope",
"(",
"self",
",",
"scope_type",
",",
"scope_name",
",",
"scope_start",
",",
"is_method",
"=",
"False",
")",
":",
"if",
"self",
".",
"_curr",
"is",
"not",
"None",
":",
"self",
".",
"_curr",
"[",
"'end'",
"]",
"=",
"scope_start",
"-",
"1",
"# close last scope",
"self",
".",
"_curr",
"=",
"{",
"'type'",
":",
"scope_type",
",",
"'name'",
":",
"scope_name",
",",
"'start'",
":",
"scope_start",
",",
"'end'",
":",
"scope_start",
"}",
"if",
"is_method",
"and",
"self",
".",
"_positions",
":",
"last",
"=",
"self",
".",
"_positions",
"[",
"-",
"1",
"]",
"if",
"not",
"'methods'",
"in",
"last",
":",
"last",
"[",
"'methods'",
"]",
"=",
"[",
"]",
"last",
"[",
"'methods'",
"]",
".",
"append",
"(",
"self",
".",
"_curr",
")",
"else",
":",
"self",
".",
"_positions",
".",
"append",
"(",
"self",
".",
"_curr",
")"
] |
we identified a scope and add it to positions.
|
[
"we",
"identified",
"a",
"scope",
"and",
"add",
"it",
"to",
"positions",
"."
] |
python
|
train
| 39.125 |
pgxcentre/geneparse
|
geneparse/core.py
|
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/core.py#L456-L465
|
def iter_variants_by_names(self, names):
"""Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
"""
for name in names:
for result in self.get_variant_by_name(name):
yield result
|
[
"def",
"iter_variants_by_names",
"(",
"self",
",",
"names",
")",
":",
"for",
"name",
"in",
"names",
":",
"for",
"result",
"in",
"self",
".",
"get_variant_by_name",
"(",
"name",
")",
":",
"yield",
"result"
] |
Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
|
[
"Iterates",
"over",
"the",
"genotypes",
"for",
"variants",
"using",
"a",
"list",
"of",
"names",
"."
] |
python
|
train
| 31.6 |
redhat-cip/dci-control-server
|
dci/api/v1/jobs.py
|
https://github.com/redhat-cip/dci-control-server/blob/b416cf935ec93e4fdd5741f61a21cabecf8454d2/dci/api/v1/jobs.py#L521-L551
|
def get_all_results_from_jobs(user, j_id):
"""Get all results from job.
"""
job = v1_utils.verify_existence_and_get(j_id, _TABLE)
if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
raise dci_exc.Unauthorized()
# get testscases from tests_results
query = sql.select([models.TESTS_RESULTS]). \
where(models.TESTS_RESULTS.c.job_id == job['id'])
all_tests_results = flask.g.db_conn.execute(query).fetchall()
results = []
for test_result in all_tests_results:
test_result = dict(test_result)
results.append({'filename': test_result['name'],
'name': test_result['name'],
'total': test_result['total'],
'failures': test_result['failures'],
'errors': test_result['errors'],
'skips': test_result['skips'],
'time': test_result['time'],
'regressions': test_result['regressions'],
'successfixes': test_result['successfixes'],
'success': test_result['success'],
'file_id': test_result['file_id']})
return flask.jsonify({'results': results,
'_meta': {'count': len(results)}})
|
[
"def",
"get_all_results_from_jobs",
"(",
"user",
",",
"j_id",
")",
":",
"job",
"=",
"v1_utils",
".",
"verify_existence_and_get",
"(",
"j_id",
",",
"_TABLE",
")",
"if",
"not",
"user",
".",
"is_in_team",
"(",
"job",
"[",
"'team_id'",
"]",
")",
"and",
"not",
"user",
".",
"is_read_only_user",
"(",
")",
":",
"raise",
"dci_exc",
".",
"Unauthorized",
"(",
")",
"# get testscases from tests_results",
"query",
"=",
"sql",
".",
"select",
"(",
"[",
"models",
".",
"TESTS_RESULTS",
"]",
")",
".",
"where",
"(",
"models",
".",
"TESTS_RESULTS",
".",
"c",
".",
"job_id",
"==",
"job",
"[",
"'id'",
"]",
")",
"all_tests_results",
"=",
"flask",
".",
"g",
".",
"db_conn",
".",
"execute",
"(",
"query",
")",
".",
"fetchall",
"(",
")",
"results",
"=",
"[",
"]",
"for",
"test_result",
"in",
"all_tests_results",
":",
"test_result",
"=",
"dict",
"(",
"test_result",
")",
"results",
".",
"append",
"(",
"{",
"'filename'",
":",
"test_result",
"[",
"'name'",
"]",
",",
"'name'",
":",
"test_result",
"[",
"'name'",
"]",
",",
"'total'",
":",
"test_result",
"[",
"'total'",
"]",
",",
"'failures'",
":",
"test_result",
"[",
"'failures'",
"]",
",",
"'errors'",
":",
"test_result",
"[",
"'errors'",
"]",
",",
"'skips'",
":",
"test_result",
"[",
"'skips'",
"]",
",",
"'time'",
":",
"test_result",
"[",
"'time'",
"]",
",",
"'regressions'",
":",
"test_result",
"[",
"'regressions'",
"]",
",",
"'successfixes'",
":",
"test_result",
"[",
"'successfixes'",
"]",
",",
"'success'",
":",
"test_result",
"[",
"'success'",
"]",
",",
"'file_id'",
":",
"test_result",
"[",
"'file_id'",
"]",
"}",
")",
"return",
"flask",
".",
"jsonify",
"(",
"{",
"'results'",
":",
"results",
",",
"'_meta'",
":",
"{",
"'count'",
":",
"len",
"(",
"results",
")",
"}",
"}",
")"
] |
Get all results from job.
|
[
"Get",
"all",
"results",
"from",
"job",
"."
] |
python
|
train
| 41.806452 |
matthewdeanmartin/jiggle_version
|
sample_projects/ver_in_weird_file/setup_helpers.py
|
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/sample_projects/ver_in_weird_file/setup_helpers.py#L70-L100
|
def get_version(filename, pattern=None):
"""Extract the __version__ from a file without importing it.
While you could get the __version__ by importing the module, the very act
of importing can cause unintended consequences. For example, Distribute's
automatic 2to3 support will break. Instead, this searches the file for a
line that starts with __version__, and extract the version number by
regular expression matching.
By default, two or three dot-separated digits are recognized, but by
passing a pattern parameter, you can recognize just about anything. Use
the `version` group name to specify the match group.
:param filename: The name of the file to search.
:type filename: string
:param pattern: Optional alternative regular expression pattern to use.
:type pattern: string
:return: The version that was extracted.
:rtype: string
"""
if pattern is None:
cre = DEFAULT_VERSION_RE
else:
cre = re.compile(pattern)
with open(filename) as fp:
for line in fp:
if line.startswith('__version__'):
mo = cre.search(line)
assert mo, 'No valid __version__ string found'
return mo.group('version')
raise AssertionError('No __version__ assignment found')
|
[
"def",
"get_version",
"(",
"filename",
",",
"pattern",
"=",
"None",
")",
":",
"if",
"pattern",
"is",
"None",
":",
"cre",
"=",
"DEFAULT_VERSION_RE",
"else",
":",
"cre",
"=",
"re",
".",
"compile",
"(",
"pattern",
")",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"if",
"line",
".",
"startswith",
"(",
"'__version__'",
")",
":",
"mo",
"=",
"cre",
".",
"search",
"(",
"line",
")",
"assert",
"mo",
",",
"'No valid __version__ string found'",
"return",
"mo",
".",
"group",
"(",
"'version'",
")",
"raise",
"AssertionError",
"(",
"'No __version__ assignment found'",
")"
] |
Extract the __version__ from a file without importing it.
While you could get the __version__ by importing the module, the very act
of importing can cause unintended consequences. For example, Distribute's
automatic 2to3 support will break. Instead, this searches the file for a
line that starts with __version__, and extract the version number by
regular expression matching.
By default, two or three dot-separated digits are recognized, but by
passing a pattern parameter, you can recognize just about anything. Use
the `version` group name to specify the match group.
:param filename: The name of the file to search.
:type filename: string
:param pattern: Optional alternative regular expression pattern to use.
:type pattern: string
:return: The version that was extracted.
:rtype: string
|
[
"Extract",
"the",
"__version__",
"from",
"a",
"file",
"without",
"importing",
"it",
"."
] |
python
|
train
| 41.516129 |
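A short sketch of `get_version` from the record above, assuming the helper is in scope (it lives in a project-local setup_helpers module); the package path is a placeholder and a custom pattern must expose a named group called `version`, as the docstring notes.

# Suppose mypkg/__init__.py contains: __version__ = '1.2.3'
version = get_version("mypkg/__init__.py")
print(version)  # 1.2.3

# With an explicit pattern (the named group 'version' is required):
version = get_version(
    "mypkg/__init__.py",
    pattern=r"__version__\s*=\s*'(?P<version>[^']+)'",
)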
wummel/linkchecker
|
linkcheck/log.py
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/log.py#L88-L95
|
def info (logname, msg, *args, **kwargs):
"""Log an informational message.
return: None
"""
log = logging.getLogger(logname)
if log.isEnabledFor(logging.INFO):
_log(log.info, msg, args, **kwargs)
|
[
"def",
"info",
"(",
"logname",
",",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"logname",
")",
"if",
"log",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"_log",
"(",
"log",
".",
"info",
",",
"msg",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Log an informational message.
return: None
|
[
"Log",
"an",
"informational",
"message",
"."
] |
python
|
train
| 27.125 |
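A tiny usage sketch for the `info` wrapper from the record above, assuming the module is importable as linkcheck.log and that logging has been configured somewhere with INFO enabled for the named logger.

import logging
from linkcheck.log import info

logging.basicConfig(level=logging.INFO)

# Only emits if the 'linkcheck' logger has INFO enabled.
info("linkcheck", "starting link check")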
odlgroup/odl
|
odl/phantom/geometric.py
|
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/geometric.py#L21-L91
|
def cuboid(space, min_pt=None, max_pt=None):
"""Rectangular cuboid.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
Returns
-------
phantom : `DiscretizedSpaceElement`
The generated cuboid phantom in ``space``.
Examples
--------
If both ``min_pt`` and ``max_pt`` are omitted, the cuboid lies in the
middle of the space domain and extends halfway towards all sides:
>>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6])
>>> odl.phantom.cuboid(space)
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
By specifying the corners, the cuboid can be arbitrarily placed and
scaled:
>>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5])
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
"""
dom_min_pt = np.asarray(space.domain.min())
dom_max_pt = np.asarray(space.domain.max())
if min_pt is None:
min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25
if max_pt is None:
max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75
min_pt = np.atleast_1d(min_pt)
max_pt = np.atleast_1d(max_pt)
if min_pt.shape != (space.ndim,):
raise ValueError('shape of `min_pt` must be {}, got {}'
''.format((space.ndim,), min_pt.shape))
if max_pt.shape != (space.ndim,):
raise ValueError('shape of `max_pt` must be {}, got {}'
''.format((space.ndim,), max_pt.shape))
def phantom(x):
result = True
for xi, xmin, xmax in zip(x, min_pt, max_pt):
result = (result &
np.less_equal(xmin, xi) & np.less_equal(xi, xmax))
return result
return space.element(phantom)
|
[
"def",
"cuboid",
"(",
"space",
",",
"min_pt",
"=",
"None",
",",
"max_pt",
"=",
"None",
")",
":",
"dom_min_pt",
"=",
"np",
".",
"asarray",
"(",
"space",
".",
"domain",
".",
"min",
"(",
")",
")",
"dom_max_pt",
"=",
"np",
".",
"asarray",
"(",
"space",
".",
"domain",
".",
"max",
"(",
")",
")",
"if",
"min_pt",
"is",
"None",
":",
"min_pt",
"=",
"dom_min_pt",
"*",
"0.75",
"+",
"dom_max_pt",
"*",
"0.25",
"if",
"max_pt",
"is",
"None",
":",
"max_pt",
"=",
"dom_min_pt",
"*",
"0.25",
"+",
"dom_max_pt",
"*",
"0.75",
"min_pt",
"=",
"np",
".",
"atleast_1d",
"(",
"min_pt",
")",
"max_pt",
"=",
"np",
".",
"atleast_1d",
"(",
"max_pt",
")",
"if",
"min_pt",
".",
"shape",
"!=",
"(",
"space",
".",
"ndim",
",",
")",
":",
"raise",
"ValueError",
"(",
"'shape of `min_pt` must be {}, got {}'",
"''",
".",
"format",
"(",
"(",
"space",
".",
"ndim",
",",
")",
",",
"min_pt",
".",
"shape",
")",
")",
"if",
"max_pt",
".",
"shape",
"!=",
"(",
"space",
".",
"ndim",
",",
")",
":",
"raise",
"ValueError",
"(",
"'shape of `max_pt` must be {}, got {}'",
"''",
".",
"format",
"(",
"(",
"space",
".",
"ndim",
",",
")",
",",
"max_pt",
".",
"shape",
")",
")",
"def",
"phantom",
"(",
"x",
")",
":",
"result",
"=",
"True",
"for",
"xi",
",",
"xmin",
",",
"xmax",
"in",
"zip",
"(",
"x",
",",
"min_pt",
",",
"max_pt",
")",
":",
"result",
"=",
"(",
"result",
"&",
"np",
".",
"less_equal",
"(",
"xmin",
",",
"xi",
")",
"&",
"np",
".",
"less_equal",
"(",
"xi",
",",
"xmax",
")",
")",
"return",
"result",
"return",
"space",
".",
"element",
"(",
"phantom",
")"
] |
Rectangular cuboid.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
Returns
-------
phantom : `DiscretizedSpaceElement`
The generated cuboid phantom in ``space``.
Examples
--------
If both ``min_pt`` and ``max_pt`` are omitted, the cuboid lies in the
middle of the space domain and extends halfway towards all sides:
>>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6])
>>> odl.phantom.cuboid(space)
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
By specifying the corners, the cuboid can be arbitrarily placed and
scaled:
>>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5])
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
|
[
"Rectangular",
"cuboid",
"."
] |
python
|
train
| 33.84507 |
PredixDev/predixpy
|
predix/admin/cf/spaces.py
|
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/spaces.py#L46-L52
|
def _get_spaces(self):
"""
Get the marketplace services.
"""
guid = self.api.config.get_organization_guid()
uri = '/v2/organizations/%s/spaces' % (guid)
return self.api.get(uri)
|
[
"def",
"_get_spaces",
"(",
"self",
")",
":",
"guid",
"=",
"self",
".",
"api",
".",
"config",
".",
"get_organization_guid",
"(",
")",
"uri",
"=",
"'/v2/organizations/%s/spaces'",
"%",
"(",
"guid",
")",
"return",
"self",
".",
"api",
".",
"get",
"(",
"uri",
")"
] |
Get the marketplace services.
|
[
"Get",
"the",
"marketplace",
"services",
"."
] |
python
|
train
| 31.285714 |
scot-dev/scot
|
scot/connectivity_statistics.py
|
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/connectivity_statistics.py#L67-L123
|
def jackknife_connectivity(measures, data, var, nfft=512, leaveout=1, n_jobs=1,
verbose=0):
"""Calculate jackknife estimates of connectivity.
For each jackknife estimate a block of trials is left out. This is repeated
until each trial was left out exactly once. The number of estimates depends
on the number of trials and the value of `leaveout`. It is calculated by
repeats = `n_trials` // `leaveout`.
.. note:: Parameter `var` will be modified by the function. Treat as
undefined after the function returns.
Parameters
----------
measures : str or list of str
Name(s) of the connectivity measure(s) to calculate. See
:class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
Time series data (multiple trials).
var : VARBase-like object
Instance of a VAR model.
nfft : int, optional
Number of frequency bins to calculate. Note that these points cover the
range between 0 and half the sampling rate.
leaveout : int, optional
Number of trials to leave out in each estimate.
n_jobs : int | None, optional
Number of jobs to run in parallel. If set to None, joblib is not used
at all. See `joblib.Parallel` for details.
verbose : int, optional
Verbosity level passed to joblib.
Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
Values of the connectivity measure for each surrogate. If
`measure_names` is a list of strings a dictionary is returned, where
each key is the name of the measure, and the corresponding values are
arrays of shape (`repeats`, n_channels, n_channels, nfft).
"""
data = atleast_3d(data)
t, m, n = data.shape
assert(t > 1)
if leaveout < 1:
leaveout = int(leaveout * t)
num_blocks = t // leaveout
mask = lambda block: [i for i in range(t) if i < block*leaveout or
i >= (block + 1) * leaveout]
par, func = parallel_loop(_calc_jackknife, n_jobs=n_jobs, verbose=verbose)
output = par(func(data[mask(b), :, :], var, measures, nfft)
for b in range(num_blocks))
return convert_output_(output, measures)
|
[
"def",
"jackknife_connectivity",
"(",
"measures",
",",
"data",
",",
"var",
",",
"nfft",
"=",
"512",
",",
"leaveout",
"=",
"1",
",",
"n_jobs",
"=",
"1",
",",
"verbose",
"=",
"0",
")",
":",
"data",
"=",
"atleast_3d",
"(",
"data",
")",
"t",
",",
"m",
",",
"n",
"=",
"data",
".",
"shape",
"assert",
"(",
"t",
">",
"1",
")",
"if",
"leaveout",
"<",
"1",
":",
"leaveout",
"=",
"int",
"(",
"leaveout",
"*",
"t",
")",
"num_blocks",
"=",
"t",
"//",
"leaveout",
"mask",
"=",
"lambda",
"block",
":",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"t",
")",
"if",
"i",
"<",
"block",
"*",
"leaveout",
"or",
"i",
">=",
"(",
"block",
"+",
"1",
")",
"*",
"leaveout",
"]",
"par",
",",
"func",
"=",
"parallel_loop",
"(",
"_calc_jackknife",
",",
"n_jobs",
"=",
"n_jobs",
",",
"verbose",
"=",
"verbose",
")",
"output",
"=",
"par",
"(",
"func",
"(",
"data",
"[",
"mask",
"(",
"b",
")",
",",
":",
",",
":",
"]",
",",
"var",
",",
"measures",
",",
"nfft",
")",
"for",
"b",
"in",
"range",
"(",
"num_blocks",
")",
")",
"return",
"convert_output_",
"(",
"output",
",",
"measures",
")"
] |
Calculate jackknife estimates of connectivity.
For each jackknife estimate a block of trials is left out. This is repeated
until each trial was left out exactly once. The number of estimates depends
on the number of trials and the value of `leaveout`. It is calculated by
repeats = `n_trials` // `leaveout`.
.. note:: Parameter `var` will be modified by the function. Treat as
undefined after the function returns.
Parameters
----------
measures : str or list of str
Name(s) of the connectivity measure(s) to calculate. See
:class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
Time series data (multiple trials).
var : VARBase-like object
Instance of a VAR model.
nfft : int, optional
Number of frequency bins to calculate. Note that these points cover the
range between 0 and half the sampling rate.
leaveout : int, optional
Number of trials to leave out in each estimate.
n_jobs : int | None, optional
Number of jobs to run in parallel. If set to None, joblib is not used
at all. See `joblib.Parallel` for details.
verbose : int, optional
Verbosity level passed to joblib.
Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
Values of the connectivity measure for each surrogate. If
`measure_names` is a list of strings a dictionary is returned, where
each key is the name of the measure, and the corresponding values are
arrays of shape (`repeats`, n_channels, n_channels, nfft).
|
[
"Calculate",
"jackknife",
"estimates",
"of",
"connectivity",
"."
] |
python
|
train
| 39.719298 |
CxAalto/gtfspy
|
gtfspy/gtfs.py
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/gtfs.py#L147-L164
|
def get_directly_accessible_stops_within_distance(self, stop, distance):
"""
Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return:
"""
query = """SELECT stop.* FROM
(SELECT st2.* FROM
(SELECT * FROM stop_distances
WHERE from_stop_I = %s) sd,
(SELECT * FROM stop_times) st1,
(SELECT * FROM stop_times) st2
WHERE sd.d < %s AND sd.to_stop_I = st1.stop_I AND st1.trip_I = st2.trip_I
GROUP BY st2.stop_I) sq,
(SELECT * FROM stops) stop
WHERE sq.stop_I = stop.stop_I""" % (stop, distance)
return pd.read_sql_query(query, self.conn)
|
[
"def",
"get_directly_accessible_stops_within_distance",
"(",
"self",
",",
"stop",
",",
"distance",
")",
":",
"query",
"=",
"\"\"\"SELECT stop.* FROM\n (SELECT st2.* FROM \n (SELECT * FROM stop_distances\n WHERE from_stop_I = %s) sd,\n (SELECT * FROM stop_times) st1,\n (SELECT * FROM stop_times) st2\n WHERE sd.d < %s AND sd.to_stop_I = st1.stop_I AND st1.trip_I = st2.trip_I \n GROUP BY st2.stop_I) sq,\n (SELECT * FROM stops) stop\n WHERE sq.stop_I = stop.stop_I\"\"\"",
"%",
"(",
"stop",
",",
"distance",
")",
"return",
"pd",
".",
"read_sql_query",
"(",
"query",
",",
"self",
".",
"conn",
")"
] |
Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return:
|
[
"Returns",
"stops",
"that",
"are",
"accessible",
"without",
"transfer",
"from",
"the",
"stops",
"that",
"are",
"within",
"a",
"specific",
"walking",
"distance",
":",
"param",
"stop",
":",
"int",
":",
"param",
"distance",
":",
"int",
":",
"return",
":"
] |
python
|
valid
| 47.611111 |
esheldon/fitsio
|
fitsio/hdu/table.py
|
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L933-L961
|
def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray
|
[
"def",
"get_rec_dtype",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"colnums",
"=",
"keys",
".",
"get",
"(",
"'colnums'",
",",
"None",
")",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"if",
"colnums",
"is",
"None",
":",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"descr",
"=",
"[",
"]",
"isvararray",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"colnums",
")",
",",
"dtype",
"=",
"numpy",
".",
"bool",
")",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"dt",
",",
"isvar",
"=",
"self",
".",
"get_rec_column_descr",
"(",
"colnum",
",",
"vstorage",
")",
"descr",
".",
"append",
"(",
"dt",
")",
"isvararray",
"[",
"i",
"]",
"=",
"isvar",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"descr",
")",
"offsets",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"colnums",
")",
",",
"dtype",
"=",
"'i8'",
")",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"dtype",
".",
"names",
")",
":",
"offsets",
"[",
"i",
"]",
"=",
"dtype",
".",
"fields",
"[",
"n",
"]",
"[",
"1",
"]",
"return",
"dtype",
",",
"offsets",
",",
"isvararray"
] |
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
|
[
"Get",
"the",
"dtype",
"for",
"the",
"specified",
"columns"
] |
python
|
train
| 31.62069 |
zimeon/iiif
|
iiif_cgi.py
|
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif_cgi.py#L115-L162
|
def do_GET_body(self):
"""Create body of GET."""
iiif = self.iiif
if (len(self.path) > 1024):
raise IIIFError(code=414,
text="URI Too Long: Max 1024 chars, got %d\n" % len(self.path))
try:
# self.path has leading / then identifier/params...
self.path = self.path.lstrip('/')
sys.stderr.write("path = %s" % (self.path))
iiif.parse_url(self.path)
except Exception as e:
# Something completely unexpected => 500
raise IIIFError(code=500,
text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")")
# Now we have a full iiif request
if (re.match('[\w\.\-]+$', iiif.identifier)):
file = os.path.join(TESTIMAGE_DIR, iiif.identifier)
if (not os.path.isfile(file)):
images_available = ""
for image_file in os.listdir(TESTIMAGE_DIR):
if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))):
images_available += " " + image_file + "\n"
raise IIIFError(code=404, parameter="identifier",
text="Image resource '" + iiif.identifier + "' not found. Local image files available:\n" + images_available)
else:
raise IIIFError(code=404, parameter="identifier",
text="Image resource '" + iiif.identifier + "' not found. Only local test images and http: URIs for images are supported.\n")
# Now know image is OK
manipulator = IIIFRequestHandler.manipulator_class()
# Stash manipulator object so we can cleanup after reading file
self.manipulator = manipulator
self.compliance_uri = manipulator.compliance_uri
if (iiif.info):
# get size
manipulator.srcfile = file
manipulator.do_first()
# most of info.json comes from config, a few things
# specific to image
i = IIIFInfo()
i.identifier = self.iiif.identifier
i.width = manipulator.width
i.height = manipulator.height
import io
return(io.StringIO(i.as_json()), "application/json")
else:
(outfile, mime_type) = manipulator.derive(file, iiif)
return(open(outfile, 'r'), mime_type)
|
[
"def",
"do_GET_body",
"(",
"self",
")",
":",
"iiif",
"=",
"self",
".",
"iiif",
"if",
"(",
"len",
"(",
"self",
".",
"path",
")",
">",
"1024",
")",
":",
"raise",
"IIIFError",
"(",
"code",
"=",
"414",
",",
"text",
"=",
"\"URI Too Long: Max 1024 chars, got %d\\n\"",
"%",
"len",
"(",
"self",
".",
"path",
")",
")",
"try",
":",
"# self.path has leading / then identifier/params...",
"self",
".",
"path",
"=",
"self",
".",
"path",
".",
"lstrip",
"(",
"'/'",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"path = %s\"",
"%",
"(",
"self",
".",
"path",
")",
")",
"iiif",
".",
"parse_url",
"(",
"self",
".",
"path",
")",
"except",
"Exception",
"as",
"e",
":",
"# Something completely unexpected => 500",
"raise",
"IIIFError",
"(",
"code",
"=",
"500",
",",
"text",
"=",
"\"Internal Server Error: unexpected exception parsing request (\"",
"+",
"str",
"(",
"e",
")",
"+",
"\")\"",
")",
"# Now we have a full iiif request",
"if",
"(",
"re",
".",
"match",
"(",
"'[\\w\\.\\-]+$'",
",",
"iiif",
".",
"identifier",
")",
")",
":",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"TESTIMAGE_DIR",
",",
"iiif",
".",
"identifier",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file",
")",
")",
":",
"images_available",
"=",
"\"\"",
"for",
"image_file",
"in",
"os",
".",
"listdir",
"(",
"TESTIMAGE_DIR",
")",
":",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"TESTIMAGE_DIR",
",",
"image_file",
")",
")",
")",
":",
"images_available",
"+=",
"\" \"",
"+",
"image_file",
"+",
"\"\\n\"",
"raise",
"IIIFError",
"(",
"code",
"=",
"404",
",",
"parameter",
"=",
"\"identifier\"",
",",
"text",
"=",
"\"Image resource '\"",
"+",
"iiif",
".",
"identifier",
"+",
"\"' not found. Local image files available:\\n\"",
"+",
"images_available",
")",
"else",
":",
"raise",
"IIIFError",
"(",
"code",
"=",
"404",
",",
"parameter",
"=",
"\"identifier\"",
",",
"text",
"=",
"\"Image resource '\"",
"+",
"iiif",
".",
"identifier",
"+",
"\"' not found. Only local test images and http: URIs for images are supported.\\n\"",
")",
"# Now know image is OK",
"manipulator",
"=",
"IIIFRequestHandler",
".",
"manipulator_class",
"(",
")",
"# Stash manipulator object so we can cleanup after reading file",
"self",
".",
"manipulator",
"=",
"manipulator",
"self",
".",
"compliance_uri",
"=",
"manipulator",
".",
"compliance_uri",
"if",
"(",
"iiif",
".",
"info",
")",
":",
"# get size",
"manipulator",
".",
"srcfile",
"=",
"file",
"manipulator",
".",
"do_first",
"(",
")",
"# most of info.json comes from config, a few things",
"# specific to image",
"i",
"=",
"IIIFInfo",
"(",
")",
"i",
".",
"identifier",
"=",
"self",
".",
"iiif",
".",
"identifier",
"i",
".",
"width",
"=",
"manipulator",
".",
"width",
"i",
".",
"height",
"=",
"manipulator",
".",
"height",
"import",
"io",
"return",
"(",
"io",
".",
"StringIO",
"(",
"i",
".",
"as_json",
"(",
")",
")",
",",
"\"application/json\"",
")",
"else",
":",
"(",
"outfile",
",",
"mime_type",
")",
"=",
"manipulator",
".",
"derive",
"(",
"file",
",",
"iiif",
")",
"return",
"(",
"open",
"(",
"outfile",
",",
"'r'",
")",
",",
"mime_type",
")"
] |
Create body of GET.
|
[
"Create",
"body",
"of",
"GET",
"."
] |
python
|
train
| 49.875 |
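The handler record above guards the request in three steps: cap the URI length, restrict the identifier to a plain filename, and only then touch the filesystem. A hedged standalone sketch of just that validation path (the directory name and messages are placeholders, not the project's actual configuration):

import os
import re

TESTIMAGE_DIR = "testimages"  # placeholder for the handler's image directory

def resolve_identifier(path):
    """Return a local file for the IIIF identifier in `path`, or raise."""
    if len(path) > 1024:
        raise ValueError("URI too long: max 1024 chars, got %d" % len(path))
    # Leading slash stripped; the identifier is the first path segment.
    identifier = path.lstrip('/').split('/', 1)[0]
    # Only bare filenames are accepted, so '../../etc/passwd' is rejected.
    if not re.match(r'[\w.\-]+$', identifier):
        raise ValueError("only local test images are supported")
    file = os.path.join(TESTIMAGE_DIR, identifier)
    if not os.path.isfile(file):
        raise FileNotFoundError("image resource %r not found" % identifier)
    return file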
lambdalisue/e4u
|
e4u/__init__.py
|
https://github.com/lambdalisue/e4u/blob/108635c5ba37e7ae33001adbf07a95878f31fd50/e4u/__init__.py#L40-L63
|
def translate_char(source_char, carrier, reverse=False, encoding=False):
u"""translate unicode emoji character to unicode carrier emoji character (or reverse)
Attributes:
source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode
carrier - the target carrier
reverse - if you want to translate CARRIER => UNICODE, turn it True
encoding - encoding name for decode (Default is None)
"""
if not isinstance(source_char, unicode) and encoding:
source_char = source_char.decode(encoding, 'replace')
elif not isinstance(source_char, unicode):
raise AttributeError(u"`source_char` must be decoded to `unicode` or set `encoding` attribute to decode `source_char`")
if len(source_char) > 1:
raise AttributeError(u"`source_char` must be a letter. use `translate` method insted.")
translate_dictionary = _loader.translate_dictionaries[carrier]
if not reverse:
translate_dictionary = translate_dictionary[0]
else:
translate_dictionary = translate_dictionary[1]
if not translate_dictionary:
return source_char
return translate_dictionary.get(source_char, source_char)
|
[
"def",
"translate_char",
"(",
"source_char",
",",
"carrier",
",",
"reverse",
"=",
"False",
",",
"encoding",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"source_char",
",",
"unicode",
")",
"and",
"encoding",
":",
"source_char",
"=",
"source_char",
".",
"decode",
"(",
"encoding",
",",
"'replace'",
")",
"elif",
"not",
"isinstance",
"(",
"source_char",
",",
"unicode",
")",
":",
"raise",
"AttributeError",
"(",
"u\"`source_char` must be decoded to `unicode` or set `encoding` attribute to decode `source_char`\"",
")",
"if",
"len",
"(",
"source_char",
")",
">",
"1",
":",
"raise",
"AttributeError",
"(",
"u\"`source_char` must be a letter. use `translate` method insted.\"",
")",
"translate_dictionary",
"=",
"_loader",
".",
"translate_dictionaries",
"[",
"carrier",
"]",
"if",
"not",
"reverse",
":",
"translate_dictionary",
"=",
"translate_dictionary",
"[",
"0",
"]",
"else",
":",
"translate_dictionary",
"=",
"translate_dictionary",
"[",
"1",
"]",
"if",
"not",
"translate_dictionary",
":",
"return",
"source_char",
"return",
"translate_dictionary",
".",
"get",
"(",
"source_char",
",",
"source_char",
")"
] |
u"""translate unicode emoji character to unicode carrier emoji character (or reverse)
Attributes:
source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode
carrier - the target carrier
reverse - if you want to translate CARRIER => UNICODE, turn it True
encoding - encoding name for decode (Default is None)
|
[
"u",
"translate",
"unicode",
"emoji",
"character",
"to",
"unicode",
"carrier",
"emoji",
"character",
"(",
"or",
"reverse",
")",
"Attributes",
":",
"source_char",
"-",
"emoji",
"character",
".",
"it",
"must",
"be",
"unicode",
"instance",
"or",
"have",
"to",
"set",
"encoding",
"attribute",
"to",
"decode",
"carrier",
"-",
"the",
"target",
"carrier",
"reverse",
"-",
"if",
"you",
"want",
"to",
"translate",
"CARRIER",
"=",
">",
"UNICODE",
"turn",
"it",
"True",
"encoding",
"-",
"encoding",
"name",
"for",
"decode",
"(",
"Default",
"is",
"None",
")"
] |
python
|
train
| 51.208333 |
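The translation in the record above is a straight dictionary lookup keyed by a single character, with a (forward, reverse) pair of tables per carrier. A self-contained sketch with an invented one-entry mapping (the real tables are built by e4u's loader, and the carrier code point below is hypothetical):

SUN = u'\u2600'          # BLACK SUN WITH RAYS
CARRIER_SUN = u'\uE63E'  # hypothetical carrier-private code point
docomo_tables = ({SUN: CARRIER_SUN}, {CARRIER_SUN: SUN})

def translate_char_sketch(ch, tables, reverse=False):
    if len(ch) > 1:
        raise ValueError("expected a single character")
    table = tables[1] if reverse else tables[0]
    # Characters without a mapping pass through unchanged.
    return table.get(ch, ch)

assert translate_char_sketch(SUN, docomo_tables) == CARRIER_SUN
assert translate_char_sketch(CARRIER_SUN, docomo_tables, reverse=True) == SUN
assert translate_char_sketch(u'A', docomo_tables) == u'A'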
PMEAL/porespy
|
porespy/networks/__snow__.py
|
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/networks/__snow__.py#L11-L110
|
def snow(im, voxel_size=1,
boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
marching_cubes_area=False):
r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is alway 1 unit lenth per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing the void phase size data, as well as the network
topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm.
"""
# -------------------------------------------------------------------------
# SNOW void phase
regions = snow_partitioning(im=im, return_all=True)
im = regions.im
dt = regions.dt
regions = regions.regions
b_num = sp.amax(regions)
# -------------------------------------------------------------------------
# Boundary Conditions
regions = add_boundary_regions(regions=regions, faces=boundary_faces)
# -------------------------------------------------------------------------
# Padding distance transform and image to extract geometrical properties
dt = pad_faces(im=dt, faces=boundary_faces)
im = pad_faces(im=im, faces=boundary_faces)
regions = regions*im
regions = make_contiguous(regions)
# -------------------------------------------------------------------------
# Extract void and throat information from image
net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
# -------------------------------------------------------------------------
# Extract marching cube surface area and interfacial area of regions
if marching_cubes_area:
areas = region_surface_areas(regions=regions)
interface_area = region_interface_areas(regions=regions, areas=areas,
voxel_size=voxel_size)
net['pore.surface_area'] = areas * voxel_size**2
net['throat.area'] = interface_area.area
# -------------------------------------------------------------------------
# Find void to void connections of boundary and internal voids
boundary_labels = net['pore.label'] > b_num
loc1 = net['throat.conns'][:, 0] < b_num
loc2 = net['throat.conns'][:, 1] >= b_num
pore_labels = net['pore.label'] <= b_num
loc3 = net['throat.conns'][:, 0] < b_num
loc4 = net['throat.conns'][:, 1] < b_num
net['pore.boundary'] = boundary_labels
net['throat.boundary'] = loc1 * loc2
net['pore.internal'] = pore_labels
net['throat.internal'] = loc3 * loc4
# -------------------------------------------------------------------------
# label boundary cells
net = label_boundary_cells(network=net, boundary_faces=boundary_faces)
# -------------------------------------------------------------------------
# assign out values to dummy dict
temp = _net_dict(net)
temp.im = im.copy()
temp.dt = dt
temp.regions = regions
return temp
|
[
"def",
"snow",
"(",
"im",
",",
"voxel_size",
"=",
"1",
",",
"boundary_faces",
"=",
"[",
"'top'",
",",
"'bottom'",
",",
"'left'",
",",
"'right'",
",",
"'front'",
",",
"'back'",
"]",
",",
"marching_cubes_area",
"=",
"False",
")",
":",
"# -------------------------------------------------------------------------",
"# SNOW void phase",
"regions",
"=",
"snow_partitioning",
"(",
"im",
"=",
"im",
",",
"return_all",
"=",
"True",
")",
"im",
"=",
"regions",
".",
"im",
"dt",
"=",
"regions",
".",
"dt",
"regions",
"=",
"regions",
".",
"regions",
"b_num",
"=",
"sp",
".",
"amax",
"(",
"regions",
")",
"# -------------------------------------------------------------------------",
"# Boundary Conditions",
"regions",
"=",
"add_boundary_regions",
"(",
"regions",
"=",
"regions",
",",
"faces",
"=",
"boundary_faces",
")",
"# -------------------------------------------------------------------------",
"# Padding distance transform and image to extract geometrical properties",
"dt",
"=",
"pad_faces",
"(",
"im",
"=",
"dt",
",",
"faces",
"=",
"boundary_faces",
")",
"im",
"=",
"pad_faces",
"(",
"im",
"=",
"im",
",",
"faces",
"=",
"boundary_faces",
")",
"regions",
"=",
"regions",
"*",
"im",
"regions",
"=",
"make_contiguous",
"(",
"regions",
")",
"# -------------------------------------------------------------------------",
"# Extract void and throat information from image",
"net",
"=",
"regions_to_network",
"(",
"im",
"=",
"regions",
",",
"dt",
"=",
"dt",
",",
"voxel_size",
"=",
"voxel_size",
")",
"# -------------------------------------------------------------------------",
"# Extract marching cube surface area and interfacial area of regions",
"if",
"marching_cubes_area",
":",
"areas",
"=",
"region_surface_areas",
"(",
"regions",
"=",
"regions",
")",
"interface_area",
"=",
"region_interface_areas",
"(",
"regions",
"=",
"regions",
",",
"areas",
"=",
"areas",
",",
"voxel_size",
"=",
"voxel_size",
")",
"net",
"[",
"'pore.surface_area'",
"]",
"=",
"areas",
"*",
"voxel_size",
"**",
"2",
"net",
"[",
"'throat.area'",
"]",
"=",
"interface_area",
".",
"area",
"# -------------------------------------------------------------------------",
"# Find void to void connections of boundary and internal voids",
"boundary_labels",
"=",
"net",
"[",
"'pore.label'",
"]",
">",
"b_num",
"loc1",
"=",
"net",
"[",
"'throat.conns'",
"]",
"[",
":",
",",
"0",
"]",
"<",
"b_num",
"loc2",
"=",
"net",
"[",
"'throat.conns'",
"]",
"[",
":",
",",
"1",
"]",
">=",
"b_num",
"pore_labels",
"=",
"net",
"[",
"'pore.label'",
"]",
"<=",
"b_num",
"loc3",
"=",
"net",
"[",
"'throat.conns'",
"]",
"[",
":",
",",
"0",
"]",
"<",
"b_num",
"loc4",
"=",
"net",
"[",
"'throat.conns'",
"]",
"[",
":",
",",
"1",
"]",
"<",
"b_num",
"net",
"[",
"'pore.boundary'",
"]",
"=",
"boundary_labels",
"net",
"[",
"'throat.boundary'",
"]",
"=",
"loc1",
"*",
"loc2",
"net",
"[",
"'pore.internal'",
"]",
"=",
"pore_labels",
"net",
"[",
"'throat.internal'",
"]",
"=",
"loc3",
"*",
"loc4",
"# -------------------------------------------------------------------------",
"# label boundary cells",
"net",
"=",
"label_boundary_cells",
"(",
"network",
"=",
"net",
",",
"boundary_faces",
"=",
"boundary_faces",
")",
"# -------------------------------------------------------------------------",
"# assign out values to dummy dict",
"temp",
"=",
"_net_dict",
"(",
"net",
")",
"temp",
".",
"im",
"=",
"im",
".",
"copy",
"(",
")",
"temp",
".",
"dt",
"=",
"dt",
"temp",
".",
"regions",
"=",
"regions",
"return",
"temp"
] |
r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is alway 1 unit lenth per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing the void phase size data, as well as the network
topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm.
|
[
"r",
"Analyzes",
"an",
"image",
"that",
"has",
"been",
"partitioned",
"into",
"void",
"and",
"solid",
"regions",
"and",
"extracts",
"the",
"void",
"and",
"solid",
"phase",
"geometry",
"as",
"well",
"as",
"network",
"connectivity",
"."
] |
python
|
train
| 49.88 |
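A hedged usage sketch for the extraction routine above, assuming the porespy 1.x API shown in the record and its synthetic-image generator; the keyword values are illustrative, and only the attributes the function explicitly sets (`im`, `dt`, `regions`) are read back:

import porespy as ps

# Synthetic 3D binary image: True = void, False = solid (porosity ~0.6).
im = ps.generators.blobs(shape=[100, 100, 100], porosity=0.6)

# Illustrative call; voxel_size and boundary_faces are assumptions, not
# values taken from the original project.
out = ps.networks.snow(im,
                       voxel_size=1e-6,
                       boundary_faces=['top', 'bottom'],
                       marching_cubes_area=False)

print(out.regions.max())   # number of labelled pore/boundary regions
print(out.im.shape)        # padded copy of the input image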
pycontribs/pyrax
|
pyrax/autoscale.py
|
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L134-L143
|
def add_policy(self, name, policy_type, cooldown, change=None,
is_percent=False, desired_capacity=None, args=None):
"""
Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
"""
return self.manager.add_policy(self, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
|
[
"def",
"add_policy",
"(",
"self",
",",
"name",
",",
"policy_type",
",",
"cooldown",
",",
"change",
"=",
"None",
",",
"is_percent",
"=",
"False",
",",
"desired_capacity",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"return",
"self",
".",
"manager",
".",
"add_policy",
"(",
"self",
",",
"name",
",",
"policy_type",
",",
"cooldown",
",",
"change",
"=",
"change",
",",
"is_percent",
"=",
"is_percent",
",",
"desired_capacity",
"=",
"desired_capacity",
",",
"args",
"=",
"args",
")"
] |
Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
|
[
"Adds",
"a",
"policy",
"with",
"the",
"given",
"values",
"to",
"this",
"scaling",
"group",
".",
"The",
"change",
"parameter",
"is",
"treated",
"as",
"an",
"absolute",
"amount",
"unless",
"is_percent",
"is",
"True",
"in",
"which",
"case",
"it",
"is",
"treated",
"as",
"a",
"percentage",
"."
] |
python
|
train
| 54.6 |
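A hedged call sketch for the method above; `sg` stands for an already-retrieved ScalingGroup, and the policy name and type are placeholders rather than values from the project:

# `sg` is assumed to be an existing ScalingGroup obtained elsewhere
# (e.g. from pyrax's autoscale client); it is not constructed here.
policy = sg.add_policy(
    name="scale-up-on-load",   # placeholder policy name
    policy_type="webhook",     # assumed policy type accepted by the API
    cooldown=300,              # seconds to wait between executions
    change=10,
    is_percent=True,           # `change` is read as +10% of current capacity
)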
rckclmbr/pyportify
|
pyportify/pkcs1/primitives.py
|
https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L67-L79
|
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x
|
[
"def",
"i2osp",
"(",
"x",
",",
"x_len",
")",
":",
"if",
"x",
">",
"256",
"**",
"x_len",
":",
"raise",
"exceptions",
".",
"IntegerTooLarge",
"h",
"=",
"hex",
"(",
"x",
")",
"[",
"2",
":",
"]",
"if",
"h",
"[",
"-",
"1",
"]",
"==",
"'L'",
":",
"h",
"=",
"h",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"h",
")",
"&",
"1",
"==",
"1",
":",
"h",
"=",
"'0%s'",
"%",
"h",
"x",
"=",
"binascii",
".",
"unhexlify",
"(",
"h",
")",
"return",
"b'\\x00'",
"*",
"int",
"(",
"x_len",
"-",
"len",
"(",
"x",
")",
")",
"+",
"x"
] |
Converts the integer x to its big-endian representation of length
x_len.
|
[
"Converts",
"the",
"integer",
"x",
"to",
"its",
"big",
"-",
"endian",
"representation",
"of",
"length",
"x_len",
"."
] |
python
|
train
| 26.538462 |
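The primitive above is PKCS#1's I2OSP: encode a non-negative integer as a big-endian octet string of fixed length, left-padded with zero bytes. On Python 3 the expected output can be cross-checked against the built-in `int.to_bytes`:

# Cross-check of the expected output using the built-in equivalent.
assert (65537).to_bytes(3, 'big') == b'\x01\x00\x01'
assert (65537).to_bytes(5, 'big') == b'\x00\x00\x01\x00\x01'

# The hand-rolled hex/unhexlify round trip in the record also handled
# Python 2 long integers (hence the trailing 'L' strip).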