repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 distinct value) | partition (string, 3 distinct values) |
---|---|---|---|---|---|---|---|---|
rocky/python3-trepan | trepan/processor/cmdfns.py | https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/cmdfns.py#L103-L118 | def get_int(errmsg, arg, default=1, cmdname=None):
"""If arg is an int, use that otherwise take default."""
if arg:
try:
# eval() is used so we will allow arithmetic expressions,
# variables etc.
default = int(eval(arg))
except (SyntaxError, NameError, ValueError):
if cmdname:
errmsg("Command '%s' expects an integer; got: %s." %
(cmdname, str(arg)))
else:
errmsg('Expecting an integer, got: %s.' % str(arg))
pass
raise ValueError
return default | [
"def",
"get_int",
"(",
"errmsg",
",",
"arg",
",",
"default",
"=",
"1",
",",
"cmdname",
"=",
"None",
")",
":",
"if",
"arg",
":",
"try",
":",
"# eval() is used so we will allow arithmetic expressions,",
"# variables etc.",
"default",
"=",
"int",
"(",
"eval",
"(",
"arg",
")",
")",
"except",
"(",
"SyntaxError",
",",
"NameError",
",",
"ValueError",
")",
":",
"if",
"cmdname",
":",
"errmsg",
"(",
"\"Command '%s' expects an integer; got: %s.\"",
"%",
"(",
"cmdname",
",",
"str",
"(",
"arg",
")",
")",
")",
"else",
":",
"errmsg",
"(",
"'Expecting an integer, got: %s.'",
"%",
"str",
"(",
"arg",
")",
")",
"pass",
"raise",
"ValueError",
"return",
"default"
] | If arg is an int, use that otherwise take default. | [
"If",
"arg",
"is",
"an",
"int",
"use",
"that",
"otherwise",
"take",
"default",
"."
] | python | test |
RudolfCardinal/pythonlib | cardinal_pythonlib/buildfunc.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/buildfunc.py#L56-L70 | def download_if_not_exists(url: str, filename: str,
skip_cert_verify: bool = True,
mkdir: bool = True) -> None:
"""
Downloads a URL to a file, unless the file already exists.
"""
if os.path.isfile(filename):
log.info("No need to download, already have: {}", filename)
return
if mkdir:
directory, basename = os.path.split(os.path.abspath(filename))
mkdir_p(directory)
download(url=url,
filename=filename,
skip_cert_verify=skip_cert_verify) | [
"def",
"download_if_not_exists",
"(",
"url",
":",
"str",
",",
"filename",
":",
"str",
",",
"skip_cert_verify",
":",
"bool",
"=",
"True",
",",
"mkdir",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"log",
".",
"info",
"(",
"\"No need to download, already have: {}\"",
",",
"filename",
")",
"return",
"if",
"mkdir",
":",
"directory",
",",
"basename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
")",
"mkdir_p",
"(",
"directory",
")",
"download",
"(",
"url",
"=",
"url",
",",
"filename",
"=",
"filename",
",",
"skip_cert_verify",
"=",
"skip_cert_verify",
")"
] | Downloads a URL to a file, unless the file already exists. | [
"Downloads",
"a",
"URL",
"to",
"a",
"file",
"unless",
"the",
"file",
"already",
"exists",
"."
] | python | train |
mozilla-b2g/fxos-certsuite | mcts/tools/webidl/manifest_parser.py | https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/tools/webidl/manifest_parser.py#L15-L36 | def main(argv):
"""
This will generate you a manifest file, and you need to modify it!
There are three category: files, untested, skipped.
You can reference current manifest.json.
usage: manifest_parser.py (GECKO LOCATION: /B2G/gecko/dom/webidl)
The generated file can then be used with process_idl.py
"""
argparser = argparse.ArgumentParser()
argparser.add_argument("gecko", help="/B2G/gecko/dom/webidl")
args = argparser.parse_args(argv[1:])
files = [ "gecko/dom/webidl/" + f for f in listdir(args.gecko) if isfile(join(args.gecko,f)) and f.endswith("webidl") ]
files.sort()
with open('manifest_generated.json', 'w') as merged:
merged.write('{\n "files": [\n')
merged.write(" \"" + "\",\n \"".join(files) + "\"\n")
merged.write(' ],\n "untested": [\n ],\n "skipped": [\n ]\n}\n') | [
"def",
"main",
"(",
"argv",
")",
":",
"argparser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"argparser",
".",
"add_argument",
"(",
"\"gecko\"",
",",
"help",
"=",
"\"/B2G/gecko/dom/webidl\"",
")",
"args",
"=",
"argparser",
".",
"parse_args",
"(",
"argv",
"[",
"1",
":",
"]",
")",
"files",
"=",
"[",
"\"gecko/dom/webidl/\"",
"+",
"f",
"for",
"f",
"in",
"listdir",
"(",
"args",
".",
"gecko",
")",
"if",
"isfile",
"(",
"join",
"(",
"args",
".",
"gecko",
",",
"f",
")",
")",
"and",
"f",
".",
"endswith",
"(",
"\"webidl\"",
")",
"]",
"files",
".",
"sort",
"(",
")",
"with",
"open",
"(",
"'manifest_generated.json'",
",",
"'w'",
")",
"as",
"merged",
":",
"merged",
".",
"write",
"(",
"'{\\n \"files\": [\\n'",
")",
"merged",
".",
"write",
"(",
"\" \\\"\"",
"+",
"\"\\\",\\n \\\"\"",
".",
"join",
"(",
"files",
")",
"+",
"\"\\\"\\n\"",
")",
"merged",
".",
"write",
"(",
"' ],\\n \"untested\": [\\n ],\\n \"skipped\": [\\n ]\\n}\\n'",
")"
] | This will generate you a manifest file, and you need to modify it!
There are three category: files, untested, skipped.
You can reference current manifest.json.
usage: manifest_parser.py (GECKO LOCATION: /B2G/gecko/dom/webidl)
The generated file can then be used with process_idl.py | [
"This",
"will",
"generate",
"you",
"a",
"manifest",
"file",
"and",
"you",
"need",
"to",
"modify",
"it!",
"There",
"are",
"three",
"category",
":",
"files",
"untested",
"skipped",
".",
"You",
"can",
"reference",
"current",
"manifest",
".",
"json",
"."
] | python | train |
matrix-org/matrix-python-sdk | matrix_client/api.py | https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/api.py#L376-L388 | def send_message(self, room_id, text_content, msgtype="m.text", timestamp=None):
"""Perform PUT /rooms/$room_id/send/m.room.message
Args:
room_id (str): The room ID to send the event in.
text_content (str): The m.text body to send.
timestamp (int): Set origin_server_ts (For application services only)
"""
return self.send_message_event(
room_id, "m.room.message",
self.get_text_body(text_content, msgtype),
timestamp=timestamp
) | [
"def",
"send_message",
"(",
"self",
",",
"room_id",
",",
"text_content",
",",
"msgtype",
"=",
"\"m.text\"",
",",
"timestamp",
"=",
"None",
")",
":",
"return",
"self",
".",
"send_message_event",
"(",
"room_id",
",",
"\"m.room.message\"",
",",
"self",
".",
"get_text_body",
"(",
"text_content",
",",
"msgtype",
")",
",",
"timestamp",
"=",
"timestamp",
")"
] | Perform PUT /rooms/$room_id/send/m.room.message
Args:
room_id (str): The room ID to send the event in.
text_content (str): The m.text body to send.
timestamp (int): Set origin_server_ts (For application services only) | [
"Perform",
"PUT",
"/",
"rooms",
"/",
"$room_id",
"/",
"send",
"/",
"m",
".",
"room",
".",
"message"
] | python | train |
mabuchilab/QNET | src/qnet/algebra/core/algebraic_properties.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/algebraic_properties.py#L856-L944 | def _deltasummation(term, ranges, i_range):
"""Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `index_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also covers if there is no Kronecker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a factor, but in a
sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, `:func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3
"""
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
idx = ranges[i_range].index_symbol
summands = _expand_delta(term, idx)
if len(summands) > 1:
return [(summand, ranges) for summand in summands], 3
else:
delta, expr = _extract_delta(summands[0], idx)
if not delta:
return [(term, ranges)], 2
solns = sympy.solve(delta.args[0] - delta.args[1], idx)
assert len(solns) > 0 # I can't think of an example that might cause this
# if len(solns) == 0:
# return [(term._zero, [])], 4
if len(solns) != 1:
return [(term, ranges)], 2
value = solns[0]
new_term = expr.substitute({idx: value})
if _RESOLVE_KRONECKER_WITH_PIECEWISE:
new_term *= ranges[i_range].piecewise_one(value)
assert isinstance(new_term, QuantumExpression)
return [(new_term, ranges[:i_range] + ranges[i_range+1:])], 1 | [
"def",
"_deltasummation",
"(",
"term",
",",
"ranges",
",",
"i_range",
")",
":",
"from",
"qnet",
".",
"algebra",
".",
"core",
".",
"abstract_quantum_algebra",
"import",
"QuantumExpression",
"idx",
"=",
"ranges",
"[",
"i_range",
"]",
".",
"index_symbol",
"summands",
"=",
"_expand_delta",
"(",
"term",
",",
"idx",
")",
"if",
"len",
"(",
"summands",
")",
">",
"1",
":",
"return",
"[",
"(",
"summand",
",",
"ranges",
")",
"for",
"summand",
"in",
"summands",
"]",
",",
"3",
"else",
":",
"delta",
",",
"expr",
"=",
"_extract_delta",
"(",
"summands",
"[",
"0",
"]",
",",
"idx",
")",
"if",
"not",
"delta",
":",
"return",
"[",
"(",
"term",
",",
"ranges",
")",
"]",
",",
"2",
"solns",
"=",
"sympy",
".",
"solve",
"(",
"delta",
".",
"args",
"[",
"0",
"]",
"-",
"delta",
".",
"args",
"[",
"1",
"]",
",",
"idx",
")",
"assert",
"len",
"(",
"solns",
")",
">",
"0",
"# I can't think of an example that might cause this",
"# if len(solns) == 0:",
"# return [(term._zero, [])], 4",
"if",
"len",
"(",
"solns",
")",
"!=",
"1",
":",
"return",
"[",
"(",
"term",
",",
"ranges",
")",
"]",
",",
"2",
"value",
"=",
"solns",
"[",
"0",
"]",
"new_term",
"=",
"expr",
".",
"substitute",
"(",
"{",
"idx",
":",
"value",
"}",
")",
"if",
"_RESOLVE_KRONECKER_WITH_PIECEWISE",
":",
"new_term",
"*=",
"ranges",
"[",
"i_range",
"]",
".",
"piecewise_one",
"(",
"value",
")",
"assert",
"isinstance",
"(",
"new_term",
",",
"QuantumExpression",
")",
"return",
"[",
"(",
"new_term",
",",
"ranges",
"[",
":",
"i_range",
"]",
"+",
"ranges",
"[",
"i_range",
"+",
"1",
":",
"]",
")",
"]",
",",
"1"
] | Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `index_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also covers if there is no Kronecker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a factor, but in a
sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, `:func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3 | [
"Partially",
"execute",
"a",
"summation",
"for",
"term",
"with",
"a",
"Kronecker",
"Delta",
"for",
"one",
"of",
"the",
"summation",
"indices",
"."
] | python | train |
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5727-L5742 | def hookScreenshot(self, numTypes):
"""
Called by the running VR application to indicate that it
wishes to be in charge of screenshots. If the
application does not call this, the Compositor will only
support VRScreenshotType_Stereo screenshots that will be
captured without notification to the running app.
Once hooked your application will receive a
VREvent_RequestScreenshot event when the user presses the
buttons to take a screenshot.
"""
fn = self.function_table.hookScreenshot
pSupportedTypes = EVRScreenshotType()
result = fn(byref(pSupportedTypes), numTypes)
return result, pSupportedTypes | [
"def",
"hookScreenshot",
"(",
"self",
",",
"numTypes",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"hookScreenshot",
"pSupportedTypes",
"=",
"EVRScreenshotType",
"(",
")",
"result",
"=",
"fn",
"(",
"byref",
"(",
"pSupportedTypes",
")",
",",
"numTypes",
")",
"return",
"result",
",",
"pSupportedTypes"
] | Called by the running VR application to indicate that it
wishes to be in charge of screenshots. If the
application does not call this, the Compositor will only
support VRScreenshotType_Stereo screenshots that will be
captured without notification to the running app.
Once hooked your application will receive a
VREvent_RequestScreenshot event when the user presses the
buttons to take a screenshot. | [
"Called",
"by",
"the",
"running",
"VR",
"application",
"to",
"indicate",
"that",
"it",
"wishes",
"to",
"be",
"in",
"charge",
"of",
"screenshots",
".",
"If",
"the",
"application",
"does",
"not",
"call",
"this",
"the",
"Compositor",
"will",
"only",
"support",
"VRScreenshotType_Stereo",
"screenshots",
"that",
"will",
"be",
"captured",
"without",
"notification",
"to",
"the",
"running",
"app",
".",
"Once",
"hooked",
"your",
"application",
"will",
"receive",
"a",
"VREvent_RequestScreenshot",
"event",
"when",
"the",
"user",
"presses",
"the",
"buttons",
"to",
"take",
"a",
"screenshot",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L346-L364 | def get_base_cnv_regions(data, work_dir, genome_default="transcripts1e4", include_gene_names=True):
"""Retrieve set of target regions for CNV analysis.
Subsets to extended transcript regions for WGS experiments to avoid
long runtimes.
"""
cov_interval = dd.get_coverage_interval(data)
base_regions = get_sv_bed(data, include_gene_names=include_gene_names)
# if we don't have a configured BED or regions to use for SV calling
if not base_regions:
# For genome calls, subset to regions near genes as targets
if cov_interval == "genome":
base_regions = get_sv_bed(data, genome_default, work_dir, include_gene_names=include_gene_names)
if base_regions:
base_regions = remove_exclude_regions(base_regions, base_regions, [data])
# Finally, default to the defined variant regions
if not base_regions:
base_regions = dd.get_variant_regions(data) or dd.get_sample_callable(data)
return bedutils.clean_file(base_regions, data) | [
"def",
"get_base_cnv_regions",
"(",
"data",
",",
"work_dir",
",",
"genome_default",
"=",
"\"transcripts1e4\"",
",",
"include_gene_names",
"=",
"True",
")",
":",
"cov_interval",
"=",
"dd",
".",
"get_coverage_interval",
"(",
"data",
")",
"base_regions",
"=",
"get_sv_bed",
"(",
"data",
",",
"include_gene_names",
"=",
"include_gene_names",
")",
"# if we don't have a configured BED or regions to use for SV caling",
"if",
"not",
"base_regions",
":",
"# For genome calls, subset to regions near genes as targets",
"if",
"cov_interval",
"==",
"\"genome\"",
":",
"base_regions",
"=",
"get_sv_bed",
"(",
"data",
",",
"genome_default",
",",
"work_dir",
",",
"include_gene_names",
"=",
"include_gene_names",
")",
"if",
"base_regions",
":",
"base_regions",
"=",
"remove_exclude_regions",
"(",
"base_regions",
",",
"base_regions",
",",
"[",
"data",
"]",
")",
"# Finally, default to the defined variant regions",
"if",
"not",
"base_regions",
":",
"base_regions",
"=",
"dd",
".",
"get_variant_regions",
"(",
"data",
")",
"or",
"dd",
".",
"get_sample_callable",
"(",
"data",
")",
"return",
"bedutils",
".",
"clean_file",
"(",
"base_regions",
",",
"data",
")"
] | Retrieve set of target regions for CNV analysis.
Subsets to extended transcript regions for WGS experiments to avoid
long runtimes. | [
"Retrieve",
"set",
"of",
"target",
"regions",
"for",
"CNV",
"analysis",
"."
] | python | train |
helgi/python-command | command/core.py | https://github.com/helgi/python-command/blob/c41fb8cdd9074b847c7bc5b5ee7f027508f52d7f/command/core.py#L212-L248 | def which(program, environ=None):
"""
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception
"""
def is_exe(path):
"""
Helper method to check if a file exists and is executable
"""
return isfile(path) and os.access(path, os.X_OK)
if program is None:
raise CommandException("Invalid program name passed")
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
if environ is None:
environ = os.environ
for path in environ['PATH'].split(os.pathsep):
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
raise CommandException("Could not find %s" % program) | [
"def",
"which",
"(",
"program",
",",
"environ",
"=",
"None",
")",
":",
"def",
"is_exe",
"(",
"path",
")",
":",
"\"\"\"\n Helper method to check if a file exists and is executable\n \"\"\"",
"return",
"isfile",
"(",
"path",
")",
"and",
"os",
".",
"access",
"(",
"path",
",",
"os",
".",
"X_OK",
")",
"if",
"program",
"is",
"None",
":",
"raise",
"CommandException",
"(",
"\"Invalid program name passed\"",
")",
"fpath",
",",
"fname",
"=",
"split",
"(",
"program",
")",
"if",
"fpath",
":",
"if",
"is_exe",
"(",
"program",
")",
":",
"return",
"program",
"else",
":",
"if",
"environ",
"is",
"None",
":",
"environ",
"=",
"os",
".",
"environ",
"for",
"path",
"in",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"exe_file",
"=",
"join",
"(",
"path",
",",
"program",
")",
"if",
"is_exe",
"(",
"exe_file",
")",
":",
"return",
"exe_file",
"raise",
"CommandException",
"(",
"\"Could not find %s\"",
"%",
"program",
")"
] | Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception | [
"Find",
"out",
"if",
"an",
"executable",
"exists",
"in",
"the",
"supplied",
"PATH",
".",
"If",
"so",
"the",
"absolute",
"path",
"to",
"the",
"executable",
"is",
"returned",
".",
"If",
"not",
"an",
"exception",
"is",
"raised",
"."
] | python | train |
uogbuji/versa | tools/py/pipeline/core_actions.py | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L185-L319 | def materialize(typ, rel=None, origin=None, unique=None, links=None, inverse=False, split=None, attributes=None):
'''
Create a new resource related to the origin
:param typ: IRI of the type for the resource to be materialized,
which becomes the target of the main link, and the origin of any
additional links given in the links param
:param rel: IRI of the relationship between the origin and the materialized
target, or a list of relationship IRIs, each of which will be used to create
a separate link, or a versa action function to derive this relationship or
list of relationships at run time, or None. If none, use the action context.
:param origin: Literal IRI or Versa action function for origin of the
main generated link. If none, use the action context.
:param unique: Versa action function to be invoked in order to
derive a unique hash key input for the materialized resource, in the form of
multiple key, value pairs (or key, list-of-values)
:param links: Dictionary of links from the newly materialized resource.
Each keys can be a relationship IRIs, a Versa action function returning
a relationship IRI, a Versa action function returning a list of Versa
contexts, which can be used to guide a sequence pattern of generated
links, or a Versa action function returning None, which signals that
the particular link is skipped entirely.
:param postprocess: IRI or list of IRI queueing up actions to be postprocessed
for this materialized resource. None, the default, signals no special postprocessing
For examples of all these scenarios see marcpatterns.py
:return: Versa action function to do the actual work
'''
links = links or []
attributes = attributes or {}
def _materialize(ctx):
'''
Inserts at least two main links in the context's output_model, one or more for
the relationship from the origin to the materialized resource, one for the
type of the materialized resource, and links according to the links parameter
:param ctx: Runtime Versa context used in processing (e.g. includes the prototype link)
:return: None
This function is intricate in its use and shifting of Versa context, but the
intricacies are all designed to make the marcpatterns mini language more natural.
'''
#FIXME: Part of the datachef sorting out
if not ctx.idgen: ctx.idgen = idgen
_typ = typ(ctx) if callable(typ) else typ
_rel = rel(ctx) if callable(rel) else rel
_unique = unique(ctx) if callable(unique) else unique
(o, r, t, a) = ctx.current_link
#FIXME: On redesign implement split using function composition instead
targets = [ sub_t.strip() for sub_t in t.split(split) ] if split else [t]
#Conversions to make sure we end up with a list of relationships out of it all
if _rel is None:
_rel = [r]
rels = _rel if isinstance(_rel, list) else ([_rel] if _rel else [])
objids = []
#Botanical analogy
#The stem is the relationship from the original to the materialized resource
#The veins are any further relationships from materialized resource
for target in targets:
ctx_stem = ctx.copy(current_link=(o, r, target, a))
if origin:
#Have been given enough info to derive the origin from context. Ignore origin in current link
o = origin(ctx_stem)
computed_unique = [] if _unique else None
if _unique:
# strip None values from computed unique list, including pairs where v is None
for k, v in _unique:
if None in (k, v): continue
v = v if isinstance(v, list) else [v]
for subitem in v:
subval = subitem(ctx) if callable(subitem) else subitem
if subval:
subval = subval if isinstance(subval, list) else [subval]
computed_unique.extend([(k, s) for s in subval])
objid = materialize_entity(ctx, _typ, unique=computed_unique)
objids.append(objid)
for curr_rel in rels:
#e.g. scenario if passed in rel=ifexists(...)
curr_rel = curr_rel(ctx) if callable(curr_rel) else curr_rel
#FIXME: Fix this properly, by slugifying & making sure slugify handles all numeric case (prepend '_')
curr_rel = '_' + curr_rel if curr_rel.isdigit() else curr_rel
if curr_rel:
if inverse:
ctx.output_model.add(I(objid), I(iri.absolutize(curr_rel, ctx.base)), I(o), {})
else:
ctx.output_model.add(I(o), I(iri.absolutize(curr_rel, ctx.base)), I(objid), {})
#print((objid, ctx_.existing_ids))
if objid not in ctx.existing_ids:
if _typ: ctx.output_model.add(I(objid), VTYPE_REL, I(iri.absolutize(_typ, ctx.base)), {})
#FIXME: Should we be using Python Nones to mark blanks, or should Versa define some sort of null resource?
#XXX: Note, links are only processed on new objects! This needs some thought
for k, v in links:
new_current_link = (I(objid), k, ctx.current_link[TARGET], ctx.current_link[ATTRIBUTES])
ctx_vein = ctx_stem.copy(current_link=new_current_link)
k = k(ctx_vein) if callable(k) else k
#If k is a list of contexts use it to dynamically execute functions
if isinstance(k, list):
if k and isinstance(k[0], context):
for newctx in k:
#The function in question will generate any needed links in the output model
v(newctx)
continue
#import traceback; traceback.print_stack() #For looking up the call stack e.g. to debug nested materialize
#Check that the links key is not None, which is a signal not to
#generate the item. For example if the key is an ifexists and the
#test expression result is False, it will come back as None,
#and we don't want to run the v function
if k:
v = v(ctx_vein) if callable(v) else v
#If k or v come from pipeline functions as None it signals to skip generating anything else for this link item
if v is not None:
v = v(ctx_vein) if callable(v) else v
#FIXME: Fix properly, by slugifying & making sure slugify handles all-numeric case
if k.isdigit(): k = '_' + k
if isinstance(v, list):
for valitems in v:
if valitems:
ctx.output_model.add(I(objid), I(iri.absolutize(k, ctx_vein.base)), valitems, {})
else:
ctx.output_model.add(I(objid), I(iri.absolutize(k, ctx_vein.base)), v, {})
ctx.existing_ids.add(objid)
return objids
return _materialize | [
"def",
"materialize",
"(",
"typ",
",",
"rel",
"=",
"None",
",",
"origin",
"=",
"None",
",",
"unique",
"=",
"None",
",",
"links",
"=",
"None",
",",
"inverse",
"=",
"False",
",",
"split",
"=",
"None",
",",
"attributes",
"=",
"None",
")",
":",
"links",
"=",
"links",
"or",
"[",
"]",
"attributes",
"=",
"attributes",
"or",
"{",
"}",
"def",
"_materialize",
"(",
"ctx",
")",
":",
"'''\n Inserts at least two main links in the context's output_model, one or more for\n the relationship from the origin to the materialized resource, one for the\n type of the materialized resource, and links according to the links parameter\n\n :param ctx: Runtime Versa context used in processing (e.g. includes the prototype link)\n :return: None\n\n This function is intricate in its use and shifting of Versa context, but the\n intricacies are all designed to make the marcpatterns mini language more natural.\n '''",
"#FIXME: Part of the datachef sorting out",
"if",
"not",
"ctx",
".",
"idgen",
":",
"ctx",
".",
"idgen",
"=",
"idgen",
"_typ",
"=",
"typ",
"(",
"ctx",
")",
"if",
"callable",
"(",
"typ",
")",
"else",
"typ",
"_rel",
"=",
"rel",
"(",
"ctx",
")",
"if",
"callable",
"(",
"rel",
")",
"else",
"rel",
"_unique",
"=",
"unique",
"(",
"ctx",
")",
"if",
"callable",
"(",
"unique",
")",
"else",
"unique",
"(",
"o",
",",
"r",
",",
"t",
",",
"a",
")",
"=",
"ctx",
".",
"current_link",
"#FIXME: On redesign implement split using function composition instead",
"targets",
"=",
"[",
"sub_t",
".",
"strip",
"(",
")",
"for",
"sub_t",
"in",
"t",
".",
"split",
"(",
"split",
")",
"]",
"if",
"split",
"else",
"[",
"t",
"]",
"#Conversions to make sure we end up with a list of relationships out of it all",
"if",
"_rel",
"is",
"None",
":",
"_rel",
"=",
"[",
"r",
"]",
"rels",
"=",
"_rel",
"if",
"isinstance",
"(",
"_rel",
",",
"list",
")",
"else",
"(",
"[",
"_rel",
"]",
"if",
"_rel",
"else",
"[",
"]",
")",
"objids",
"=",
"[",
"]",
"#Botanical analogy",
"#The stem is the relationship from the original to the materialized resource ",
"#The veins are any further relationships from materialized resource ",
"for",
"target",
"in",
"targets",
":",
"ctx_stem",
"=",
"ctx",
".",
"copy",
"(",
"current_link",
"=",
"(",
"o",
",",
"r",
",",
"target",
",",
"a",
")",
")",
"if",
"origin",
":",
"#Have been given enough info to derive the origin from context. Ignore origin in current link",
"o",
"=",
"origin",
"(",
"ctx_stem",
")",
"computed_unique",
"=",
"[",
"]",
"if",
"_unique",
"else",
"None",
"if",
"_unique",
":",
"# strip None values from computed unique list, including pairs where v is None",
"for",
"k",
",",
"v",
"in",
"_unique",
":",
"if",
"None",
"in",
"(",
"k",
",",
"v",
")",
":",
"continue",
"v",
"=",
"v",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
"else",
"[",
"v",
"]",
"for",
"subitem",
"in",
"v",
":",
"subval",
"=",
"subitem",
"(",
"ctx",
")",
"if",
"callable",
"(",
"subitem",
")",
"else",
"subitem",
"if",
"subval",
":",
"subval",
"=",
"subval",
"if",
"isinstance",
"(",
"subval",
",",
"list",
")",
"else",
"[",
"subval",
"]",
"computed_unique",
".",
"extend",
"(",
"[",
"(",
"k",
",",
"s",
")",
"for",
"s",
"in",
"subval",
"]",
")",
"objid",
"=",
"materialize_entity",
"(",
"ctx",
",",
"_typ",
",",
"unique",
"=",
"computed_unique",
")",
"objids",
".",
"append",
"(",
"objid",
")",
"for",
"curr_rel",
"in",
"rels",
":",
"#e.g. scenario if passed in rel=ifexists(...)",
"curr_rel",
"=",
"curr_rel",
"(",
"ctx",
")",
"if",
"callable",
"(",
"curr_rel",
")",
"else",
"curr_rel",
"#FIXME: Fix this properly, by slugifying & making sure slugify handles all numeric case (prepend '_')",
"curr_rel",
"=",
"'_'",
"+",
"curr_rel",
"if",
"curr_rel",
".",
"isdigit",
"(",
")",
"else",
"curr_rel",
"if",
"curr_rel",
":",
"if",
"inverse",
":",
"ctx",
".",
"output_model",
".",
"add",
"(",
"I",
"(",
"objid",
")",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"curr_rel",
",",
"ctx",
".",
"base",
")",
")",
",",
"I",
"(",
"o",
")",
",",
"{",
"}",
")",
"else",
":",
"ctx",
".",
"output_model",
".",
"add",
"(",
"I",
"(",
"o",
")",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"curr_rel",
",",
"ctx",
".",
"base",
")",
")",
",",
"I",
"(",
"objid",
")",
",",
"{",
"}",
")",
"#print((objid, ctx_.existing_ids))",
"if",
"objid",
"not",
"in",
"ctx",
".",
"existing_ids",
":",
"if",
"_typ",
":",
"ctx",
".",
"output_model",
".",
"add",
"(",
"I",
"(",
"objid",
")",
",",
"VTYPE_REL",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"_typ",
",",
"ctx",
".",
"base",
")",
")",
",",
"{",
"}",
")",
"#FIXME: Should we be using Python Nones to mark blanks, or should Versa define some sort of null resource?",
"#XXX: Note, links are only processed on new objects! This needs some thought",
"for",
"k",
",",
"v",
"in",
"links",
":",
"new_current_link",
"=",
"(",
"I",
"(",
"objid",
")",
",",
"k",
",",
"ctx",
".",
"current_link",
"[",
"TARGET",
"]",
",",
"ctx",
".",
"current_link",
"[",
"ATTRIBUTES",
"]",
")",
"ctx_vein",
"=",
"ctx_stem",
".",
"copy",
"(",
"current_link",
"=",
"new_current_link",
")",
"k",
"=",
"k",
"(",
"ctx_vein",
")",
"if",
"callable",
"(",
"k",
")",
"else",
"k",
"#If k is a list of contexts use it to dynamically execute functions",
"if",
"isinstance",
"(",
"k",
",",
"list",
")",
":",
"if",
"k",
"and",
"isinstance",
"(",
"k",
"[",
"0",
"]",
",",
"context",
")",
":",
"for",
"newctx",
"in",
"k",
":",
"#The function in question will generate any needed links in the output model",
"v",
"(",
"newctx",
")",
"continue",
"#import traceback; traceback.print_stack() #For looking up the call stack e.g. to debug nested materialize",
"#Check that the links key is not None, which is a signal not to",
"#generate the item. For example if the key is an ifexists and the",
"#test expression result is False, it will come back as None,",
"#and we don't want to run the v function",
"if",
"k",
":",
"v",
"=",
"v",
"(",
"ctx_vein",
")",
"if",
"callable",
"(",
"v",
")",
"else",
"v",
"#If k or v come from pipeline functions as None it signals to skip generating anything else for this link item",
"if",
"v",
"is",
"not",
"None",
":",
"v",
"=",
"v",
"(",
"ctx_vein",
")",
"if",
"callable",
"(",
"v",
")",
"else",
"v",
"#FIXME: Fix properly, by slugifying & making sure slugify handles all-numeric case",
"if",
"k",
".",
"isdigit",
"(",
")",
":",
"k",
"=",
"'_'",
"+",
"k",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"for",
"valitems",
"in",
"v",
":",
"if",
"valitems",
":",
"ctx",
".",
"output_model",
".",
"add",
"(",
"I",
"(",
"objid",
")",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"k",
",",
"ctx_vein",
".",
"base",
")",
")",
",",
"valitems",
",",
"{",
"}",
")",
"else",
":",
"ctx",
".",
"output_model",
".",
"add",
"(",
"I",
"(",
"objid",
")",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"k",
",",
"ctx_vein",
".",
"base",
")",
")",
",",
"v",
",",
"{",
"}",
")",
"ctx",
".",
"existing_ids",
".",
"add",
"(",
"objid",
")",
"return",
"objids",
"return",
"_materialize"
] | Create a new resource related to the origin
:param typ: IRI of the type for the resource to be materialized,
which becomes the target of the main link, and the origin of any
additional links given in the links param
:param rel: IRI of the relationship between the origin and the materialized
target, or a list of relationship IRIs, each of which will be used to create
a separate link, or a versa action function to derive this relationship or
list of relationships at run time, or None. If none, use the action context.
:param origin: Literal IRI or Versa action function for origin of the
main generated link. If none, use the action context.
:param unique: Versa action function to be invoked in order to
derive a unique hash key input for the materialized resource, in the form of
multiple key, value pairs (or key, list-of-values)
:param links: Dictionary of links from the newly materialized resource.
Each keys can be a relationship IRIs, a Versa action function returning
a relationship IRI, a Versa action function returning a list of Versa
contexts, which can be used to guide a sequence pattern of generated
links, or a Versa action function returning None, which signals that
the particular link is skipped entirely.
:param postprocess: IRI or list of IRI queueing up actions to be postprocessed
for this materialized resource. None, the default, signals no special postprocessing
For examples of all these scenarios see marcpatterns.py
:return: Versa action function to do the actual work | [
"Create",
"a",
"new",
"resource",
"related",
"to",
"the",
"origin"
] | python | train |
portfors-lab/sparkle | sparkle/gui/stim/stimulusview.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L549-L556 | def paint(self, painter, option, index):
"""Uses the :meth:`paint<sparkle.gui.stim.components.qcomponents.QStimulusComponent.paint>`
method of the component it represents to fill in an appropriately
sized rectangle. :qtdoc:`Re-implemented<QStyledItemDelegate.paint>`"""
component = index.model().data(index, role=QtCore.Qt.UserRole)
painter.drawRect(option.rect)
component.paint(painter, option.rect, option.palette) | [
"def",
"paint",
"(",
"self",
",",
"painter",
",",
"option",
",",
"index",
")",
":",
"component",
"=",
"index",
".",
"model",
"(",
")",
".",
"data",
"(",
"index",
",",
"role",
"=",
"QtCore",
".",
"Qt",
".",
"UserRole",
")",
"painter",
".",
"drawRect",
"(",
"option",
".",
"rect",
")",
"component",
".",
"paint",
"(",
"painter",
",",
"option",
".",
"rect",
",",
"option",
".",
"palette",
")"
] | Uses the :meth:`paint<sparkle.gui.stim.components.qcomponents.QStimulusComponent.paint>`
method of the component it represents to fill in an appropriately
sized rectangle. :qtdoc:`Re-implemented<QStyledItemDelegate.paint>` | [
"Uses",
"the",
":",
"meth",
":",
"paint<sparkle",
".",
"gui",
".",
"stim",
".",
"components",
".",
"qcomponents",
".",
"QStimulusComponent",
".",
"paint",
">",
"method",
"of",
"the",
"component",
"it",
"represents",
"to",
"fill",
"in",
"an",
"appropriately",
"sized",
"rectange",
".",
":",
"qtdoc",
":",
"Re",
"-",
"implemented<QStyledItemDelegate",
".",
"paint",
">"
] | python | train |
saltstack/salt | salt/modules/win_system.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L676-L698 | def set_hostname(hostname):
'''
Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
'''
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
return comp.Rename(Name=hostname) | [
"def",
"set_hostname",
"(",
"hostname",
")",
":",
"with",
"salt",
".",
"utils",
".",
"winapi",
".",
"Com",
"(",
")",
":",
"conn",
"=",
"wmi",
".",
"WMI",
"(",
")",
"comp",
"=",
"conn",
".",
"Win32_ComputerSystem",
"(",
")",
"[",
"0",
"]",
"return",
"comp",
".",
"Rename",
"(",
"Name",
"=",
"hostname",
")"
] | Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname | [
"Set",
"the",
"hostname",
"of",
"the",
"windows",
"minion",
"requires",
"a",
"restart",
"before",
"this",
"will",
"be",
"updated",
"."
] | python | train |
TrafficSenseMSD/SumoTools | sumolib/__init__.py | https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/sumolib/__init__.py#L198-L206 | def flush(self):
"""flushes all file contents to disc"""
for fp in self.files:
fp.flush()
if isinstance(fp, int) or hasattr(fp, "fileno"):
try:
os.fsync(fp)
except OSError:
pass | [
"def",
"flush",
"(",
"self",
")",
":",
"for",
"fp",
"in",
"self",
".",
"files",
":",
"fp",
".",
"flush",
"(",
")",
"if",
"isinstance",
"(",
"fp",
",",
"int",
")",
"or",
"hasattr",
"(",
"fp",
",",
"\"fileno\"",
")",
":",
"try",
":",
"os",
".",
"fsync",
"(",
"fp",
")",
"except",
"OSError",
":",
"pass"
] | flushes all file contents to disc | [
"flushes",
"all",
"file",
"contents",
"to",
"disc"
] | python | train |
job/aggregate6 | aggregate6/aggregate6.py | https://github.com/job/aggregate6/blob/fa93046a39e397795d6258ea4c46033dee3df69b/aggregate6/aggregate6.py#L44-L61 | def aggregate(l):
"""Aggregate a `list` of prefixes.
Keyword arguments:
l -- a python list of prefixes
Example use:
>>> aggregate(["10.0.0.0/8", "10.0.0.0/24"])
['10.0.0.0/8']
"""
tree = radix.Radix()
for item in l:
try:
tree.add(item)
except (ValueError) as err:
raise Exception("ERROR: invalid IP prefix: {}".format(item))
return aggregate_tree(tree).prefixes() | [
"def",
"aggregate",
"(",
"l",
")",
":",
"tree",
"=",
"radix",
".",
"Radix",
"(",
")",
"for",
"item",
"in",
"l",
":",
"try",
":",
"tree",
".",
"add",
"(",
"item",
")",
"except",
"(",
"ValueError",
")",
"as",
"err",
":",
"raise",
"Exception",
"(",
"\"ERROR: invalid IP prefix: {}\"",
".",
"format",
"(",
"item",
")",
")",
"return",
"aggregate_tree",
"(",
"tree",
")",
".",
"prefixes",
"(",
")"
] | Aggregate a `list` of prefixes.
Keyword arguments:
l -- a python list of prefixes
Example use:
>>> aggregate(["10.0.0.0/8", "10.0.0.0/24"])
['10.0.0.0/8'] | [
"Aggregate",
"a",
"list",
"of",
"prefixes",
"."
] | python | valid |
log2timeline/plaso | plaso/cli/helpers/extraction.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/extraction.py#L51-L76 | def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
preferred_year = cls._ParseNumericOption(options, 'preferred_year')
process_archives = getattr(options, 'process_archives', False)
process_compressed_streams = getattr(
options, 'process_compressed_streams', True)
setattr(configuration_object, '_preferred_year', preferred_year)
setattr(configuration_object, '_process_archives', process_archives)
setattr(
configuration_object, '_process_compressed_streams',
process_compressed_streams) | [
"def",
"ParseOptions",
"(",
"cls",
",",
"options",
",",
"configuration_object",
")",
":",
"if",
"not",
"isinstance",
"(",
"configuration_object",
",",
"tools",
".",
"CLITool",
")",
":",
"raise",
"errors",
".",
"BadConfigObject",
"(",
"'Configuration object is not an instance of CLITool'",
")",
"preferred_year",
"=",
"cls",
".",
"_ParseNumericOption",
"(",
"options",
",",
"'preferred_year'",
")",
"process_archives",
"=",
"getattr",
"(",
"options",
",",
"'process_archives'",
",",
"False",
")",
"process_compressed_streams",
"=",
"getattr",
"(",
"options",
",",
"'process_compressed_streams'",
",",
"True",
")",
"setattr",
"(",
"configuration_object",
",",
"'_preferred_year'",
",",
"preferred_year",
")",
"setattr",
"(",
"configuration_object",
",",
"'_process_archives'",
",",
"process_archives",
")",
"setattr",
"(",
"configuration_object",
",",
"'_process_compressed_streams'",
",",
"process_compressed_streams",
")"
] | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type. | [
"Parses",
"and",
"validates",
"options",
"."
] | python | train |
cdeboever3/cdpybio | cdpybio/star.py | https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/star.py#L137-L200 | def _make_sj_out_panel(sj_outD, total_jxn_cov_cutoff=20):
"""Filter junctions from many sj_out files and make panel.
Parameters
----------
sj_outD : dict
Dict whose keys are sample names and values are sj_out dataframes
total_jxn_cov_cutoff : int
If the unique read coverage of a junction summed over all samples is not
greater than or equal to this value, the junction will not be included
in the final output.
Returns
-------
sj_outP : pandas.Panel
Panel where each dataframe corresponds to an sj_out file filtered to
remove low coverage junctions. Each dataframe has COUNT_COLS =
('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
annotDF : pandas.DataFrame
Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
'end', 'intron_motif', 'annotated') that are otherwise
duplicated in the panel.
"""
# num_jxns = dict()
# # set of all junctions
# jxnS = reduce(lambda x,y: set(x) | set(y),
# [ sj_outD[k].index for k in sj_outD.keys() ])
# jxn_keepS = set()
# jxn_setsD = dict()
# for k in sj_outD.keys():
# jxn_setsD[k] = frozenset(sj_outD[k].index)
# for j in jxnS:
# if sum([ sj_outD[k].ix[j,'unique_junction_reads'] for k in sj_outD.keys()
# if j in jxn_setsD[k] ]) >= total_jxn_cov_cutoff:
# jxn_keepS.add(j)
# for k in sj_outD.keys():
# sj_outD[k] = sj_outD[k].ix[jxn_keepS]
sj_outP = pd.Panel(sj_outD)
for col in ['unique_junction_reads', 'multimap_junction_reads',
'max_overhang']:
sj_outP.ix[:,:,col] = sj_outP.ix[:,:,col].fillna(0)
# Some dataframes will be missing information like intron_motif etc. for
# junctions that were not observed in that sample. The info is somewhere in
# the panel though so we can get it.
annotDF = reduce(pd.DataFrame.combine_first,
[ sj_outP.ix[item,:,ANNOTATION_COLS].dropna() for item in
sj_outP.items ])
annotDF['start'] = annotDF['start'].astype(int)
annotDF['end'] = annotDF['end'].astype(int)
annotDF['annotated'] = annotDF['annotated'].astype(bool)
# Sort annotation and panel
annotDF = annotDF.sort_values(by=['chrom', 'start', 'end'])
sj_outP = sj_outP.ix[:, annotDF.index, :]
sj_outP = sj_outP.ix[:,:,COUNT_COLS].astype(int)
return sj_outP, annotDF | [
"def",
"_make_sj_out_panel",
"(",
"sj_outD",
",",
"total_jxn_cov_cutoff",
"=",
"20",
")",
":",
"# num_jxns = dict()",
"# # set of all junctions",
"# jxnS = reduce(lambda x,y: set(x) | set(y),",
"# [ sj_outD[k].index for k in sj_outD.keys() ])",
"# jxn_keepS = set()",
"# jxn_setsD = dict()",
"# for k in sj_outD.keys():",
"# jxn_setsD[k] = frozenset(sj_outD[k].index)",
"# for j in jxnS:",
"# if sum([ sj_outD[k].ix[j,'unique_junction_reads'] for k in sj_outD.keys()",
"# if j in jxn_setsD[k] ]) >= total_jxn_cov_cutoff:",
"# jxn_keepS.add(j)",
"# for k in sj_outD.keys():",
"# sj_outD[k] = sj_outD[k].ix[jxn_keepS]",
"sj_outP",
"=",
"pd",
".",
"Panel",
"(",
"sj_outD",
")",
"for",
"col",
"in",
"[",
"'unique_junction_reads'",
",",
"'multimap_junction_reads'",
",",
"'max_overhang'",
"]",
":",
"sj_outP",
".",
"ix",
"[",
":",
",",
":",
",",
"col",
"]",
"=",
"sj_outP",
".",
"ix",
"[",
":",
",",
":",
",",
"col",
"]",
".",
"fillna",
"(",
"0",
")",
"# Some dataframes will be missing information like intron_motif etc. for ",
"# junctions that were not observed in that sample. The info is somewhere in",
"# the panel though so we can get it.",
"annotDF",
"=",
"reduce",
"(",
"pd",
".",
"DataFrame",
".",
"combine_first",
",",
"[",
"sj_outP",
".",
"ix",
"[",
"item",
",",
":",
",",
"ANNOTATION_COLS",
"]",
".",
"dropna",
"(",
")",
"for",
"item",
"in",
"sj_outP",
".",
"items",
"]",
")",
"annotDF",
"[",
"'start'",
"]",
"=",
"annotDF",
"[",
"'start'",
"]",
".",
"astype",
"(",
"int",
")",
"annotDF",
"[",
"'end'",
"]",
"=",
"annotDF",
"[",
"'end'",
"]",
".",
"astype",
"(",
"int",
")",
"annotDF",
"[",
"'annotated'",
"]",
"=",
"annotDF",
"[",
"'annotated'",
"]",
".",
"astype",
"(",
"bool",
")",
"# Sort annotation and panel",
"annotDF",
"=",
"annotDF",
".",
"sort_values",
"(",
"by",
"=",
"[",
"'chrom'",
",",
"'start'",
",",
"'end'",
"]",
")",
"sj_outP",
"=",
"sj_outP",
".",
"ix",
"[",
":",
",",
"annotDF",
".",
"index",
",",
":",
"]",
"sj_outP",
"=",
"sj_outP",
".",
"ix",
"[",
":",
",",
":",
",",
"COUNT_COLS",
"]",
".",
"astype",
"(",
"int",
")",
"return",
"sj_outP",
",",
"annotDF"
] | Filter junctions from many sj_out files and make panel.
Parameters
----------
sj_outD : dict
Dict whose keys are sample names and values are sj_out dataframes
total_jxn_cov_cutoff : int
If the unique read coverage of a junction summed over all samples is not
greater than or equal to this value, the junction will not be included
in the final output.
Returns
-------
sj_outP : pandas.Panel
Panel where each dataframe corresponds to an sj_out file filtered to
remove low coverage junctions. Each dataframe has COUNT_COLS =
('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
annotDF : pandas.DataFrame
Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
'end', 'intron_motif', 'annotated') that are otherwise
duplicated in the panel. | [
"Filter",
"junctions",
"from",
"many",
"sj_out",
"files",
"and",
"make",
"panel",
"."
] | python | train |
fermiPy/fermipy | fermipy/model_utils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/model_utils.py#L41-L65 | def get_function_spec(name):
"""Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
default : dict
Parameter defaults dictionary.
"""
if not hasattr(get_function_spec, 'fndict'):
modelfile = os.path.join('$FERMIPY_ROOT',
'data', 'models.yaml')
modelfile = os.path.expandvars(modelfile)
get_function_spec.fndict = yaml.load(open(modelfile))
if not name in get_function_spec.fndict.keys():
raise Exception('Invalid Function Name: %s' % name)
return get_function_spec.fndict[name] | [
"def",
"get_function_spec",
"(",
"name",
")",
":",
"if",
"not",
"hasattr",
"(",
"get_function_spec",
",",
"'fndict'",
")",
":",
"modelfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$FERMIPY_ROOT'",
",",
"'data'",
",",
"'models.yaml'",
")",
"modelfile",
"=",
"os",
".",
"path",
".",
"expandvars",
"(",
"modelfile",
")",
"get_function_spec",
".",
"fndict",
"=",
"yaml",
".",
"load",
"(",
"open",
"(",
"modelfile",
")",
")",
"if",
"not",
"name",
"in",
"get_function_spec",
".",
"fndict",
".",
"keys",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Invalid Function Name: %s'",
"%",
"name",
")",
"return",
"get_function_spec",
".",
"fndict",
"[",
"name",
"]"
] | Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
default : dict
Parameter defaults dictionary. | [
"Return",
"a",
"dictionary",
"with",
"the",
"specification",
"of",
"a",
"function",
":",
"parameter",
"names",
"and",
"defaults",
"(",
"value",
"bounds",
"scale",
"etc",
".",
")",
"."
] | python | train |
mikedh/trimesh | trimesh/path/exchange/misc.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/exchange/misc.py#L151-L181 | def faces_to_path(mesh, face_ids=None, **kwargs):
"""
Given a mesh and face indices find the outline edges and
turn them into a Path3D.
Parameters
---------
mesh : trimesh.Trimesh
Triangulated surface in 3D
face_ids : (n,) int
Indexes referencing mesh.faces
Returns
---------
kwargs : dict
Kwargs for Path3D constructor
"""
if face_ids is None:
edges = mesh.edges_sorted
else:
# take advantage of edge ordering to index as single row
edges = mesh.edges_sorted.reshape(
(-1, 6))[face_ids].reshape((-1, 2))
# an edge which occurs only once is on the boundary
unique_edges = grouping.group_rows(
edges, require_count=1)
# add edges and vertices to kwargs
kwargs.update(edges_to_path(edges=edges[unique_edges],
vertices=mesh.vertices))
return kwargs | [
"def",
"faces_to_path",
"(",
"mesh",
",",
"face_ids",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"face_ids",
"is",
"None",
":",
"edges",
"=",
"mesh",
".",
"edges_sorted",
"else",
":",
"# take advantage of edge ordering to index as single row",
"edges",
"=",
"mesh",
".",
"edges_sorted",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"6",
")",
")",
"[",
"face_ids",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"2",
")",
")",
"# an edge which occurs onely once is on the boundary",
"unique_edges",
"=",
"grouping",
".",
"group_rows",
"(",
"edges",
",",
"require_count",
"=",
"1",
")",
"# add edges and vertices to kwargs",
"kwargs",
".",
"update",
"(",
"edges_to_path",
"(",
"edges",
"=",
"edges",
"[",
"unique_edges",
"]",
",",
"vertices",
"=",
"mesh",
".",
"vertices",
")",
")",
"return",
"kwargs"
] | Given a mesh and face indices find the outline edges and
turn them into a Path3D.
Parameters
---------
mesh : trimesh.Trimesh
Triangulated surface in 3D
face_ids : (n,) int
Indexes referencing mesh.faces
Returns
---------
kwargs : dict
Kwargs for Path3D constructor | [
"Given",
"a",
"mesh",
"and",
"face",
"indices",
"find",
"the",
"outline",
"edges",
"and",
"turn",
"them",
"into",
"a",
"Path3D",
"."
] | python | train |
marrow/WebCore | web/core/application.py | https://github.com/marrow/WebCore/blob/38d50f8022ca62976a1e5ff23f7714bd647b6532/web/core/application.py#L108-L147 | def _configure(self, config):
"""Prepare the incoming configuration and ensure certain expected values are present.
For example, this ensures BaseExtension is included in the extension list, and populates the logging config.
"""
config = config or dict()
# We really need this to be there.
if 'extensions' not in config: config['extensions'] = list()
if not any(isinstance(ext, BaseExtension) for ext in config['extensions']):
# Always make sure the BaseExtension is present since request/response objects are handy.
config['extensions'].insert(0, BaseExtension())
if not any(isinstance(ext, arguments.ArgumentExtension) for ext in config['extensions']):
# Prepare a default set of argument mutators.
config['extensions'].extend([
arguments.ValidateArgumentsExtension(),
arguments.ContextArgsExtension(),
arguments.RemainderArgsExtension(),
arguments.QueryStringArgsExtension(),
arguments.FormEncodedKwargsExtension(),
arguments.JSONKwargsExtension(),
])
config['extensions'].append(self) # Allow the application object itself to register callbacks.
try:
addLoggingLevel('trace', logging.DEBUG - 5)
except AttributeError:
pass
# Tests are skipped on these as we have no particular need to test Python's own logging mechanism.
level = config.get('logging', {}).get('level', None)
if level: # pragma: no cover
logging.basicConfig(level=getattr(logging, level.upper()))
elif 'logging' in config: # pragma: no cover
logging.config.dictConfig(config['logging'])
return config | [
"def",
"_configure",
"(",
"self",
",",
"config",
")",
":",
"config",
"=",
"config",
"or",
"dict",
"(",
")",
"# We really need this to be there.",
"if",
"'extensions'",
"not",
"in",
"config",
":",
"config",
"[",
"'extensions'",
"]",
"=",
"list",
"(",
")",
"if",
"not",
"any",
"(",
"isinstance",
"(",
"ext",
",",
"BaseExtension",
")",
"for",
"ext",
"in",
"config",
"[",
"'extensions'",
"]",
")",
":",
"# Always make sure the BaseExtension is present since request/response objects are handy.",
"config",
"[",
"'extensions'",
"]",
".",
"insert",
"(",
"0",
",",
"BaseExtension",
"(",
")",
")",
"if",
"not",
"any",
"(",
"isinstance",
"(",
"ext",
",",
"arguments",
".",
"ArgumentExtension",
")",
"for",
"ext",
"in",
"config",
"[",
"'extensions'",
"]",
")",
":",
"# Prepare a default set of argument mutators.",
"config",
"[",
"'extensions'",
"]",
".",
"extend",
"(",
"[",
"arguments",
".",
"ValidateArgumentsExtension",
"(",
")",
",",
"arguments",
".",
"ContextArgsExtension",
"(",
")",
",",
"arguments",
".",
"RemainderArgsExtension",
"(",
")",
",",
"arguments",
".",
"QueryStringArgsExtension",
"(",
")",
",",
"arguments",
".",
"FormEncodedKwargsExtension",
"(",
")",
",",
"arguments",
".",
"JSONKwargsExtension",
"(",
")",
",",
"]",
")",
"config",
"[",
"'extensions'",
"]",
".",
"append",
"(",
"self",
")",
"# Allow the application object itself to register callbacks.",
"try",
":",
"addLoggingLevel",
"(",
"'trace'",
",",
"logging",
".",
"DEBUG",
"-",
"5",
")",
"except",
"AttributeError",
":",
"pass",
"# Tests are skipped on these as we have no particular need to test Python's own logging mechanism.",
"level",
"=",
"config",
".",
"get",
"(",
"'logging'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'level'",
",",
"None",
")",
"if",
"level",
":",
"# pragma: no cover",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"getattr",
"(",
"logging",
",",
"level",
".",
"upper",
"(",
")",
")",
")",
"elif",
"'logging'",
"in",
"config",
":",
"# pragma: no cover",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"config",
"[",
"'logging'",
"]",
")",
"return",
"config"
] | Prepare the incoming configuration and ensure certain expected values are present.
For example, this ensures BaseExtension is included in the extension list, and populates the logging config. | [
"Prepare",
"the",
"incoming",
"configuration",
"and",
"ensure",
"certain",
"expected",
"values",
"are",
"present",
".",
"For",
"example",
"this",
"ensures",
"BaseExtension",
"is",
"included",
"in",
"the",
"extension",
"list",
"and",
"populates",
"the",
"logging",
"config",
"."
] | python | train |
CityOfZion/neo-python | neo/Wallets/Wallet.py | https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L135-L150 | def AddContract(self, contract):
"""
Add a contract to the wallet.
Args:
contract (Contract): a contract of type neo.SmartContract.Contract.
Raises:
Exception: Invalid operation - public key mismatch.
"""
if not contract.PublicKeyHash.ToBytes() in self._keys.keys():
raise Exception('Invalid operation - public key mismatch')
self._contracts[contract.ScriptHash.ToBytes()] = contract
if contract.ScriptHash in self._watch_only:
self._watch_only.remove(contract.ScriptHash) | [
"def",
"AddContract",
"(",
"self",
",",
"contract",
")",
":",
"if",
"not",
"contract",
".",
"PublicKeyHash",
".",
"ToBytes",
"(",
")",
"in",
"self",
".",
"_keys",
".",
"keys",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Invalid operation - public key mismatch'",
")",
"self",
".",
"_contracts",
"[",
"contract",
".",
"ScriptHash",
".",
"ToBytes",
"(",
")",
"]",
"=",
"contract",
"if",
"contract",
".",
"ScriptHash",
"in",
"self",
".",
"_watch_only",
":",
"self",
".",
"_watch_only",
".",
"remove",
"(",
"contract",
".",
"ScriptHash",
")"
] | Add a contract to the wallet.
Args:
contract (Contract): a contract of type neo.SmartContract.Contract.
Raises:
Exception: Invalid operation - public key mismatch. | [
"Add",
"a",
"contract",
"to",
"the",
"wallet",
"."
] | python | train |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/identity/identity_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/identity/identity_client.py#L73-L83 | def delete_group(self, group_id):
"""DeleteGroup.
:param str group_id:
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
self._send(http_method='DELETE',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='5.0',
route_values=route_values) | [
"def",
"delete_group",
"(",
"self",
",",
"group_id",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"group_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'groupId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'group_id'",
",",
"group_id",
",",
"'str'",
")",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'DELETE'",
",",
"location_id",
"=",
"'5966283b-4196-4d57-9211-1b68f41ec1c2'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
")"
] | DeleteGroup.
:param str group_id: | [
"DeleteGroup",
".",
":",
"param",
"str",
"group_id",
":"
] | python | train |
etobella/python-xmlsig | src/xmlsig/utils.py | https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/utils.py#L85-L102 | def create_node(name, parent=None, ns='', tail=False, text=False):
"""
Creates a new node
:param name: Node name
:param parent: Node parent
:param ns: Namespace to use
:param tail: Tail to add
:param text: Text of the node
:return: New node
"""
node = etree.Element(etree.QName(ns, name))
if parent is not None:
parent.append(node)
if tail:
node.tail = tail
if text:
node.text = text
return node | [
"def",
"create_node",
"(",
"name",
",",
"parent",
"=",
"None",
",",
"ns",
"=",
"''",
",",
"tail",
"=",
"False",
",",
"text",
"=",
"False",
")",
":",
"node",
"=",
"etree",
".",
"Element",
"(",
"etree",
".",
"QName",
"(",
"ns",
",",
"name",
")",
")",
"if",
"parent",
"is",
"not",
"None",
":",
"parent",
".",
"append",
"(",
"node",
")",
"if",
"tail",
":",
"node",
".",
"tail",
"=",
"tail",
"if",
"text",
":",
"node",
".",
"text",
"=",
"text",
"return",
"node"
] | Creates a new node
:param name: Node name
:param parent: Node parent
:param ns: Namespace to use
:param tail: Tail to add
:param text: Text of the node
:return: New node | [
"Creates",
"a",
"new",
"node",
":",
"param",
"name",
":",
"Node",
"name",
":",
"param",
"parent",
":",
"Node",
"parent",
":",
"param",
"ns",
":",
"Namespace",
"to",
"use",
":",
"param",
"tail",
":",
"Tail",
"to",
"add",
":",
"param",
"text",
":",
"Text",
"of",
"the",
"node",
":",
"return",
":",
"New",
"node"
] | python | train |
binilinlquad/bing-search-api | bing_search_api/api.py | https://github.com/binilinlquad/bing-search-api/blob/c3a296ad7d0050dc929eda9d9760df5c15faa51a/bing_search_api/api.py#L62-L71 | def search_composite(self, query, source, payload=None):
'''Shortcut search with composite source'''
source = '+'.join(source)
if payload is None:
payload = dict(Sources=quote(source))
else:
payload['Sources'] = quote(source)
return self.search(query, 'Composite', payload) | [
"def",
"search_composite",
"(",
"self",
",",
"query",
",",
"source",
",",
"payload",
"=",
"None",
")",
":",
"source",
"=",
"'+'",
".",
"join",
"(",
"source",
")",
"if",
"payload",
"is",
"None",
":",
"payload",
"=",
"dict",
"(",
"Sources",
"=",
"quote",
"(",
"source",
")",
")",
"else",
":",
"payload",
"[",
"'Sources'",
"]",
"=",
"quote",
"(",
"source",
")",
"return",
"self",
".",
"search",
"(",
"query",
",",
"'Composite'",
",",
"payload",
")"
] | Shortcut search with composite source | [
"Shortcut",
"search",
"with",
"composite",
"source"
] | python | train |
bcbio/bcbio-nextgen | bcbio/cwl/defs.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/defs.py#L511-L602 | def _variant_sv(checkpoints):
"""Structural variant workflow.
"""
if not checkpoints.get("sv"):
return [], []
sv = [s("detect_sv", "batch-single",
[["sv_batch_rec"]],
[cwlout("sv_rec", "record",
fields=[cwlout(["sv", "variantcaller"], ["string", "null"]),
cwlout(["sv", "vrn_file"], ["File", "null"], [".tbi"]),
cwlout(["sv", "supplemental"], {"type": "array", "items": ["File"]}),
cwlout(["svvalidate", "summary"], ["File", "null"]),
cwlout("inherit", exclude=[["align_bam"], ["work_bam_plus"],
["reference", "snpeff"]])])],
"bcbio-vc", ["bedtools", "cnvkit", "delly", "duphold", "extract-sv-reads", "gsort",
"lumpy-sv;env=python2", "manta;env=python2", "break-point-inspector", "mosdepth", "samtools",
"smoove;env=python2", "pysam>=0.13.0",
"seq2c", "simple_sv_annotation;env=python2", "survivor", "svtools;env=python2",
"svtyper;env=python2",
"r=3.5.1", "r-base", "xorg-libxt", "vawk;env=python2"],
disk={"files": 2.0})]
sv_batch_inputs = [["analysis"], ["genome_build"],
["work_bam_plus", "disc"], ["work_bam_plus", "sr"],
["config", "algorithm", "background", "cnv_reference"],
["config", "algorithm", "tools_on"],
["config", "algorithm", "tools_off"],
["config", "algorithm", "svprioritize"],
["config", "algorithm", "svvalidate"], ["regions", "sample_callable"],
["genome_resources", "variation", "gc_profile"],
["genome_resources", "variation", "germline_het_pon"],
["genome_resources", "aliases", "snpeff"], ["reference", "snpeff", "genome_build"],
["sv_coverage_rec"]]
if checkpoints.get("vc"):
sv_batch_inputs.append(["variants", "samples"])
steps = [s("calculate_sv_bins", "multi-combined",
[["align_bam"], ["reference", "fasta", "base"],
["metadata", "batch"], ["metadata", "phenotype"],
["config", "algorithm", "background", "cnv_reference"],
["config", "algorithm", "callable_regions"],
["config", "algorithm", "coverage_interval"],
["config", "algorithm", "exclude_regions"],
["config", "algorithm", "sv_regions"],
["config", "algorithm", "variant_regions"],
["config", "algorithm", "variant_regions_merged"],
["config", "algorithm", "seq2c_bed_ready"],
["config", "algorithm", "svcaller"],
["depth", "variant_regions", "regions"],
["genome_resources", "variation", "lcr"], ["genome_resources", "variation", "polyx"],
["genome_resources", "variation", "encode_blacklist"],
["genome_resources", "rnaseq", "gene_bed"]],
[cwlout("sv_bin_rec", "record",
fields=[cwlout(["regions", "bins", "target"], ["File", "null"]),
cwlout(["regions", "bins", "antitarget"], ["File", "null"]),
cwlout(["regions", "bins", "gcannotated"], ["File", "null"]),
cwlout(["regions", "bins", "group"], ["string", "null"]),
cwlout("inherit")])],
"bcbio-vc", ["bedtools", "cnvkit"],
disk={"files": 1.5}, cores=1),
s("calculate_sv_coverage", "multi-parallel",
[["sv_bin_rec"]],
[cwlout("sv_rawcoverage_rec", "record",
fields=[cwlout(["depth", "bins", "target"], ["File", "null"]),
cwlout(["depth", "bins", "antitarget"], ["File", "null"]),
cwlout(["depth", "bins", "seq2c"], ["File", "null"]),
cwlout("inherit")])],
"bcbio-vc", ["mosdepth", "cnvkit", "seq2c"],
disk={"files": 1.5}),
s("normalize_sv_coverage", "multi-combined",
[["sv_rawcoverage_rec"]],
[cwlout("sv_coverage_rec", "record",
fields=[cwlout(["depth", "bins", "normalized"], ["File", "null"]),
cwlout(["depth", "bins", "background"], ["File", "null"]),
cwlout("inherit")])],
"bcbio-vc", ["cnvkit"],
disk={"files": 1.5}),
s("batch_for_sv", "multi-batch", sv_batch_inputs,
[cwlout("sv_batch_rec", "record")],
"bcbio-vc",
unlist=[["config", "algorithm", "svcaller"]]),
w("svcall", "multi-parallel", sv, []),
s("summarize_sv", "multi-combined",
[["sv_rec"]],
[cwlout(["sv", "calls"], {"type": "array", "items": ["File", "null"]}),
cwlout(["sv", "supplemental"], {"type": "array", "items": ["File"]}),
cwlout(["sv", "prioritize", "tsv"], {"type": "array", "items": ["File", "null"]}),
cwlout(["sv", "prioritize", "raw"], {"type": "array", "items": ["File", "null"]}),
cwlout(["svvalidate", "grading_summary"], ["File", "null"]),
cwlout(["svvalidate", "grading_plots"], {"type": "array", "items": ["File", "null"]})],
"bcbio-vc", ["bcbio-prioritize"], disk={"files": 1.0}, cores=1)]
final_outputs = [["sv", "calls"], ["svvalidate", "grading_summary"], ["sv", "prioritize", "tsv"],
["sv", "prioritize", "raw"], ["sv", "supplemental"]]
return steps, final_outputs | [
"def",
"_variant_sv",
"(",
"checkpoints",
")",
":",
"if",
"not",
"checkpoints",
".",
"get",
"(",
"\"sv\"",
")",
":",
"return",
"[",
"]",
",",
"[",
"]",
"sv",
"=",
"[",
"s",
"(",
"\"detect_sv\"",
",",
"\"batch-single\"",
",",
"[",
"[",
"\"sv_batch_rec\"",
"]",
"]",
",",
"[",
"cwlout",
"(",
"\"sv_rec\"",
",",
"\"record\"",
",",
"fields",
"=",
"[",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"variantcaller\"",
"]",
",",
"[",
"\"string\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"vrn_file\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
",",
"[",
"\".tbi\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"supplemental\"",
"]",
",",
"{",
"\"type\"",
":",
"\"array\"",
",",
"\"items\"",
":",
"[",
"\"File\"",
"]",
"}",
")",
",",
"cwlout",
"(",
"[",
"\"svvalidate\"",
",",
"\"summary\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"\"inherit\"",
",",
"exclude",
"=",
"[",
"[",
"\"align_bam\"",
"]",
",",
"[",
"\"work_bam_plus\"",
"]",
",",
"[",
"\"reference\"",
",",
"\"snpeff\"",
"]",
"]",
")",
"]",
")",
"]",
",",
"\"bcbio-vc\"",
",",
"[",
"\"bedtools\"",
",",
"\"cnvkit\"",
",",
"\"delly\"",
",",
"\"duphold\"",
",",
"\"extract-sv-reads\"",
",",
"\"gsort\"",
",",
"\"lumpy-sv;env=python2\"",
",",
"\"manta;env=python2\"",
",",
"\"break-point-inspector\"",
",",
"\"mosdepth\"",
",",
"\"samtools\"",
",",
"\"smoove;env=python2\"",
",",
"\"pysam>=0.13.0\"",
",",
"\"seq2c\"",
",",
"\"simple_sv_annotation;env=python2\"",
",",
"\"survivor\"",
",",
"\"svtools;env=python2\"",
",",
"\"svtyper;env=python2\"",
",",
"\"r=3.5.1\"",
",",
"\"r-base\"",
",",
"\"xorg-libxt\"",
",",
"\"vawk;env=python2\"",
"]",
",",
"disk",
"=",
"{",
"\"files\"",
":",
"2.0",
"}",
")",
"]",
"sv_batch_inputs",
"=",
"[",
"[",
"\"analysis\"",
"]",
",",
"[",
"\"genome_build\"",
"]",
",",
"[",
"\"work_bam_plus\"",
",",
"\"disc\"",
"]",
",",
"[",
"\"work_bam_plus\"",
",",
"\"sr\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"background\"",
",",
"\"cnv_reference\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"tools_on\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"tools_off\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"svprioritize\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"svvalidate\"",
"]",
",",
"[",
"\"regions\"",
",",
"\"sample_callable\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"variation\"",
",",
"\"gc_profile\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"variation\"",
",",
"\"germline_het_pon\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"aliases\"",
",",
"\"snpeff\"",
"]",
",",
"[",
"\"reference\"",
",",
"\"snpeff\"",
",",
"\"genome_build\"",
"]",
",",
"[",
"\"sv_coverage_rec\"",
"]",
"]",
"if",
"checkpoints",
".",
"get",
"(",
"\"vc\"",
")",
":",
"sv_batch_inputs",
".",
"append",
"(",
"[",
"\"variants\"",
",",
"\"samples\"",
"]",
")",
"steps",
"=",
"[",
"s",
"(",
"\"calculate_sv_bins\"",
",",
"\"multi-combined\"",
",",
"[",
"[",
"\"align_bam\"",
"]",
",",
"[",
"\"reference\"",
",",
"\"fasta\"",
",",
"\"base\"",
"]",
",",
"[",
"\"metadata\"",
",",
"\"batch\"",
"]",
",",
"[",
"\"metadata\"",
",",
"\"phenotype\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"background\"",
",",
"\"cnv_reference\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"callable_regions\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"coverage_interval\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"exclude_regions\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"sv_regions\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"variant_regions\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"variant_regions_merged\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"seq2c_bed_ready\"",
"]",
",",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"svcaller\"",
"]",
",",
"[",
"\"depth\"",
",",
"\"variant_regions\"",
",",
"\"regions\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"variation\"",
",",
"\"lcr\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"variation\"",
",",
"\"polyx\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"variation\"",
",",
"\"encode_blacklist\"",
"]",
",",
"[",
"\"genome_resources\"",
",",
"\"rnaseq\"",
",",
"\"gene_bed\"",
"]",
"]",
",",
"[",
"cwlout",
"(",
"\"sv_bin_rec\"",
",",
"\"record\"",
",",
"fields",
"=",
"[",
"cwlout",
"(",
"[",
"\"regions\"",
",",
"\"bins\"",
",",
"\"target\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"regions\"",
",",
"\"bins\"",
",",
"\"antitarget\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"regions\"",
",",
"\"bins\"",
",",
"\"gcannotated\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"regions\"",
",",
"\"bins\"",
",",
"\"group\"",
"]",
",",
"[",
"\"string\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"\"inherit\"",
")",
"]",
")",
"]",
",",
"\"bcbio-vc\"",
",",
"[",
"\"bedtools\"",
",",
"\"cnvkit\"",
"]",
",",
"disk",
"=",
"{",
"\"files\"",
":",
"1.5",
"}",
",",
"cores",
"=",
"1",
")",
",",
"s",
"(",
"\"calculate_sv_coverage\"",
",",
"\"multi-parallel\"",
",",
"[",
"[",
"\"sv_bin_rec\"",
"]",
"]",
",",
"[",
"cwlout",
"(",
"\"sv_rawcoverage_rec\"",
",",
"\"record\"",
",",
"fields",
"=",
"[",
"cwlout",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"target\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"antitarget\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"seq2c\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"\"inherit\"",
")",
"]",
")",
"]",
",",
"\"bcbio-vc\"",
",",
"[",
"\"mosdepth\"",
",",
"\"cnvkit\"",
",",
"\"seq2c\"",
"]",
",",
"disk",
"=",
"{",
"\"files\"",
":",
"1.5",
"}",
")",
",",
"s",
"(",
"\"normalize_sv_coverage\"",
",",
"\"multi-combined\"",
",",
"[",
"[",
"\"sv_rawcoverage_rec\"",
"]",
"]",
",",
"[",
"cwlout",
"(",
"\"sv_coverage_rec\"",
",",
"\"record\"",
",",
"fields",
"=",
"[",
"cwlout",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"normalized\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"background\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"\"inherit\"",
")",
"]",
")",
"]",
",",
"\"bcbio-vc\"",
",",
"[",
"\"cnvkit\"",
"]",
",",
"disk",
"=",
"{",
"\"files\"",
":",
"1.5",
"}",
")",
",",
"s",
"(",
"\"batch_for_sv\"",
",",
"\"multi-batch\"",
",",
"sv_batch_inputs",
",",
"[",
"cwlout",
"(",
"\"sv_batch_rec\"",
",",
"\"record\"",
")",
"]",
",",
"\"bcbio-vc\"",
",",
"unlist",
"=",
"[",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"svcaller\"",
"]",
"]",
")",
",",
"w",
"(",
"\"svcall\"",
",",
"\"multi-parallel\"",
",",
"sv",
",",
"[",
"]",
")",
",",
"s",
"(",
"\"summarize_sv\"",
",",
"\"multi-combined\"",
",",
"[",
"[",
"\"sv_rec\"",
"]",
"]",
",",
"[",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"calls\"",
"]",
",",
"{",
"\"type\"",
":",
"\"array\"",
",",
"\"items\"",
":",
"[",
"\"File\"",
",",
"\"null\"",
"]",
"}",
")",
",",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"supplemental\"",
"]",
",",
"{",
"\"type\"",
":",
"\"array\"",
",",
"\"items\"",
":",
"[",
"\"File\"",
"]",
"}",
")",
",",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"prioritize\"",
",",
"\"tsv\"",
"]",
",",
"{",
"\"type\"",
":",
"\"array\"",
",",
"\"items\"",
":",
"[",
"\"File\"",
",",
"\"null\"",
"]",
"}",
")",
",",
"cwlout",
"(",
"[",
"\"sv\"",
",",
"\"prioritize\"",
",",
"\"raw\"",
"]",
",",
"{",
"\"type\"",
":",
"\"array\"",
",",
"\"items\"",
":",
"[",
"\"File\"",
",",
"\"null\"",
"]",
"}",
")",
",",
"cwlout",
"(",
"[",
"\"svvalidate\"",
",",
"\"grading_summary\"",
"]",
",",
"[",
"\"File\"",
",",
"\"null\"",
"]",
")",
",",
"cwlout",
"(",
"[",
"\"svvalidate\"",
",",
"\"grading_plots\"",
"]",
",",
"{",
"\"type\"",
":",
"\"array\"",
",",
"\"items\"",
":",
"[",
"\"File\"",
",",
"\"null\"",
"]",
"}",
")",
"]",
",",
"\"bcbio-vc\"",
",",
"[",
"\"bcbio-prioritize\"",
"]",
",",
"disk",
"=",
"{",
"\"files\"",
":",
"1.0",
"}",
",",
"cores",
"=",
"1",
")",
"]",
"final_outputs",
"=",
"[",
"[",
"\"sv\"",
",",
"\"calls\"",
"]",
",",
"[",
"\"svvalidate\"",
",",
"\"grading_summary\"",
"]",
",",
"[",
"\"sv\"",
",",
"\"prioritize\"",
",",
"\"tsv\"",
"]",
",",
"[",
"\"sv\"",
",",
"\"prioritize\"",
",",
"\"raw\"",
"]",
",",
"[",
"\"sv\"",
",",
"\"supplemental\"",
"]",
"]",
"return",
"steps",
",",
"final_outputs"
] | Structural variant workflow. | [
"Structural",
"variant",
"workflow",
"."
] | python | train |
cltk/cltk | cltk/lemmatize/french/lemma.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/lemmatize/french/lemma.py#L46-L74 | def lemmatize(self, tokens):
"""define list of lemmas"""
entries = self.entries
forms_and_lemmas = self.forms_and_lemmas
lemma_list = [x[0] for x in entries]
"""Provide a lemma for each token"""
lemmatized = []
for token in tokens:
"""check for a match between token and list of lemmas"""
if token in lemma_list:
lemmed = (token, token)
lemmatized.append(lemmed)
else:
"""if no match check for a match between token and list of lemma forms"""
lemma = [k for k, v in forms_and_lemmas.items() if token in v]
if lemma != []:
lemmed = (token, lemma)
lemmatized.append(lemmed)
elif lemma == []:
"""if no match apply regular expressions and check for a match against the list of lemmas again"""
regexed = regex(token)
if regexed in lemma_list:
lemmed = (token, regexed)
lemmatized.append(lemmed)
else:
lemmed = (token, "None")
lemmatized.append(lemmed)
return lemmatized | [
"def",
"lemmatize",
"(",
"self",
",",
"tokens",
")",
":",
"entries",
"=",
"self",
".",
"entries",
"forms_and_lemmas",
"=",
"self",
".",
"forms_and_lemmas",
"lemma_list",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"entries",
"]",
"\"\"\"Provide a lemma for each token\"\"\"",
"lemmatized",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"\"\"\"check for a match between token and list of lemmas\"\"\"",
"if",
"token",
"in",
"lemma_list",
":",
"lemmed",
"=",
"(",
"token",
",",
"token",
")",
"lemmatized",
".",
"append",
"(",
"lemmed",
")",
"else",
":",
"\"\"\"if no match check for a match between token and list of lemma forms\"\"\"",
"lemma",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"forms_and_lemmas",
".",
"items",
"(",
")",
"if",
"token",
"in",
"v",
"]",
"if",
"lemma",
"!=",
"[",
"]",
":",
"lemmed",
"=",
"(",
"token",
",",
"lemma",
")",
"lemmatized",
".",
"append",
"(",
"lemmed",
")",
"elif",
"lemma",
"==",
"[",
"]",
":",
"\"\"\"if no match apply regular expressions and check for a match against the list of lemmas again\"\"\"",
"regexed",
"=",
"regex",
"(",
"token",
")",
"if",
"regexed",
"in",
"lemma_list",
":",
"lemmed",
"=",
"(",
"token",
",",
"regexed",
")",
"lemmatized",
".",
"append",
"(",
"lemmed",
")",
"else",
":",
"lemmed",
"=",
"(",
"token",
",",
"\"None\"",
")",
"lemmatized",
".",
"append",
"(",
"lemmed",
")",
"return",
"lemmatized"
] | define list of lemmas | [
"define",
"list",
"of",
"lemmas"
] | python | train |
ladybug-tools/ladybug | ladybug/datacollection.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L903-L916 | def _check_values(self, values):
"""Check values whenever they come through the values setter."""
assert isinstance(values, Iterable) and not isinstance(
values, (str, dict, bytes, bytearray)), \
'values should be a list or tuple. Got {}'.format(type(values))
if self.header.analysis_period.is_annual:
a_period_len = 8760 * self.header.analysis_period.timestep
if self.header.analysis_period.is_leap_year is True:
a_period_len = a_period_len + 24 * self.header.analysis_period.timestep
else:
a_period_len = len(self.header.analysis_period.moys)
assert len(values) == a_period_len, \
'Length of values does not match that expected by the '\
'header analysis_period. {} != {}'.format(len(values), a_period_len) | [
"def",
"_check_values",
"(",
"self",
",",
"values",
")",
":",
"assert",
"isinstance",
"(",
"values",
",",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"values",
",",
"(",
"str",
",",
"dict",
",",
"bytes",
",",
"bytearray",
")",
")",
",",
"'values should be a list or tuple. Got {}'",
".",
"format",
"(",
"type",
"(",
"values",
")",
")",
"if",
"self",
".",
"header",
".",
"analysis_period",
".",
"is_annual",
":",
"a_period_len",
"=",
"8760",
"*",
"self",
".",
"header",
".",
"analysis_period",
".",
"timestep",
"if",
"self",
".",
"header",
".",
"analysis_period",
".",
"is_leap_year",
"is",
"True",
":",
"a_period_len",
"=",
"a_period_len",
"+",
"24",
"*",
"self",
".",
"header",
".",
"analysis_period",
".",
"timestep",
"else",
":",
"a_period_len",
"=",
"len",
"(",
"self",
".",
"header",
".",
"analysis_period",
".",
"moys",
")",
"assert",
"len",
"(",
"values",
")",
"==",
"a_period_len",
",",
"'Length of values does not match that expected by the '",
"'header analysis_period. {} != {}'",
".",
"format",
"(",
"len",
"(",
"values",
")",
",",
"a_period_len",
")"
] | Check values whenever they come through the values setter. | [
"Check",
"values",
"whenever",
"they",
"come",
"through",
"the",
"values",
"setter",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/opennebula.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L4586-L4605 | def _xml_to_dict(xml):
'''
Helper function to convert xml into a data dictionary.
xml
The xml data to convert.
'''
dicts = {}
for item in xml:
key = item.tag.lower()
idx = 1
while key in dicts:
key += six.text_type(idx)
idx += 1
if item.text is None:
dicts[key] = _xml_to_dict(item)
else:
dicts[key] = item.text
return dicts | [
"def",
"_xml_to_dict",
"(",
"xml",
")",
":",
"dicts",
"=",
"{",
"}",
"for",
"item",
"in",
"xml",
":",
"key",
"=",
"item",
".",
"tag",
".",
"lower",
"(",
")",
"idx",
"=",
"1",
"while",
"key",
"in",
"dicts",
":",
"key",
"+=",
"six",
".",
"text_type",
"(",
"idx",
")",
"idx",
"+=",
"1",
"if",
"item",
".",
"text",
"is",
"None",
":",
"dicts",
"[",
"key",
"]",
"=",
"_xml_to_dict",
"(",
"item",
")",
"else",
":",
"dicts",
"[",
"key",
"]",
"=",
"item",
".",
"text",
"return",
"dicts"
] | Helper function to convert xml into a data dictionary.
xml
The xml data to convert. | [
"Helper",
"function",
"to",
"covert",
"xml",
"into",
"a",
"data",
"dictionary",
"."
] | python | train |
openstack/horizon | horizon/tables/views.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/views.py#L175-L188 | def update_server_filter_action(self, request, table=None):
"""Update the table server side filter action.
It is done based on the current filter. The filter info may be stored
in the session and this will restore it.
"""
if not table:
table = self.get_table()
filter_info = self.get_server_filter_info(request, table)
if filter_info is not None:
action = filter_info['action']
setattr(action, 'filter_string', filter_info['value'])
if filter_info['field_param']:
setattr(action, 'filter_field', filter_info['field']) | [
"def",
"update_server_filter_action",
"(",
"self",
",",
"request",
",",
"table",
"=",
"None",
")",
":",
"if",
"not",
"table",
":",
"table",
"=",
"self",
".",
"get_table",
"(",
")",
"filter_info",
"=",
"self",
".",
"get_server_filter_info",
"(",
"request",
",",
"table",
")",
"if",
"filter_info",
"is",
"not",
"None",
":",
"action",
"=",
"filter_info",
"[",
"'action'",
"]",
"setattr",
"(",
"action",
",",
"'filter_string'",
",",
"filter_info",
"[",
"'value'",
"]",
")",
"if",
"filter_info",
"[",
"'field_param'",
"]",
":",
"setattr",
"(",
"action",
",",
"'filter_field'",
",",
"filter_info",
"[",
"'field'",
"]",
")"
] | Update the table server side filter action.
It is done based on the current filter. The filter info may be stored
in the session and this will restore it. | [
"Update",
"the",
"table",
"server",
"side",
"filter",
"action",
"."
] | python | train |
grahame/dividebatur | dividebatur/counter.py | https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L301-L311 | def get_initial_totals(self):
"determine the initial total for each candidate. only call this at the start of round 1"
candidate_votes = {}
# initialise to zero for every individual candidate
for candidate_id in self.candidate_ids:
candidate_votes[candidate_id] = 0
for candidate_id in self.candidate_ids:
candidate_votes[candidate_id] = self.candidate_bundle_transactions.get_paper_count(candidate_id)
for candidate_id in candidate_votes:
candidate_votes[candidate_id] = int(candidate_votes[candidate_id])
return candidate_votes, 0, 0 | [
"def",
"get_initial_totals",
"(",
"self",
")",
":",
"candidate_votes",
"=",
"{",
"}",
"# initialise to zero for every individual candidate",
"for",
"candidate_id",
"in",
"self",
".",
"candidate_ids",
":",
"candidate_votes",
"[",
"candidate_id",
"]",
"=",
"0",
"for",
"candidate_id",
"in",
"self",
".",
"candidate_ids",
":",
"candidate_votes",
"[",
"candidate_id",
"]",
"=",
"self",
".",
"candidate_bundle_transactions",
".",
"get_paper_count",
"(",
"candidate_id",
")",
"for",
"candidate_id",
"in",
"candidate_votes",
":",
"candidate_votes",
"[",
"candidate_id",
"]",
"=",
"int",
"(",
"candidate_votes",
"[",
"candidate_id",
"]",
")",
"return",
"candidate_votes",
",",
"0",
",",
"0"
] | determine the initial total for each candidate. only call this at the start of round 1 | [
"determine",
"the",
"initial",
"total",
"for",
"each",
"candidate",
".",
"only",
"call",
"this",
"at",
"the",
"start",
"of",
"round",
"1"
] | python | train |
kennethreitz/requests-html | requests_html.py | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L541-L610 | def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
"""Reloads the response in Chromium, and replaces HTML content
with an updated version, with JavaScript executed.
:param retries: The number of times to retry loading the page in Chromium.
:param script: JavaScript to execute upon page load (optional).
:param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
:param scrolldown: Integer, if provided, of how many times to page down.
:param sleep: Integer, if provided, of how long to sleep after initial render.
:param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
:param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
If ``scrolldown`` is specified, the page will scrolldown the specified
number of times, after sleeping the specified amount of time
(e.g. ``scrolldown=10, sleep=1``).
If just ``sleep`` is provided, the rendering will wait *n* seconds, before
returning.
If ``script`` is specified, it will execute the provided JavaScript at
runtime. Example:
.. code-block:: python
script = \"\"\"
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}
\"\"\"
Returns the return value of the executed ``script``, if any is provided:
.. code-block:: python
>>> r.html.render(script=script)
{'width': 800, 'height': 600, 'deviceScaleFactor': 1}
Warning: the first time you run this method, it will download
Chromium into your home directory (``~/.pyppeteer``).
"""
self.browser = self.session.browser # Automatically create a event loop and browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for i in range(retries):
if not content:
try:
content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page))
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result | [
"def",
"render",
"(",
"self",
",",
"retries",
":",
"int",
"=",
"8",
",",
"script",
":",
"str",
"=",
"None",
",",
"wait",
":",
"float",
"=",
"0.2",
",",
"scrolldown",
"=",
"False",
",",
"sleep",
":",
"int",
"=",
"0",
",",
"reload",
":",
"bool",
"=",
"True",
",",
"timeout",
":",
"Union",
"[",
"float",
",",
"int",
"]",
"=",
"8.0",
",",
"keep_page",
":",
"bool",
"=",
"False",
")",
":",
"self",
".",
"browser",
"=",
"self",
".",
"session",
".",
"browser",
"# Automatically create a event loop and browser",
"content",
"=",
"None",
"# Automatically set Reload to False, if example URL is being used.",
"if",
"self",
".",
"url",
"==",
"DEFAULT_URL",
":",
"reload",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"retries",
")",
":",
"if",
"not",
"content",
":",
"try",
":",
"content",
",",
"result",
",",
"page",
"=",
"self",
".",
"session",
".",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"_async_render",
"(",
"url",
"=",
"self",
".",
"url",
",",
"script",
"=",
"script",
",",
"sleep",
"=",
"sleep",
",",
"wait",
"=",
"wait",
",",
"content",
"=",
"self",
".",
"html",
",",
"reload",
"=",
"reload",
",",
"scrolldown",
"=",
"scrolldown",
",",
"timeout",
"=",
"timeout",
",",
"keep_page",
"=",
"keep_page",
")",
")",
"except",
"TypeError",
":",
"pass",
"else",
":",
"break",
"if",
"not",
"content",
":",
"raise",
"MaxRetries",
"(",
"\"Unable to render the page. Try increasing timeout\"",
")",
"html",
"=",
"HTML",
"(",
"url",
"=",
"self",
".",
"url",
",",
"html",
"=",
"content",
".",
"encode",
"(",
"DEFAULT_ENCODING",
")",
",",
"default_encoding",
"=",
"DEFAULT_ENCODING",
")",
"self",
".",
"__dict__",
".",
"update",
"(",
"html",
".",
"__dict__",
")",
"self",
".",
"page",
"=",
"page",
"return",
"result"
] | Reloads the response in Chromium, and replaces HTML content
with an updated version, with JavaScript executed.
:param retries: The number of times to retry loading the page in Chromium.
:param script: JavaScript to execute upon page load (optional).
:param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
:param scrolldown: Integer, if provided, of how many times to page down.
:param sleep: Integer, if provided, of how long to sleep after initial render.
:param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
:param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
If ``scrolldown`` is specified, the page will scrolldown the specified
number of times, after sleeping the specified amount of time
(e.g. ``scrolldown=10, sleep=1``).
If just ``sleep`` is provided, the rendering will wait *n* seconds, before
returning.
If ``script`` is specified, it will execute the provided JavaScript at
runtime. Example:
.. code-block:: python
script = \"\"\"
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}
\"\"\"
Returns the return value of the executed ``script``, if any is provided:
.. code-block:: python
>>> r.html.render(script=script)
{'width': 800, 'height': 600, 'deviceScaleFactor': 1}
Warning: the first time you run this method, it will download
Chromium into your home directory (``~/.pyppeteer``). | [
"Reloads",
"the",
"response",
"in",
"Chromium",
"and",
"replaces",
"HTML",
"content",
"with",
"an",
"updated",
"version",
"with",
"JavaScript",
"executed",
"."
] | python | train |
phoebe-project/phoebe2 | phoebe/backend/universe.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L2874-L2894 | def pointing_vector(self, s, time):
"""
s is the spin vector in roche coordinates
time is the current time
"""
t = time - self._t0
longitude = self._longitude + self._dlongdt * t
# define the basis vectors in the spin (primed) coordinates in terms of
# the Roche coordinates.
# ez' = s
# ex' = (ex - s(s.ex)) /|i - s(s.ex)|
# ey' = s x ex'
ex = np.array([1., 0., 0.])
ezp = s
exp = (ex - s*np.dot(s,ex))
eyp = np.cross(s, exp)
return np.sin(self._colat)*np.cos(longitude)*exp +\
np.sin(self._colat)*np.sin(longitude)*eyp +\
np.cos(self._colat)*ezp | [
"def",
"pointing_vector",
"(",
"self",
",",
"s",
",",
"time",
")",
":",
"t",
"=",
"time",
"-",
"self",
".",
"_t0",
"longitude",
"=",
"self",
".",
"_longitude",
"+",
"self",
".",
"_dlongdt",
"*",
"t",
"# define the basis vectors in the spin (primed) coordinates in terms of",
"# the Roche coordinates.",
"# ez' = s",
"# ex' = (ex - s(s.ex)) /|i - s(s.ex)|",
"# ey' = s x ex'",
"ex",
"=",
"np",
".",
"array",
"(",
"[",
"1.",
",",
"0.",
",",
"0.",
"]",
")",
"ezp",
"=",
"s",
"exp",
"=",
"(",
"ex",
"-",
"s",
"*",
"np",
".",
"dot",
"(",
"s",
",",
"ex",
")",
")",
"eyp",
"=",
"np",
".",
"cross",
"(",
"s",
",",
"exp",
")",
"return",
"np",
".",
"sin",
"(",
"self",
".",
"_colat",
")",
"*",
"np",
".",
"cos",
"(",
"longitude",
")",
"*",
"exp",
"+",
"np",
".",
"sin",
"(",
"self",
".",
"_colat",
")",
"*",
"np",
".",
"sin",
"(",
"longitude",
")",
"*",
"eyp",
"+",
"np",
".",
"cos",
"(",
"self",
".",
"_colat",
")",
"*",
"ezp"
] | s is the spin vector in roche coordinates
time is the current time | [
"s",
"is",
"the",
"spin",
"vector",
"in",
"roche",
"coordinates",
"time",
"is",
"the",
"current",
"time"
] | python | train |
sporteasy/python-poeditor | poeditor/client.py | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L232-L240 | def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project']) | [
"def",
"view_project_details",
"(",
"self",
",",
"project_id",
")",
":",
"data",
"=",
"self",
".",
"_run",
"(",
"url_path",
"=",
"\"projects/view\"",
",",
"id",
"=",
"project_id",
")",
"return",
"self",
".",
"_project_formatter",
"(",
"data",
"[",
"'result'",
"]",
"[",
"'project'",
"]",
")"
] | Returns project's details. | [
"Returns",
"project",
"s",
"details",
"."
] | python | train |
lins05/slackbot | slackbot/dispatcher.py | https://github.com/lins05/slackbot/blob/7195d46b9e1dc4ecfae0bdcaa91461202689bfe5/slackbot/dispatcher.py#L173-L185 | def unicode_compact(func):
"""
Make sure the first parameter of the decorated method is a unicode
object.
"""
@wraps(func)
def wrapped(self, text, *a, **kw):
if not isinstance(text, six.text_type):
text = text.decode('utf-8')
return func(self, text, *a, **kw)
return wrapped | [
"def",
"unicode_compact",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"self",
",",
"text",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"isinstance",
"(",
"text",
",",
"six",
".",
"text_type",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"func",
"(",
"self",
",",
"text",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"return",
"wrapped"
] | Make sure the first parameter of the decorated method is a unicode
object. | [
"Make",
"sure",
"the",
"first",
"parameter",
"of",
"the",
"decorated",
"method",
"to",
"be",
"a",
"unicode",
"object",
"."
] | python | train |
indico/indico-plugins | piwik/indico_piwik/queries/graphs.py | https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/piwik/indico_piwik/queries/graphs.py#L36-L48 | def get_result(self):
"""Perform the call and return the graph data
:return: Encoded PNG graph data string to be inserted in a `src`
attribute of an HTML img tag.
"""
png = self.call()
if png is None:
return
if png.startswith('GD extension must be loaded'):
current_plugin.logger.warning('Piwik server answered on ImageGraph.get: %s', png)
return
return 'data:image/png;base64,{}'.format(b64encode(png)) | [
"def",
"get_result",
"(",
"self",
")",
":",
"png",
"=",
"self",
".",
"call",
"(",
")",
"if",
"png",
"is",
"None",
":",
"return",
"if",
"png",
".",
"startswith",
"(",
"'GD extension must be loaded'",
")",
":",
"current_plugin",
".",
"logger",
".",
"warning",
"(",
"'Piwik server answered on ImageGraph.get: %s'",
",",
"png",
")",
"return",
"return",
"'data:image/png;base64,{}'",
".",
"format",
"(",
"b64encode",
"(",
"png",
")",
")"
] | Perform the call and return the graph data
:return: Encoded PNG graph data string to be inserted in a `src`
attribute of an HTML img tag. | [
"Perform",
"the",
"call",
"and",
"return",
"the",
"graph",
"data"
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/models/research/moe_experiments.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L64-L79 | def xmoe_tr_1d():
"""Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
"""
hparams = xmoe_tr_dense_2k()
hparams.encoder_layers = ["self_att", "moe_1d"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4
hparams.layout = "batch:batch;experts:batch"
hparams.moe_hidden_size = 2048
hparams.moe_num_experts = 16
return hparams | [
"def",
"xmoe_tr_1d",
"(",
")",
":",
"hparams",
"=",
"xmoe_tr_dense_2k",
"(",
")",
"hparams",
".",
"encoder_layers",
"=",
"[",
"\"self_att\"",
",",
"\"moe_1d\"",
"]",
"*",
"4",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"self_att\"",
",",
"\"enc_att\"",
",",
"\"moe_1d\"",
"]",
"*",
"4",
"hparams",
".",
"layout",
"=",
"\"batch:batch;experts:batch\"",
"hparams",
".",
"moe_hidden_size",
"=",
"2048",
"hparams",
".",
"moe_num_experts",
"=",
"16",
"return",
"hparams"
] | Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams | [
"Mixture",
"of",
"experts",
"(",
"16",
"experts",
")",
"."
] | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L298-L324 | def _inject_constructor(self, cls, func, name, resolution_level, keep,
trace):
"""
Modifying Methods in Place - after the recipe 15.7 in the Python
Cookbook by Ken Seehof. The original constructors may be restored
later.
"""
try:
constructor = cls.__init__
except AttributeError:
def constructor(self, *_args, **_kwargs):
pass
# Possible name clash between keyword arguments of the tracked class'
# constructor and the curried arguments of the injected constructor.
# Therefore, the additional argument has a 'magic' name to make it less
# likely that an argument name clash occurs.
self._observers[cls] = _ClassObserver(constructor,
name,
resolution_level,
keep,
trace)
cls.__init__ = instancemethod(
lambda *args, **kwds: func(self._observers[cls], *args, **kwds),
None,
cls
) | [
"def",
"_inject_constructor",
"(",
"self",
",",
"cls",
",",
"func",
",",
"name",
",",
"resolution_level",
",",
"keep",
",",
"trace",
")",
":",
"try",
":",
"constructor",
"=",
"cls",
".",
"__init__",
"except",
"AttributeError",
":",
"def",
"constructor",
"(",
"self",
",",
"*",
"_args",
",",
"*",
"*",
"_kwargs",
")",
":",
"pass",
"# Possible name clash between keyword arguments of the tracked class'",
"# constructor and the curried arguments of the injected constructor.",
"# Therefore, the additional argument has a 'magic' name to make it less",
"# likely that an argument name clash occurs.",
"self",
".",
"_observers",
"[",
"cls",
"]",
"=",
"_ClassObserver",
"(",
"constructor",
",",
"name",
",",
"resolution_level",
",",
"keep",
",",
"trace",
")",
"cls",
".",
"__init__",
"=",
"instancemethod",
"(",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwds",
":",
"func",
"(",
"self",
".",
"_observers",
"[",
"cls",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
",",
"None",
",",
"cls",
")"
] | Modifying Methods in Place - after the recipe 15.7 in the Python
Cookbook by Ken Seehof. The original constructors may be restored
later. | [
"Modifying",
"Methods",
"in",
"Place",
"-",
"after",
"the",
"recipe",
"15",
".",
"7",
"in",
"the",
"Python",
"Cookbook",
"by",
"Ken",
"Seehof",
".",
"The",
"original",
"constructors",
"may",
"be",
"restored",
"later",
"."
] | python | train |
shoebot/shoebot | lib/web/wikipedia.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/wikipedia.py#L775-L792 | def parse_gallery_images(self, markup):
""" Parses images from the <gallery></gallery> section.
Images inside <gallery> tags do not have outer "[[" brackets.
Add these and then parse again.
"""
gallery = re.search(self.re["gallery"], markup)
if gallery:
gallery = gallery.group(1)
gallery = gallery.replace("Image:", "[[Image:")
gallery = gallery.replace("\n", "]]\n")
images, markup = self.parse_images(gallery)
return images
return [] | [
"def",
"parse_gallery_images",
"(",
"self",
",",
"markup",
")",
":",
"gallery",
"=",
"re",
".",
"search",
"(",
"self",
".",
"re",
"[",
"\"gallery\"",
"]",
",",
"markup",
")",
"if",
"gallery",
":",
"gallery",
"=",
"gallery",
".",
"group",
"(",
"1",
")",
"gallery",
"=",
"gallery",
".",
"replace",
"(",
"\"Image:\"",
",",
"\"[[Image:\"",
")",
"gallery",
"=",
"gallery",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"]]\\n\"",
")",
"images",
",",
"markup",
"=",
"self",
".",
"parse_images",
"(",
"gallery",
")",
"return",
"images",
"return",
"[",
"]"
] | Parses images from the <gallery></gallery> section.
Images inside <gallery> tags do not have outer "[[" brackets.
Add these and then parse again. | [
"Parses",
"images",
"from",
"the",
"<gallery",
">",
"<",
"/",
"gallery",
">",
"section",
".",
"Images",
"inside",
"<gallery",
">",
"tags",
"do",
"not",
"have",
"outer",
"[[",
"brackets",
".",
"Add",
"these",
"and",
"then",
"parse",
"again",
"."
] | python | valid |
ff0000/scarlet | scarlet/cms/actions.py | https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/actions.py#L421-L428 | def get_object_url(self):
"""
Returns the url to link to the object
The get_view_url will be called on the current bundle using
'edit' as the view name.
"""
return self.bundle.get_view_url('edit',
self.request.user, {}, self.kwargs) | [
"def",
"get_object_url",
"(",
"self",
")",
":",
"return",
"self",
".",
"bundle",
".",
"get_view_url",
"(",
"'edit'",
",",
"self",
".",
"request",
".",
"user",
",",
"{",
"}",
",",
"self",
".",
"kwargs",
")"
] | Returns the url to link to the object
The get_view_url will be called on the current bundle using
'edit' as the view name. | [
"Returns",
"the",
"url",
"to",
"link",
"to",
"the",
"object",
"The",
"get_view_url",
"will",
"be",
"called",
"on",
"the",
"current",
"bundle",
"using",
"edit",
"as",
"the",
"view",
"name",
"."
] | python | train |
cds-astro/mocpy | mocpy/tmoc/tmoc.py | https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/tmoc/tmoc.py#L288-L300 | def max_time(self):
"""
Get the `~astropy.time.Time` time of the tmoc last observation
Returns
-------
max_time : `~astropy.time.Time`
time of the last observation
"""
max_time = Time(self._interval_set.max / TimeMOC.DAY_MICRO_SEC, format='jd', scale='tdb')
return max_time | [
"def",
"max_time",
"(",
"self",
")",
":",
"max_time",
"=",
"Time",
"(",
"self",
".",
"_interval_set",
".",
"max",
"/",
"TimeMOC",
".",
"DAY_MICRO_SEC",
",",
"format",
"=",
"'jd'",
",",
"scale",
"=",
"'tdb'",
")",
"return",
"max_time"
] | Get the `~astropy.time.Time` time of the tmoc last observation
Returns
-------
max_time : `~astropy.time.Time`
time of the last observation | [
"Get",
"the",
"~astropy",
".",
"time",
".",
"Time",
"time",
"of",
"the",
"tmoc",
"last",
"observation"
] | python | train |
src-d/jgit-spark-connector | python/sourced/engine/engine.py | https://github.com/src-d/jgit-spark-connector/blob/79d05a0bcf0da435685d6118828a8884e2fe4b94/python/sourced/engine/engine.py#L549-L559 | def classify_languages(self):
"""
Returns a new DataFrame with the language data of any blob added to
its row.
>>> blobs_lang_df = blobs_df.classify_languages
:rtype: BlobsWithLanguageDataFrame
"""
return BlobsWithLanguageDataFrame(self._engine_dataframe.classifyLanguages(),
self._session, self._implicits) | [
"def",
"classify_languages",
"(",
"self",
")",
":",
"return",
"BlobsWithLanguageDataFrame",
"(",
"self",
".",
"_engine_dataframe",
".",
"classifyLanguages",
"(",
")",
",",
"self",
".",
"_session",
",",
"self",
".",
"_implicits",
")"
] | Returns a new DataFrame with the language data of any blob added to
its row.
>>> blobs_lang_df = blobs_df.classify_languages
:rtype: BlobsWithLanguageDataFrame | [
"Returns",
"a",
"new",
"DataFrame",
"with",
"the",
"language",
"data",
"of",
"any",
"blob",
"added",
"to",
"its",
"row",
"."
] | python | train |
JoeVirtual/KonFoo | konfoo/core.py | https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1220-L1235 | def container_size(self):
""" Returns the accumulated bit size of all fields in the `Sequence` as
a tuple in the form of ``(number of bytes, remaining number of bits)``.
"""
length = 0
for name, item in enumerate(self):
# Container
if is_container(item):
byte_length, bit_length = item.container_size()
length += bit_length + byte_length * 8
# Field
elif is_field(item):
length += item.bit_size
else:
raise MemberTypeError(self, item, name)
return divmod(length, 8) | [
"def",
"container_size",
"(",
"self",
")",
":",
"length",
"=",
"0",
"for",
"name",
",",
"item",
"in",
"enumerate",
"(",
"self",
")",
":",
"# Container",
"if",
"is_container",
"(",
"item",
")",
":",
"byte_length",
",",
"bit_length",
"=",
"item",
".",
"container_size",
"(",
")",
"length",
"+=",
"bit_length",
"+",
"byte_length",
"*",
"8",
"# Field",
"elif",
"is_field",
"(",
"item",
")",
":",
"length",
"+=",
"item",
".",
"bit_size",
"else",
":",
"raise",
"MemberTypeError",
"(",
"self",
",",
"item",
",",
"name",
")",
"return",
"divmod",
"(",
"length",
",",
"8",
")"
] | Returns the accumulated bit size of all fields in the `Sequence` as
a tuple in the form of ``(number of bytes, remaining number of bits)``. | [
"Returns",
"the",
"accumulated",
"bit",
"size",
"of",
"all",
"fields",
"in",
"the",
"Sequence",
"as",
"a",
"tuple",
"in",
"the",
"form",
"of",
"(",
"number",
"of",
"bytes",
"remaining",
"number",
"of",
"bits",
")",
"."
] | python | train |
iotile/coretools | transport_plugins/websocket/iotile_transport_websocket/generic/async_server.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/websocket/iotile_transport_websocket/generic/async_server.py#L191-L209 | async def send_event(self, con, name, payload):
"""Send an event to a client connection.
This method will push an event message to the client with the given
name and payload. You need to have access to the ``connection``
object for the client, which is only available once the client has
connected and passed to self.prepare_conn(connection).
Args:
con (websockets.Connection): The connection to use to send
the event.
name (str): The name of the event to send.
payload (object): The msgpack-serializable object to send
as the event's payload.
"""
message = dict(type="event", name=name, payload=payload)
encoded = pack(message)
await con.send(encoded) | [
"async",
"def",
"send_event",
"(",
"self",
",",
"con",
",",
"name",
",",
"payload",
")",
":",
"message",
"=",
"dict",
"(",
"type",
"=",
"\"event\"",
",",
"name",
"=",
"name",
",",
"payload",
"=",
"payload",
")",
"encoded",
"=",
"pack",
"(",
"message",
")",
"await",
"con",
".",
"send",
"(",
"encoded",
")"
] | Send an event to a client connection.
This method will push an event message to the client with the given
name and payload. You need to have access to the ``connection``
object for the client, which is only available once the client has
connected and passed to self.prepare_conn(connection).
Args:
con (websockets.Connection): The connection to use to send
the event.
name (str): The name of the event to send.
payload (object): The msgpack-serializable object to send
as the event's payload. | [
"Send",
"an",
"event",
"to",
"a",
"client",
"connection",
"."
] | python | train |
BerkeleyAutomation/perception | perception/cnn.py | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/cnn.py#L95-L129 | def _load(self):
""" Loads a model into weights """
if self._model_filename is None:
raise ValueError('Model filename not specified')
# read the input image
self._graph = tf.Graph()
with self._graph.as_default():
# read in filenames
reader = tf.train.NewCheckpointReader(self._model_filename)
# load AlexNet weights
weights = AlexNetWeights()
weights.conv1W = tf.Variable(reader.get_tensor("Variable"))
weights.conv1b = tf.Variable(reader.get_tensor("Variable_1"))
weights.conv2W = tf.Variable(reader.get_tensor("Variable_2"))
weights.conv2b = tf.Variable(reader.get_tensor("Variable_3"))
weights.conv3W = tf.Variable(reader.get_tensor("Variable_4"))
weights.conv3b = tf.Variable(reader.get_tensor("Variable_5"))
weights.conv4W = tf.Variable(reader.get_tensor("Variable_6"))
weights.conv4b = tf.Variable(reader.get_tensor("Variable_7"))
weights.conv5W = tf.Variable(reader.get_tensor("Variable_8"))
weights.conv5b = tf.Variable(reader.get_tensor("Variable_9"))
weights.fc6W = tf.Variable(reader.get_tensor("Variable_10"))
weights.fc6b = tf.Variable(reader.get_tensor("Variable_11"))
weights.fc7W = tf.Variable(reader.get_tensor("Variable_12"))
weights.fc7b = tf.Variable(reader.get_tensor("Variable_13"))
weights.fc8W = tf.Variable(reader.get_tensor("Variable_14"))
weights.fc8b = tf.Variable(reader.get_tensor("Variable_15"))
# form network
self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))
self._output_tensor = self.build_alexnet(weights)
self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer)
self._initialized = True | [
"def",
"_load",
"(",
"self",
")",
":",
"if",
"self",
".",
"_model_filename",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Model filename not specified'",
")",
"# read the input image",
"self",
".",
"_graph",
"=",
"tf",
".",
"Graph",
"(",
")",
"with",
"self",
".",
"_graph",
".",
"as_default",
"(",
")",
":",
"# read in filenames",
"reader",
"=",
"tf",
".",
"train",
".",
"NewCheckpointReader",
"(",
"self",
".",
"_model_filename",
")",
"# load AlexNet weights",
"weights",
"=",
"AlexNetWeights",
"(",
")",
"weights",
".",
"conv1W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable\"",
")",
")",
"weights",
".",
"conv1b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_1\"",
")",
")",
"weights",
".",
"conv2W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_2\"",
")",
")",
"weights",
".",
"conv2b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_3\"",
")",
")",
"weights",
".",
"conv3W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_4\"",
")",
")",
"weights",
".",
"conv3b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_5\"",
")",
")",
"weights",
".",
"conv4W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_6\"",
")",
")",
"weights",
".",
"conv4b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_7\"",
")",
")",
"weights",
".",
"conv5W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_8\"",
")",
")",
"weights",
".",
"conv5b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_9\"",
")",
")",
"weights",
".",
"fc6W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_10\"",
")",
")",
"weights",
".",
"fc6b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_11\"",
")",
")",
"weights",
".",
"fc7W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_12\"",
")",
")",
"weights",
".",
"fc7b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_13\"",
")",
")",
"weights",
".",
"fc8W",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_14\"",
")",
")",
"weights",
".",
"fc8b",
"=",
"tf",
".",
"Variable",
"(",
"reader",
".",
"get_tensor",
"(",
"\"Variable_15\"",
")",
")",
"# form network",
"self",
".",
"_input_node",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"(",
"self",
".",
"_batch_size",
",",
"self",
".",
"_im_height",
",",
"self",
".",
"_im_width",
",",
"self",
".",
"_num_channels",
")",
")",
"self",
".",
"_output_tensor",
"=",
"self",
".",
"build_alexnet",
"(",
"weights",
")",
"self",
".",
"_feature_tensor",
"=",
"self",
".",
"build_alexnet",
"(",
"weights",
",",
"output_layer",
"=",
"self",
".",
"_feature_layer",
")",
"self",
".",
"_initialized",
"=",
"True"
] | Loads a model into weights | [
"Loads",
"a",
"model",
"into",
"weights"
] | python | train |
offu/WeRoBot | werobot/client.py | https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/client.py#L937-L966 | def send_miniprogrampage_message(
self, user_id, title, appid, pagepath, thumb_media_id, kf_account=None
):
"""
Send a mini program card (the mini program must already be associated with the official account)
:param user_id: User ID; this is the source of the `Message` you received
:param title: Title of the mini program card
:param appid: The appid of the mini program; it must be associated with the official account
:param pagepath: Page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
:param thumb_media_id: Media ID of the mini program card image; the recommended image size is 520*416
:param kf_account: Customer service account to send the message from, when a specific account is required
:return: The returned JSON data package
"""
data = {
"touser": user_id,
"msgtype": "miniprogrampage",
"miniprogrampage": {
"title": title,
"appid": appid,
"pagepath": pagepath,
"thumb_media_id": thumb_media_id
}
}
if kf_account is not None:
data["customservice"] = {"kf_account": kf_account}
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data=data
) | [
"def",
"send_miniprogrampage_message",
"(",
"self",
",",
"user_id",
",",
"title",
",",
"appid",
",",
"pagepath",
",",
"thumb_media_id",
",",
"kf_account",
"=",
"None",
")",
":",
"data",
"=",
"{",
"\"touser\"",
":",
"user_id",
",",
"\"msgtype\"",
":",
"\"miniprogrampage\"",
",",
"\"miniprogrampage\"",
":",
"{",
"\"title\"",
":",
"title",
",",
"\"appid\"",
":",
"appid",
",",
"\"pagepath\"",
":",
"pagepath",
",",
"\"thumb_media_id\"",
":",
"thumb_media_id",
"}",
"}",
"if",
"kf_account",
"is",
"not",
"None",
":",
"data",
"[",
"\"customservice\"",
"]",
"=",
"{",
"\"kf_account\"",
":",
"kf_account",
"}",
"return",
"self",
".",
"post",
"(",
"url",
"=",
"\"https://api.weixin.qq.com/cgi-bin/message/custom/send\"",
",",
"data",
"=",
"data",
")"
] | Send a mini program page card (the mini program must already be linked to this official account)
:param user_id: user ID, i.e. the `source` of the `Message` you received
:param title: title of the mini program card
:param appid: appid of the mini program; it must be associated with the official account
:param pagepath: page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
:param thumb_media_id: media ID of the mini program card image; the recommended image size is 520*416
:param kf_account: customer service account to send the message as, when a specific account is required
:return: the returned JSON data | [
"发送小程序卡片(要求小程序与公众号已关联)"
] | python | train |
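A usage sketch for the method above; it assumes an already-configured werobot client object, and every literal value below (user id, appid, media id, page path) is a placeholder rather than data from the record.

# `client` is assumed to be a configured werobot client exposing the method above.
client.send_miniprogrampage_message(
    user_id="oABCDEF1234567890",          # placeholder OpenID taken from a received Message.source
    title="Order status",                 # card title
    appid="wx0123456789abcdef",           # placeholder appid of a linked mini program
    pagepath="pages/index/index?foo=bar",
    thumb_media_id="MEDIA_ID",            # placeholder media id for the 520*416 thumbnail
)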
saltstack/salt | salt/modules/smartos_vmadm.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smartos_vmadm.py#L752-L790 | def update(vm, from_file=None, key='uuid', **kwargs):
'''
Update an existing vm
vm : string
vm to be updated
from_file : string
json file to update the vm with -- if present, all other options will be ignored
key : string [uuid|alias|hostname]
value type of 'vm' parameter
kwargs : string|int|...
options to update for the vm
CLI Example:
.. code-block:: bash
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024
'''
ret = {}
# prepare vmcfg
vmcfg = {}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
for k, v in six.iteritems(kwargs):
vmcfg[k] = v
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
uuid = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in uuid:
return uuid
if from_file:
return _create_update_from_file('update', uuid, path=from_file)
else:
return _create_update_from_cfg('update', uuid, vmcfg=vmcfg) | [
"def",
"update",
"(",
"vm",
",",
"from_file",
"=",
"None",
",",
"key",
"=",
"'uuid'",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"}",
"# prepare vmcfg",
"vmcfg",
"=",
"{",
"}",
"kwargs",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"vmcfg",
"[",
"k",
"]",
"=",
"v",
"if",
"key",
"not",
"in",
"[",
"'uuid'",
",",
"'alias'",
",",
"'hostname'",
"]",
":",
"ret",
"[",
"'Error'",
"]",
"=",
"'Key must be either uuid, alias or hostname'",
"return",
"ret",
"uuid",
"=",
"lookup",
"(",
"'{0}={1}'",
".",
"format",
"(",
"key",
",",
"vm",
")",
",",
"one",
"=",
"True",
")",
"if",
"'Error'",
"in",
"uuid",
":",
"return",
"uuid",
"if",
"from_file",
":",
"return",
"_create_update_from_file",
"(",
"'update'",
",",
"uuid",
",",
"path",
"=",
"from_file",
")",
"else",
":",
"return",
"_create_update_from_cfg",
"(",
"'update'",
",",
"uuid",
",",
"vmcfg",
"=",
"vmcfg",
")"
] | Update an existing vm
vm : string
vm to be updated
from_file : string
json file to update the vm with -- if present, all other options will be ignored
key : string [uuid|alias|hostname]
value type of 'vm' parameter
kwargs : string|int|...
options to update for the vm
CLI Example:
.. code-block:: bash
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024 | [
"Update",
"a",
"new",
"vm"
] | python | train |
astroduff/commah | commah/commah.py | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L219-L235 | def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y) | [
"def",
"_int_growth",
"(",
"z",
",",
"*",
"*",
"cosmo",
")",
":",
"zmax",
"=",
"200",
"if",
"hasattr",
"(",
"z",
",",
"\"__len__\"",
")",
":",
"for",
"zval",
"in",
"z",
":",
"assert",
"(",
"zval",
"<",
"zmax",
")",
"else",
":",
"assert",
"(",
"z",
"<",
"zmax",
")",
"y",
",",
"yerr",
"=",
"scipy",
".",
"integrate",
".",
"quad",
"(",
"lambda",
"z",
":",
"(",
"1",
"+",
"z",
")",
"/",
"(",
"cosmo",
"[",
"'omega_M_0'",
"]",
"*",
"(",
"1",
"+",
"z",
")",
"**",
"3",
"+",
"cosmo",
"[",
"'omega_lambda_0'",
"]",
")",
"**",
"(",
"1.5",
")",
",",
"z",
",",
"zmax",
")",
"return",
"(",
"y",
")"
] | Returns integral of the linear growth factor from z=200 to z=z | [
"Returns",
"integral",
"of",
"the",
"linear",
"growth",
"factor",
"from",
"z",
"=",
"200",
"to",
"z",
"=",
"z"
] | python | train |
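The helper above only depends on `scipy.integrate.quad` and a cosmology dict with `omega_M_0` and `omega_lambda_0` keys, so the same integral can be exercised stand-alone; the flat-LCDM values below are illustrative, not taken from the record.

import scipy.integrate

cosmo = {"omega_M_0": 0.3, "omega_lambda_0": 0.7}   # illustrative values

def int_growth(z, **cosmo):
    """Integral of (1+z')/E^3(z') from z to z=200, mirroring the record above."""
    zmax = 200
    y, _ = scipy.integrate.quad(
        lambda zp: (1 + zp) / (cosmo['omega_M_0'] * (1 + zp)**3 +
                               cosmo['omega_lambda_0'])**1.5,
        z, zmax)
    return y

print(int_growth(0.0, **cosmo))   # growth-factor integral evaluated at z = 0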
DEIB-GECO/PyGMQL | gmql/dataset/GMQLDataset.py | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L1133-L1182 | def union(self, other, left_name="LEFT", right_name="RIGHT"):
"""
*Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
:param right_name: name that you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2")
"""
if not isinstance(left_name, str) or \
not isinstance(right_name, str):
raise TypeError("left_name and right_name must be strings. "
"{} - {} was provided".format(type(left_name), type(right_name)))
if isinstance(other, GMQLDataset):
other_idx = other.__index
else:
raise TypeError("other must be a GMQLDataset. "
"{} was provided".format(type(other)))
if len(left_name) == 0 or len(right_name) == 0:
raise ValueError("left_name and right_name must not be empty")
new_index = self.opmng.union(self.__index, other_idx, left_name, right_name)
new_local_sources, new_remote_sources = self.__combine_sources(self, other)
new_location = self.__combine_locations(self, other)
return GMQLDataset(index=new_index, location=new_location,
local_sources=new_local_sources,
remote_sources=new_remote_sources,
meta_profile=self.meta_profile) | [
"def",
"union",
"(",
"self",
",",
"other",
",",
"left_name",
"=",
"\"LEFT\"",
",",
"right_name",
"=",
"\"RIGHT\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"left_name",
",",
"str",
")",
"or",
"not",
"isinstance",
"(",
"right_name",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"left_name and right_name must be strings. \"",
"\"{} - {} was provided\"",
".",
"format",
"(",
"type",
"(",
"left_name",
")",
",",
"type",
"(",
"right_name",
")",
")",
")",
"if",
"isinstance",
"(",
"other",
",",
"GMQLDataset",
")",
":",
"other_idx",
"=",
"other",
".",
"__index",
"else",
":",
"raise",
"TypeError",
"(",
"\"other must be a GMQLDataset. \"",
"\"{} was provided\"",
".",
"format",
"(",
"type",
"(",
"other",
")",
")",
")",
"if",
"len",
"(",
"left_name",
")",
"==",
"0",
"or",
"len",
"(",
"right_name",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"left_name and right_name must not be empty\"",
")",
"new_index",
"=",
"self",
".",
"opmng",
".",
"union",
"(",
"self",
".",
"__index",
",",
"other_idx",
",",
"left_name",
",",
"right_name",
")",
"new_local_sources",
",",
"new_remote_sources",
"=",
"self",
".",
"__combine_sources",
"(",
"self",
",",
"other",
")",
"new_location",
"=",
"self",
".",
"__combine_locations",
"(",
"self",
",",
"other",
")",
"return",
"GMQLDataset",
"(",
"index",
"=",
"new_index",
",",
"location",
"=",
"new_location",
",",
"local_sources",
"=",
"new_local_sources",
",",
"remote_sources",
"=",
"new_remote_sources",
",",
"meta_profile",
"=",
"self",
".",
"meta_profile",
")"
] | *Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
:param right_name: name that you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2") | [
"*",
"Wrapper",
"of",
"*",
"UNION"
] | python | train |
glamp/bashplotlib | bashplotlib/utils/helpers.py | https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/utils/helpers.py#L36-L44 | def printcolour(text, sameline=False, colour=get_colour("ENDC")):
"""
Print color text using escape codes
"""
if sameline:
sep = ''
else:
sep = '\n'
sys.stdout.write(get_colour(colour) + text + bcolours["ENDC"] + sep) | [
"def",
"printcolour",
"(",
"text",
",",
"sameline",
"=",
"False",
",",
"colour",
"=",
"get_colour",
"(",
"\"ENDC\"",
")",
")",
":",
"if",
"sameline",
":",
"sep",
"=",
"''",
"else",
":",
"sep",
"=",
"'\\n'",
"sys",
".",
"stdout",
".",
"write",
"(",
"get_colour",
"(",
"colour",
")",
"+",
"text",
"+",
"bcolours",
"[",
"\"ENDC\"",
"]",
"+",
"sep",
")"
] | Print color text using escape codes | [
"Print",
"color",
"text",
"using",
"escape",
"codes"
] | python | train |
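For readers without bashplotlib installed, the escape-code idea behind the helper above can be sketched stand-alone; the ANSI codes below are standard terminal codes, not values taken from the record's `bcolours` table.

import sys

ANSI = {"red": "\033[91m", "green": "\033[92m", "ENDC": "\033[0m"}   # standard ANSI codes

def print_colour(text, sameline=False, colour="ENDC"):
    # Same control flow as the record: pick the separator, then emit colour + text + reset.
    sep = '' if sameline else '\n'
    sys.stdout.write(ANSI.get(colour, "") + text + ANSI["ENDC"] + sep)

print_colour("histogram bar", colour="green")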
facelessuser/soupsieve | soupsieve/css_parser.py | https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/css_parser.py#L754-L794 | def parse_has_combinator(self, sel, m, has_selector, selectors, rel_type, index):
"""Parse combinator tokens."""
combinator = m.group('relation').strip()
if not combinator:
combinator = WS_COMBINATOR
if combinator == COMMA_COMBINATOR:
if not has_selector:
# If we've not captured any selector parts, the comma is either at the beginning of the pattern
# or following another comma, both of which are unexpected. Commas must split selectors.
raise SelectorSyntaxError(
"The combinator '{}' at postion {}, must have a selector before it".format(combinator, index),
self.pattern,
index
)
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
rel_type = ":" + WS_COMBINATOR
selectors.append(_Selector())
else:
if has_selector:
# End the current selector and associate the leading combinator with this selector.
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
elif rel_type[1:] != WS_COMBINATOR:
# It's impossible to have two whitespace combinators after each other as the patterns
# will gobble up trailing whitespace. It is also impossible to have a whitespace
# combinator after any other kind for the same reason. But we could have
# multiple non-whitespace combinators. So if the current combinator is not a whitespace,
# then we've hit the multiple combinator case, so we should fail.
raise SelectorSyntaxError(
'The multiple combinators at position {}'.format(index),
self.pattern,
index
)
# Set the leading combinator for the next selector.
rel_type = ':' + combinator
sel = _Selector()
has_selector = False
return has_selector, sel, rel_type | [
"def",
"parse_has_combinator",
"(",
"self",
",",
"sel",
",",
"m",
",",
"has_selector",
",",
"selectors",
",",
"rel_type",
",",
"index",
")",
":",
"combinator",
"=",
"m",
".",
"group",
"(",
"'relation'",
")",
".",
"strip",
"(",
")",
"if",
"not",
"combinator",
":",
"combinator",
"=",
"WS_COMBINATOR",
"if",
"combinator",
"==",
"COMMA_COMBINATOR",
":",
"if",
"not",
"has_selector",
":",
"# If we've not captured any selector parts, the comma is either at the beginning of the pattern",
"# or following another comma, both of which are unexpected. Commas must split selectors.",
"raise",
"SelectorSyntaxError",
"(",
"\"The combinator '{}' at postion {}, must have a selector before it\"",
".",
"format",
"(",
"combinator",
",",
"index",
")",
",",
"self",
".",
"pattern",
",",
"index",
")",
"sel",
".",
"rel_type",
"=",
"rel_type",
"selectors",
"[",
"-",
"1",
"]",
".",
"relations",
".",
"append",
"(",
"sel",
")",
"rel_type",
"=",
"\":\"",
"+",
"WS_COMBINATOR",
"selectors",
".",
"append",
"(",
"_Selector",
"(",
")",
")",
"else",
":",
"if",
"has_selector",
":",
"# End the current selector and associate the leading combinator with this selector.",
"sel",
".",
"rel_type",
"=",
"rel_type",
"selectors",
"[",
"-",
"1",
"]",
".",
"relations",
".",
"append",
"(",
"sel",
")",
"elif",
"rel_type",
"[",
"1",
":",
"]",
"!=",
"WS_COMBINATOR",
":",
"# It's impossible to have two whitespace combinators after each other as the patterns",
"# will gobble up trailing whitespace. It is also impossible to have a whitespace",
"# combinator after any other kind for the same reason. But we could have",
"# multiple non-whitespace combinators. So if the current combinator is not a whitespace,",
"# then we've hit the multiple combinator case, so we should fail.",
"raise",
"SelectorSyntaxError",
"(",
"'The multiple combinators at position {}'",
".",
"format",
"(",
"index",
")",
",",
"self",
".",
"pattern",
",",
"index",
")",
"# Set the leading combinator for the next selector.",
"rel_type",
"=",
"':'",
"+",
"combinator",
"sel",
"=",
"_Selector",
"(",
")",
"has_selector",
"=",
"False",
"return",
"has_selector",
",",
"sel",
",",
"rel_type"
] | Parse combinator tokens. | [
"Parse",
"combinator",
"tokens",
"."
] | python | train |
CityOfZion/neo-python | neo/Core/TX/Transaction.py | https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/Transaction.py#L419-L429 | def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.DeserializeUnsigned(reader)
self.scripts = reader.ReadSerializableArray()
self.OnDeserialized() | [
"def",
"Deserialize",
"(",
"self",
",",
"reader",
")",
":",
"self",
".",
"DeserializeUnsigned",
"(",
"reader",
")",
"self",
".",
"scripts",
"=",
"reader",
".",
"ReadSerializableArray",
"(",
")",
"self",
".",
"OnDeserialized",
"(",
")"
] | Deserialize full object.
Args:
reader (neo.IO.BinaryReader): | [
"Deserialize",
"full",
"object",
"."
] | python | train |
aleju/imgaug | imgaug/augmentables/polys.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L950-L971 | def deepcopy(self, exterior=None, label=None):
"""
Create a deep copy of the Polygon object.
Parameters
----------
exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
List of points defining the polygon. See `imgaug.Polygon.__init__` for details.
label : None or str
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.Polygon
Deep copy.
"""
return Polygon(
exterior=np.copy(self.exterior) if exterior is None else exterior,
label=self.label if label is None else label
) | [
"def",
"deepcopy",
"(",
"self",
",",
"exterior",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"return",
"Polygon",
"(",
"exterior",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"exterior",
")",
"if",
"exterior",
"is",
"None",
"else",
"exterior",
",",
"label",
"=",
"self",
".",
"label",
"if",
"label",
"is",
"None",
"else",
"label",
")"
] | Create a deep copy of the Polygon object.
Parameters
----------
exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
List of points defining the polygon. See `imgaug.Polygon.__init__` for details.
label : None or str
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.Polygon
Deep copy. | [
"Create",
"a",
"deep",
"copy",
"of",
"the",
"Polygon",
"object",
"."
] | python | valid |
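A short usage sketch for the method above, assuming imgaug is installed; the triangle coordinates and labels are made up for illustration.

from imgaug.augmentables.polys import Polygon

poly = Polygon([(0, 0), (10, 0), (10, 10)], label="roof")
same = poly.deepcopy()                    # independent copy, same exterior and label
relabeled = poly.deepcopy(label="wall")   # same exterior, new label
print(same.label, relabeled.label)        # roof wall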
IAMconsortium/pyam | pyam/timeseries.py | https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/timeseries.py#L10-L32 | def fill_series(x, year):
"""Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation
"""
x = x.dropna()
if year in x.index and not np.isnan(x[year]):
return x[year]
else:
prev = [i for i in x.index if i < year]
nxt = [i for i in x.index if i > year]
if prev and nxt:
p = max(prev)
n = min(nxt)
return ((n - year) * x[p] + (year - p) * x[n]) / (n - p)
else:
return np.nan | [
"def",
"fill_series",
"(",
"x",
",",
"year",
")",
":",
"x",
"=",
"x",
".",
"dropna",
"(",
")",
"if",
"year",
"in",
"x",
".",
"index",
"and",
"not",
"np",
".",
"isnan",
"(",
"x",
"[",
"year",
"]",
")",
":",
"return",
"x",
"[",
"year",
"]",
"else",
":",
"prev",
"=",
"[",
"i",
"for",
"i",
"in",
"x",
".",
"index",
"if",
"i",
"<",
"year",
"]",
"nxt",
"=",
"[",
"i",
"for",
"i",
"in",
"x",
".",
"index",
"if",
"i",
">",
"year",
"]",
"if",
"prev",
"and",
"nxt",
":",
"p",
"=",
"max",
"(",
"prev",
")",
"n",
"=",
"min",
"(",
"nxt",
")",
"return",
"(",
"(",
"n",
"-",
"year",
")",
"*",
"x",
"[",
"p",
"]",
"+",
"(",
"year",
"-",
"p",
")",
"*",
"x",
"[",
"n",
"]",
")",
"/",
"(",
"n",
"-",
"p",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation | [
"Returns",
"the",
"value",
"of",
"a",
"timeseries",
"(",
"indexed",
"over",
"years",
")",
"for",
"a",
"year",
"by",
"linear",
"interpolation",
"."
] | python | train |
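A usage sketch for the function above, assuming pyam is installed; the series values and years are arbitrary.

import pandas as pd
from pyam.timeseries import fill_series   # module path taken from the record

x = pd.Series([10.0, 20.0], index=[2010, 2020])
print(fill_series(x, 2015))   # linear interpolation between 2010 and 2020 -> 15.0
print(fill_series(x, 2020))   # exact year -> 20.0
print(fill_series(x, 2030))   # outside the covered range -> nan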
domainaware/parsedmarc | parsedmarc/__init__.py | https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L211-L339 | def parse_aggregate_report_xml(xml, nameservers=None, timeout=2.0,
parallel=False):
"""Parses a DMARC XML report string and returns a consistent OrderedDict
Args:
xml (str): A string of DMARC aggregate report XML
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed aggregate DMARC report
"""
errors = []
try:
xmltodict.parse(xml)["feedback"]
except Exception as e:
errors.append(e.__str__())
try:
# Replace XML header (sometimes they are invalid)
xml = xml_header_regex.sub("<?xml version=\"1.0\"?>", xml)
# Remove invalid schema tags
xml = xml_schema_regex.sub('', xml)
report = xmltodict.parse(xml)["feedback"]
report_metadata = report["report_metadata"]
schema = "draft"
if "version" in report:
schema = report["version"]
new_report = OrderedDict([("xml_schema", schema)])
new_report_metadata = OrderedDict()
if report_metadata["org_name"] is None:
if report_metadata["email"] is not None:
report_metadata["org_name"] = report_metadata[
"email"].split("@")[-1]
org_name = report_metadata["org_name"]
if org_name is not None:
org_name = get_base_domain(org_name)
new_report_metadata["org_name"] = org_name
new_report_metadata["org_email"] = report_metadata["email"]
extra = None
if "extra_contact_info" in report_metadata:
extra = report_metadata["extra_contact_info"]
new_report_metadata["org_extra_contact_info"] = extra
new_report_metadata["report_id"] = report_metadata["report_id"]
report_id = new_report_metadata["report_id"]
report_id = report_id.replace("<",
"").replace(">", "").split("@")[0]
new_report_metadata["report_id"] = report_id
date_range = report["report_metadata"]["date_range"]
date_range["begin"] = timestamp_to_human(date_range["begin"])
date_range["end"] = timestamp_to_human(date_range["end"])
new_report_metadata["begin_date"] = date_range["begin"]
new_report_metadata["end_date"] = date_range["end"]
if "error" in report["report_metadata"]:
if type(report["report_metadata"]["error"]) != list:
errors = [report["report_metadata"]["error"]]
else:
errors = report["report_metadata"]["error"]
new_report_metadata["errors"] = errors
new_report["report_metadata"] = new_report_metadata
records = []
policy_published = report["policy_published"]
new_policy_published = OrderedDict()
new_policy_published["domain"] = policy_published["domain"]
adkim = "r"
if "adkim" in policy_published:
if policy_published["adkim"] is not None:
adkim = policy_published["adkim"]
new_policy_published["adkim"] = adkim
aspf = "r"
if "aspf" in policy_published:
if policy_published["aspf"] is not None:
aspf = policy_published["aspf"]
new_policy_published["aspf"] = aspf
new_policy_published["p"] = policy_published["p"]
sp = new_policy_published["p"]
if "sp" in policy_published:
if policy_published["sp"] is not None:
sp = report["policy_published"]["sp"]
new_policy_published["sp"] = sp
pct = "100"
if "pct" in policy_published:
if policy_published["pct"] is not None:
pct = report["policy_published"]["pct"]
new_policy_published["pct"] = pct
fo = "0"
if "fo" in policy_published:
if policy_published["fo"] is not None:
fo = report["policy_published"]["fo"]
new_policy_published["fo"] = fo
new_report["policy_published"] = new_policy_published
if type(report["record"]) == list:
for record in report["record"]:
report_record = _parse_report_record(record,
nameservers=nameservers,
dns_timeout=timeout,
parallel=parallel)
records.append(report_record)
else:
report_record = _parse_report_record(report["record"],
nameservers=nameservers,
dns_timeout=timeout,
parallel=parallel)
records.append(report_record)
new_report["records"] = records
return new_report
except expat.ExpatError as error:
raise InvalidAggregateReport(
"Invalid XML: {0}".format(error.__str__()))
except KeyError as error:
raise InvalidAggregateReport(
"Missing field: {0}".format(error.__str__()))
except AttributeError:
raise InvalidAggregateReport("Report missing required section")
except Exception as error:
raise InvalidAggregateReport(
"Unexpected error: {0}".format(error.__str__())) | [
"def",
"parse_aggregate_report_xml",
"(",
"xml",
",",
"nameservers",
"=",
"None",
",",
"timeout",
"=",
"2.0",
",",
"parallel",
"=",
"False",
")",
":",
"errors",
"=",
"[",
"]",
"try",
":",
"xmltodict",
".",
"parse",
"(",
"xml",
")",
"[",
"\"feedback\"",
"]",
"except",
"Exception",
"as",
"e",
":",
"errors",
".",
"append",
"(",
"e",
".",
"__str__",
"(",
")",
")",
"try",
":",
"# Replace XML header (sometimes they are invalid)",
"xml",
"=",
"xml_header_regex",
".",
"sub",
"(",
"\"<?xml version=\\\"1.0\\\"?>\"",
",",
"xml",
")",
"# Remove invalid schema tags",
"xml",
"=",
"xml_schema_regex",
".",
"sub",
"(",
"''",
",",
"xml",
")",
"report",
"=",
"xmltodict",
".",
"parse",
"(",
"xml",
")",
"[",
"\"feedback\"",
"]",
"report_metadata",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"schema",
"=",
"\"draft\"",
"if",
"\"version\"",
"in",
"report",
":",
"schema",
"=",
"report",
"[",
"\"version\"",
"]",
"new_report",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"xml_schema\"",
",",
"schema",
")",
"]",
")",
"new_report_metadata",
"=",
"OrderedDict",
"(",
")",
"if",
"report_metadata",
"[",
"\"org_name\"",
"]",
"is",
"None",
":",
"if",
"report_metadata",
"[",
"\"email\"",
"]",
"is",
"not",
"None",
":",
"report_metadata",
"[",
"\"org_name\"",
"]",
"=",
"report_metadata",
"[",
"\"email\"",
"]",
".",
"split",
"(",
"\"@\"",
")",
"[",
"-",
"1",
"]",
"org_name",
"=",
"report_metadata",
"[",
"\"org_name\"",
"]",
"if",
"org_name",
"is",
"not",
"None",
":",
"org_name",
"=",
"get_base_domain",
"(",
"org_name",
")",
"new_report_metadata",
"[",
"\"org_name\"",
"]",
"=",
"org_name",
"new_report_metadata",
"[",
"\"org_email\"",
"]",
"=",
"report_metadata",
"[",
"\"email\"",
"]",
"extra",
"=",
"None",
"if",
"\"extra_contact_info\"",
"in",
"report_metadata",
":",
"extra",
"=",
"report_metadata",
"[",
"\"extra_contact_info\"",
"]",
"new_report_metadata",
"[",
"\"org_extra_contact_info\"",
"]",
"=",
"extra",
"new_report_metadata",
"[",
"\"report_id\"",
"]",
"=",
"report_metadata",
"[",
"\"report_id\"",
"]",
"report_id",
"=",
"new_report_metadata",
"[",
"\"report_id\"",
"]",
"report_id",
"=",
"report_id",
".",
"replace",
"(",
"\"<\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\">\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\"@\"",
")",
"[",
"0",
"]",
"new_report_metadata",
"[",
"\"report_id\"",
"]",
"=",
"report_id",
"date_range",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"date_range\"",
"]",
"date_range",
"[",
"\"begin\"",
"]",
"=",
"timestamp_to_human",
"(",
"date_range",
"[",
"\"begin\"",
"]",
")",
"date_range",
"[",
"\"end\"",
"]",
"=",
"timestamp_to_human",
"(",
"date_range",
"[",
"\"end\"",
"]",
")",
"new_report_metadata",
"[",
"\"begin_date\"",
"]",
"=",
"date_range",
"[",
"\"begin\"",
"]",
"new_report_metadata",
"[",
"\"end_date\"",
"]",
"=",
"date_range",
"[",
"\"end\"",
"]",
"if",
"\"error\"",
"in",
"report",
"[",
"\"report_metadata\"",
"]",
":",
"if",
"type",
"(",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"error\"",
"]",
")",
"!=",
"list",
":",
"errors",
"=",
"[",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"error\"",
"]",
"]",
"else",
":",
"errors",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"error\"",
"]",
"new_report_metadata",
"[",
"\"errors\"",
"]",
"=",
"errors",
"new_report",
"[",
"\"report_metadata\"",
"]",
"=",
"new_report_metadata",
"records",
"=",
"[",
"]",
"policy_published",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"new_policy_published",
"=",
"OrderedDict",
"(",
")",
"new_policy_published",
"[",
"\"domain\"",
"]",
"=",
"policy_published",
"[",
"\"domain\"",
"]",
"adkim",
"=",
"\"r\"",
"if",
"\"adkim\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"adkim\"",
"]",
"is",
"not",
"None",
":",
"adkim",
"=",
"policy_published",
"[",
"\"adkim\"",
"]",
"new_policy_published",
"[",
"\"adkim\"",
"]",
"=",
"adkim",
"aspf",
"=",
"\"r\"",
"if",
"\"aspf\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"aspf\"",
"]",
"is",
"not",
"None",
":",
"aspf",
"=",
"policy_published",
"[",
"\"aspf\"",
"]",
"new_policy_published",
"[",
"\"aspf\"",
"]",
"=",
"aspf",
"new_policy_published",
"[",
"\"p\"",
"]",
"=",
"policy_published",
"[",
"\"p\"",
"]",
"sp",
"=",
"new_policy_published",
"[",
"\"p\"",
"]",
"if",
"\"sp\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"sp\"",
"]",
"is",
"not",
"None",
":",
"sp",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"sp\"",
"]",
"new_policy_published",
"[",
"\"sp\"",
"]",
"=",
"sp",
"pct",
"=",
"\"100\"",
"if",
"\"pct\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"pct\"",
"]",
"is",
"not",
"None",
":",
"pct",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"pct\"",
"]",
"new_policy_published",
"[",
"\"pct\"",
"]",
"=",
"pct",
"fo",
"=",
"\"0\"",
"if",
"\"fo\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"fo\"",
"]",
"is",
"not",
"None",
":",
"fo",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"fo\"",
"]",
"new_policy_published",
"[",
"\"fo\"",
"]",
"=",
"fo",
"new_report",
"[",
"\"policy_published\"",
"]",
"=",
"new_policy_published",
"if",
"type",
"(",
"report",
"[",
"\"record\"",
"]",
")",
"==",
"list",
":",
"for",
"record",
"in",
"report",
"[",
"\"record\"",
"]",
":",
"report_record",
"=",
"_parse_report_record",
"(",
"record",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"timeout",
",",
"parallel",
"=",
"parallel",
")",
"records",
".",
"append",
"(",
"report_record",
")",
"else",
":",
"report_record",
"=",
"_parse_report_record",
"(",
"report",
"[",
"\"record\"",
"]",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"timeout",
",",
"parallel",
"=",
"parallel",
")",
"records",
".",
"append",
"(",
"report_record",
")",
"new_report",
"[",
"\"records\"",
"]",
"=",
"records",
"return",
"new_report",
"except",
"expat",
".",
"ExpatError",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Invalid XML: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Missing field: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"AttributeError",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Report missing required section\"",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Unexpected error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] | Parses a DMARC XML report string and returns a consistent OrderedDict
Args:
xml (str): A string of DMARC aggregate report XML
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed aggregate DMARC report | [
"Parses",
"a",
"DMARC",
"XML",
"report",
"string",
"and",
"returns",
"a",
"consistent",
"OrderedDict"
] | python | test |
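A hedged usage sketch for the parser above, assuming parsedmarc is installed and that `report.xml` (an illustrative path) contains a DMARC aggregate report; the keys printed below all appear in the record's own code.

from parsedmarc import parse_aggregate_report_xml

with open("report.xml") as f:            # illustrative path
    xml = f.read()

report = parse_aggregate_report_xml(xml, nameservers=["1.1.1.1"], timeout=2.0)
print(report["xml_schema"])
print(report["report_metadata"]["org_name"])
print(len(report["records"]))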
jaraco/jaraco.text | jaraco/text.py | https://github.com/jaraco/jaraco.text/blob/0fe070e9241cb1fdb737516a3f57da94a2618376/jaraco/text.py#L368-L375 | def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
"""
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index] | [
"def",
"common_prefix",
"(",
"s1",
",",
"s2",
")",
":",
"index",
"=",
"min",
"(",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
")",
"while",
"s1",
"[",
":",
"index",
"]",
"!=",
"s2",
"[",
":",
"index",
"]",
":",
"index",
"-=",
"1",
"return",
"s1",
"[",
":",
"index",
"]"
] | Return the common prefix of two lines. | [
"Return",
"the",
"common",
"prefix",
"of",
"two",
"lines",
"."
] | python | train |
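The helper above is easy to exercise by hand, assuming jaraco.text is importable.

from jaraco.text import common_prefix

print(common_prefix("flatland", "flatware"))   # -> "flat"
print(common_prefix("monty", "python"))        # -> ""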
kevinconway/venvctrl | venvctrl/venv/base.py | https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L125-L129 | def dirs(self):
"""Get an iter of VenvDirs within the directory."""
contents = self.paths
contents = (VenvDir(path.path) for path in contents if path.is_dir)
return contents | [
"def",
"dirs",
"(",
"self",
")",
":",
"contents",
"=",
"self",
".",
"paths",
"contents",
"=",
"(",
"VenvDir",
"(",
"path",
".",
"path",
")",
"for",
"path",
"in",
"contents",
"if",
"path",
".",
"is_dir",
")",
"return",
"contents"
] | Get an iter of VenvDirs within the directory. | [
"Get",
"an",
"iter",
"of",
"VenvDirs",
"within",
"the",
"directory",
"."
] | python | train |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAARP/QAAccount.py | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1850-L1857 | def change_cash(self, money):
"""
External operation | high risk |
"""
res = self.cash[-1] + money
if res >= 0:
# high-risk operation
self.cash[-1] = res | [
"def",
"change_cash",
"(",
"self",
",",
"money",
")",
":",
"res",
"=",
"self",
".",
"cash",
"[",
"-",
"1",
"]",
"+",
"money",
"if",
"res",
">=",
"0",
":",
"# 高危操作",
"self",
".",
"cash",
"[",
"-",
"1",
"]",
"=",
"res"
] | External operation | high risk | | [
"外部操作|高危|"
] | python | train |
Becksteinlab/GromacsWrapper | gromacs/fileformats/xvg.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/xvg.py#L550-L635 | def plot(self, **kwargs):
"""Plot xvg file data.
The first column of the data is always taken as the abscissa
X. Additional columns are plotted as ordinates Y1, Y2, ...
In the special case that there is only a single column then this column
is plotted against the index, i.e. (N, Y).
:Keywords:
*columns* : list
Select the columns of the data to be plotted; the list
is used as a numpy.array extended slice. The default is
to use all columns. Columns are selected *after* a transform.
*transform* : function
function ``transform(array) -> array`` which transforms
the original array; must return a 2D numpy array of
shape [X, Y1, Y2, ...] where X, Y1, ... are column
vectors. By default the transformation is the
identity [``lambda x: x``].
*maxpoints* : int
limit the total number of data points; matplotlib has issues processing
png files with >100,000 points and pdfs take forever to display. Set to
``None`` if really all data should be displayed. At the moment we simply
decimate the data at regular intervals. [10000]
*method*
method to decimate the data to *maxpoints*, see :meth:`XVG.decimate`
for details
*color*
single color (used for all plots); sequence of colors
(will be repeated as necessary); or a matplotlib
colormap (e.g. "jet", see :mod:`matplotlib.cm`). The
default is to use the :attr:`XVG.default_color_cycle`.
*ax*
plot into given axes or create new one if ``None`` [``None``]
*kwargs*
All other keyword arguments are passed on to :func:`matplotlib.pyplot.plot`.
:Returns:
*ax*
axes instance
"""
columns = kwargs.pop('columns', Ellipsis) # slice for everything
maxpoints = kwargs.pop('maxpoints', self.maxpoints_default)
transform = kwargs.pop('transform', lambda x: x) # default is identity transformation
method = kwargs.pop('method', "mean")
ax = kwargs.pop('ax', None)
if columns is Ellipsis or columns is None:
columns = numpy.arange(self.array.shape[0])
if len(columns) == 0:
raise MissingDataError("plot() needs at least one column of data")
if len(self.array.shape) == 1 or self.array.shape[0] == 1:
# special case: plot against index; plot would do this automatically but
# we'll just produce our own xdata and pretend that this was X all along
a = numpy.ravel(self.array)
X = numpy.arange(len(a))
a = numpy.vstack((X, a))
columns = [0] + [c+1 for c in columns]
else:
a = self.array
color = kwargs.pop('color', self.default_color_cycle)
try:
cmap = matplotlib.cm.get_cmap(color)
colors = cmap(matplotlib.colors.Normalize()(numpy.arange(len(columns[1:]), dtype=float)))
except TypeError:
colors = cycle(utilities.asiterable(color))
if ax is None:
ax = plt.gca()
# (decimate/smooth o slice o transform)(array)
a = self.decimate(method, numpy.asarray(transform(a))[columns], maxpoints=maxpoints)
# now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan)
ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a)))
# finally plot (each column separately to catch empty sets)
for column, color in zip(range(1,len(columns)), colors):
if len(ma[column]) == 0:
warnings.warn("No data to plot for column {column:d}".format(**vars()), category=MissingDataWarning)
kwargs['color'] = color
ax.plot(ma[0], ma[column], **kwargs) # plot all other columns in parallel
return ax | [
"def",
"plot",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"columns",
"=",
"kwargs",
".",
"pop",
"(",
"'columns'",
",",
"Ellipsis",
")",
"# slice for everything",
"maxpoints",
"=",
"kwargs",
".",
"pop",
"(",
"'maxpoints'",
",",
"self",
".",
"maxpoints_default",
")",
"transform",
"=",
"kwargs",
".",
"pop",
"(",
"'transform'",
",",
"lambda",
"x",
":",
"x",
")",
"# default is identity transformation",
"method",
"=",
"kwargs",
".",
"pop",
"(",
"'method'",
",",
"\"mean\"",
")",
"ax",
"=",
"kwargs",
".",
"pop",
"(",
"'ax'",
",",
"None",
")",
"if",
"columns",
"is",
"Ellipsis",
"or",
"columns",
"is",
"None",
":",
"columns",
"=",
"numpy",
".",
"arange",
"(",
"self",
".",
"array",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"columns",
")",
"==",
"0",
":",
"raise",
"MissingDataError",
"(",
"\"plot() needs at least one column of data\"",
")",
"if",
"len",
"(",
"self",
".",
"array",
".",
"shape",
")",
"==",
"1",
"or",
"self",
".",
"array",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"# special case: plot against index; plot would do this automatically but",
"# we'll just produce our own xdata and pretend that this was X all along",
"a",
"=",
"numpy",
".",
"ravel",
"(",
"self",
".",
"array",
")",
"X",
"=",
"numpy",
".",
"arange",
"(",
"len",
"(",
"a",
")",
")",
"a",
"=",
"numpy",
".",
"vstack",
"(",
"(",
"X",
",",
"a",
")",
")",
"columns",
"=",
"[",
"0",
"]",
"+",
"[",
"c",
"+",
"1",
"for",
"c",
"in",
"columns",
"]",
"else",
":",
"a",
"=",
"self",
".",
"array",
"color",
"=",
"kwargs",
".",
"pop",
"(",
"'color'",
",",
"self",
".",
"default_color_cycle",
")",
"try",
":",
"cmap",
"=",
"matplotlib",
".",
"cm",
".",
"get_cmap",
"(",
"color",
")",
"colors",
"=",
"cmap",
"(",
"matplotlib",
".",
"colors",
".",
"Normalize",
"(",
")",
"(",
"numpy",
".",
"arange",
"(",
"len",
"(",
"columns",
"[",
"1",
":",
"]",
")",
",",
"dtype",
"=",
"float",
")",
")",
")",
"except",
"TypeError",
":",
"colors",
"=",
"cycle",
"(",
"utilities",
".",
"asiterable",
"(",
"color",
")",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# (decimate/smooth o slice o transform)(array)",
"a",
"=",
"self",
".",
"decimate",
"(",
"method",
",",
"numpy",
".",
"asarray",
"(",
"transform",
"(",
"a",
")",
")",
"[",
"columns",
"]",
",",
"maxpoints",
"=",
"maxpoints",
")",
"# now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan)",
"ma",
"=",
"numpy",
".",
"ma",
".",
"MaskedArray",
"(",
"a",
",",
"mask",
"=",
"numpy",
".",
"logical_not",
"(",
"numpy",
".",
"isfinite",
"(",
"a",
")",
")",
")",
"# finally plot (each column separately to catch empty sets)",
"for",
"column",
",",
"color",
"in",
"zip",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"columns",
")",
")",
",",
"colors",
")",
":",
"if",
"len",
"(",
"ma",
"[",
"column",
"]",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"No data to plot for column {column:d}\"",
".",
"format",
"(",
"*",
"*",
"vars",
"(",
")",
")",
",",
"category",
"=",
"MissingDataWarning",
")",
"kwargs",
"[",
"'color'",
"]",
"=",
"color",
"ax",
".",
"plot",
"(",
"ma",
"[",
"0",
"]",
",",
"ma",
"[",
"column",
"]",
",",
"*",
"*",
"kwargs",
")",
"# plot all other columns in parallel",
"return",
"ax"
] | Plot xvg file data.
The first column of the data is always taken as the abscissa
X. Additional columns are plotted as ordinates Y1, Y2, ...
In the special case that there is only a single column then this column
is plotted against the index, i.e. (N, Y).
:Keywords:
*columns* : list
Select the columns of the data to be plotted; the list
is used as a numpy.array extended slice. The default is
to use all columns. Columns are selected *after* a transform.
*transform* : function
function ``transform(array) -> array`` which transforms
the original array; must return a 2D numpy array of
shape [X, Y1, Y2, ...] where X, Y1, ... are column
vectors. By default the transformation is the
identity [``lambda x: x``].
*maxpoints* : int
limit the total number of data points; matplotlib has issues processing
png files with >100,000 points and pdfs take forever to display. Set to
``None`` if really all data should be displayed. At the moment we simply
decimate the data at regular intervals. [10000]
*method*
method to decimate the data to *maxpoints*, see :meth:`XVG.decimate`
for details
*color*
single color (used for all plots); sequence of colors
(will be repeated as necessary); or a matplotlib
colormap (e.g. "jet", see :mod:`matplotlib.cm`). The
default is to use the :attr:`XVG.default_color_cycle`.
*ax*
plot into given axes or create new one if ``None`` [``None``]
*kwargs*
All other keyword arguments are passed on to :func:`matplotlib.pyplot.plot`.
:Returns:
*ax*
axes instance | [
"Plot",
"xvg",
"file",
"data",
"."
] | python | valid |
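A usage sketch for the plotting method above, assuming GromacsWrapper and matplotlib are installed and that `energy.xvg` (an illustrative filename) exists; the column choice and point limit are arbitrary.

import matplotlib.pyplot as plt
from gromacs.fileformats.xvg import XVG   # module path taken from the record

xvg = XVG("energy.xvg")
ax = xvg.plot(columns=[0, 1], maxpoints=5000, color="darkorange")
ax.set_xlabel("time (ps)")
plt.savefig("energy.png")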
zimeon/iiif | iiif/static.py | https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L370-L436 | def write_html(self, html_dir='/tmp', include_osd=False,
osd_width=500, osd_height=500):
"""Write HTML test page using OpenSeadragon for the tiles generated.
Assumes that the generate(..) method has already been called to set up
identifier etc. Parameters:
html_dir - output directory for HTML files, will be created if it
does not already exist
include_osd - true to include OpenSeadragon code
osd_width - width of OpenSeadragon pane in pixels
osd_height - height of OpenSeadragon pane in pixels
"""
osd_config = self.get_osd_config(self.osd_version)
osd_base = osd_config['base']
osd_dir = osd_config['dir'] # relative to base
osd_js = os.path.join(osd_dir, osd_config['js'])
osd_images = os.path.join(osd_dir, osd_config['images'])
if (os.path.isdir(html_dir)):
# Exists, fine
pass
elif (os.path.isfile(html_dir)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % html_dir)
else:
os.makedirs(html_dir)
self.logger.info("Writing HTML to %s" % (html_dir))
with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f:
template = f.read()
outfile = self.identifier + '.html'
outpath = os.path.join(html_dir, outfile)
with open(outpath, 'w') as f:
info_json_uri = '/'.join([self.identifier, 'info.json'])
if (self.prefix):
info_json_uri = '/'.join([self.prefix, info_json_uri])
d = dict(identifier=self.identifier,
api_version=self.api_version,
osd_version=self.osd_version,
osd_uri=osd_js,
osd_images_prefix=osd_images,
osd_height=osd_width,
osd_width=osd_height,
info_json_uri=info_json_uri)
f.write(Template(template).safe_substitute(d))
self.logger.info("%s / %s" % (html_dir, outfile))
# Do we want to copy OSD in there too? If so, do it only if
# we haven't already
if (include_osd):
if (self.copied_osd):
self.logger.info("OpenSeadragon already copied")
else:
# Make directory, copy JavaScript and icons (from osd_images)
osd_path = os.path.join(html_dir, osd_dir)
if (not os.path.isdir(osd_path)):
os.makedirs(osd_path)
shutil.copyfile(os.path.join(osd_base, osd_js),
os.path.join(html_dir, osd_js))
self.logger.info("%s / %s" % (html_dir, osd_js))
osd_images_path = os.path.join(html_dir, osd_images)
if (os.path.isdir(osd_images_path)):
self.logger.warning(
"OpenSeadragon images directory (%s) already exists, skipping"
% osd_images_path)
else:
shutil.copytree(os.path.join(osd_base, osd_images),
osd_images_path)
self.logger.info("%s / %s/*" % (html_dir, osd_images))
self.copied_osd = True | [
"def",
"write_html",
"(",
"self",
",",
"html_dir",
"=",
"'/tmp'",
",",
"include_osd",
"=",
"False",
",",
"osd_width",
"=",
"500",
",",
"osd_height",
"=",
"500",
")",
":",
"osd_config",
"=",
"self",
".",
"get_osd_config",
"(",
"self",
".",
"osd_version",
")",
"osd_base",
"=",
"osd_config",
"[",
"'base'",
"]",
"osd_dir",
"=",
"osd_config",
"[",
"'dir'",
"]",
"# relative to base",
"osd_js",
"=",
"os",
".",
"path",
".",
"join",
"(",
"osd_dir",
",",
"osd_config",
"[",
"'js'",
"]",
")",
"osd_images",
"=",
"os",
".",
"path",
".",
"join",
"(",
"osd_dir",
",",
"osd_config",
"[",
"'images'",
"]",
")",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"html_dir",
")",
")",
":",
"# Exists, fine",
"pass",
"elif",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"html_dir",
")",
")",
":",
"raise",
"IIIFStaticError",
"(",
"\"Can't write to directory %s: a file of that name exists\"",
"%",
"html_dir",
")",
"else",
":",
"os",
".",
"makedirs",
"(",
"html_dir",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Writing HTML to %s\"",
"%",
"(",
"html_dir",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"template_dir",
",",
"'static_osd.html'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"template",
"=",
"f",
".",
"read",
"(",
")",
"outfile",
"=",
"self",
".",
"identifier",
"+",
"'.html'",
"outpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"outfile",
")",
"with",
"open",
"(",
"outpath",
",",
"'w'",
")",
"as",
"f",
":",
"info_json_uri",
"=",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"identifier",
",",
"'info.json'",
"]",
")",
"if",
"(",
"self",
".",
"prefix",
")",
":",
"info_json_uri",
"=",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"prefix",
",",
"info_json_uri",
"]",
")",
"d",
"=",
"dict",
"(",
"identifier",
"=",
"self",
".",
"identifier",
",",
"api_version",
"=",
"self",
".",
"api_version",
",",
"osd_version",
"=",
"self",
".",
"osd_version",
",",
"osd_uri",
"=",
"osd_js",
",",
"osd_images_prefix",
"=",
"osd_images",
",",
"osd_height",
"=",
"osd_width",
",",
"osd_width",
"=",
"osd_height",
",",
"info_json_uri",
"=",
"info_json_uri",
")",
"f",
".",
"write",
"(",
"Template",
"(",
"template",
")",
".",
"safe_substitute",
"(",
"d",
")",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s\"",
"%",
"(",
"html_dir",
",",
"outfile",
")",
")",
"# Do we want to copy OSD in there too? If so, do it only if",
"# we haven't already",
"if",
"(",
"include_osd",
")",
":",
"if",
"(",
"self",
".",
"copied_osd",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"OpenSeadragon already copied\"",
")",
"else",
":",
"# Make directory, copy JavaScript and icons (from osd_images)",
"osd_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"osd_dir",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"osd_path",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"osd_path",
")",
"shutil",
".",
"copyfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"osd_base",
",",
"osd_js",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"osd_js",
")",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s\"",
"%",
"(",
"html_dir",
",",
"osd_js",
")",
")",
"osd_images_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"osd_images",
")",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"osd_images_path",
")",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"OpenSeadragon images directory (%s) already exists, skipping\"",
"%",
"osd_images_path",
")",
"else",
":",
"shutil",
".",
"copytree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"osd_base",
",",
"osd_images",
")",
",",
"osd_images_path",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s/*\"",
"%",
"(",
"html_dir",
",",
"osd_images",
")",
")",
"self",
".",
"copied_osd",
"=",
"True"
] | Write HTML test page using OpenSeadragon for the tiles generated.
Assumes that the generate(..) method has already been called to set up
identifier etc. Parameters:
html_dir - output directory for HTML files, will be created if it
does not already exist
include_osd - true to include OpenSeadragon code
osd_width - width of OpenSeadragon pane in pixels
osd_height - height of OpenSeadragon pane in pixels | [
"Write",
"HTML",
"test",
"page",
"using",
"OpenSeadragon",
"for",
"the",
"tiles",
"generated",
"."
] | python | train |
kwikteam/phy | phy/stats/ccg.py | https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/stats/ccg.py#L57-L177 | def correlograms(spike_times,
spike_clusters,
cluster_ids=None,
sample_rate=1.,
bin_size=None,
window_size=None,
symmetrize=True,
):
"""Compute all pairwise cross-correlograms among the clusters appearing
in `spike_clusters`.
Parameters
----------
spike_times : array-like
Spike times in seconds.
spike_clusters : array-like
Spike-cluster mapping.
cluster_ids : array-like
The list of unique clusters, in any order. That order will be used
in the output array.
bin_size : float
Size of the bin, in seconds.
window_size : float
Size of the window, in seconds.
Returns
-------
correlograms : array
A `(n_clusters, n_clusters, winsize_samples)` array with all pairwise
CCGs.
"""
assert sample_rate > 0.
assert np.all(np.diff(spike_times) >= 0), ("The spike times must be "
"increasing.")
# Get the spike samples.
spike_times = np.asarray(spike_times, dtype=np.float64)
spike_samples = (spike_times * sample_rate).astype(np.int64)
spike_clusters = _as_array(spike_clusters)
assert spike_samples.ndim == 1
assert spike_samples.shape == spike_clusters.shape
# Find `binsize`.
bin_size = np.clip(bin_size, 1e-5, 1e5) # in seconds
binsize = int(sample_rate * bin_size) # in samples
assert binsize >= 1
# Find `winsize_bins`.
window_size = np.clip(window_size, 1e-5, 1e5) # in seconds
winsize_bins = 2 * int(.5 * window_size / bin_size) + 1
assert winsize_bins >= 1
assert winsize_bins % 2 == 1
# Take the cluster order into account.
if cluster_ids is None:
clusters = _unique(spike_clusters)
else:
clusters = _as_array(cluster_ids)
n_clusters = len(clusters)
# Like spike_clusters, but with 0..n_clusters-1 indices.
spike_clusters_i = _index_of(spike_clusters, clusters)
# Shift between the two copies of the spike trains.
shift = 1
# At a given shift, the mask specifies which spikes have matching spikes
# within the correlogram time window.
mask = np.ones_like(spike_samples, dtype=np.bool)
correlograms = _create_correlograms_array(n_clusters, winsize_bins)
# The loop continues as long as there is at least one spike with
# a matching spike.
while mask[:-shift].any():
# Number of time samples between spike i and spike i+shift.
spike_diff = _diff_shifted(spike_samples, shift)
# Binarize the delays between spike i and spike i+shift.
spike_diff_b = spike_diff // binsize
# Spikes with no matching spikes are masked.
mask[:-shift][spike_diff_b > (winsize_bins // 2)] = False
# Cache the masked spike delays.
m = mask[:-shift].copy()
d = spike_diff_b[m]
# # Update the masks given the clusters to update.
# m0 = np.in1d(spike_clusters[:-shift], clusters)
# m = m & m0
# d = spike_diff_b[m]
d = spike_diff_b[m]
# Find the indices in the raveled correlograms array that need
# to be incremented, taking into account the spike clusters.
indices = np.ravel_multi_index((spike_clusters_i[:-shift][m],
spike_clusters_i[+shift:][m],
d),
correlograms.shape)
# Increment the matching spikes in the correlograms array.
_increment(correlograms.ravel(), indices)
shift += 1
# Remove ACG peaks.
correlograms[np.arange(n_clusters),
np.arange(n_clusters),
0] = 0
if symmetrize:
return _symmetrize_correlograms(correlograms)
else:
return correlograms | [
"def",
"correlograms",
"(",
"spike_times",
",",
"spike_clusters",
",",
"cluster_ids",
"=",
"None",
",",
"sample_rate",
"=",
"1.",
",",
"bin_size",
"=",
"None",
",",
"window_size",
"=",
"None",
",",
"symmetrize",
"=",
"True",
",",
")",
":",
"assert",
"sample_rate",
">",
"0.",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"diff",
"(",
"spike_times",
")",
">=",
"0",
")",
",",
"(",
"\"The spike times must be \"",
"\"increasing.\"",
")",
"# Get the spike samples.",
"spike_times",
"=",
"np",
".",
"asarray",
"(",
"spike_times",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"spike_samples",
"=",
"(",
"spike_times",
"*",
"sample_rate",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"spike_clusters",
"=",
"_as_array",
"(",
"spike_clusters",
")",
"assert",
"spike_samples",
".",
"ndim",
"==",
"1",
"assert",
"spike_samples",
".",
"shape",
"==",
"spike_clusters",
".",
"shape",
"# Find `binsize`.",
"bin_size",
"=",
"np",
".",
"clip",
"(",
"bin_size",
",",
"1e-5",
",",
"1e5",
")",
"# in seconds",
"binsize",
"=",
"int",
"(",
"sample_rate",
"*",
"bin_size",
")",
"# in samples",
"assert",
"binsize",
">=",
"1",
"# Find `winsize_bins`.",
"window_size",
"=",
"np",
".",
"clip",
"(",
"window_size",
",",
"1e-5",
",",
"1e5",
")",
"# in seconds",
"winsize_bins",
"=",
"2",
"*",
"int",
"(",
".5",
"*",
"window_size",
"/",
"bin_size",
")",
"+",
"1",
"assert",
"winsize_bins",
">=",
"1",
"assert",
"winsize_bins",
"%",
"2",
"==",
"1",
"# Take the cluster oder into account.",
"if",
"cluster_ids",
"is",
"None",
":",
"clusters",
"=",
"_unique",
"(",
"spike_clusters",
")",
"else",
":",
"clusters",
"=",
"_as_array",
"(",
"cluster_ids",
")",
"n_clusters",
"=",
"len",
"(",
"clusters",
")",
"# Like spike_clusters, but with 0..n_clusters-1 indices.",
"spike_clusters_i",
"=",
"_index_of",
"(",
"spike_clusters",
",",
"clusters",
")",
"# Shift between the two copies of the spike trains.",
"shift",
"=",
"1",
"# At a given shift, the mask precises which spikes have matching spikes",
"# within the correlogram time window.",
"mask",
"=",
"np",
".",
"ones_like",
"(",
"spike_samples",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"correlograms",
"=",
"_create_correlograms_array",
"(",
"n_clusters",
",",
"winsize_bins",
")",
"# The loop continues as long as there is at least one spike with",
"# a matching spike.",
"while",
"mask",
"[",
":",
"-",
"shift",
"]",
".",
"any",
"(",
")",
":",
"# Number of time samples between spike i and spike i+shift.",
"spike_diff",
"=",
"_diff_shifted",
"(",
"spike_samples",
",",
"shift",
")",
"# Binarize the delays between spike i and spike i+shift.",
"spike_diff_b",
"=",
"spike_diff",
"//",
"binsize",
"# Spikes with no matching spikes are masked.",
"mask",
"[",
":",
"-",
"shift",
"]",
"[",
"spike_diff_b",
">",
"(",
"winsize_bins",
"//",
"2",
")",
"]",
"=",
"False",
"# Cache the masked spike delays.",
"m",
"=",
"mask",
"[",
":",
"-",
"shift",
"]",
".",
"copy",
"(",
")",
"d",
"=",
"spike_diff_b",
"[",
"m",
"]",
"# # Update the masks given the clusters to update.",
"# m0 = np.in1d(spike_clusters[:-shift], clusters)",
"# m = m & m0",
"# d = spike_diff_b[m]",
"d",
"=",
"spike_diff_b",
"[",
"m",
"]",
"# Find the indices in the raveled correlograms array that need",
"# to be incremented, taking into account the spike clusters.",
"indices",
"=",
"np",
".",
"ravel_multi_index",
"(",
"(",
"spike_clusters_i",
"[",
":",
"-",
"shift",
"]",
"[",
"m",
"]",
",",
"spike_clusters_i",
"[",
"+",
"shift",
":",
"]",
"[",
"m",
"]",
",",
"d",
")",
",",
"correlograms",
".",
"shape",
")",
"# Increment the matching spikes in the correlograms array.",
"_increment",
"(",
"correlograms",
".",
"ravel",
"(",
")",
",",
"indices",
")",
"shift",
"+=",
"1",
"# Remove ACG peaks.",
"correlograms",
"[",
"np",
".",
"arange",
"(",
"n_clusters",
")",
",",
"np",
".",
"arange",
"(",
"n_clusters",
")",
",",
"0",
"]",
"=",
"0",
"if",
"symmetrize",
":",
"return",
"_symmetrize_correlograms",
"(",
"correlograms",
")",
"else",
":",
"return",
"correlograms"
] | Compute all pairwise cross-correlograms among the clusters appearing
in `spike_clusters`.
Parameters
----------
spike_times : array-like
Spike times in seconds.
spike_clusters : array-like
Spike-cluster mapping.
cluster_ids : array-like
The list of unique clusters, in any order. That order will be used
in the output array.
bin_size : float
Size of the bin, in seconds.
window_size : float
Size of the window, in seconds.
Returns
-------
correlograms : array
A `(n_clusters, n_clusters, winsize_samples)` array with all pairwise
CCGs. | [
"Compute",
"all",
"pairwise",
"cross",
"-",
"correlograms",
"among",
"the",
"clusters",
"appearing",
"in",
"spike_clusters",
"."
] | python | train |
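A usage sketch for the function above with synthetic spike data; it assumes phy is installed and only uses the parameters documented in the record.

import numpy as np
from phy.stats.ccg import correlograms   # module path taken from the record

rng = np.random.RandomState(0)
spike_times = np.sort(rng.uniform(0., 10., size=1000))    # seconds, must be increasing
spike_clusters = rng.randint(0, 3, size=1000)             # three clusters: 0, 1, 2

ccg = correlograms(spike_times, spike_clusters,
                   cluster_ids=[0, 1, 2],
                   sample_rate=20000.,
                   bin_size=1e-3,
                   window_size=50e-3)
print(ccg.shape)   # (3, 3, n_bins)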
numenta/nupic | src/nupic/algorithms/backtracking_tm_cpp.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/backtracking_tm_cpp.py#L625-L646 | def getSegmentOnCell(self, c, i, segIdx):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.
"""
segList = self.cells4.getNonEmptySegList(c,i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0
# Accumulate segment information
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
seg.getPositiveActivations(),
seg.getTotalActivations(), seg.getLastActiveIteration(),
seg.getLastPosDutyCycle(),
seg.getLastPosDutyCycleIteration()])
for s in xrange(numSyn):
sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
result.append([int(sc), int(si), seg.getPermanence(s)])
return result | [
"def",
"getSegmentOnCell",
"(",
"self",
",",
"c",
",",
"i",
",",
"segIdx",
")",
":",
"segList",
"=",
"self",
".",
"cells4",
".",
"getNonEmptySegList",
"(",
"c",
",",
"i",
")",
"seg",
"=",
"self",
".",
"cells4",
".",
"getSegment",
"(",
"c",
",",
"i",
",",
"segList",
"[",
"segIdx",
"]",
")",
"numSyn",
"=",
"seg",
".",
"size",
"(",
")",
"assert",
"numSyn",
"!=",
"0",
"# Accumulate segment information",
"result",
"=",
"[",
"]",
"result",
".",
"append",
"(",
"[",
"int",
"(",
"segIdx",
")",
",",
"bool",
"(",
"seg",
".",
"isSequenceSegment",
"(",
")",
")",
",",
"seg",
".",
"getPositiveActivations",
"(",
")",
",",
"seg",
".",
"getTotalActivations",
"(",
")",
",",
"seg",
".",
"getLastActiveIteration",
"(",
")",
",",
"seg",
".",
"getLastPosDutyCycle",
"(",
")",
",",
"seg",
".",
"getLastPosDutyCycleIteration",
"(",
")",
"]",
")",
"for",
"s",
"in",
"xrange",
"(",
"numSyn",
")",
":",
"sc",
",",
"si",
"=",
"self",
".",
"getColCellIdx",
"(",
"seg",
".",
"getSrcCellIdx",
"(",
"s",
")",
")",
"result",
".",
"append",
"(",
"[",
"int",
"(",
"sc",
")",
",",
"int",
"(",
"si",
")",
",",
"seg",
".",
"getPermanence",
"(",
"s",
")",
"]",
")",
"return",
"result"
] | Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`. | [
"Overrides",
":",
"meth",
":",
"nupic",
".",
"algorithms",
".",
"backtracking_tm",
".",
"BacktrackingTM",
".",
"getSegmentOnCell",
"."
] | python | valid |
ghcollin/multitables | multitables.py | https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L202-L217 | def get(self):
"""
Fetch the next item in the queue. Blocks until an item is ready.
:return: The next unsigned integer in the queue.
"""
with self.cvar:
while True:
if self.size.value > 0:
rval = self.vals[self.tail.value]
self.tail.value = (self.tail.value + 1) % len(self.vals)
self.size.value -= 1
if rval == -2:
return QueueClosed
assert(rval >= 0)
return rval
self.cvar.wait() | [
"def",
"get",
"(",
"self",
")",
":",
"with",
"self",
".",
"cvar",
":",
"while",
"True",
":",
"if",
"self",
".",
"size",
".",
"value",
">",
"0",
":",
"rval",
"=",
"self",
".",
"vals",
"[",
"self",
".",
"tail",
".",
"value",
"]",
"self",
".",
"tail",
".",
"value",
"=",
"(",
"self",
".",
"tail",
".",
"value",
"+",
"1",
")",
"%",
"len",
"(",
"self",
".",
"vals",
")",
"self",
".",
"size",
".",
"value",
"-=",
"1",
"if",
"rval",
"==",
"-",
"2",
":",
"return",
"QueueClosed",
"assert",
"(",
"rval",
">=",
"0",
")",
"return",
"rval",
"self",
".",
"cvar",
".",
"wait",
"(",
")"
] | Fetch the next item in the queue. Blocks until an item is ready.
:return: The next unsigned integer in the queue. | [
"Fetch",
"the",
"next",
"item",
"in",
"the",
"queue",
".",
"Blocks",
"until",
"an",
"item",
"is",
"ready",
".",
":",
"return",
":",
"The",
"next",
"unsigned",
"integer",
"in",
"the",
"queue",
"."
] | python | test |
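The docstring above describes a blocking index queue whose get() hands back a QueueClosed marker once the producer side has been closed. A minimal, self-contained sketch of that sentinel pattern (a plain list stands in for the shared circular buffer; the names are illustrative, not multitables' public API):

QueueClosed = object()                 # stand-in for the library's closed-queue marker

pending = [0, 1, 2, QueueClosed]       # indices a producer might have published
for item in pending:
    if item is QueueClosed:            # shutdown signal: stop consuming
        print("queue closed")
        break
    print("got index", item)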
QuantEcon/QuantEcon.py | quantecon/game_theory/vertex_enumeration.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/game_theory/vertex_enumeration.py#L88-L123 | def _vertex_enumeration_gen(labelings_bits_tup, equations_tup, trans_recips):
"""
Main body of `vertex_enumeration_gen`.
Parameters
----------
labelings_bits_tup : tuple(ndarray(np.uint64, ndim=1))
Tuple of ndarrays of integers representing labelings of the
vertices of the best response polytopes.
equations_tup : tuple(ndarray(float, ndim=2))
Tuple of ndarrays containing the hyperplane equations of the
polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations.
"""
m, n = equations_tup[0].shape[1] - 1, equations_tup[1].shape[1] - 1
num_vertices0, num_vertices1 = \
equations_tup[0].shape[0], equations_tup[1].shape[0]
ZERO_LABELING0_BITS = (np.uint64(1) << np.uint64(m)) - np.uint64(1)
COMPLETE_LABELING_BITS = (np.uint64(1) << np.uint64(m+n)) - np.uint64(1)
for i in range(num_vertices0):
if labelings_bits_tup[0][i] == ZERO_LABELING0_BITS:
continue
for j in range(num_vertices1):
xor = labelings_bits_tup[0][i] ^ labelings_bits_tup[1][j]
if xor == COMPLETE_LABELING_BITS:
yield _get_mixed_actions(
labelings_bits_tup[0][i],
(equations_tup[0][i], equations_tup[1][j]),
trans_recips
)
break | [
"def",
"_vertex_enumeration_gen",
"(",
"labelings_bits_tup",
",",
"equations_tup",
",",
"trans_recips",
")",
":",
"m",
",",
"n",
"=",
"equations_tup",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
",",
"equations_tup",
"[",
"1",
"]",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
"num_vertices0",
",",
"num_vertices1",
"=",
"equations_tup",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"equations_tup",
"[",
"1",
"]",
".",
"shape",
"[",
"0",
"]",
"ZERO_LABELING0_BITS",
"=",
"(",
"np",
".",
"uint64",
"(",
"1",
")",
"<<",
"np",
".",
"uint64",
"(",
"m",
")",
")",
"-",
"np",
".",
"uint64",
"(",
"1",
")",
"COMPLETE_LABELING_BITS",
"=",
"(",
"np",
".",
"uint64",
"(",
"1",
")",
"<<",
"np",
".",
"uint64",
"(",
"m",
"+",
"n",
")",
")",
"-",
"np",
".",
"uint64",
"(",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"num_vertices0",
")",
":",
"if",
"labelings_bits_tup",
"[",
"0",
"]",
"[",
"i",
"]",
"==",
"ZERO_LABELING0_BITS",
":",
"continue",
"for",
"j",
"in",
"range",
"(",
"num_vertices1",
")",
":",
"xor",
"=",
"labelings_bits_tup",
"[",
"0",
"]",
"[",
"i",
"]",
"^",
"labelings_bits_tup",
"[",
"1",
"]",
"[",
"j",
"]",
"if",
"xor",
"==",
"COMPLETE_LABELING_BITS",
":",
"yield",
"_get_mixed_actions",
"(",
"labelings_bits_tup",
"[",
"0",
"]",
"[",
"i",
"]",
",",
"(",
"equations_tup",
"[",
"0",
"]",
"[",
"i",
"]",
",",
"equations_tup",
"[",
"1",
"]",
"[",
"j",
"]",
")",
",",
"trans_recips",
")",
"break"
] | Main body of `vertex_enumeration_gen`.
Parameters
----------
labelings_bits_tup : tuple(ndarray(np.uint64, ndim=1))
Tuple of ndarrays of integers representing labelings of the
vertices of the best response polytopes.
equations_tup : tuple(ndarray(float, ndim=2))
Tuple of ndarrays containing the hyperplane equations of the
polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations. | [
"Main",
"body",
"of",
"vertex_enumeration_gen",
"."
] | python | train |
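The vertex-enumeration routine above decides whether a vertex pair is completely labeled with a single XOR of two bitmasks. A small, self-contained illustration of that test with made-up values (not tied to QuantEcon's data structures): the pair passes exactly when the XOR of the two labeling bitmasks equals the all-ones mask over the m + n label bits.

import numpy as np

m, n = 2, 3                                     # hypothetical action counts for the two players
COMPLETE = (np.uint64(1) << np.uint64(m + n)) - np.uint64(1)

labeling_0 = np.uint64(0b10011)                 # labels held by a vertex of polytope 0 (made up)
labeling_1 = np.uint64(0b01100)                 # labels held by a vertex of polytope 1 (made up)

print((labeling_0 ^ labeling_1) == COMPLETE)    # True: together they account for every label bit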
heikomuller/sco-client | scocli/modelrun.py | https://github.com/heikomuller/sco-client/blob/c4afab71297f73003379bba4c1679be9dcf7cef8/scocli/modelrun.py#L311-L365 | def create(url, model_id, name, arguments, properties=None):
"""Create a new model run using the given SCO-API create model run Url.
Parameters
----------
url : string
Url to POST model run create model run request
model_id : string
Unique model identifier
name : string
User-defined name for model run
arguments : Dictionary
Dictionary of arguments for model run
properties : Dictionary, optional
Set of additional properties for the created model run.
Returns
-------
string
Url of created model run resource
"""
# Create list of model run arguments. Catch TypeErrors if arguments is
# not a list.
obj_args = []
try:
for arg in arguments:
obj_args.append({'name' : arg, 'value' : arguments[arg]})
except TypeError as ex:
raise ValueError('invalid argument set')
# Create request body and send POST request to given Url
body = {
'model' : model_id,
'name' : name,
'arguments' : obj_args,
}
# Create list of properties if given. Catch TypeErrors if properties is
# not a list.
if not properties is None:
obj_props = []
try:
for key in properties:
if key != 'name':
obj_props.append({'key':key, 'value':properties[key]})
except TypeError as ex:
raise ValueError('invalid property set')
body['properties'] = obj_props
# POST create model run request
try:
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(body))
except urllib2.URLError as ex:
raise ValueError(str(ex))
# Get model run self reference from successful response
return references_to_dict(json.load(response)['links'])[REF_SELF] | [
"def",
"create",
"(",
"url",
",",
"model_id",
",",
"name",
",",
"arguments",
",",
"properties",
"=",
"None",
")",
":",
"# Create list of model run arguments. Catch TypeErrors if arguments is",
"# not a list.",
"obj_args",
"=",
"[",
"]",
"try",
":",
"for",
"arg",
"in",
"arguments",
":",
"obj_args",
".",
"append",
"(",
"{",
"'name'",
":",
"arg",
",",
"'value'",
":",
"arguments",
"[",
"arg",
"]",
"}",
")",
"except",
"TypeError",
"as",
"ex",
":",
"raise",
"ValueError",
"(",
"'invalid argument set'",
")",
"# Create request body and send POST request to given Url",
"body",
"=",
"{",
"'model'",
":",
"model_id",
",",
"'name'",
":",
"name",
",",
"'arguments'",
":",
"obj_args",
",",
"}",
"# Create list of properties if given. Catch TypeErrors if properties is",
"# not a list.",
"if",
"not",
"properties",
"is",
"None",
":",
"obj_props",
"=",
"[",
"]",
"try",
":",
"for",
"key",
"in",
"properties",
":",
"if",
"key",
"!=",
"'name'",
":",
"obj_props",
".",
"append",
"(",
"{",
"'key'",
":",
"key",
",",
"'value'",
":",
"properties",
"[",
"key",
"]",
"}",
")",
"except",
"TypeError",
"as",
"ex",
":",
"raise",
"ValueError",
"(",
"'invalid property set'",
")",
"body",
"[",
"'properties'",
"]",
"=",
"obj_props",
"# POST create model run request",
"try",
":",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"url",
")",
"req",
".",
"add_header",
"(",
"'Content-Type'",
",",
"'application/json'",
")",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"req",
",",
"json",
".",
"dumps",
"(",
"body",
")",
")",
"except",
"urllib2",
".",
"URLError",
"as",
"ex",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"ex",
")",
")",
"# Get model run self reference from successful response",
"return",
"references_to_dict",
"(",
"json",
".",
"load",
"(",
"response",
")",
"[",
"'links'",
"]",
")",
"[",
"REF_SELF",
"]"
] | Create a new model run using the given SCO-API create model run Url.
Parameters
----------
url : string
Url to POST model run create model run request
model_id : string
Unique model identifier
name : string
User-defined name for model run
arguments : Dictionary
Dictionary of arguments for model run
properties : Dictionary, optional
Set of additional properties for the created model run.
Returns
-------
string
Url of created model run resource | [
"Create",
"a",
"new",
"model",
"run",
"using",
"the",
"given",
"SCO",
"-",
"API",
"create",
"model",
"run",
"Url",
"."
] | python | train |
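For orientation, a sketch of the JSON body the create() helper above assembles before POSTing it to the SCO API; every identifier and value below is made up for illustration.

import json

body = {
    'model': 'model-001',                                   # hypothetical model identifier
    'name': 'demo run',
    'arguments': [{'name': 'alpha', 'value': 0.5}],         # argument dict flattened to name/value pairs
    'properties': [{'key': 'note', 'value': 'example'}],    # optional extra properties, 'name' excluded
}
print(json.dumps(body, indent=2))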
datastax/python-driver | cassandra/cluster.py | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L4386-L4400 | def one(self):
"""
Return a single row of the results or None if empty. This is basically
a shortcut to `result_set.current_rows[0]` and should only be used when
you know a query returns a single row. Consider using an iterator if the
ResultSet contains more than one row.
"""
row = None
if self._current_rows:
try:
row = self._current_rows[0]
except TypeError: # generator object is not subscriptable, PYTHON-1026
row = next(iter(self._current_rows))
return row | [
"def",
"one",
"(",
"self",
")",
":",
"row",
"=",
"None",
"if",
"self",
".",
"_current_rows",
":",
"try",
":",
"row",
"=",
"self",
".",
"_current_rows",
"[",
"0",
"]",
"except",
"TypeError",
":",
"# generator object is not subscriptable, PYTHON-1026",
"row",
"=",
"next",
"(",
"iter",
"(",
"self",
".",
"_current_rows",
")",
")",
"return",
"row"
] | Return a single row of the results or None if empty. This is basically
a shortcut to `result_set.current_rows[0]` and should only be used when
you know a query returns a single row. Consider using an iterator if the
ResultSet contains more than one row. | [
"Return",
"a",
"single",
"row",
"of",
"the",
"results",
"or",
"None",
"if",
"empty",
".",
"This",
"is",
"basically",
"a",
"shortcut",
"to",
"result_set",
".",
"current_rows",
"[",
"0",
"]",
"and",
"should",
"only",
"be",
"used",
"when",
"you",
"know",
"a",
"query",
"returns",
"a",
"single",
"row",
".",
"Consider",
"using",
"an",
"iterator",
"if",
"the",
"ResultSet",
"contains",
"more",
"than",
"one",
"row",
"."
] | python | train |
sorgerlab/indra | indra/sources/biopax/processor.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1398-L1404 | def _is_small_molecule(pe):
"""Return True if the element is a small molecule"""
val = isinstance(pe, _bp('SmallMolecule')) or \
isinstance(pe, _bpimpl('SmallMolecule')) or \
isinstance(pe, _bp('SmallMoleculeReference')) or \
isinstance(pe, _bpimpl('SmallMoleculeReference'))
return val | [
"def",
"_is_small_molecule",
"(",
"pe",
")",
":",
"val",
"=",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'SmallMolecule'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'SmallMolecule'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'SmallMoleculeReference'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'SmallMoleculeReference'",
")",
")",
"return",
"val"
] | Return True if the element is a small molecule | [
"Return",
"True",
"if",
"the",
"element",
"is",
"a",
"small",
"molecule"
] | python | train |
census-instrumentation/opencensus-python | opencensus/trace/stack_trace.py | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/stack_trace.py#L86-L103 | def format_stack_frame_json(self):
"""Convert StackFrame object to json format."""
stack_frame_json = {}
stack_frame_json['function_name'] = get_truncatable_str(
self.func_name)
stack_frame_json['original_function_name'] = get_truncatable_str(
self.original_func_name)
stack_frame_json['file_name'] = get_truncatable_str(self.file_name)
stack_frame_json['line_number'] = self.line_num
stack_frame_json['column_number'] = self.col_num
stack_frame_json['load_module'] = {
'module': get_truncatable_str(self.load_module),
'build_id': get_truncatable_str(self.build_id),
}
stack_frame_json['source_version'] = get_truncatable_str(
self.source_version)
return stack_frame_json | [
"def",
"format_stack_frame_json",
"(",
"self",
")",
":",
"stack_frame_json",
"=",
"{",
"}",
"stack_frame_json",
"[",
"'function_name'",
"]",
"=",
"get_truncatable_str",
"(",
"self",
".",
"func_name",
")",
"stack_frame_json",
"[",
"'original_function_name'",
"]",
"=",
"get_truncatable_str",
"(",
"self",
".",
"original_func_name",
")",
"stack_frame_json",
"[",
"'file_name'",
"]",
"=",
"get_truncatable_str",
"(",
"self",
".",
"file_name",
")",
"stack_frame_json",
"[",
"'line_number'",
"]",
"=",
"self",
".",
"line_num",
"stack_frame_json",
"[",
"'column_number'",
"]",
"=",
"self",
".",
"col_num",
"stack_frame_json",
"[",
"'load_module'",
"]",
"=",
"{",
"'module'",
":",
"get_truncatable_str",
"(",
"self",
".",
"load_module",
")",
",",
"'build_id'",
":",
"get_truncatable_str",
"(",
"self",
".",
"build_id",
")",
",",
"}",
"stack_frame_json",
"[",
"'source_version'",
"]",
"=",
"get_truncatable_str",
"(",
"self",
".",
"source_version",
")",
"return",
"stack_frame_json"
] | Convert StackFrame object to json format. | [
"Convert",
"StackFrame",
"object",
"to",
"json",
"format",
"."
] | python | train |
googleapis/google-cloud-python | bigtable/google/cloud/bigtable/client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/client.py#L230-L251 | def instance_admin_client(self):
"""Getter for the gRPC stub used for the Table Admin API.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_instance_admin_client]
:end-before: [END bigtable_instance_admin_client]
:rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin`
:returns: A BigtableInstanceAdmin instance.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if self._instance_admin_client is None:
if not self._admin:
raise ValueError("Client is not an admin client.")
self._instance_admin_client = _create_gapic_client(
bigtable_admin_v2.BigtableInstanceAdminClient
)(self)
return self._instance_admin_client | [
"def",
"instance_admin_client",
"(",
"self",
")",
":",
"if",
"self",
".",
"_instance_admin_client",
"is",
"None",
":",
"if",
"not",
"self",
".",
"_admin",
":",
"raise",
"ValueError",
"(",
"\"Client is not an admin client.\"",
")",
"self",
".",
"_instance_admin_client",
"=",
"_create_gapic_client",
"(",
"bigtable_admin_v2",
".",
"BigtableInstanceAdminClient",
")",
"(",
"self",
")",
"return",
"self",
".",
"_instance_admin_client"
] | Getter for the gRPC stub used for the Table Admin API.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_instance_admin_client]
:end-before: [END bigtable_instance_admin_client]
:rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin`
:returns: A BigtableInstanceAdmin instance.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed. | [
"Getter",
"for",
"the",
"gRPC",
"stub",
"used",
"for",
"the",
"Table",
"Admin",
"API",
"."
] | python | train |
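The property above follows a lazy-initialisation pattern: the gRPC client is built on first access, and only if the client was created in admin mode. A generic, stand-alone sketch of that pattern (not the google-cloud-bigtable implementation itself):

class LazyAdminClient:
    def __init__(self, admin=False):
        self._admin = admin
        self._instance_admin_client = None

    @property
    def instance_admin_client(self):
        if self._instance_admin_client is None:
            if not self._admin:
                raise ValueError("Client is not an admin client.")
            self._instance_admin_client = object()   # stand-in for the real gRPC client
        return self._instance_admin_client

print(LazyAdminClient(admin=True).instance_admin_client)     # built once, then cached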
pennersr/django-allauth | allauth/socialaccount/providers/base.py | https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/socialaccount/providers/base.py#L65-L99 | def sociallogin_from_response(self, request, response):
"""
Instantiates and populates a `SocialLogin` model based on the data
retrieved in `response`. The method does NOT save the model to the
DB.
Data for `SocialLogin` will be extracted from `response` with the
help of the `.extract_uid()`, `.extract_extra_data()`,
`.extract_common_fields()`, and `.extract_email_addresses()`
methods.
:param request: a Django `HttpRequest` object.
:param response: object retrieved via the callback response of the
social auth provider.
:return: A populated instance of the `SocialLogin` model (unsaved).
"""
# NOTE: Avoid loading models at top due to registry boot...
from allauth.socialaccount.models import SocialLogin, SocialAccount
adapter = get_adapter(request)
uid = self.extract_uid(response)
extra_data = self.extract_extra_data(response)
common_fields = self.extract_common_fields(response)
socialaccount = SocialAccount(extra_data=extra_data,
uid=uid,
provider=self.id)
email_addresses = self.extract_email_addresses(response)
self.cleanup_email_addresses(common_fields.get('email'),
email_addresses)
sociallogin = SocialLogin(account=socialaccount,
email_addresses=email_addresses)
user = sociallogin.user = adapter.new_user(request, sociallogin)
user.set_unusable_password()
adapter.populate_user(request, sociallogin, common_fields)
return sociallogin | [
"def",
"sociallogin_from_response",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"# NOTE: Avoid loading models at top due to registry boot...",
"from",
"allauth",
".",
"socialaccount",
".",
"models",
"import",
"SocialLogin",
",",
"SocialAccount",
"adapter",
"=",
"get_adapter",
"(",
"request",
")",
"uid",
"=",
"self",
".",
"extract_uid",
"(",
"response",
")",
"extra_data",
"=",
"self",
".",
"extract_extra_data",
"(",
"response",
")",
"common_fields",
"=",
"self",
".",
"extract_common_fields",
"(",
"response",
")",
"socialaccount",
"=",
"SocialAccount",
"(",
"extra_data",
"=",
"extra_data",
",",
"uid",
"=",
"uid",
",",
"provider",
"=",
"self",
".",
"id",
")",
"email_addresses",
"=",
"self",
".",
"extract_email_addresses",
"(",
"response",
")",
"self",
".",
"cleanup_email_addresses",
"(",
"common_fields",
".",
"get",
"(",
"'email'",
")",
",",
"email_addresses",
")",
"sociallogin",
"=",
"SocialLogin",
"(",
"account",
"=",
"socialaccount",
",",
"email_addresses",
"=",
"email_addresses",
")",
"user",
"=",
"sociallogin",
".",
"user",
"=",
"adapter",
".",
"new_user",
"(",
"request",
",",
"sociallogin",
")",
"user",
".",
"set_unusable_password",
"(",
")",
"adapter",
".",
"populate_user",
"(",
"request",
",",
"sociallogin",
",",
"common_fields",
")",
"return",
"sociallogin"
] | Instantiates and populates a `SocialLogin` model based on the data
retrieved in `response`. The method does NOT save the model to the
DB.
Data for `SocialLogin` will be extracted from `response` with the
help of the `.extract_uid()`, `.extract_extra_data()`,
`.extract_common_fields()`, and `.extract_email_addresses()`
methods.
:param request: a Django `HttpRequest` object.
:param response: object retrieved via the callback response of the
social auth provider.
:return: A populated instance of the `SocialLogin` model (unsaved). | [
"Instantiates",
"and",
"populates",
"a",
"SocialLogin",
"model",
"based",
"on",
"the",
"data",
"retrieved",
"in",
"response",
".",
"The",
"method",
"does",
"NOT",
"save",
"the",
"model",
"to",
"the",
"DB",
"."
] | python | train |
Azure/azure-uamqp-python | uamqp/client.py | https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/client.py#L633-L651 | def redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._pending_messages = []
self._remote_address = address.Target(redirect.address)
self._redirect(redirect, auth) | [
"def",
"redirect",
"(",
"self",
",",
"redirect",
",",
"auth",
")",
":",
"if",
"self",
".",
"_ext_connection",
":",
"raise",
"ValueError",
"(",
"\"Clients with a shared connection cannot be \"",
"\"automatically redirected.\"",
")",
"if",
"self",
".",
"message_handler",
":",
"self",
".",
"message_handler",
".",
"destroy",
"(",
")",
"self",
".",
"message_handler",
"=",
"None",
"self",
".",
"_pending_messages",
"=",
"[",
"]",
"self",
".",
"_remote_address",
"=",
"address",
".",
"Target",
"(",
"redirect",
".",
"address",
")",
"self",
".",
"_redirect",
"(",
"redirect",
",",
"auth",
")"
] | Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth | [
"Redirect",
"the",
"client",
"endpoint",
"using",
"a",
"Link",
"DETACH",
"redirect",
"response",
"."
] | python | train |
rosenbrockc/fortpy | fortpy/isense/context.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/context.py#L348-L360 | def _match_exec(self, i):
"""Looks at line 'i' for a subroutine or function definition."""
self.col_match = self.RE_EXEC.match(self._source[i])
if self.col_match is not None:
if self.col_match.group("codetype") == "function":
self.el_type = Function
else:
self.el_type = Subroutine
self.el_name = self.col_match.group("name")
return True
else:
return False | [
"def",
"_match_exec",
"(",
"self",
",",
"i",
")",
":",
"self",
".",
"col_match",
"=",
"self",
".",
"RE_EXEC",
".",
"match",
"(",
"self",
".",
"_source",
"[",
"i",
"]",
")",
"if",
"self",
".",
"col_match",
"is",
"not",
"None",
":",
"if",
"self",
".",
"col_match",
".",
"group",
"(",
"\"codetype\"",
")",
"==",
"\"function\"",
":",
"self",
".",
"el_type",
"=",
"Function",
"else",
":",
"self",
".",
"el_type",
"=",
"Subroutine",
"self",
".",
"el_name",
"=",
"self",
".",
"col_match",
".",
"group",
"(",
"\"name\"",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Looks at line 'i' for a subroutine or function definition. | [
"Looks",
"at",
"line",
"i",
"for",
"a",
"subroutine",
"or",
"function",
"definition",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/pathlib2/__init__.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pathlib2/__init__.py#L1647-L1656 | def expanduser(self):
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not (self._drv or self._root)
and self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self | [
"def",
"expanduser",
"(",
"self",
")",
":",
"if",
"(",
"not",
"(",
"self",
".",
"_drv",
"or",
"self",
".",
"_root",
")",
"and",
"self",
".",
"_parts",
"and",
"self",
".",
"_parts",
"[",
"0",
"]",
"[",
":",
"1",
"]",
"==",
"'~'",
")",
":",
"homedir",
"=",
"self",
".",
"_flavour",
".",
"gethomedir",
"(",
"self",
".",
"_parts",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"return",
"self",
".",
"_from_parts",
"(",
"[",
"homedir",
"]",
"+",
"self",
".",
"_parts",
"[",
"1",
":",
"]",
")",
"return",
"self"
] | Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser) | [
"Return",
"a",
"new",
"path",
"with",
"expanded",
"~",
"and",
"~user",
"constructs",
"(",
"as",
"returned",
"by",
"os",
".",
"path",
".",
"expanduser",
")"
] | python | train |
dslackw/slpkg | slpkg/main.py | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L665-L682 | def pkg_find(self):
"""Find packages from all enabled repositories
"""
flag = []
options = [
"-F",
"--FIND"
]
additional_options = ["--case-ins"]
for arg in self.args:
if arg in additional_options:
flag.append(arg)
self.args.remove(arg)
packages = self.args[1:]
if len(self.args) > 1 and self.args[0] in options:
FindFromRepos().find(packages, flag)
else:
usage("") | [
"def",
"pkg_find",
"(",
"self",
")",
":",
"flag",
"=",
"[",
"]",
"options",
"=",
"[",
"\"-F\"",
",",
"\"--FIND\"",
"]",
"additional_options",
"=",
"[",
"\"--case-ins\"",
"]",
"for",
"arg",
"in",
"self",
".",
"args",
":",
"if",
"arg",
"in",
"additional_options",
":",
"flag",
".",
"append",
"(",
"arg",
")",
"self",
".",
"args",
".",
"remove",
"(",
"arg",
")",
"packages",
"=",
"self",
".",
"args",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"self",
".",
"args",
")",
">",
"1",
"and",
"self",
".",
"args",
"[",
"0",
"]",
"in",
"options",
":",
"FindFromRepos",
"(",
")",
".",
"find",
"(",
"packages",
",",
"flag",
")",
"else",
":",
"usage",
"(",
"\"\"",
")"
] | Find packages from all enabled repositories | [
"Find",
"packages",
"from",
"all",
"enabled",
"repositories"
] | python | train |
inasafe/inasafe | safe/gui/tools/help/peta_bencana_help.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/help/peta_bencana_help.py#L13-L26 | def peta_bencana_help():
"""Help message for PetaBencana dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message | [
"def",
"peta_bencana_help",
"(",
")",
":",
"message",
"=",
"m",
".",
"Message",
"(",
")",
"message",
".",
"add",
"(",
"m",
".",
"Brand",
"(",
")",
")",
"message",
".",
"add",
"(",
"heading",
"(",
")",
")",
"message",
".",
"add",
"(",
"content",
"(",
")",
")",
"return",
"message"
] | Help message for PetaBencana dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message | [
"Help",
"message",
"for",
"PetaBencana",
"dialog",
"."
] | python | train |
Clinical-Genomics/scout | scout/server/blueprints/cases/views.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L277-L311 | def matchmaker_delete(institute_id, case_name):
"""Remove a case from MatchMaker"""
# check that only authorized users can delete patients from MME
user_obj = store.user(current_user.email)
if 'mme_submitter' not in user_obj['roles']:
flash('unauthorized request', 'warning')
return redirect(request.referrer)
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
# Required params for sending a delete request to MME:
mme_base_url = current_app.config.get('MME_URL')
mme_token = current_app.config.get('MME_TOKEN')
if not mme_base_url or not mme_token:
flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger')
return redirect(request.referrer)
delete_result = controllers.mme_delete(case_obj, mme_base_url, mme_token)
n_deleted = 0
category = 'warning'
for resp in delete_result:
if resp['status_code'] == 200:
n_deleted += 1
else:
flash(resp['message'], category)
if n_deleted:
category = 'success'
# update case by removing mme submission
# and create events for patients deletion from MME
user_obj = store.user(current_user.email)
store.case_mme_delete(case_obj=case_obj, user_obj=user_obj)
flash('Number of patients deleted from Matchmaker: {} out of {}'.format(n_deleted, len(delete_result)), category)
return redirect(request.referrer) | [
"def",
"matchmaker_delete",
"(",
"institute_id",
",",
"case_name",
")",
":",
"# check that only authorized users can delete patients from MME",
"user_obj",
"=",
"store",
".",
"user",
"(",
"current_user",
".",
"email",
")",
"if",
"'mme_submitter'",
"not",
"in",
"user_obj",
"[",
"'roles'",
"]",
":",
"flash",
"(",
"'unauthorized request'",
",",
"'warning'",
")",
"return",
"redirect",
"(",
"request",
".",
"referrer",
")",
"institute_obj",
",",
"case_obj",
"=",
"institute_and_case",
"(",
"store",
",",
"institute_id",
",",
"case_name",
")",
"# Required params for sending a delete request to MME:",
"mme_base_url",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'MME_URL'",
")",
"mme_token",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'MME_TOKEN'",
")",
"if",
"not",
"mme_base_url",
"or",
"not",
"mme_token",
":",
"flash",
"(",
"'An error occurred reading matchmaker connection parameters. Please check config file!'",
",",
"'danger'",
")",
"return",
"redirect",
"(",
"request",
".",
"referrer",
")",
"delete_result",
"=",
"controllers",
".",
"mme_delete",
"(",
"case_obj",
",",
"mme_base_url",
",",
"mme_token",
")",
"n_deleted",
"=",
"0",
"category",
"=",
"'warning'",
"for",
"resp",
"in",
"delete_result",
":",
"if",
"resp",
"[",
"'status_code'",
"]",
"==",
"200",
":",
"n_deleted",
"+=",
"1",
"else",
":",
"flash",
"(",
"resp",
"[",
"'message'",
"]",
",",
"category",
")",
"if",
"n_deleted",
":",
"category",
"=",
"'success'",
"# update case by removing mme submission",
"# and create events for patients deletion from MME",
"user_obj",
"=",
"store",
".",
"user",
"(",
"current_user",
".",
"email",
")",
"store",
".",
"case_mme_delete",
"(",
"case_obj",
"=",
"case_obj",
",",
"user_obj",
"=",
"user_obj",
")",
"flash",
"(",
"'Number of patients deleted from Matchmaker: {} out of {}'",
".",
"format",
"(",
"n_deleted",
",",
"len",
"(",
"delete_result",
")",
")",
",",
"category",
")",
"return",
"redirect",
"(",
"request",
".",
"referrer",
")"
] | Remove a case from MatchMaker | [
"Remove",
"a",
"case",
"from",
"MatchMaker"
] | python | test |
KelSolaar/Umbra | umbra/ui/visual_accelerators.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/visual_accelerators.py#L68-L102 | def highlight_occurences(editor):
"""
Highlights given editor current line.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
"""
format = editor.language.theme.get("accelerator.occurence")
if not format:
return False
extra_selections = editor.extraSelections() or []
if not editor.isReadOnly():
word = editor.get_word_under_cursor()
if not word:
return False
block = editor.document().findBlock(0)
cursor = editor.document().find(word,
block.position(),
QTextDocument.FindCaseSensitively | QTextDocument.FindWholeWords)
while block.isValid() and cursor.position() != -1:
selection = QTextEdit.ExtraSelection()
selection.format.setBackground(format.background())
selection.cursor = cursor
extra_selections.append(selection)
cursor = editor.document().find(word,
cursor.position(),
QTextDocument.FindCaseSensitively | QTextDocument.FindWholeWords)
block = block.next()
editor.setExtraSelections(extra_selections)
return True | [
"def",
"highlight_occurences",
"(",
"editor",
")",
":",
"format",
"=",
"editor",
".",
"language",
".",
"theme",
".",
"get",
"(",
"\"accelerator.occurence\"",
")",
"if",
"not",
"format",
":",
"return",
"False",
"extra_selections",
"=",
"editor",
".",
"extraSelections",
"(",
")",
"or",
"[",
"]",
"if",
"not",
"editor",
".",
"isReadOnly",
"(",
")",
":",
"word",
"=",
"editor",
".",
"get_word_under_cursor",
"(",
")",
"if",
"not",
"word",
":",
"return",
"False",
"block",
"=",
"editor",
".",
"document",
"(",
")",
".",
"findBlock",
"(",
"0",
")",
"cursor",
"=",
"editor",
".",
"document",
"(",
")",
".",
"find",
"(",
"word",
",",
"block",
".",
"position",
"(",
")",
",",
"QTextDocument",
".",
"FindCaseSensitively",
"|",
"QTextDocument",
".",
"FindWholeWords",
")",
"while",
"block",
".",
"isValid",
"(",
")",
"and",
"cursor",
".",
"position",
"(",
")",
"!=",
"-",
"1",
":",
"selection",
"=",
"QTextEdit",
".",
"ExtraSelection",
"(",
")",
"selection",
".",
"format",
".",
"setBackground",
"(",
"format",
".",
"background",
"(",
")",
")",
"selection",
".",
"cursor",
"=",
"cursor",
"extra_selections",
".",
"append",
"(",
"selection",
")",
"cursor",
"=",
"editor",
".",
"document",
"(",
")",
".",
"find",
"(",
"word",
",",
"cursor",
".",
"position",
"(",
")",
",",
"QTextDocument",
".",
"FindCaseSensitively",
"|",
"QTextDocument",
".",
"FindWholeWords",
")",
"block",
"=",
"block",
".",
"next",
"(",
")",
"editor",
".",
"setExtraSelections",
"(",
"extra_selections",
")",
"return",
"True"
] | Highlights given editor current line.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool | [
"Highlights",
"given",
"editor",
"current",
"line",
"."
] | python | train |
NICTA/revrand | revrand/basis_functions.py | https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/basis_functions.py#L109-L152 | def apply_grad(fun, grad):
"""
Apply a function that takes a gradient matrix to a sequence of 2 or 3
dimensional gradients.
This is particularly useful when the gradient of a basis concatenation
object is quite complex, e.g.
>>> X = np.random.randn(100, 3)
>>> y = np.random.randn(100)
>>> N, d = X.shape
>>> base = RandomRBF(Xdim=d, nbases=5) + RandomRBF(Xdim=d, nbases=5,
... lenscale=Parameter(np.ones(d), Positive()))
>>> Phi = base.transform(X, 1., np.ones(d))
>>> dffun = lambda dPhi: y.dot(Phi).dot(dPhi.T).dot(y)
>>> df = apply_grad(dffun, base.grad(X, 1., np.ones(d)))
>>> np.isscalar(df[0])
True
>>> df[1].shape
(3,)
Parameters
----------
fun: callable
the function to apply to the (2d) gradient.
grad: ndarray or generator
the gradient of the basis function (output of base.grad).
Returns
-------
scalar, ndarray or sequence:
the result of applying fun(grad) for a structured grad.
"""
if issequence(grad):
fgrad = [apply_grad(fun, g) for g in grad]
return fgrad if len(fgrad) != 1 else fgrad[0]
elif len(grad) == 0:
return []
elif (grad.ndim == 1) or (grad.ndim == 2):
return fun(grad)
elif grad.ndim == 3:
return np.array([fun(grad[:, :, i]) for i in range(grad.shape[2])])
else:
raise ValueError("Only up to 3d gradients allowed!") | [
"def",
"apply_grad",
"(",
"fun",
",",
"grad",
")",
":",
"if",
"issequence",
"(",
"grad",
")",
":",
"fgrad",
"=",
"[",
"apply_grad",
"(",
"fun",
",",
"g",
")",
"for",
"g",
"in",
"grad",
"]",
"return",
"fgrad",
"if",
"len",
"(",
"fgrad",
")",
"!=",
"1",
"else",
"fgrad",
"[",
"0",
"]",
"elif",
"len",
"(",
"grad",
")",
"==",
"0",
":",
"return",
"[",
"]",
"elif",
"(",
"grad",
".",
"ndim",
"==",
"1",
")",
"or",
"(",
"grad",
".",
"ndim",
"==",
"2",
")",
":",
"return",
"fun",
"(",
"grad",
")",
"elif",
"grad",
".",
"ndim",
"==",
"3",
":",
"return",
"np",
".",
"array",
"(",
"[",
"fun",
"(",
"grad",
"[",
":",
",",
":",
",",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"grad",
".",
"shape",
"[",
"2",
"]",
")",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Only up to 3d gradients allowed!\"",
")"
] | Apply a function that takes a gradient matrix to a sequence of 2 or 3
dimensional gradients.
This is particularly useful when the gradient of a basis concatenation
object is quite complex, e.g.
>>> X = np.random.randn(100, 3)
>>> y = np.random.randn(100)
>>> N, d = X.shape
>>> base = RandomRBF(Xdim=d, nbases=5) + RandomRBF(Xdim=d, nbases=5,
... lenscale=Parameter(np.ones(d), Positive()))
>>> Phi = base.transform(X, 1., np.ones(d))
>>> dffun = lambda dPhi: y.dot(Phi).dot(dPhi.T).dot(y)
>>> df = apply_grad(dffun, base.grad(X, 1., np.ones(d)))
>>> np.isscalar(df[0])
True
>>> df[1].shape
(3,)
Parameters
----------
fun: callable
the function to apply to the (2d) gradient.
grad: ndarray or generator
the gradient of the basis function (output of base.grad).
Returns
-------
scalar, ndarray or sequence:
the result of applying fun(grad) for a structured grad. | [
"Apply",
"a",
"function",
"that",
"takes",
"a",
"gradient",
"matrix",
"to",
"a",
"sequence",
"of",
"2",
"or",
"3",
"dimensional",
"gradients",
"."
] | python | train |
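A tiny stand-alone illustration of the 3-dimensional branch described above, where the supplied function is applied slice-by-slice along the last axis; the arrays are made up and no revrand basis objects are involved.

import numpy as np

def apply_to_slices(fun, grad):
    # Mirrors the grad.ndim == 3 case: one call of `fun` per hyperparameter slice.
    return np.array([fun(grad[:, :, i]) for i in range(grad.shape[2])])

grad = np.ones((4, 2, 3))                 # pretend gradient for 3 hyperparameters
print(apply_to_slices(np.sum, grad))      # -> [8. 8. 8.]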
gwpy/gwpy | setup_utils.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L110-L134 | def get_gitpython_version():
"""Determine the required version of GitPython
Because of target systems running very, very old versions of setuptools,
we only specify the actual version we need when we need it.
"""
# if not in git clone, it doesn't matter
if not in_git_clone():
return 'GitPython'
# otherwise, call out to get the git version
try:
gitv = subprocess.check_output('git --version', shell=True)
except (OSError, IOError, subprocess.CalledProcessError):
# no git installation, most likely
git_version = '0.0.0'
else:
if isinstance(gitv, bytes):
gitv = gitv.decode('utf-8')
git_version = gitv.strip().split()[2]
# if git>=2.15, we need GitPython>=2.1.8
if LooseVersion(git_version) >= '2.15':
return 'GitPython>=2.1.8'
return 'GitPython' | [
"def",
"get_gitpython_version",
"(",
")",
":",
"# if not in git clone, it doesn't matter",
"if",
"not",
"in_git_clone",
"(",
")",
":",
"return",
"'GitPython'",
"# otherwise, call out to get the git version",
"try",
":",
"gitv",
"=",
"subprocess",
".",
"check_output",
"(",
"'git --version'",
",",
"shell",
"=",
"True",
")",
"except",
"(",
"OSError",
",",
"IOError",
",",
"subprocess",
".",
"CalledProcessError",
")",
":",
"# no git installation, most likely",
"git_version",
"=",
"'0.0.0'",
"else",
":",
"if",
"isinstance",
"(",
"gitv",
",",
"bytes",
")",
":",
"gitv",
"=",
"gitv",
".",
"decode",
"(",
"'utf-8'",
")",
"git_version",
"=",
"gitv",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"2",
"]",
"# if git>=2.15, we need GitPython>=2.1.8",
"if",
"LooseVersion",
"(",
"git_version",
")",
">=",
"'2.15'",
":",
"return",
"'GitPython>=2.1.8'",
"return",
"'GitPython'"
] | Determine the required version of GitPython
Because of target systems running very, very old versions of setuptools,
we only specify the actual version we need when we need it. | [
"Determine",
"the",
"required",
"version",
"of",
"GitPython"
] | python | train |
spoqa/tsukkomi | tsukkomi/typed.py | https://github.com/spoqa/tsukkomi/blob/c67bd28a5211cdd11f8ac81f109c915f3b780445/tsukkomi/typed.py#L145-L161 | def check_union(data: typing.Union, hint: type) -> bool:
"""Check argument type & return type of :class:`typing.Union`. since it
raises check :class:`typing.Union` using `isinstance`, so compare in
diffrent way
:param data: union given as a argument
:param hint: assumed type of given ``data``
"""
r = any(check_type(data, t)[1] for t in hint.__union_params__)
if not r:
raise TypeError(
'expected one of {0!r}, found: {1!r}'.format(
hint.__union_params__, type(data)
)
)
return hint, r | [
"def",
"check_union",
"(",
"data",
":",
"typing",
".",
"Union",
",",
"hint",
":",
"type",
")",
"->",
"bool",
":",
"r",
"=",
"any",
"(",
"check_type",
"(",
"data",
",",
"t",
")",
"[",
"1",
"]",
"for",
"t",
"in",
"hint",
".",
"__union_params__",
")",
"if",
"not",
"r",
":",
"raise",
"TypeError",
"(",
"'expected one of {0!r}, found: {1!r}'",
".",
"format",
"(",
"hint",
".",
"__union_params__",
",",
"type",
"(",
"data",
")",
")",
")",
"return",
"hint",
",",
"r"
] | Check argument type & return type of :class:`typing.Union`. Since
checking a :class:`typing.Union` with `isinstance` raises, the comparison is
done in a different way.
:param data: union given as an argument
:param hint: assumed type of given ``data`` | [
"Check",
"argument",
"type",
"&",
"return",
"type",
"of",
":",
"class",
":",
"typing",
".",
"Union",
".",
"since",
"it",
"raises",
"check",
":",
"class",
":",
"typing",
".",
"Union",
"using",
"isinstance",
"so",
"compare",
"in",
"diffrent",
"way"
] | python | train |
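Note that check_union above reads the member types from the legacy ``__union_params__`` attribute, which exists only on old releases of the ``typing`` module. On current Python the same tuple is usually obtained through ``typing.get_args``; a rough, hypothetical equivalent (not part of tsukkomi):

import typing

def union_members(hint):
    # typing.get_args(typing.Union[int, str]) returns (int, str) on Python 3.8+
    return typing.get_args(hint)

print(union_members(typing.Union[int, str]))   # (<class 'int'>, <class 'str'>)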
not-na/peng3d | peng3d/window.py | https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/window.py#L215-L222 | def addMenu(self,menu):
"""
Adds a menu to the list of menus.
"""
# If there is no menu selected currently, this menu will automatically be made active.
# Add the line above to the docstring if fixed
self.menus[menu.name]=menu
self.peng.sendEvent("peng3d:window.menu.add",{"peng":self.peng,"window":self,"menu":menu}) | [
"def",
"addMenu",
"(",
"self",
",",
"menu",
")",
":",
"# If there is no menu selected currently, this menu will automatically be made active.",
"# Add the line above to the docstring if fixed",
"self",
".",
"menus",
"[",
"menu",
".",
"name",
"]",
"=",
"menu",
"self",
".",
"peng",
".",
"sendEvent",
"(",
"\"peng3d:window.menu.add\"",
",",
"{",
"\"peng\"",
":",
"self",
".",
"peng",
",",
"\"window\"",
":",
"self",
",",
"\"menu\"",
":",
"menu",
"}",
")"
] | Adds a menu to the list of menus. | [
"Adds",
"a",
"menu",
"to",
"the",
"list",
"of",
"menus",
"."
] | python | test |
mitsei/dlkit | dlkit/json_/osid/metadata.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/metadata.py#L1025-L1039 | def supports_heading_type(self, heading_type):
"""Tests if the given heading type is supported.
arg: heading_type (osid.type.Type): a heading Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``HEADING``
raise: NullArgument - ``heading_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
if self._kwargs['syntax'] not in ['``HEADING``']:
raise errors.IllegalState()
return heading_type in self.get_heading_types | [
"def",
"supports_heading_type",
"(",
"self",
",",
"heading_type",
")",
":",
"# Implemented from template for osid.Metadata.supports_coordinate_type",
"if",
"self",
".",
"_kwargs",
"[",
"'syntax'",
"]",
"not",
"in",
"[",
"'``HEADING``'",
"]",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
")",
"return",
"heading_type",
"in",
"self",
".",
"get_heading_types"
] | Tests if the given heading type is supported.
arg: heading_type (osid.type.Type): a heading Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``HEADING``
raise: NullArgument - ``heading_type`` is ``null``
*compliance: mandatory -- This method must be implemented.* | [
"Tests",
"if",
"the",
"given",
"heading",
"type",
"is",
"supported",
"."
] | python | train |
noxdafox/vminspect | vminspect/comparator.py | https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/comparator.py#L128-L153 | def compare_registry(self, concurrent=False):
"""Compares the Windows Registry contained within the two File Systems.
It parses all the registry hive files contained within the disks
and generates the following report.
{'created_keys': {'\\Reg\\Key': (('Key', 'Type', 'Value'))}
'deleted_keys': ['\\Reg\\Key', ...],
'created_values': {'\\Reg\\Key': (('Key', 'Type', 'NewValue'))},
'deleted_values': {'\\Reg\\Key': (('Key', 'Type', 'OldValue'))},
'modified_values': {'\\Reg\\Key': (('Key', 'Type', 'NewValue'))}}
Only registry hives which are contained in both disks are compared.
If the second disk contains a new registry hive,
its content can be listed using winreg.RegistryHive.registry() method.
If the concurrent flag is True,
two processes will be used speeding up the comparison on multiple CPUs.
"""
self.logger.debug("Comparing Windows registries.")
self._assert_windows()
return compare_registries(self.filesystems[0], self.filesystems[1],
concurrent=concurrent) | [
"def",
"compare_registry",
"(",
"self",
",",
"concurrent",
"=",
"False",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Comparing Windows registries.\"",
")",
"self",
".",
"_assert_windows",
"(",
")",
"return",
"compare_registries",
"(",
"self",
".",
"filesystems",
"[",
"0",
"]",
",",
"self",
".",
"filesystems",
"[",
"1",
"]",
",",
"concurrent",
"=",
"concurrent",
")"
] | Compares the Windows Registry contained within the two File Systems.
It parses all the registry hive files contained within the disks
and generates the following report.
{'created_keys': {'\\Reg\\Key': (('Key', 'Type', 'Value'))}
'deleted_keys': ['\\Reg\\Key', ...],
'created_values': {'\\Reg\\Key': (('Key', 'Type', 'NewValue'))},
'deleted_values': {'\\Reg\\Key': (('Key', 'Type', 'OldValue'))},
'modified_values': {'\\Reg\\Key': (('Key', 'Type', 'NewValue'))}}
Only registry hives which are contained in both disks are compared.
If the second disk contains a new registry hive,
its content can be listed using winreg.RegistryHive.registry() method.
If the concurrent flag is True,
two processes will be used speeding up the comparison on multiple CPUs. | [
"Compares",
"the",
"Windows",
"Registry",
"contained",
"within",
"the",
"two",
"File",
"Systems",
"."
] | python | train |
midasplatform/pydas | pydas/api.py | https://github.com/midasplatform/pydas/blob/e5f9e96e754fb2dc5da187b05e4abc77a9b2affd/pydas/api.py#L331-L346 | def _create_folder(local_folder, parent_folder_id):
"""
Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long
"""
new_folder = session.communicator.create_folder(
session.token, os.path.basename(local_folder), parent_folder_id)
return new_folder['folder_id'] | [
"def",
"_create_folder",
"(",
"local_folder",
",",
"parent_folder_id",
")",
":",
"new_folder",
"=",
"session",
".",
"communicator",
".",
"create_folder",
"(",
"session",
".",
"token",
",",
"os",
".",
"path",
".",
"basename",
"(",
"local_folder",
")",
",",
"parent_folder_id",
")",
"return",
"new_folder",
"[",
"'folder_id'",
"]"
] | Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long | [
"Function",
"for",
"creating",
"a",
"remote",
"folder",
"and",
"returning",
"the",
"id",
".",
"This",
"should",
"be",
"a",
"building",
"block",
"for",
"user",
"-",
"level",
"functions",
"."
] | python | valid |
limodou/uliweb | uliweb/contrib/jsonql/__init__.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/jsonql/__init__.py#L313-L336 | def parse_entry(self, name):
"""
Parse query entry name, just like:
{
'User[]:user'
}
'User[]:user' is an entry name.
:param name:
:return:
"""
# calculate schema mode
# if ':name' or '' or '[]:name' or '[]' found, it'll be treated as a multiple Schema query
alias = name
if ':' in name:
name, alias = name.split(':')
if name.endswith('[]'):
need_list = True
name = name[:-2]
else:
need_list = False
return alias, name, need_list | [
"def",
"parse_entry",
"(",
"self",
",",
"name",
")",
":",
"# calculate schema mode",
"# if ':name' or '' or '[]:name' or '[]' found, it'll be treat as multiple Schema query",
"alias",
"=",
"name",
"if",
"':'",
"in",
"name",
":",
"name",
",",
"alias",
"=",
"name",
".",
"split",
"(",
"':'",
")",
"if",
"name",
".",
"endswith",
"(",
"'[]'",
")",
":",
"need_list",
"=",
"True",
"name",
"=",
"name",
"[",
":",
"-",
"2",
"]",
"else",
":",
"need_list",
"=",
"False",
"return",
"alias",
",",
"name",
",",
"need_list"
] | Parse query entry name, just like:
{
'User[]:user'
}
'User[]:user' is an entry name.
:param name:
:return: | [
"Parse",
"query",
"entry",
"name",
"just",
"like",
":",
"{",
"User",
"[]",
":",
"user",
"}"
] | python | train |
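To make the 'User[]:user' convention concrete, here is a stand-alone copy of the parsing rules above with a few example inputs (illustrative only, not the uliweb implementation itself):

def parse_entry(name):
    alias = name
    if ':' in name:
        name, alias = name.split(':')
    need_list = name.endswith('[]')
    if need_list:
        name = name[:-2]                 # drop the trailing '[]'
    return alias, name, need_list

print(parse_entry('User[]:user'))        # ('user', 'User', True)
print(parse_entry('User:users'))         # ('users', 'User', False)
print(parse_entry('User'))               # ('User', 'User', False)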
OpenTreeOfLife/peyotl | peyotl/phylesystem/helper.py | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/helper.py#L64-L76 | def create_id2study_info(path, tag):
"""Searchers for *.json files in this repo and returns
a map of study id ==> (`tag`, dir, study filepath)
where `tag` is typically the shard name
"""
d = {}
for triple in os.walk(path):
root, files = triple[0], triple[2]
for filename in files:
if filename.endswith('.json'):
study_id = filename[:-5]
d[study_id] = (tag, root, os.path.join(root, filename))
return d | [
"def",
"create_id2study_info",
"(",
"path",
",",
"tag",
")",
":",
"d",
"=",
"{",
"}",
"for",
"triple",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"root",
",",
"files",
"=",
"triple",
"[",
"0",
"]",
",",
"triple",
"[",
"2",
"]",
"for",
"filename",
"in",
"files",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.json'",
")",
":",
"study_id",
"=",
"filename",
"[",
":",
"-",
"5",
"]",
"d",
"[",
"study_id",
"]",
"=",
"(",
"tag",
",",
"root",
",",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
")",
"return",
"d"
] | Searches for *.json files in this repo and returns
a map of study id ==> (`tag`, dir, study filepath)
where `tag` is typically the shard name | [
"Searchers",
"for",
"*",
".",
"json",
"files",
"in",
"this",
"repo",
"and",
"returns",
"a",
"map",
"of",
"study",
"id",
"==",
">",
"(",
"tag",
"dir",
"study",
"filepath",
")",
"where",
"tag",
"is",
"typically",
"the",
"shard",
"name"
] | python | train |
amzn/ion-python | amazon/ion/reader_binary.py | https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_binary.py#L334-L387 | def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):
"""Creates a co-routine for retrieving data up to a requested size.
Args:
length (int): The minimum length requested.
whence (Coroutine): The co-routine to return to after the data is satisfied.
ctx (_HandlerContext): The context for the read.
skip (Optional[bool]): Whether the requested number of bytes should be skipped.
stream_event (Optional[IonEvent]): The stream event to return if no bytes are read or
available.
"""
trans = None
queue = ctx.queue
if length > ctx.remaining:
raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))
# Make sure to check the queue first.
queue_len = len(queue)
if queue_len > 0:
# Any data available means we can only be incomplete.
stream_event = ION_STREAM_INCOMPLETE_EVENT
length -= queue_len
if skip:
# For skipping we need to consume any remnant in the buffer queue.
if length >= 0:
queue.skip(queue_len)
else:
queue.skip(queue_len + length)
while True:
data_event, self = (yield trans)
if data_event is not None and data_event.data is not None:
data = data_event.data
data_len = len(data)
if data_len > 0:
# We got something so we can only be incomplete.
stream_event = ION_STREAM_INCOMPLETE_EVENT
length -= data_len
if not skip:
queue.extend(data)
else:
pos_adjustment = data_len
if length < 0:
pos_adjustment += length
# More data than we need to skip, so make sure to accumulate that remnant.
queue.extend(data[length:])
queue.position += pos_adjustment
if length <= 0:
# We got all the data we need, go back immediately
yield Transition(None, whence)
trans = Transition(stream_event, self) | [
"def",
"_read_data_handler",
"(",
"length",
",",
"whence",
",",
"ctx",
",",
"skip",
"=",
"False",
",",
"stream_event",
"=",
"ION_STREAM_INCOMPLETE_EVENT",
")",
":",
"trans",
"=",
"None",
"queue",
"=",
"ctx",
".",
"queue",
"if",
"length",
">",
"ctx",
".",
"remaining",
":",
"raise",
"IonException",
"(",
"'Length overrun: %d bytes, %d remaining'",
"%",
"(",
"length",
",",
"ctx",
".",
"remaining",
")",
")",
"# Make sure to check the queue first.",
"queue_len",
"=",
"len",
"(",
"queue",
")",
"if",
"queue_len",
">",
"0",
":",
"# Any data available means we can only be incomplete.",
"stream_event",
"=",
"ION_STREAM_INCOMPLETE_EVENT",
"length",
"-=",
"queue_len",
"if",
"skip",
":",
"# For skipping we need to consume any remnant in the buffer queue.",
"if",
"length",
">=",
"0",
":",
"queue",
".",
"skip",
"(",
"queue_len",
")",
"else",
":",
"queue",
".",
"skip",
"(",
"queue_len",
"+",
"length",
")",
"while",
"True",
":",
"data_event",
",",
"self",
"=",
"(",
"yield",
"trans",
")",
"if",
"data_event",
"is",
"not",
"None",
"and",
"data_event",
".",
"data",
"is",
"not",
"None",
":",
"data",
"=",
"data_event",
".",
"data",
"data_len",
"=",
"len",
"(",
"data",
")",
"if",
"data_len",
">",
"0",
":",
"# We got something so we can only be incomplete.",
"stream_event",
"=",
"ION_STREAM_INCOMPLETE_EVENT",
"length",
"-=",
"data_len",
"if",
"not",
"skip",
":",
"queue",
".",
"extend",
"(",
"data",
")",
"else",
":",
"pos_adjustment",
"=",
"data_len",
"if",
"length",
"<",
"0",
":",
"pos_adjustment",
"+=",
"length",
"# More data than we need to skip, so make sure to accumulate that remnant.",
"queue",
".",
"extend",
"(",
"data",
"[",
"length",
":",
"]",
")",
"queue",
".",
"position",
"+=",
"pos_adjustment",
"if",
"length",
"<=",
"0",
":",
"# We got all the data we need, go back immediately",
"yield",
"Transition",
"(",
"None",
",",
"whence",
")",
"trans",
"=",
"Transition",
"(",
"stream_event",
",",
"self",
")"
] | Creates a co-routine for retrieving data up to a requested size.
Args:
length (int): The minimum length requested.
whence (Coroutine): The co-routine to return to after the data is satisfied.
ctx (_HandlerContext): The context for the read.
skip (Optional[bool]): Whether the requested number of bytes should be skipped.
stream_event (Optional[IonEvent]): The stream event to return if no bytes are read or
available. | [
"Creates",
"a",
"co",
"-",
"routine",
"for",
"retrieving",
"data",
"up",
"to",
"a",
"requested",
"size",
"."
] | python | train |
mongodb/motor | motor/motor_gridfs.py | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/motor_gridfs.py#L457-L514 | def find(self, *args, **kwargs):
"""Find and return the files collection documents that match ``filter``.
Returns a cursor that iterates across files matching
arbitrary queries on the files collection. Can be combined
with other modifiers for additional control.
For example::
cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
while (yield cursor.fetch_next):
grid_out = cursor.next_object()
data = yield grid_out.read()
This iterates through all versions of "lisa.txt" stored in GridFS.
Note that setting no_cursor_timeout to True may be important to
prevent the cursor from timing out during long multi-file processing
work.
As another example, the call::
most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
would return a cursor to the three most recently uploaded files
in GridFS.
Follows a similar interface to
:meth:`~motor.MotorCollection.find`
in :class:`~motor.MotorCollection`.
:Parameters:
- `filter`: Search query.
- `batch_size` (optional): The number of documents to return per
batch.
- `limit` (optional): The maximum number of documents to return.
- `no_cursor_timeout` (optional): The server normally times out idle
cursors after an inactivity period (10 minutes) to prevent excess
memory use. Set this option to True to prevent that.
- `skip` (optional): The number of documents to skip before
returning.
- `sort` (optional): The order by which to sort results. Defaults to
None.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
If a :class:`~pymongo.client_session.ClientSession` is passed to
:meth:`find`, all returned :class:`MotorGridOut` instances
are associated with that session.
.. versionchanged:: 1.2
Added session parameter.
"""
cursor = self.delegate.find(*args, **kwargs)
grid_out_cursor = create_class_with_framework(
AgnosticGridOutCursor, self._framework, self.__module__)
return grid_out_cursor(cursor, self.collection) | [
"def",
"find",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cursor",
"=",
"self",
".",
"delegate",
".",
"find",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"grid_out_cursor",
"=",
"create_class_with_framework",
"(",
"AgnosticGridOutCursor",
",",
"self",
".",
"_framework",
",",
"self",
".",
"__module__",
")",
"return",
"grid_out_cursor",
"(",
"cursor",
",",
"self",
".",
"collection",
")"
] | Find and return the files collection documents that match ``filter``.
Returns a cursor that iterates across files matching
arbitrary queries on the files collection. Can be combined
with other modifiers for additional control.
For example::
cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
while (yield cursor.fetch_next):
grid_out = cursor.next_object()
data = yield grid_out.read()
This iterates through all versions of "lisa.txt" stored in GridFS.
Note that setting no_cursor_timeout to True may be important to
prevent the cursor from timing out during long multi-file processing
work.
As another example, the call::
most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
would return a cursor to the three most recently uploaded files
in GridFS.
Follows a similar interface to
:meth:`~motor.MotorCollection.find`
in :class:`~motor.MotorCollection`.
:Parameters:
- `filter`: Search query.
- `batch_size` (optional): The number of documents to return per
batch.
- `limit` (optional): The maximum number of documents to return.
- `no_cursor_timeout` (optional): The server normally times out idle
cursors after an inactivity period (10 minutes) to prevent excess
memory use. Set this option to True to prevent that.
- `skip` (optional): The number of documents to skip before
returning.
- `sort` (optional): The order by which to sort results. Defaults to
None.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
If a :class:`~pymongo.client_session.ClientSession` is passed to
:meth:`find`, all returned :class:`MotorGridOut` instances
are associated with that session.
.. versionchanged:: 1.2
Added session parameter. | [
"Find",
"and",
"return",
"the",
"files",
"collection",
"documents",
"that",
"match",
"filter",
"."
] | python | train |
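A minimal end-to-end sketch of driving the `find` method above through Motor's asyncio integration; the connection string, database name, and filename are illustrative, and a MongoDB server is assumed to be reachable locally:

```python
import asyncio
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorGridFSBucket

async def main():
    client = AsyncIOMotorClient("mongodb://localhost:27017")  # assumed local server
    bucket = AsyncIOMotorGridFSBucket(client.my_database)      # illustrative database name
    # Walk every stored version of "lisa.txt", newest upload first.
    cursor = bucket.find({"filename": "lisa.txt"},
                         no_cursor_timeout=True).sort("uploadDate", -1)
    async for grid_out in cursor:
        data = await grid_out.read()
        print(grid_out._id, len(data), "bytes")

asyncio.run(main())
```

The `async for` loop here plays the role of the older `fetch_next`/`next_object` pattern shown in the docstring.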
bitlabstudio/django-subscribe | subscribe/templatetags/subscribe_tags.py | https://github.com/bitlabstudio/django-subscribe/blob/313de63fb4acda172e88b65c3327c793f98e8aa9/subscribe/templatetags/subscribe_tags.py#L35-L54 | def is_subscribed(user, obj):
"""
Returns ``True`` if the user is subscribed to the given object.
:param user: A ``User`` instance.
:param obj: Any object.
"""
if not user.is_authenticated():
return False
ctype = ContentType.objects.get_for_model(obj)
try:
Subscription.objects.get(
user=user, content_type=ctype, object_id=obj.pk)
except Subscription.DoesNotExist:
return False
return True | [
"def",
"is_subscribed",
"(",
"user",
",",
"obj",
")",
":",
"if",
"not",
"user",
".",
"is_authenticated",
"(",
")",
":",
"return",
"False",
"ctype",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
"try",
":",
"Subscription",
".",
"objects",
".",
"get",
"(",
"user",
"=",
"user",
",",
"content_type",
"=",
"ctype",
",",
"object_id",
"=",
"obj",
".",
"pk",
")",
"except",
"Subscription",
".",
"DoesNotExist",
":",
"return",
"False",
"return",
"True"
] | Returns ``True`` if the user is subscribed to the given object.
:param user: A ``User`` instance.
:param obj: Any object. | [
"Returns",
"True",
"if",
"the",
"user",
"is",
"subscribed",
"to",
"the",
"given",
"object",
"."
] | python | train |
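A hedged sketch of calling the helper directly from a Django view; the `Article` model, template name, and view wiring are assumptions, and only the import path mirrors the module shown above:

```python
from django.shortcuts import get_object_or_404, render
from myapp.models import Article  # hypothetical model
from subscribe.templatetags.subscribe_tags import is_subscribed

def article_detail(request, pk):
    article = get_object_or_404(Article, pk=pk)
    context = {
        "article": article,
        # True only for logged-in users holding a Subscription row for this object.
        "is_subscribed": is_subscribed(request.user, article),
    }
    return render(request, "articles/detail.html", context)
```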
datastax/python-driver | cassandra/cqlengine/query.py | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L940-L946 | def allow_filtering(self):
"""
Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key
"""
clone = copy.deepcopy(self)
clone._allow_filtering = True
return clone | [
"def",
"allow_filtering",
"(",
"self",
")",
":",
"clone",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"clone",
".",
"_allow_filtering",
"=",
"True",
"return",
"clone"
] | Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key | [
"Enables",
"the",
"(",
"usually",
")",
"unwise",
"practive",
"of",
"querying",
"on",
"a",
"clustering",
"key",
"without",
"also",
"defining",
"a",
"partition",
"key"
] | python | train |
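A small sketch of where the cloned queryset matters in practice; the `Comment` model is illustrative, and the query assumes a Cassandra cluster that would otherwise reject the filter without `ALLOW FILTERING`:

```python
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Comment(Model):
    post_id = columns.UUID(partition_key=True)
    created = columns.TimeUUID(primary_key=True)
    author = columns.Text()

# Filtering on a non-key column normally fails server-side; allow_filtering()
# returns a copy of the queryset that appends ALLOW FILTERING to the CQL.
by_author = Comment.objects.filter(author="alice").allow_filtering()
for comment in by_author:
    print(comment.post_id, comment.created)
```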
zhanglab/psamm | psamm/gapfill.py | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/gapfill.py#L52-L140 | def gapfind(model, solver, epsilon=0.001, v_max=1000, implicit_sinks=True):
"""Identify compounds in the model that cannot be produced.
Yields all compounds that cannot be produced. This method
assumes implicit sinks for all compounds in the model so
the only factor that influences whether a compound can be
produced is the presence of the compounds needed to produce it.
Epsilon indicates the threshold amount of reaction flux for the products
to be considered non-blocked. V_max indicates the maximum flux.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
"""
prob = solver.create_problem()
# Set integrality tolerance such that w constraints are correct
min_tol = prob.integrality_tolerance.min
int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)
if int_tol < prob.integrality_tolerance.value:
prob.integrality_tolerance.value = int_tol
# Define flux variables
v = prob.namespace()
for reaction_id in model.reactions:
lower, upper = model.limits[reaction_id]
v.define([reaction_id], lower=lower, upper=upper)
# Define constraints on production of metabolites in reaction
w = prob.namespace(types=lp.VariableType.Binary)
binary_cons_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
compound, reaction_id = spec
if value != 0:
w.define([spec])
w_var = w(spec)
lower, upper = (float(x) for x in model.limits[reaction_id])
if value > 0:
dv = v(reaction_id)
else:
dv = -v(reaction_id)
lower, upper = -upper, -lower
prob.add_linear_constraints(
dv <= upper * w_var,
dv >= epsilon + (lower - epsilon) * (1 - w_var))
binary_cons_lhs[compound] += w_var
xp = prob.namespace(model.compounds, types=lp.VariableType.Binary)
objective = xp.sum(model.compounds)
prob.set_objective(objective)
for compound, lhs in iteritems(binary_cons_lhs):
prob.add_linear_constraints(lhs >= xp(compound))
# Define mass balance constraints
massbalance_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
compound, reaction_id = spec
massbalance_lhs[compound] += v(reaction_id) * value
for compound, lhs in iteritems(massbalance_lhs):
if implicit_sinks:
# The constraint is merely >0 meaning that we have implicit sinks
# for all compounds.
prob.add_linear_constraints(lhs >= 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
result = prob.solve(lp.ObjectiveSense.Maximize)
except lp.SolverError as e:
raise_from(GapFillError('Failed to solve gapfill: {}'.format(e), e))
for compound in model.compounds:
if result.get_value(xp(compound)) < 0.5:
yield compound | [
"def",
"gapfind",
"(",
"model",
",",
"solver",
",",
"epsilon",
"=",
"0.001",
",",
"v_max",
"=",
"1000",
",",
"implicit_sinks",
"=",
"True",
")",
":",
"prob",
"=",
"solver",
".",
"create_problem",
"(",
")",
"# Set integrality tolerance such that w constraints are correct",
"min_tol",
"=",
"prob",
".",
"integrality_tolerance",
".",
"min",
"int_tol",
"=",
"_find_integer_tolerance",
"(",
"epsilon",
",",
"v_max",
",",
"min_tol",
")",
"if",
"int_tol",
"<",
"prob",
".",
"integrality_tolerance",
".",
"value",
":",
"prob",
".",
"integrality_tolerance",
".",
"value",
"=",
"int_tol",
"# Define flux variables",
"v",
"=",
"prob",
".",
"namespace",
"(",
")",
"for",
"reaction_id",
"in",
"model",
".",
"reactions",
":",
"lower",
",",
"upper",
"=",
"model",
".",
"limits",
"[",
"reaction_id",
"]",
"v",
".",
"define",
"(",
"[",
"reaction_id",
"]",
",",
"lower",
"=",
"lower",
",",
"upper",
"=",
"upper",
")",
"# Define constraints on production of metabolites in reaction",
"w",
"=",
"prob",
".",
"namespace",
"(",
"types",
"=",
"lp",
".",
"VariableType",
".",
"Binary",
")",
"binary_cons_lhs",
"=",
"{",
"compound",
":",
"0",
"for",
"compound",
"in",
"model",
".",
"compounds",
"}",
"for",
"spec",
",",
"value",
"in",
"iteritems",
"(",
"model",
".",
"matrix",
")",
":",
"compound",
",",
"reaction_id",
"=",
"spec",
"if",
"value",
"!=",
"0",
":",
"w",
".",
"define",
"(",
"[",
"spec",
"]",
")",
"w_var",
"=",
"w",
"(",
"spec",
")",
"lower",
",",
"upper",
"=",
"(",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"model",
".",
"limits",
"[",
"reaction_id",
"]",
")",
"if",
"value",
">",
"0",
":",
"dv",
"=",
"v",
"(",
"reaction_id",
")",
"else",
":",
"dv",
"=",
"-",
"v",
"(",
"reaction_id",
")",
"lower",
",",
"upper",
"=",
"-",
"upper",
",",
"-",
"lower",
"prob",
".",
"add_linear_constraints",
"(",
"dv",
"<=",
"upper",
"*",
"w_var",
",",
"dv",
">=",
"epsilon",
"+",
"(",
"lower",
"-",
"epsilon",
")",
"*",
"(",
"1",
"-",
"w_var",
")",
")",
"binary_cons_lhs",
"[",
"compound",
"]",
"+=",
"w_var",
"xp",
"=",
"prob",
".",
"namespace",
"(",
"model",
".",
"compounds",
",",
"types",
"=",
"lp",
".",
"VariableType",
".",
"Binary",
")",
"objective",
"=",
"xp",
".",
"sum",
"(",
"model",
".",
"compounds",
")",
"prob",
".",
"set_objective",
"(",
"objective",
")",
"for",
"compound",
",",
"lhs",
"in",
"iteritems",
"(",
"binary_cons_lhs",
")",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
">=",
"xp",
"(",
"compound",
")",
")",
"# Define mass balance constraints",
"massbalance_lhs",
"=",
"{",
"compound",
":",
"0",
"for",
"compound",
"in",
"model",
".",
"compounds",
"}",
"for",
"spec",
",",
"value",
"in",
"iteritems",
"(",
"model",
".",
"matrix",
")",
":",
"compound",
",",
"reaction_id",
"=",
"spec",
"massbalance_lhs",
"[",
"compound",
"]",
"+=",
"v",
"(",
"reaction_id",
")",
"*",
"value",
"for",
"compound",
",",
"lhs",
"in",
"iteritems",
"(",
"massbalance_lhs",
")",
":",
"if",
"implicit_sinks",
":",
"# The constraint is merely >0 meaning that we have implicit sinks",
"# for all compounds.",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
">=",
"0",
")",
"else",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
"==",
"0",
")",
"# Solve",
"try",
":",
"result",
"=",
"prob",
".",
"solve",
"(",
"lp",
".",
"ObjectiveSense",
".",
"Maximize",
")",
"except",
"lp",
".",
"SolverError",
"as",
"e",
":",
"raise_from",
"(",
"GapFillError",
"(",
"'Failed to solve gapfill: {}'",
".",
"format",
"(",
"e",
")",
",",
"e",
")",
")",
"for",
"compound",
"in",
"model",
".",
"compounds",
":",
"if",
"result",
".",
"get_value",
"(",
"xp",
"(",
"compound",
")",
")",
"<",
"0.5",
":",
"yield",
"compound"
] | Identify compounds in the model that cannot be produced.
Yields all compounds that cannot be produced. This method
assumes implicit sinks for all compounds in the model so
the only factor that influences whether a compound can be
produced is the presence of the compounds needed to produce it.
Epsilon indicates the threshold amount of reaction flux for the products
to be considered non-blocked. V_max indicates the maximum flux.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks). | [
"Identify",
"compounds",
"in",
"the",
"model",
"that",
"cannot",
"be",
"produced",
"."
] | python | train |
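A sketch of invoking `gapfind` on a loaded model. The model-loading and solver-construction calls are assumptions drawn from typical psamm usage (a native YAML model and whichever MILP-capable backend is installed), not from this module itself:

```python
from psamm.datasource import native
from psamm.lpsolver import generic
from psamm.gapfill import gapfind

reader = native.ModelReader.reader_from_path("model.yaml")   # hypothetical model file
metabolic_model = reader.create_model().create_metabolic_model()
solver = generic.Solver(integer=True)  # gapfind is a MILP, so an integer-capable solver is required

blocked = set(gapfind(metabolic_model, solver, epsilon=0.001, implicit_sinks=True))
for compound in sorted(blocked, key=str):
    print("cannot be produced:", compound)
```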
mar10/wsgidav | wsgidav/dav_provider.py | https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/dav_provider.py#L809-L814 | def remove_all_properties(self, recursive):
"""Remove all associated dead properties."""
if self.provider.prop_manager:
self.provider.prop_manager.remove_properties(
self.get_ref_url(), self.environ
) | [
"def",
"remove_all_properties",
"(",
"self",
",",
"recursive",
")",
":",
"if",
"self",
".",
"provider",
".",
"prop_manager",
":",
"self",
".",
"provider",
".",
"prop_manager",
".",
"remove_properties",
"(",
"self",
".",
"get_ref_url",
"(",
")",
",",
"self",
".",
"environ",
")"
] | Remove all associated dead properties. | [
"Remove",
"all",
"associated",
"dead",
"properties",
"."
] | python | valid |
manns/pyspread | pyspread/src/lib/_grid_cairo_renderer.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L962-L971 | def draw(self):
"""Draws cell background to context"""
self.context.set_source_rgb(*self._get_background_color())
self.context.rectangle(*self.rect)
self.context.fill()
# If show frozen is active, show frozen pattern
if self.view_frozen and self.cell_attributes[self.key]["frozen"]:
self._draw_frozen_pattern() | [
"def",
"draw",
"(",
"self",
")",
":",
"self",
".",
"context",
".",
"set_source_rgb",
"(",
"*",
"self",
".",
"_get_background_color",
"(",
")",
")",
"self",
".",
"context",
".",
"rectangle",
"(",
"*",
"self",
".",
"rect",
")",
"self",
".",
"context",
".",
"fill",
"(",
")",
"# If show frozen is active, show frozen pattern",
"if",
"self",
".",
"view_frozen",
"and",
"self",
".",
"cell_attributes",
"[",
"self",
".",
"key",
"]",
"[",
"\"frozen\"",
"]",
":",
"self",
".",
"_draw_frozen_pattern",
"(",
")"
] | Draws cell background to context | [
"Draws",
"cell",
"background",
"to",
"context"
] | python | train |
O365/python-o365 | O365/utils/attachment.py | https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/attachment.py#L140-L154 | def to_api_data(self):
""" Returns a dict to communicate with the server
:rtype: dict
"""
data = {'@odata.type': self._gk(
'{}_attachment_type'.format(self.attachment_type)),
self._cc('name'): self.name}
if self.attachment_type == 'file':
data[self._cc('contentBytes')] = self.content
else:
data[self._cc('item')] = self.content
return data | [
"def",
"to_api_data",
"(",
"self",
")",
":",
"data",
"=",
"{",
"'@odata.type'",
":",
"self",
".",
"_gk",
"(",
"'{}_attachment_type'",
".",
"format",
"(",
"self",
".",
"attachment_type",
")",
")",
",",
"self",
".",
"_cc",
"(",
"'name'",
")",
":",
"self",
".",
"name",
"}",
"if",
"self",
".",
"attachment_type",
"==",
"'file'",
":",
"data",
"[",
"self",
".",
"_cc",
"(",
"'contentBytes'",
")",
"]",
"=",
"self",
".",
"content",
"else",
":",
"data",
"[",
"self",
".",
"_cc",
"(",
"'item'",
")",
"]",
"=",
"self",
".",
"content",
"return",
"data"
] | Returns a dict to communicate with the server
:rtype: dict | [
"Returns",
"a",
"dict",
"to",
"communicate",
"with",
"the",
"server"
] | python | train |
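For a file attachment under the Microsoft Graph protocol, the dict built above ends up in roughly the following shape; the `@odata.type` value and camelCase keys reflect Graph's conventions and are stated here as an assumption rather than read from the library's keyword mappings:

```python
import base64

with open("report.pdf", "rb") as fh:            # hypothetical file
    content_b64 = base64.b64encode(fh.read()).decode("ascii")

payload = {
    "@odata.type": "#microsoft.graph.fileAttachment",
    "name": "report.pdf",
    "contentBytes": content_b64,
}
```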
andresriancho/w3af-api-client | w3af_api_client/connection.py | https://github.com/andresriancho/w3af-api-client/blob/adeb79bad75264d754de69f0bb981b366da96f32/w3af_api_client/connection.py#L156-L178 | def get_scans(self):
"""
:return: A list with all the Scan instances available in the remote API
"""
code, data = self.send_request('/scans/', method='GET')
if code != 200:
msg = 'Failed to retrieve scans. Unexpected code %s'
raise APIException(msg % code)
scans = data.get('items', None)
if scans is None:
raise APIException('Failed to retrieve scans, no "items" in JSON.')
scan_instances = []
for scan_json in scans:
scan_id = scan_json['id']
scan_status = scan_json['status']
scan = Scan(self, scan_id=scan_id, status=scan_status)
scan_instances.append(scan)
return scan_instances | [
"def",
"get_scans",
"(",
"self",
")",
":",
"code",
",",
"data",
"=",
"self",
".",
"send_request",
"(",
"'/scans/'",
",",
"method",
"=",
"'GET'",
")",
"if",
"code",
"!=",
"200",
":",
"msg",
"=",
"'Failed to retrieve scans. Unexpected code %s'",
"raise",
"APIException",
"(",
"msg",
"%",
"code",
")",
"scans",
"=",
"data",
".",
"get",
"(",
"'items'",
",",
"None",
")",
"if",
"scans",
"is",
"None",
":",
"raise",
"APIException",
"(",
"'Failed to retrieve scans, no \"items\" in JSON.'",
")",
"scan_instances",
"=",
"[",
"]",
"for",
"scan_json",
"in",
"scans",
":",
"scan_id",
"=",
"scan_json",
"[",
"'id'",
"]",
"scan_status",
"=",
"scan_json",
"[",
"'status'",
"]",
"scan",
"=",
"Scan",
"(",
"self",
",",
"scan_id",
"=",
"scan_id",
",",
"status",
"=",
"scan_status",
")",
"scan_instances",
".",
"append",
"(",
"scan",
")",
"return",
"scan_instances"
] | :return: A list with all the Scan instances available in the remote API | [
":",
"return",
":",
"A",
"list",
"with",
"all",
"the",
"Scan",
"instances",
"available",
"in",
"the",
"remote",
"API"
] | python | train |
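A brief usage sketch; the URL assumes a w3af REST API running locally, and the printed attributes follow the constructor arguments visible in the snippet above:

```python
from w3af_api_client import Connection

conn = Connection('http://127.0.0.1:5000/')   # assumed local w3af REST API
for scan in conn.get_scans():
    print(scan.scan_id, scan.status)
```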
mamrhein/specification | specification/_extd_ast_expr.py | https://github.com/mamrhein/specification/blob/a4c09a0d286cda7a04e8a189f12e23edd97f64ea/specification/_extd_ast_expr.py#L124-L130 | def wrap_expr(self, src: str, dfltChaining: bool) -> str:
"""Wrap `src` in parentheses if neccessary."""
diff_binding = self.op_man.diff_binding()
if diff_binding < 0 or diff_binding == 0 and not dfltChaining:
return self.parenthesize(src)
else:
return src | [
"def",
"wrap_expr",
"(",
"self",
",",
"src",
":",
"str",
",",
"dfltChaining",
":",
"bool",
")",
"->",
"str",
":",
"diff_binding",
"=",
"self",
".",
"op_man",
".",
"diff_binding",
"(",
")",
"if",
"diff_binding",
"<",
"0",
"or",
"diff_binding",
"==",
"0",
"and",
"not",
"dfltChaining",
":",
"return",
"self",
".",
"parenthesize",
"(",
"src",
")",
"else",
":",
"return",
"src"
] | Wrap `src` in parentheses if neccessary. | [
"Wrap",
"src",
"in",
"parentheses",
"if",
"neccessary",
"."
] | python | train |
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/update_service/models/update_campaign_put_request.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/update_service/models/update_campaign_put_request.py#L174-L184 | def root_manifest_id(self, root_manifest_id):
"""
Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str
"""
if root_manifest_id is not None and len(root_manifest_id) > 32:
raise ValueError("Invalid value for `root_manifest_id`, length must be less than or equal to `32`")
self._root_manifest_id = root_manifest_id | [
"def",
"root_manifest_id",
"(",
"self",
",",
"root_manifest_id",
")",
":",
"if",
"root_manifest_id",
"is",
"not",
"None",
"and",
"len",
"(",
"root_manifest_id",
")",
">",
"32",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `root_manifest_id`, length must be less than or equal to `32`\"",
")",
"self",
".",
"_root_manifest_id",
"=",
"root_manifest_id"
] | Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str | [
"Sets",
"the",
"root_manifest_id",
"of",
"this",
"UpdateCampaignPutRequest",
"."
] | python | train |
KvasirSecurity/kvasirapi-python | KvasirAPI/jsonrpc/services.py | https://github.com/KvasirSecurity/kvasirapi-python/blob/ec8c5818bd5913f3afd150f25eaec6e7cc732f4c/KvasirAPI/jsonrpc/services.py#L44-L54 | def info(self, svc_rec=None, ipaddr=None, proto=None, port=None):
"""
Information about a service.
:param svc_rec: t_services.id
:param ipaddr: IP Address
:param proto: Protocol (tcp, udp, info)
:param port: Port (0-65535)
:return: [ service_id, host_id, ipv4, ipv6, hostname, proto, number, status, name, banner ]
"""
return self.send.service_info(svc_rec, ipaddr, proto, port) | [
"def",
"info",
"(",
"self",
",",
"svc_rec",
"=",
"None",
",",
"ipaddr",
"=",
"None",
",",
"proto",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"return",
"self",
".",
"send",
".",
"service_info",
"(",
"svc_rec",
",",
"ipaddr",
",",
"proto",
",",
"port",
")"
] | Information about a service.
:param svc_rec: t_services.id
:param ipaddr: IP Address
:param proto: Protocol (tcp, udp, info)
:param port: Port (0-65535)
:return: [ service_id, host_id, ipv4, ipv6, hostname, proto, number, status, name, banner ] | [
"Information",
"about",
"a",
"service",
"."
] | python | train |
aio-libs/aiodocker | aiodocker/services.py | https://github.com/aio-libs/aiodocker/blob/88d0285ddba8e606ff684278e0a831347209189c/aiodocker/services.py#L168-L182 | async def inspect(self, service_id: str) -> Mapping[str, Any]:
"""
Inspect a service
Args:
service_id: ID or name of the service
Returns:
a dict with info about a service
"""
response = await self.docker._query_json(
"services/{service_id}".format(service_id=service_id), method="GET"
)
return response | [
"async",
"def",
"inspect",
"(",
"self",
",",
"service_id",
":",
"str",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"response",
"=",
"await",
"self",
".",
"docker",
".",
"_query_json",
"(",
"\"services/{service_id}\"",
".",
"format",
"(",
"service_id",
"=",
"service_id",
")",
",",
"method",
"=",
"\"GET\"",
")",
"return",
"response"
] | Inspect a service
Args:
service_id: ID or name of the service
Returns:
a dict with info about a service | [
"Inspect",
"a",
"service"
] | python | train |
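A short asyncio sketch; it assumes a local Docker daemon running in Swarm mode, and the service name is purely illustrative:

```python
import asyncio
import aiodocker

async def main():
    docker = aiodocker.Docker()  # default local Docker socket
    try:
        info = await docker.services.inspect("web")  # service name or ID; illustrative
        print(info["Spec"]["Name"], info["Version"]["Index"])
    finally:
        await docker.close()

asyncio.run(main())
```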
econ-ark/HARK | HARK/ConsumptionSaving/ConsGenIncProcessModel.py | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsGenIncProcessModel.py#L836-L868 | def solve(self):
'''
Solves a one period consumption saving problem with risky income, with
persistent income explicitly tracked as a state variable.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and persistent income), a
marginal value function, bounding MPCs, and human wealth as a func-
tion of persistent income. Might also include a value function and
marginal marginal value function, depending on options selected.
'''
aLvl,pLvl = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
if self.vFuncBool:
self.makeEndOfPrdvFunc(EndOfPrdvP)
if self.CubicBool:
interpolator = self.makeCubiccFunc
else:
interpolator = self.makeLinearcFunc
solution = self.makeBasicSolution(EndOfPrdvP,aLvl,pLvl,interpolator)
solution = self.addMPCandHumanWealth(solution)
if self.vFuncBool:
solution.vFunc = self.makevFunc(solution)
if self.CubicBool:
solution = self.addvPPfunc(solution)
return solution | [
"def",
"solve",
"(",
"self",
")",
":",
"aLvl",
",",
"pLvl",
"=",
"self",
".",
"prepareToCalcEndOfPrdvP",
"(",
")",
"EndOfPrdvP",
"=",
"self",
".",
"calcEndOfPrdvP",
"(",
")",
"if",
"self",
".",
"vFuncBool",
":",
"self",
".",
"makeEndOfPrdvFunc",
"(",
"EndOfPrdvP",
")",
"if",
"self",
".",
"CubicBool",
":",
"interpolator",
"=",
"self",
".",
"makeCubiccFunc",
"else",
":",
"interpolator",
"=",
"self",
".",
"makeLinearcFunc",
"solution",
"=",
"self",
".",
"makeBasicSolution",
"(",
"EndOfPrdvP",
",",
"aLvl",
",",
"pLvl",
",",
"interpolator",
")",
"solution",
"=",
"self",
".",
"addMPCandHumanWealth",
"(",
"solution",
")",
"if",
"self",
".",
"vFuncBool",
":",
"solution",
".",
"vFunc",
"=",
"self",
".",
"makevFunc",
"(",
"solution",
")",
"if",
"self",
".",
"CubicBool",
":",
"solution",
"=",
"self",
".",
"addvPPfunc",
"(",
"solution",
")",
"return",
"solution"
] | Solves a one period consumption saving problem with risky income, with
persistent income explicitly tracked as a state variable.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and persistent income), a
marginal value function, bounding MPCs, and human wealth as a func-
tion of persistent income. Might also include a value function and
marginal marginal value function, depending on options selected. | [
"Solves",
"a",
"one",
"period",
"consumption",
"saving",
"problem",
"with",
"risky",
"income",
"with",
"persistent",
"income",
"explicitly",
"tracked",
"as",
"a",
"state",
"variable",
"."
] | python | train |
suds-community/suds | suds/wsdl.py | https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/wsdl.py#L70-L82 | def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
Can be safely called multiple times.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
if not self.__resolved:
self.do_resolve(definitions)
self.__resolved = True | [
"def",
"resolve",
"(",
"self",
",",
"definitions",
")",
":",
"if",
"not",
"self",
".",
"__resolved",
":",
"self",
".",
"do_resolve",
"(",
"definitions",
")",
"self",
".",
"__resolved",
"=",
"True"
] | Resolve named references to other WSDL objects.
Can be safely called multiple times.
@param definitions: A definitions object.
@type definitions: L{Definitions} | [
"Resolve",
"named",
"references",
"to",
"other",
"WSDL",
"objects",
"."
] | python | train |
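The same resolve-once guard restated outside suds as a generic pattern sketch; the class and method names here are illustrative, not part of the library:

```python
class Resolvable:
    """Public resolve() is idempotent; subclasses implement do_resolve()."""

    def __init__(self):
        self.__resolved = False

    def resolve(self, definitions):
        # Run the real resolution at most once, no matter how often callers ask.
        if not self.__resolved:
            self.do_resolve(definitions)
            self.__resolved = True

    def do_resolve(self, definitions):
        raise NotImplementedError
```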
oxalorg/Stab | stab/watchman.py | https://github.com/oxalorg/Stab/blob/8f0ded780fd7a53a674835c9cb1b7ca08b98f562/stab/watchman.py#L24-L35 | def should_build(self, fpath, meta):
"""
Checks if the file should be built or not
Only skips layouts which are tagged as INCREMENTAL
Rebuilds only those files with mtime changed since previous build
"""
if meta.get('layout', self.default_template) in self.inc_layout:
if self.prev_mtime.get(fpath, 0) == os.path.getmtime(fpath):
return False
else:
return True
return True | [
"def",
"should_build",
"(",
"self",
",",
"fpath",
",",
"meta",
")",
":",
"if",
"meta",
".",
"get",
"(",
"'layout'",
",",
"self",
".",
"default_template",
")",
"in",
"self",
".",
"inc_layout",
":",
"if",
"self",
".",
"prev_mtime",
".",
"get",
"(",
"fpath",
",",
"0",
")",
"==",
"os",
".",
"path",
".",
"getmtime",
"(",
"fpath",
")",
":",
"return",
"False",
"else",
":",
"return",
"True",
"return",
"True"
] | Checks if the file should be built or not
Only skips layouts which are tagged as INCREMENTAL
Rebuilds only those files with mtime changed since previous build | [
"Checks",
"if",
"the",
"file",
"should",
"be",
"built",
"or",
"not",
"Only",
"skips",
"layouts",
"which",
"are",
"tagged",
"as",
"INCREMENTAL",
"Rebuilds",
"only",
"those",
"files",
"with",
"mtime",
"changed",
"since",
"previous",
"build"
] | python | train |
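The mtime comparison above generalizes to any incremental build step; a standalone sketch with an on-disk cache, where the cache path and source file names are illustrative:

```python
import json
import os

CACHE = ".build_mtimes.json"

def load_cache():
    try:
        with open(CACHE) as fh:
            return json.load(fh)
    except FileNotFoundError:
        return {}

def needs_rebuild(fpath, cache):
    # Rebuild when the recorded mtime differs from the file's current mtime.
    return cache.get(fpath, 0) != os.path.getmtime(fpath)

cache = load_cache()
for fpath in ["posts/hello.md", "posts/world.md"]:  # hypothetical sources
    if os.path.exists(fpath) and needs_rebuild(fpath, cache):
        print("rebuilding", fpath)
        cache[fpath] = os.path.getmtime(fpath)

with open(CACHE, "w") as fh:
    json.dump(cache, fh)
```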
etoccalino/django-rest-framework-httpsignature | rest_framework_httpsignature/authentication.py | https://github.com/etoccalino/django-rest-framework-httpsignature/blob/03ac3c213153ae6084c84b8ff61e101798b342a4/rest_framework_httpsignature/authentication.py#L46-L56 | def build_dict_to_sign(self, request, signature_headers):
"""Build a dict with headers and values used in the signature.
"signature_headers" is a list of lowercase header names.
"""
d = {}
for header in signature_headers:
if header == '(request-target)':
continue
d[header] = request.META.get(self.header_canonical(header))
return d | [
"def",
"build_dict_to_sign",
"(",
"self",
",",
"request",
",",
"signature_headers",
")",
":",
"d",
"=",
"{",
"}",
"for",
"header",
"in",
"signature_headers",
":",
"if",
"header",
"==",
"'(request-target)'",
":",
"continue",
"d",
"[",
"header",
"]",
"=",
"request",
".",
"META",
".",
"get",
"(",
"self",
".",
"header_canonical",
"(",
"header",
")",
")",
"return",
"d"
] | Build a dict with headers and values used in the signature.
"signature_headers" is a list of lowercase header names. | [
"Build",
"a",
"dict",
"with",
"headers",
"and",
"values",
"used",
"in",
"the",
"signature",
"."
] | python | train |
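A standalone sketch of the same header gathering against a WSGI-style `META` dict; the `header_canonical` shown here only models the generic `HTTP_` prefix case and is an assumption about the real method's behavior, and the header values are illustrative:

```python
def header_canonical(name):
    # WSGI/Django convention: 'x-api-key' -> 'HTTP_X_API_KEY'.
    # (The real class may special-case headers such as Content-Type.)
    return 'HTTP_' + name.upper().replace('-', '_')

meta = {
    'HTTP_DATE': 'Mon, 17 Feb 2014 06:11:05 GMT',
    'HTTP_X_API_KEY': 'my-key',
}
signature_headers = ['(request-target)', 'date', 'x-api-key']

to_sign = {h: meta.get(header_canonical(h))
           for h in signature_headers if h != '(request-target)'}
print(to_sign)
```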