repo | path | url | code | docstring | language | partition
---|---|---|---|---|---|---
cocaine/cocaine-tools | cocaine/tools/dispatch.py | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L579-L645 | def metrics(ty, query, query_type, **kwargs):
"""
Outputs runtime metrics collected from cocaine-runtime and its services.
This command shows runtime metrics collected from cocaine-runtime and its services during their
lifetime.
There are four kinds of metrics available: gauges, counters, meters and timers.
\b
- Gauges - an instantaneous measurement of a value.
- Counters - just a gauge for an atomic integer instance.
- Meters - measures the rate of events over time (e.g., "requests per second"). In addition
to the mean rate, meters also track 1-, 5-, and 15-minute moving averages.
- Timers - measures both the rate that a particular piece of code is called and the
distribution of its duration.
Every metric has a unique name, which is just a dotted-name string like "connections.count"
or "node.queue.size".
An output type can be configured using the --type option. The default one results in plain
formatting where there is only one depth level.
As an alternative you can expand the JSON tree by specifying the --type=json option. The depth of
the result tree depends on the metric name, which is split on the dot symbol.
The resulting output will probably be too large without any customization. To reduce this output
there are custom filters, which can be specified using the --query option. Technically it's a
special metrics query language (MQL) which supports the following operations and functions:
\b
- contains(<expr>, <expr>) - checks whether the result of the second expression is contained in the
result of the first expression. These expressions must resolve to strings. The output type of this
function is bool.
- name() - resolves to the metric name.
- type() - resolves to the metric type (counter, meter, etc.).
- tag(<expr>) - extracts a custom metric tag and results in a string.
- && - combines several expressions in one, which applies when all of them apply.
- || - combines several expressions in one, which applies when any of them apply.
- == - compares two expressions for equality.
- != - compares two expressions for non-equality.
- Also string literals (alphanumeric with dots) can be used as expressions, for
example "name() == locator.connections.accepted".
Priorities can be specified using parentheses as in usual math expressions.
The grammar for this query language is:
\b
expr ::= term ((AND | OR) term)*
term ::= factor ((EQ | NE) factor)*
factor ::= func | literal | number | LPAREN expr RPAREN
func ::= literal LPAREN expr (,expr)* RPAREN
literal ::= alphanum | .
number ::= <floating point number>
An example of the query, which returns all meters (for all services) and the number of accepted
connections for the Locator
service: "contains(type(), meter) || name() == locator.connections.accepted".
"""
ctx = Context(**kwargs)
ctx.execute_action('metrics', **{
'metrics': ctx.repo.create_secure_service('metrics'),
'ty': ty,
'query': query,
'query_type': query_type,
}) | Outputs runtime metrics collected from cocaine-runtime and its services.
This command shows runtime metrics collected from cocaine-runtime and its services during their
lifetime.
There are four kinds of metrics available: gauges, counters, meters and timers.
\b
- Gauges - an instantaneous measurement of a value.
- Counters - just a gauge for an atomic integer instance.
- Meters - measures the rate of events over time (e.g., "requests per second"). In addition
to the mean rate, meters also track 1-, 5-, and 15-minute moving averages.
- Timers - measures both the rate that a particular piece of code is called and the
distribution of its duration.
Every metric has a unique name, which is just a dotted-name string like "connections.count"
or "node.queue.size".
An output type can be configured using the --type option. The default one results in plain
formatting where there is only one depth level.
As an alternative you can expand the JSON tree by specifying the --type=json option. The depth of
the result tree depends on the metric name, which is split on the dot symbol.
The resulting output will probably be too large without any customization. To reduce this output
there are custom filters, which can be specified using the --query option. Technically it's a
special metrics query language (MQL) which supports the following operations and functions:
\b
- contains(<expr>, <expr>) - checks whether the result of the second expression is contained in the
result of the first expression. These expressions must resolve to strings. The output type of this
function is bool.
- name() - resolves to the metric name.
- type() - resolves to the metric type (counter, meter, etc.).
- tag(<expr>) - extracts a custom metric tag and results in a string.
- && - combines several expressions in one, which applies when all of them apply.
- || - combines several expressions in one, which applies when any of them apply.
- == - compares two expressions for equality.
- != - compares two expressions for non-equality.
- Also string literals (alphanumeric with dots) can be used as expressions, for
example "name() == locator.connections.accepted".
Priorities can be specified using parentheses as in usual math expressions.
The grammar for this query language is:
\b
expr ::= term ((AND | OR) term)*
term ::= factor ((EQ | NE) factor)*
factor ::= func | literal | number | LPAREN expr RPAREN
func ::= literal LPAREN expr (,expr)* RPAREN
literal ::= alphanum | .
number ::= <floating point number>
An example of the query, which returns all meters (for all services) and the number of accepted
connections for the Locator
service: "contains(type(), meter) || name() == locator.connections.accepted". | [
"Outputs",
"runtime",
"metrics",
"collected",
"from",
"cocaine",
"-",
"runtime",
"and",
"its",
"services",
"."
] | python | train |
twisted/epsilon | epsilon/ampauth.py | https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/ampauth.py#L275-L301 | def login(client, credentials):
"""
Authenticate using the given L{AMP} instance. The protocol must be
connected to a server with responders for L{PasswordLogin} and
L{PasswordChallengeResponse}.
@param client: A connected L{AMP} instance which will be used to issue
authentication commands.
@param credentials: An object providing L{IUsernamePassword} which will
be used to authenticate this connection to the server.
@return: A L{Deferred} which fires when authentication has succeeded or
which fails with L{UnauthorizedLogin} if the server rejects the
authentication attempt.
"""
if not IUsernamePassword.providedBy(credentials):
raise UnhandledCredentials()
d = client.callRemote(
PasswordLogin, username=credentials.username)
def cbChallenge(response):
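# Answer the server's challenge using the supplied password, then yield the authenticated client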
args = PasswordChallengeResponse.determineFrom(
response['challenge'], credentials.password)
d = client.callRemote(PasswordChallengeResponse, **args)
return d.addCallback(lambda ignored: client)
d.addCallback(cbChallenge)
return d | Authenticate using the given L{AMP} instance. The protocol must be
connected to a server with responders for L{PasswordLogin} and
L{PasswordChallengeResponse}.
@param client: A connected L{AMP} instance which will be used to issue
authentication commands.
@param credentials: An object providing L{IUsernamePassword} which will
be used to authenticate this connection to the server.
@return: A L{Deferred} which fires when authentication has succeeded or
which fails with L{UnauthorizedLogin} if the server rejects the
authentication attempt. | python | train |
tanghaibao/jcvi | jcvi/formats/gff.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L482-L488 | def _fasta_slice(fasta, seqid, start, stop, strand):
"""
Return slice of fasta, given (seqid, start, stop, strand)
"""
_strand = 1 if strand == '+' else -1
return fasta.sequence({'chr': seqid, 'start': start, 'stop': stop, \
'strand': _strand}) | Return slice of fasta, given (seqid, start, stop, strand) | python | train |
svven/summary | summary/request.py | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/request.py#L11-L25 | def get(url, **kwargs):
"""
Wrapper for `request.get` function to set params.
"""
headers = kwargs.get('headers', {})
headers['User-Agent'] = config.USER_AGENT # overwrite
kwargs['headers'] = headers
timeout = kwargs.get('timeout', config.TIMEOUT)
kwargs['timeout'] = timeout
kwargs['verify'] = False # no SSLError
logger.debug("Getting: %s", url)
return requests.get(url, **kwargs) | Wrapper for `request.get` function to set params. | python | train |
Becksteinlab/GromacsWrapper | gromacs/utilities.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/utilities.py#L662-L672 | def iterable(obj):
"""Returns ``True`` if *obj* can be iterated over and is *not* a string."""
if isinstance(obj, string_types):
return False # avoid iterating over characters of a string
if hasattr(obj, 'next'):
return True # any iterator will do
try:
len(obj) # anything else that might work
except TypeError:
return False
return True | Returns ``True`` if *obj* can be iterated over and is *not* a string. | python | valid |
dead-beef/markovchain | markovchain/base.py | https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/base.py#L132-L145 | def from_storage(cls, storage):
"""Load from storage.
Parameters
----------
storage : `markovchain.storage.Storage`
Returns
-------
`markovchain.Markov`
"""
args = dict(storage.settings.get('markov', {}))
args['storage'] = storage
return cls(**args) | Load from storage.
Parameters
----------
storage : `markovchain.storage.Storage`
Returns
-------
`markovchain.Markov` | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10381-L10401 | def command_int_send(self, target_system, target_component, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z, force_mavlink1=False):
'''
Message encoding a command with parameters as scaled integers. Scaling
depends on the actual command value.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
frame : The coordinate system of the COMMAND. see MAV_FRAME in mavlink_types.h (uint8_t)
command : The scheduled action for the mission item. see MAV_CMD in common.xml MAVLink specs (uint16_t)
current : false:0, true:1 (uint8_t)
autocontinue : autocontinue to next wp (uint8_t)
param1 : PARAM1, see MAV_CMD enum (float)
param2 : PARAM2, see MAV_CMD enum (float)
param3 : PARAM3, see MAV_CMD enum (float)
param4 : PARAM4, see MAV_CMD enum (float)
x : PARAM5 / local: x position in meters * 1e4, global: latitude in degrees * 10^7 (int32_t)
y : PARAM6 / local: y position in meters * 1e4, global: longitude in degrees * 10^7 (int32_t)
z : PARAM7 / z position: global: altitude in meters (relative or absolute, depending on frame. (float)
'''
return self.send(self.command_int_encode(target_system, target_component, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z), force_mavlink1=force_mavlink1) | Message encoding a command with parameters as scaled integers. Scaling
depends on the actual command value.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
frame : The coordinate system of the COMMAND. see MAV_FRAME in mavlink_types.h (uint8_t)
command : The scheduled action for the mission item. see MAV_CMD in common.xml MAVLink specs (uint16_t)
current : false:0, true:1 (uint8_t)
autocontinue : autocontinue to next wp (uint8_t)
param1 : PARAM1, see MAV_CMD enum (float)
param2 : PARAM2, see MAV_CMD enum (float)
param3 : PARAM3, see MAV_CMD enum (float)
param4 : PARAM4, see MAV_CMD enum (float)
x : PARAM5 / local: x position in meters * 1e4, global: latitude in degrees * 10^7 (int32_t)
y : PARAM6 / local: y position in meters * 1e4, global: longitude in degrees * 10^7 (int32_t)
z : PARAM7 / z position: global: altitude in meters (relative or absolute, depending on frame. (float) | python | train |
dhermes/bezier | docs/make_images.py | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/docs/make_images.py#L797-L836 | def classify_intersection1(s, curve1, tangent1, curve2, tangent2):
"""Image for :func:`._surface_helpers.classify_intersection` docstring."""
if NO_IMAGES:
return
surface1 = bezier.Surface.from_nodes(
np.asfortranarray(
[[1.0, 1.75, 2.0, 1.0, 1.5, 1.0], [0.0, 0.25, 1.0, 1.0, 1.5, 2.0]]
)
)
surface2 = bezier.Surface.from_nodes(
np.asfortranarray(
[
[0.0, 1.6875, 2.0, 0.25, 1.25, 0.5],
[0.0, 0.0625, 0.5, 1.0, 1.25, 2.0],
]
)
)
ax = classify_help(s, curve1, surface1, curve2, surface2, 0)
(int_x,), (int_y,) = curve1.evaluate(s)
# Remove the alpha from the color
color1 = ax.patches[0].get_facecolor()[:3]
color2 = ax.patches[1].get_facecolor()[:3]
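# Draw both tangent vectors at the intersection point as dashed lines in the matching patch colors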
ax.plot(
[int_x, int_x + tangent1[0, 0]],
[int_y, int_y + tangent1[1, 0]],
color=color1,
linestyle="dashed",
)
ax.plot(
[int_x, int_x + tangent2[0, 0]],
[int_y, int_y + tangent2[1, 0]],
color=color2,
linestyle="dashed",
)
ax.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "classify_intersection1.png") | Image for :func:`._surface_helpers.classify_intersection` docstring. | python | train |
apriha/lineage | src/lineage/resources.py | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L156-L238 | def download_example_datasets(self):
""" Download example datasets from `openSNP <https://opensnp.org>`_.
Per openSNP, "the data is donated into the public domain using `CC0 1.0
<http://creativecommons.org/publicdomain/zero/1.0/>`_."
Returns
-------
paths : list of str or None
paths to example datasets
References
----------
..[1] Greshake B, Bayer PE, Rausch H, Reda J (2014), "openSNP-A Crowdsourced Web Resource
for Personal Genomics," PLOS ONE, 9(3): e89204,
https://doi.org/10.1371/journal.pone.0089204
"""
paths = []
paths.append(
self._download_file(
"https://opensnp.org/data/662.23andme.304",
"662.23andme.304.txt.gz",
compress=True,
)
)
paths.append(
self._download_file(
"https://opensnp.org/data/662.23andme.340",
"662.23andme.340.txt.gz",
compress=True,
)
)
paths.append(
self._download_file(
"https://opensnp.org/data/662.ftdna-illumina.341",
"662.ftdna-illumina.341.csv.gz",
compress=True,
)
)
paths.append(
self._download_file(
"https://opensnp.org/data/663.23andme.305",
"663.23andme.305.txt.gz",
compress=True,
)
)
# these two files consist of concatenated gzip files and therefore need special handling
paths.append(
self._download_file(
"https://opensnp.org/data/4583.ftdna-illumina.3482",
"4583.ftdna-illumina.3482.csv.gz",
)
)
paths.append(
self._download_file(
"https://opensnp.org/data/4584.ftdna-illumina.3483",
"4584.ftdna-illumina.3483.csv.gz",
)
)
try:
for gzip_path in paths[-2:]:
# https://stackoverflow.com/q/4928560
# https://stackoverflow.com/a/37042747
with open(gzip_path, "rb") as f:
decompressor = zlib.decompressobj(31)
# decompress data from first concatenated gzip file
data = decompressor.decompress(f.read())
if len(decompressor.unused_data) > 0:
# decompress data from second concatenated gzip file, if any
additional_data = zlib.decompress(decompressor.unused_data, 31)
data += additional_data[33:] # skip over second header
# recompress data
with gzip.open(gzip_path, "wb") as f:
f.write(data)
except Exception as err:
print(err)
return paths | Download example datasets from `openSNP <https://opensnp.org>`_.
Per openSNP, "the data is donated into the public domain using `CC0 1.0
<http://creativecommons.org/publicdomain/zero/1.0/>`_."
Returns
-------
paths : list of str or None
paths to example datasets
References
----------
..[1] Greshake B, Bayer PE, Rausch H, Reda J (2014), "openSNP-A Crowdsourced Web Resource
for Personal Genomics," PLOS ONE, 9(3): e89204,
https://doi.org/10.1371/journal.pone.0089204 | [
"Download",
"example",
"datasets",
"from",
"openSNP",
"<https",
":",
"//",
"opensnp",
".",
"org",
">",
"_",
"."
] | python | train |
coldfix/udiskie | udiskie/prompt.py | https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/prompt.py#L160-L168 | async def get_password_tty(device, options):
"""Get the password to unlock a device from terminal."""
# TODO: make this a TRUE async
text = _('Enter password for {0.device_presentation}: ', device)
try:
return getpass.getpass(text)
except EOFError:
print("")
return None | Get the password to unlock a device from terminal. | python | train |
lyst/lightfm | lightfm/datasets/movielens.py | https://github.com/lyst/lightfm/blob/87b942f87759b8336f9066a25e4762ae7d95455e/lightfm/datasets/movielens.py#L12-L23 | def _read_raw_data(path):
"""
Return the raw lines of the train and test files.
"""
with zipfile.ZipFile(path) as datafile:
return (
datafile.read("ml-100k/ua.base").decode().split("\n"),
datafile.read("ml-100k/ua.test").decode().split("\n"),
datafile.read("ml-100k/u.item").decode(errors="ignore").split("\n"),
datafile.read("ml-100k/u.genre").decode(errors="ignore").split("\n"),
) | Return the raw lines of the train and test files. | python | train |
icometrix/dicom2nifti | dicom2nifti/common.py | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L478-L492 | def write_bvec_file(bvecs, bvec_file):
"""
Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to
"""
if bvec_file is None:
return
logger.info('Saving BVEC file: %s' % bvec_file)
with open(bvec_file, 'w') as text_file:
# Map a dicection to string join them using a space and write to the file
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2]))) | Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to | python | train |
tcalmant/ipopo | pelix/remote/beans.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/remote/beans.py#L830-L848 | def format_specifications(specifications):
# type: (Iterable[str]) -> List[str]
"""
Transforms the interface names into URI strings, with the interface
implementation language as a scheme.
:param specifications: Specifications to transform
:return: The transformed names
"""
transformed = set()
for original in specifications:
try:
lang, spec = _extract_specification_parts(original)
transformed.add(_format_specification(lang, spec))
except ValueError:
# Ignore invalid specifications
pass
return list(transformed) | Transforms the interface names into URI strings, with the interface
implementation language as a scheme.
:param specifications: Specifications to transform
:return: The transformed names | python | train |
jjgomera/iapws | iapws/_utils.py | https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/_utils.py#L194-L261 | def deriv_G(state, z, x, y, fase):
r"""Calculate generic partial derivative
:math:`\left.\frac{\partial z}{\partial x}\right|_{y}` from a fundamental
Gibbs free energy equation of state
Parameters
----------
state : any python object
Only need to define P and T properties, non phase specific properties
z : str
Name of variables in numerator term of derivatives
x : str
Name of variables in denominator term of derivatives
y : str
Name of constant variable in partial derivative
fase : any python object
Define phase specific properties (v, cp, alfav, s, xkappa)
Notes
-----
x, y and z can be the following values:
* P: Pressure
* T: Temperature
* v: Specific volume
* rho: Density
* u: Internal Energy
* h: Enthalpy
* s: Entropy
* g: Gibbs free energy
* a: Helmholtz free energy
Returns
-------
deriv : float
∂z/∂x|y
References
----------
IAPWS, Revised Advisory Note No. 3: Thermodynamic Derivatives from IAPWS
Formulations, http://www.iapws.org/relguide/Advise3.pdf
"""
mul = 1
if z == "rho":
mul = -fase.rho**2
z = "v"
if x == "rho":
mul = -1/fase.rho**2
x = "v"
dT = {"P": 0,
"T": 1,
"v": fase.v*fase.alfav,
"u": fase.cp-state.P*1000*fase.v*fase.alfav,
"h": fase.cp,
"s": fase.cp/state.T,
"g": -fase.s,
"a": -state.P*1000*fase.v*fase.alfav-fase.s}
dP = {"P": 1,
"T": 0,
"v": -fase.v*fase.xkappa,
"u": fase.v*(state.P*1000*fase.xkappa-state.T*fase.alfav),
"h": fase.v*(1-state.T*fase.alfav),
"s": -fase.v*fase.alfav,
"g": fase.v,
"a": state.P*1000*fase.v*fase.xkappa}
deriv = (dP[z]*dT[y]-dT[z]*dP[y])/(dP[x]*dT[y]-dT[x]*dP[y])
return mul*deriv | r"""Calculate generic partial derivative
:math:`\left.\frac{\partial z}{\partial x}\right|_{y}` from a fundamental
Gibbs free energy equation of state
Parameters
----------
state : any python object
Only need to define P and T properties, non phase specific properties
z : str
Name of variables in numerator term of derivatives
x : str
Name of variables in denominator term of derivatives
y : str
Name of constant variable in partial derivative
fase : any python object
Define phase specific properties (v, cp, alfav, s, xkappa)
Notes
-----
x, y and z can be the following values:
* P: Pressure
* T: Temperature
* v: Specific volume
* rho: Density
* u: Internal Energy
* h: Enthalpy
* s: Entropy
* g: Gibbs free energy
* a: Helmholtz free energy
Returns
-------
deriv : float
∂z/∂x|y
References
----------
IAPWS, Revised Advisory Note No. 3: Thermodynamic Derivatives from IAPWS
Formulations, http://www.iapws.org/relguide/Advise3.pdf | python | train |
ewels/MultiQC | multiqc/plots/linegraph.py | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/plots/linegraph.py#L40-L161 | def plot (data, pconfig=None):
""" Plot a line graph with X,Y data.
:param data: 2D dict, first keys as sample names, then x:y data pairs
:param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
:return: HTML and JS, ready to be inserted into the page
"""
# Don't just use {} as the default argument as it's mutable. See:
# http://python-guide-pt-br.readthedocs.io/en/latest/writing/gotchas/
if pconfig is None:
pconfig = {}
# Allow user to overwrite any given config for this plot
if 'id' in pconfig and pconfig['id'] and pconfig['id'] in config.custom_plot_config:
for k, v in config.custom_plot_config[pconfig['id']].items():
pconfig[k] = v
# Given one dataset - turn it into a list
if type(data) is not list:
data = [data]
# Smooth dataset if requested in config
if pconfig.get('smooth_points', None) is not None:
sumcounts = pconfig.get('smooth_points_sumcounts', True)
for i, d in enumerate(data):
if type(sumcounts) is list:
sumc = sumcounts[i]
else:
sumc = sumcounts
data[i] = smooth_line_data(d, pconfig['smooth_points'], sumc)
# Add sane plotting config defaults
for idx, yp in enumerate(pconfig.get('yPlotLines', [])):
pconfig['yPlotLines'][idx]["width"] = pconfig['yPlotLines'][idx].get("width", 2)
# Add initial axis labels if defined in `data_labels` but not main config
if pconfig.get('ylab') is None:
try:
pconfig['ylab'] = pconfig['data_labels'][0]['ylab']
except (KeyError, IndexError):
pass
if pconfig.get('xlab') is None:
try:
pconfig['xlab'] = pconfig['data_labels'][0]['xlab']
except (KeyError, IndexError):
pass
# Generate the data dict structure expected by HighCharts series
plotdata = list()
for data_index, d in enumerate(data):
thisplotdata = list()
for s in sorted(d.keys()):
# Ensure any overwriting conditionals from data_labels (e.g. ymax) are taken into consideration
series_config = pconfig.copy()
if 'data_labels' in pconfig and type(pconfig['data_labels'][data_index]) is dict: # if not a dict: only dataset name is provided
series_config.update(pconfig['data_labels'][data_index])
pairs = list()
maxval = 0
if 'categories' in series_config:
pconfig['categories'] = list()
for k in d[s].keys():
pconfig['categories'].append(k)
pairs.append(d[s][k])
maxval = max(maxval, d[s][k])
else:
for k in sorted(d[s].keys()):
if k is not None:
if 'xmax' in series_config and float(k) > float(series_config['xmax']):
continue
if 'xmin' in series_config and float(k) < float(series_config['xmin']):
continue
if d[s][k] is not None:
if 'ymax' in series_config and float(d[s][k]) > float(series_config['ymax']):
continue
if 'ymin' in series_config and float(d[s][k]) < float(series_config['ymin']):
continue
pairs.append([k, d[s][k]])
try:
maxval = max(maxval, d[s][k])
except TypeError:
pass
if maxval > 0 or series_config.get('hide_empty') is not True:
this_series = { 'name': s, 'data': pairs }
try:
this_series['color'] = series_config['colors'][s]
except:
pass
thisplotdata.append(this_series)
plotdata.append(thisplotdata)
# Add on annotation data series
try:
if pconfig.get('extra_series'):
extra_series = pconfig['extra_series']
if type(pconfig['extra_series']) == dict:
extra_series = [[ pconfig['extra_series'] ]]
elif type(pconfig['extra_series']) == list and type(pconfig['extra_series'][0]) == dict:
extra_series = [ pconfig['extra_series'] ]
for i, es in enumerate(extra_series):
for s in es:
plotdata[i].append(s)
except (KeyError, IndexError):
pass
# Make a plot - template custom, or interactive or flat
try:
return get_template_mod().linegraph(plotdata, pconfig)
except (AttributeError, TypeError):
if config.plots_force_flat or (not config.plots_force_interactive and len(plotdata[0]) > config.plots_flat_numseries):
try:
return matplotlib_linegraph(plotdata, pconfig)
except:
logger.error("############### Error making MatPlotLib figure! Falling back to HighCharts.")
return highcharts_linegraph(plotdata, pconfig)
else:
# Use MatPlotLib to generate static plots if requested
if config.export_plots:
matplotlib_linegraph(plotdata, pconfig)
# Return HTML for HighCharts dynamic plot
return highcharts_linegraph(plotdata, pconfig) | Plot a line graph with X,Y data.
:param data: 2D dict, first keys as sample names, then x:y data pairs
:param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
:return: HTML and JS, ready to be inserted into the page | python | train |
razor-x/scipy-data_fitting | scipy_data_fitting/model.py | https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/model.py#L135-L184 | def replace(self, expression, replacements):
"""
All purpose method to reduce an expression by applying
successive replacement rules.
`expression` is either a SymPy expression
or a key in `scipy_data_fitting.Model.expressions`.
`replacements` can be any of the following,
or a list of any combination of the following:
- A replacement tuple as in `scipy_data_fitting.Model.replacements`.
- The name of a replacement in `scipy_data_fitting.Model.replacements`.
- The name of a replacement group in `scipy_data_fitting.Model.replacement_groups`.
Examples:
#!python
>>> model.replace(x + y, (x, z))
z + y
>>> model.replace('expression', (x, z))
>>> model.replace('expression', 'replacement')
>>> model.replace('expression', ['replacement_1', 'replacement_2'])
>>> model.replace('expression', ['replacement', 'group'])
"""
# When expression is a string,
# get the expressions from self.expressions.
if isinstance(expression, str):
expression = self.expressions[expression]
# Allow for replacements to be empty.
if not replacements:
return expression
# Allow replacements to be a string.
if isinstance(replacements, str):
if replacements in self.replacements:
return self.replace(expression, self.replacements[replacements])
elif replacements in self.replacement_groups:
return self.replace(expression, self.replacement_groups[replacements])
# When replacements is a list of strings or tuples,
# Use reduce to make all the replacements.
if all(isinstance(item, str) for item in replacements) \
or all(isinstance(item, tuple) for item in replacements):
return functools.reduce(self.replace, replacements, expression)
# Otherwise make the replacement.
return expression.replace(*replacements) | All purpose method to reduce an expression by applying
successive replacement rules.
`expression` is either a SymPy expression
or a key in `scipy_data_fitting.Model.expressions`.
`replacements` can be any of the following,
or a list of any combination of the following:
- A replacement tuple as in `scipy_data_fitting.Model.replacements`.
- The name of a replacement in `scipy_data_fitting.Model.replacements`.
- The name of a replacement group in `scipy_data_fitting.Model.replacement_groups`.
Examples:
#!python
>>> model.replace(x + y, (x, z))
z + y
>>> model.replace('expression', (x, z))
>>> model.replace('expression', 'replacement')
>>> model.replace('expression', ['replacement_1', 'replacement_2'])
>>> model.replace('expression', ['replacement', 'group']) | python | train |
goerz/clusterjob | clusterjob/__init__.py | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L927-L952 | def status(self):
"""Return the job status as one of the codes defined in the
`clusterjob.status` module.
finished, communicate with the cluster to determine the job's status.
"""
if self._status >= COMPLETED:
return self._status
else:
cmd = self.backend.cmd_status(self, finished=False)
response = self._run_cmd(cmd, self.remote, ignore_exit_code=True,
ssh=self.ssh)
status = self.backend.get_status(response, finished=False)
if status is None:
cmd = self.backend.cmd_status(self, finished=True)
response = self._run_cmd(cmd, self.remote,
ignore_exit_code=True, ssh=self.ssh)
status = self.backend.get_status(response, finished=True)
prev_status = self._status
self._status = status
if self._status not in STATUS_CODES:
raise ValueError("Invalid status code %s", self._status)
if prev_status != self._status:
if self._status >= COMPLETED:
self.run_epilogue()
self.dump()
return self._status | Return the job status as one of the codes defined in the
`clusterjob.status` module.
finished, communicate with the cluster to determine the job's status. | python | train |
subdownloader/subdownloader | subdownloader/util.py | https://github.com/subdownloader/subdownloader/blob/bbccedd11b18d925ad4c062b5eb65981e24d0433/subdownloader/util.py#L43-L50 | def write_stream(src_file, destination_path):
"""
Write the file-like src_file object to the string dest_path
:param src_file: file-like data to be written
:param destination_path: string of the destination file
"""
with open(destination_path, 'wb') as destination_file:
shutil.copyfileobj(fsrc=src_file, fdst=destination_file) | Write the file-like src_file object to the string dest_path
:param src_file: file-like data to be written
:param destination_path: string of the destination file | python | train |
spacetelescope/synphot_refactor | synphot/spectrum.py | https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/spectrum.py#L204-L239 | def _merge_meta(left, right, result, clean=True):
"""Merge metadata from left and right onto results.
This is used during class initialization.
This should also be used by operators to merge metadata after
creating a new instance but before returning it.
Result's metadata is modified in-place.
Parameters
----------
left, right : number, `BaseSpectrum`, or `~astropy.modeling.models`
Inputs of an operation.
result : `BaseSpectrum`
Output spectrum object.
clean : bool
Remove ``'header'`` and ``'expr'`` entries from inputs.
"""
# Copies are returned because they need some clean-up below.
left = BaseSpectrum._get_meta(left)
right = BaseSpectrum._get_meta(right)
# Remove these from going into result to avoid mess.
# header = FITS header metadata
# expr = ASTROLIB PYSYNPHOT expression
if clean:
for key in ('header', 'expr'):
for d in (left, right):
if key in d:
del d[key]
mid = metadata.merge(left, right, metadata_conflicts='silent')
result.meta = metadata.merge(result.meta, mid,
metadata_conflicts='silent') | [
"def",
"_merge_meta",
"(",
"left",
",",
"right",
",",
"result",
",",
"clean",
"=",
"True",
")",
":",
"# Copies are returned because they need some clean-up below.",
"left",
"=",
"BaseSpectrum",
".",
"_get_meta",
"(",
"left",
")",
"right",
"=",
"BaseSpectrum",
".",
"_get_meta",
"(",
"right",
")",
"# Remove these from going into result to avoid mess.",
"# header = FITS header metadata",
"# expr = ASTROLIB PYSYNPHOT expression",
"if",
"clean",
":",
"for",
"key",
"in",
"(",
"'header'",
",",
"'expr'",
")",
":",
"for",
"d",
"in",
"(",
"left",
",",
"right",
")",
":",
"if",
"key",
"in",
"d",
":",
"del",
"d",
"[",
"key",
"]",
"mid",
"=",
"metadata",
".",
"merge",
"(",
"left",
",",
"right",
",",
"metadata_conflicts",
"=",
"'silent'",
")",
"result",
".",
"meta",
"=",
"metadata",
".",
"merge",
"(",
"result",
".",
"meta",
",",
"mid",
",",
"metadata_conflicts",
"=",
"'silent'",
")"
] | Merge metadata from left and right onto results.
This is used during class initialization.
This should also be used by operators to merge metadata after
creating a new instance but before returning it.
Result's metadata is modified in-place.
Parameters
----------
left, right : number, `BaseSpectrum`, or `~astropy.modeling.models`
Inputs of an operation.
result : `BaseSpectrum`
Output spectrum object.
clean : bool
Remove ``'header'`` and ``'expr'`` entries from inputs. | [
"Merge",
"metadata",
"from",
"left",
"and",
"right",
"onto",
"results",
"."
] | python | train |
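The heavy lifting in the record above is astropy's metadata merger. A hedged sketch of just that call, assuming `metadata` refers to astropy.utils.metadata (the header values are invented):

from astropy.utils.metadata import merge

left = {'telescope': 'HST', 'exptime': 100}
right = {'telescope': 'HST', 'filter': 'F555W'}
# metadata_conflicts='silent' resolves clashing keys without raising or warning.
print(merge(left, right, metadata_conflicts='silent'))
# {'telescope': 'HST', 'exptime': 100, 'filter': 'F555W'}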
trombastic/PyScada | pyscada/utils/scheduler.py | https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/utils/scheduler.py#L818-L835 | def init_process(self):
"""
init a standard daq process for multiple devices
"""
for item in Device.objects.filter(protocol__daq_daemon=1, active=1, id__in=self.device_ids):
try:
tmp_device = item.get_device_instance()
if tmp_device is not None:
self.devices[item.pk] = tmp_device
self.dt_set = min(self.dt_set, item.polling_interval)
self.dt_query_data = min(self.dt_query_data, item.polling_interval)
except:
var = traceback.format_exc()
logger.error("exception while initialisation of DAQ Process for Device %d %s %s" % (
item.pk, linesep, var))
return True | [
"def",
"init_process",
"(",
"self",
")",
":",
"for",
"item",
"in",
"Device",
".",
"objects",
".",
"filter",
"(",
"protocol__daq_daemon",
"=",
"1",
",",
"active",
"=",
"1",
",",
"id__in",
"=",
"self",
".",
"device_ids",
")",
":",
"try",
":",
"tmp_device",
"=",
"item",
".",
"get_device_instance",
"(",
")",
"if",
"tmp_device",
"is",
"not",
"None",
":",
"self",
".",
"devices",
"[",
"item",
".",
"pk",
"]",
"=",
"tmp_device",
"self",
".",
"dt_set",
"=",
"min",
"(",
"self",
".",
"dt_set",
",",
"item",
".",
"polling_interval",
")",
"self",
".",
"dt_query_data",
"=",
"min",
"(",
"self",
".",
"dt_query_data",
",",
"item",
".",
"polling_interval",
")",
"except",
":",
"var",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"logger",
".",
"error",
"(",
"\"exception while initialisation of DAQ Process for Device %d %s %s\"",
"%",
"(",
"item",
".",
"pk",
",",
"linesep",
",",
"var",
")",
")",
"return",
"True"
] | init a standard daq process for multiple devices | [
"init",
"a",
"standard",
"daq",
"process",
"for",
"multiple",
"devices"
] | python | train |
ConsenSys/mythril-classic | mythril/mythril/mythril_config.py | https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/mythril/mythril_config.py#L124-L139 | def _add_leveldb_option(config: ConfigParser, leveldb_fallback_dir: str) -> None:
"""
Sets a default leveldb path in .mythril/config.ini file
:param config: The config file object
:param leveldb_fallback_dir: The leveldb dir to use by default for searches
:return: None
"""
config.set("defaults", "#Default chaindata locations:", "")
config.set("defaults", "#– Mac: ~/Library/Ethereum/geth/chaindata", "")
config.set("defaults", "#– Linux: ~/.ethereum/geth/chaindata", "")
config.set(
"defaults",
"#– Windows: %USERPROFILE%\\AppData\\Roaming\\Ethereum\\geth\\chaindata",
"",
)
config.set("defaults", "leveldb_dir", leveldb_fallback_dir) | [
"def",
"_add_leveldb_option",
"(",
"config",
":",
"ConfigParser",
",",
"leveldb_fallback_dir",
":",
"str",
")",
"->",
"None",
":",
"config",
".",
"set",
"(",
"\"defaults\"",
",",
"\"#Default chaindata locations:\"",
",",
"\"\"",
")",
"config",
".",
"set",
"(",
"\"defaults\"",
",",
"\"#– Mac: ~/Library/Ethereum/geth/chaindata\", ",
"\"",
")",
"",
"config",
".",
"set",
"(",
"\"defaults\"",
",",
"\"#– Linux: ~/.ethereum/geth/chaindata\", ",
"\"",
")",
"",
"config",
".",
"set",
"(",
"\"defaults\"",
",",
"\"#– Windows: %USERPROFILE%\\\\AppData\\\\Roaming\\\\Ethereum\\\\geth\\\\chaindata\",",
"",
"\"\"",
",",
")",
"config",
".",
"set",
"(",
"\"defaults\"",
",",
"\"leveldb_dir\"",
",",
"leveldb_fallback_dir",
")"
] | Sets a default leveldb path in .mythril/config.ini file
:param config: The config file object
:param leveldb_fallback_dir: The leveldb dir to use by default for searches
:return: None | [
"Sets",
"a",
"default",
"leveldb",
"path",
"in",
".",
"mythril",
"/",
"config",
".",
"ini",
"file",
":",
"param",
"config",
":",
"The",
"config",
"file",
"object",
":",
"param",
"leveldb_fallback_dir",
":",
"The",
"leveldb",
"dir",
"to",
"use",
"by",
"default",
"for",
"searches",
":",
"return",
":",
"None"
] | python | train |
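The same pattern isolated on the standard-library ConfigParser; the section name and fallback directory below are illustrative, not mythril's shipped defaults:

from configparser import ConfigParser

def add_leveldb_option(config, leveldb_fallback_dir):
    # Comment-style keys double as inline documentation in the written .ini file.
    config.set("defaults", "#Default chaindata locations:", "")
    config.set("defaults", "leveldb_dir", leveldb_fallback_dir)

config = ConfigParser()
config.add_section("defaults")
add_leveldb_option(config, "~/.ethereum/geth/chaindata")  # hypothetical fallback
print(config.get("defaults", "leveldb_dir"))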
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L109-L131 | def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of a boolean is an interesting piece of information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name) | [
"def",
"describe_boolean_1d",
"(",
"series",
")",
":",
"value_counts",
",",
"distinct_count",
"=",
"base",
".",
"get_groupby_statistic",
"(",
"series",
")",
"top",
",",
"freq",
"=",
"value_counts",
".",
"index",
"[",
"0",
"]",
",",
"value_counts",
".",
"iloc",
"[",
"0",
"]",
"# The mean of boolean is an interesting information",
"mean",
"=",
"series",
".",
"mean",
"(",
")",
"names",
"=",
"[",
"]",
"result",
"=",
"[",
"]",
"names",
"+=",
"[",
"'top'",
",",
"'freq'",
",",
"'type'",
",",
"'mean'",
"]",
"result",
"+=",
"[",
"top",
",",
"freq",
",",
"base",
".",
"TYPE_BOOL",
",",
"mean",
"]",
"return",
"pd",
".",
"Series",
"(",
"result",
",",
"index",
"=",
"names",
",",
"name",
"=",
"series",
".",
"name",
")"
] | Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | [
"Compute",
"summary",
"statistics",
"of",
"a",
"boolean",
"(",
"TYPE_BOOL",
")",
"variable",
"(",
"a",
"Series",
")",
"."
] | python | train |
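A stand-alone approximation of the same statistics; value_counts() stands in for the library's internal get_groupby_statistic helper, so the grouping details differ from pandas-profiling's real code path:

import pandas as pd

def describe_boolean_1d(series):
    # Most frequent value, its count, a type tag, and the mean (fraction of True).
    counts = series.value_counts()
    top, freq = counts.index[0], counts.iloc[0]
    return pd.Series([top, freq, 'BOOL', series.mean()],
                     index=['top', 'freq', 'type', 'mean'], name=series.name)

print(describe_boolean_1d(pd.Series([True, True, False, True], name='flag')))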
BlackEarth/bf | bf/scss.py | https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/scss.py#L13-L23 | def render_css(self, fn=None, text=None, margin='', indent='\t'):
"""output css using the Sass processor"""
fn = fn or os.path.splitext(self.fn)[0]+'.css'
if not os.path.exists(os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn))
curdir = os.path.abspath(os.curdir)
os.chdir(os.path.dirname(fn)) # needed in order for scss to resolve relative @import
text = text or self.render_styles()
if text != '': text = sass.compile(string=text)
os.chdir(curdir)
return CSS(fn=fn, text=text) | [
"def",
"render_css",
"(",
"self",
",",
"fn",
"=",
"None",
",",
"text",
"=",
"None",
",",
"margin",
"=",
"''",
",",
"indent",
"=",
"'\\t'",
")",
":",
"fn",
"=",
"fn",
"or",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"fn",
")",
"[",
"0",
"]",
"+",
"'.css'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
")",
"curdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"curdir",
")",
"os",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
")",
"# needed in order for scss to relative @import",
"text",
"=",
"text",
"or",
"self",
".",
"render_styles",
"(",
")",
"if",
"text",
"!=",
"''",
":",
"text",
"=",
"sass",
".",
"compile",
"(",
"string",
"=",
"text",
")",
"os",
".",
"chdir",
"(",
"curdir",
")",
"return",
"CSS",
"(",
"fn",
"=",
"fn",
",",
"text",
"=",
"text",
")"
] | output css using the Sass processor | [
"output",
"css",
"using",
"the",
"Sass",
"processor"
] | python | train |
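The compilation step on its own, using libsass-python's documented string API (the SCSS snippet is made up and pip install libsass is assumed):

import sass

scss = '$accent: #336699;\nnav a { color: $accent; }'
# sass.compile(string=...) returns the compiled CSS as a str.
print(sass.compile(string=scss))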
jgillick/LendingClub | lendingclub/filters.py | https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L197-L207 | def __normalize_grades(self):
"""
Adjust the grades list.
If a grade has been set, set All to false
"""
if 'grades' in self and self['grades']['All'] is True:
for grade in self['grades']:
if grade != 'All' and self['grades'][grade] is True:
self['grades']['All'] = False
break | [
"def",
"__normalize_grades",
"(",
"self",
")",
":",
"if",
"'grades'",
"in",
"self",
"and",
"self",
"[",
"'grades'",
"]",
"[",
"'All'",
"]",
"is",
"True",
":",
"for",
"grade",
"in",
"self",
"[",
"'grades'",
"]",
":",
"if",
"grade",
"!=",
"'All'",
"and",
"self",
"[",
"'grades'",
"]",
"[",
"grade",
"]",
"is",
"True",
":",
"self",
"[",
"'grades'",
"]",
"[",
"'All'",
"]",
"=",
"False",
"break"
] | Adjust the grades list.
If a grade has been set, set All to false | [
"Adjust",
"the",
"grades",
"list",
".",
"If",
"a",
"grade",
"has",
"been",
"set",
"set",
"All",
"to",
"false"
] | python | train |
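The normalization rule restated as a self-contained sketch over a plain dict (the grade keys are invented):

def normalize_grades(filters):
    # If any specific grade is selected, clear the catch-all 'All' flag.
    grades = filters.get('grades', {})
    if grades.get('All') is True:
        if any(on is True for grade, on in grades.items() if grade != 'All'):
            grades['All'] = False
    return filters

print(normalize_grades({'grades': {'All': True, 'A': True, 'B': False}}))
# {'grades': {'All': False, 'A': True, 'B': False}}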
cnschema/cdata | cdata/summary.py | https://github.com/cnschema/cdata/blob/893e2e1e27b61c8551c8b5f5f9bf05ec61490e23/cdata/summary.py#L25-L117 | def summarize_entity_person(person):
"""
assume person entity using cnschema person vocabulary, http://cnschema.org/Person
"""
ret = []
value = person.get("name")
if not value:
return False
ret.append(value)
prop = "courtesyName"
value = json_get_first_item(person, prop)
if value == u"不详":
value = ""
if value:
ret.append(u'字{}'.format(value))
value = person.get("alternateName")
if value:
#ret.append(u'别名{}'.format(value))
# Bugged
pass
prop = "artName"
value = json_get_first_item(person, prop)
if value:
ret.append(u'号{}'.format(value))
value = person.get("dynasty")
if value:
ret.append(u'{}人'.format(value))
prop = "ancestralHome"
value = json_get_first_item(person, prop)
if value:
ret.append(u'祖籍{}'.format(value))
birth_date = person.get("birthDate", "")
birth_place = person.get("birthPlace", "")
# Special case for unknown birth date
if birth_date == u"不详":
birth_date = ""
if birth_place:
ret.append(u'{}出生于{}'.format(birth_date, birth_place))
elif birth_date:
ret.append(u'{}出生'.format(birth_date))
prop = "nationality"
nationality = json_get_first_item(person, prop)
prop = "occupation"
occupation = json_get_first_item(person, prop)
if occupation:
if nationality:
ret.append(u'{}{}'.format(nationality, occupation))
else:
ret.append(u'{}'.format(occupation))
elif nationality:
ret.append(u'{}人'.format(nationality))
prop = "authorOf"
value = json_get_list(person, prop)
if value:
logging.info(value)
value = u"、".join(value)
ret.append(u'主要作品:{}'.format(value) )
prop = "accomplishment"
value = json_get_list(person, prop)
if value:
value = u"、".join(value)
if len(value) < 30:
# Colon is handled by text reading software
ret.append( u"主要成就:{}".format(value) )
ret = u",".join(ret)
# Make all commas Chinese
ret = ret.replace(u',', u',')
ret = re.sub(u",+", u",", ret) # Removes repeat commas
# Handles periods at end
ret = re.sub(ur"[。,]+$", u"", ret)
# Converts brackets to Chinese
ret = ret.replace(u'(', u'(')
ret = ret.replace(u')', u')')
# Removes brackets and all contained info
ret = re.sub(ur"([^)]*)", u"", ret)
ret = u''.join([ret, u"。"])
return ret | [
"def",
"summarize_entity_person",
"(",
"person",
")",
":",
"ret",
"=",
"[",
"]",
"value",
"=",
"person",
".",
"get",
"(",
"\"name\"",
")",
"if",
"not",
"value",
":",
"return",
"False",
"ret",
".",
"append",
"(",
"value",
")",
"prop",
"=",
"\"courtesyName\"",
"value",
"=",
"json_get_first_item",
"(",
"person",
",",
"prop",
")",
"if",
"value",
"==",
"u\"不详\":",
"",
"value",
"=",
"\"\"",
"if",
"value",
":",
"ret",
".",
"append",
"(",
"u'字{}'.f",
"o",
"rmat(v",
"a",
"lue))",
"",
"",
"value",
"=",
"person",
".",
"get",
"(",
"\"alternateName\"",
")",
"if",
"value",
":",
"#ret.append(u'别名{}'.format(value))",
"# Bugged",
"pass",
"prop",
"=",
"\"artName\"",
"value",
"=",
"json_get_first_item",
"(",
"person",
",",
"prop",
")",
"if",
"value",
":",
"ret",
".",
"append",
"(",
"u'号{}'.f",
"o",
"rmat(v",
"a",
"lue))",
"",
"",
"value",
"=",
"person",
".",
"get",
"(",
"\"dynasty\"",
")",
"if",
"value",
":",
"ret",
".",
"append",
"(",
"u'{}人'.f",
"o",
"rmat(v",
"a",
"lue))",
"",
"",
"prop",
"=",
"\"ancestralHome\"",
"value",
"=",
"json_get_first_item",
"(",
"person",
",",
"prop",
")",
"if",
"value",
":",
"ret",
".",
"append",
"(",
"u'祖籍{}'.for",
"m",
"at(val",
"u",
"e))",
"",
"",
"birth_date",
"=",
"person",
".",
"get",
"(",
"\"birthDate\"",
",",
"\"\"",
")",
"birth_place",
"=",
"person",
".",
"get",
"(",
"\"birthPlace\"",
",",
"\"\"",
")",
"# Special case for unknown birth date",
"if",
"birth_date",
"==",
"u\"不详\":",
"",
"birth_date",
"=",
"\"\"",
"if",
"birth_place",
":",
"ret",
".",
"append",
"(",
"u'{}出生于{}'.forma",
"t",
"(birth",
"_",
"date, birt",
"h",
"place))",
"",
"",
"elif",
"birth_date",
":",
"ret",
".",
"append",
"(",
"u'{}出生'.for",
"m",
"at(bir",
"t",
"h_date))",
"",
"",
"prop",
"=",
"\"nationality\"",
"nationality",
"=",
"json_get_first_item",
"(",
"person",
",",
"prop",
")",
"prop",
"=",
"\"occupation\"",
"occupation",
"=",
"json_get_first_item",
"(",
"person",
",",
"prop",
")",
"if",
"occupation",
":",
"if",
"nationality",
":",
"ret",
".",
"append",
"(",
"u'{}{}'",
".",
"format",
"(",
"nationality",
",",
"occupation",
")",
")",
"else",
":",
"ret",
".",
"append",
"(",
"u'{}'",
".",
"format",
"(",
"occupation",
")",
")",
"elif",
"nationality",
":",
"ret",
".",
"append",
"(",
"u'{}人'.f",
"o",
"rmat(n",
"a",
"tionality))",
"",
"",
"prop",
"=",
"\"authorOf\"",
"value",
"=",
"json_get_list",
"(",
"person",
",",
"prop",
")",
"if",
"value",
":",
"logging",
".",
"info",
"(",
"value",
")",
"value",
"=",
"u\"、\".j",
"o",
"in(v",
"a",
"lue)",
"",
"ret",
".",
"append",
"(",
"u'主要作品:{}'.format(va",
"l",
"ue) )",
"",
"",
"",
"",
"prop",
"=",
"\"accomplishment\"",
"value",
"=",
"json_get_list",
"(",
"person",
",",
"prop",
")",
"if",
"value",
":",
"value",
"=",
"u\"、\".j",
"o",
"in(v",
"a",
"lue)",
"",
"if",
"len",
"(",
"value",
")",
"<",
"30",
":",
"# Colon is handled by text reading software",
"ret",
".",
"append",
"(",
"u\"主要成就:{}\".format(va",
"l",
"ue) )",
"",
"",
"",
"",
"ret",
"=",
"u\",\".j",
"o",
"in(r",
"e",
"t)",
"",
"# Make all commas Chinese",
"ret",
"=",
"ret",
".",
"replace",
"(",
"u','",
",",
"u',')",
"",
"ret",
"=",
"re",
".",
"sub",
"(",
"u\",+\", ",
"u",
",\", re",
"t",
" # ",
"R",
"moves repeat commas",
"# Handles periods at end",
"ret",
"=",
"re",
".",
"sub",
"(",
"ur\"[。,]+$\", u\"",
"\"",
" re",
"t",
"",
"",
"# Converts brackets to Chinese",
"ret",
"=",
"ret",
".",
"replace",
"(",
"u'('",
",",
"u'(')",
"",
"ret",
"=",
"ret",
".",
"replace",
"(",
"u')'",
",",
"u')')",
"",
"# Removes brackets and all contained info",
"ret",
"=",
"re",
".",
"sub",
"(",
"ur\"([^)]*)\", u\"\",",
" ",
"et)",
"",
"",
"",
"ret",
"=",
"u''",
".",
"join",
"(",
"[",
"ret",
",",
"u\"。\"])",
"",
"",
"return",
"ret"
] | assume person entity using cnschema person vocabulary, http://cnschema.org/Person | [
"assume",
"person",
"entity",
"using",
"cnschma",
"person",
"vocabulary",
"http",
":",
"//",
"cnschema",
".",
"org",
"/",
"Person"
] | python | train |
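For orientation, the summarizer above expects a dict shaped roughly like the hypothetical cnschema Person record below. The field names follow the vocabulary cited in the docstring, the values are invented, and the function itself is Python 2 (note the ur"" literals), so it will not run unmodified on Python 3:

# -*- coding: utf-8 -*-
# List-valued fields mirror the json_get_first_item/json_get_list accessors above.
person = {
    "name": u"李白",
    "courtesyName": [u"太白"],
    "artName": [u"青莲居士"],
    "dynasty": u"唐代",
    "occupation": [u"诗人"],
}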
timothydmorton/simpledist | simpledist/distributions.py | https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L136-L158 | def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))] | [
"def",
"pctile",
"(",
"self",
",",
"pct",
",",
"res",
"=",
"1000",
")",
":",
"grid",
"=",
"np",
".",
"linspace",
"(",
"self",
".",
"minval",
",",
"self",
".",
"maxval",
",",
"res",
")",
"return",
"grid",
"[",
"np",
".",
"argmin",
"(",
"np",
".",
"absolute",
"(",
"pct",
"-",
"self",
".",
"cdf",
"(",
"grid",
")",
")",
")",
"]"
] | Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float | [
"Returns",
"the",
"desired",
"percentile",
"of",
"the",
"distribution",
"."
] | python | train |
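The gridded-CDF lookup is easy to reproduce outside the class; a sketch against scipy's standard normal (numpy and scipy assumed installed):

import numpy as np
from scipy import stats

def pctile(cdf, minval, maxval, pct, res=1000):
    # Grid the CDF and return the grid point whose CDF value is nearest to pct.
    grid = np.linspace(minval, maxval, res)
    return grid[np.argmin(np.absolute(pct - cdf(grid)))]

# Median of a standard normal recovered from the grid: close to 0.0.
print(pctile(stats.norm.cdf, -5.0, 5.0, 0.5))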
Neurita/boyle | boyle/nifti/storage.py | https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/storage.py#L229-L252 | def all_childnodes_to_nifti1img(h5group):
"""Returns in a list all images found under h5group.
Parameters
----------
h5group: h5py.Group
HDF group
Returns
-------
list of nifti1Image
"""
child_nodes = []
def append_parent_if_dataset(name, obj):
if isinstance(obj, h5py.Dataset):
if name.split('/')[-1] == 'data':
child_nodes.append(obj.parent)
vols = []
h5group.visititems(append_parent_if_dataset)
for c in child_nodes:
vols.append(hdfgroup_to_nifti1image(c))
return vols | [
"def",
"all_childnodes_to_nifti1img",
"(",
"h5group",
")",
":",
"child_nodes",
"=",
"[",
"]",
"def",
"append_parent_if_dataset",
"(",
"name",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"h5py",
".",
"Dataset",
")",
":",
"if",
"name",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"==",
"'data'",
":",
"child_nodes",
".",
"append",
"(",
"obj",
".",
"parent",
")",
"vols",
"=",
"[",
"]",
"h5group",
".",
"visititems",
"(",
"append_parent_if_dataset",
")",
"for",
"c",
"in",
"child_nodes",
":",
"vols",
".",
"append",
"(",
"hdfgroup_to_nifti1image",
"(",
"c",
")",
")",
"return",
"vols"
] | Returns a list of all images found under h5group.
Parameters
----------
h5group: h5py.Group
HDF group
Returns
-------
list of nifti1Image | [
"Returns",
"in",
"a",
"list",
"all",
"images",
"found",
"under",
"h5group",
"."
] | python | valid |
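The visititems traversal on its own, collecting the parent groups of datasets named 'data'. The HDF5 file is created on the fly so the sketch is self-contained (h5py and numpy assumed installed; the /tmp path is arbitrary):

import h5py
import numpy as np

path = '/tmp/example.h5'
with h5py.File(path, 'w') as f:
    f.create_dataset('img1/data', data=np.zeros((2, 2)))
    f.create_dataset('img2/data', data=np.ones((2, 2)))

parents = []
def collect(name, obj):
    # visititems passes the relative path and the object for every node it visits.
    if isinstance(obj, h5py.Dataset) and name.split('/')[-1] == 'data':
        parents.append(obj.parent.name)

with h5py.File(path, 'r') as f:
    f.visititems(collect)
print(parents)  # ['/img1', '/img2']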
saltstack/salt | salt/modules/kapacitor.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kapacitor.py#L66-L106 | def get_task(name):
'''
Get a dict of data on a task.
name
Name of the task to get information about.
CLI Example:
.. code-block:: bash
salt '*' kapacitor.get_task cpu
'''
url = _get_url()
if version() < '0.13':
task_url = '{0}/task?name={1}'.format(url, name)
else:
task_url = '{0}/kapacitor/v1/tasks/{1}?skip-format=true'.format(url, name)
response = salt.utils.http.query(task_url, status=True)
if response['status'] == 404:
return None
data = salt.utils.json.loads(response['body'])
if version() < '0.13':
return {
'script': data['TICKscript'],
'type': data['Type'],
'dbrps': data['DBRPs'],
'enabled': data['Enabled'],
}
return {
'script': data['script'],
'type': data['type'],
'dbrps': data['dbrps'],
'enabled': data['status'] == 'enabled',
} | [
"def",
"get_task",
"(",
"name",
")",
":",
"url",
"=",
"_get_url",
"(",
")",
"if",
"version",
"(",
")",
"<",
"'0.13'",
":",
"task_url",
"=",
"'{0}/task?name={1}'",
".",
"format",
"(",
"url",
",",
"name",
")",
"else",
":",
"task_url",
"=",
"'{0}/kapacitor/v1/tasks/{1}?skip-format=true'",
".",
"format",
"(",
"url",
",",
"name",
")",
"response",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"task_url",
",",
"status",
"=",
"True",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"404",
":",
"return",
"None",
"data",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"response",
"[",
"'body'",
"]",
")",
"if",
"version",
"(",
")",
"<",
"'0.13'",
":",
"return",
"{",
"'script'",
":",
"data",
"[",
"'TICKscript'",
"]",
",",
"'type'",
":",
"data",
"[",
"'Type'",
"]",
",",
"'dbrps'",
":",
"data",
"[",
"'DBRPs'",
"]",
",",
"'enabled'",
":",
"data",
"[",
"'Enabled'",
"]",
",",
"}",
"return",
"{",
"'script'",
":",
"data",
"[",
"'script'",
"]",
",",
"'type'",
":",
"data",
"[",
"'type'",
"]",
",",
"'dbrps'",
":",
"data",
"[",
"'dbrps'",
"]",
",",
"'enabled'",
":",
"data",
"[",
"'status'",
"]",
"==",
"'enabled'",
",",
"}"
] | Get a dict of data on a task.
name
Name of the task to get information about.
CLI Example:
.. code-block:: bash
salt '*' kapacitor.get_task cpu | [
"Get",
"a",
"dict",
"of",
"data",
"on",
"a",
"task",
"."
] | python | train |
knipknap/exscript | Exscript/stdlib/connection.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/stdlib/connection.py#L124-L133 | def close(scope):
"""
Closes the existing connection with the remote host. This function is
rarely used, as normally Exscript closes the connection automatically
when the script has completed.
"""
conn = scope.get('__connection__')
conn.close(1)
scope.define(__response__=conn.response)
return True | [
"def",
"close",
"(",
"scope",
")",
":",
"conn",
"=",
"scope",
".",
"get",
"(",
"'__connection__'",
")",
"conn",
".",
"close",
"(",
"1",
")",
"scope",
".",
"define",
"(",
"__response__",
"=",
"conn",
".",
"response",
")",
"return",
"True"
] | Closes the existing connection with the remote host. This function is
rarely used, as normally Exscript closes the connection automatically
when the script has completed. | [
"Closes",
"the",
"existing",
"connection",
"with",
"the",
"remote",
"host",
".",
"This",
"function",
"is",
"rarely",
"used",
"as",
"normally",
"Exscript",
"closes",
"the",
"connection",
"automatically",
"when",
"the",
"script",
"has",
"completed",
"."
] | python | train |
yatiml/yatiml | yatiml/loader.py | https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/loader.py#L81-L104 | def __savorize(self, node: yaml.Node, expected_type: Type) -> yaml.Node:
"""Removes syntactic sugar from the node.
This calls yatiml_savorize(), first on the class's base \
classes, then on the class itself.
Args:
node: The node to modify.
expected_type: The type to assume this type is.
"""
logger.debug('Savorizing node assuming type {}'.format(
expected_type.__name__))
for base_class in expected_type.__bases__:
if base_class in self._registered_classes.values():
node = self.__savorize(node, base_class)
if hasattr(expected_type, 'yatiml_savorize'):
logger.debug('Calling {}.yatiml_savorize()'.format(
expected_type.__name__))
cnode = Node(node)
expected_type.yatiml_savorize(cnode)
node = cnode.yaml_node
return node | [
"def",
"__savorize",
"(",
"self",
",",
"node",
":",
"yaml",
".",
"Node",
",",
"expected_type",
":",
"Type",
")",
"->",
"yaml",
".",
"Node",
":",
"logger",
".",
"debug",
"(",
"'Savorizing node assuming type {}'",
".",
"format",
"(",
"expected_type",
".",
"__name__",
")",
")",
"for",
"base_class",
"in",
"expected_type",
".",
"__bases__",
":",
"if",
"base_class",
"in",
"self",
".",
"_registered_classes",
".",
"values",
"(",
")",
":",
"node",
"=",
"self",
".",
"__savorize",
"(",
"node",
",",
"base_class",
")",
"if",
"hasattr",
"(",
"expected_type",
",",
"'yatiml_savorize'",
")",
":",
"logger",
".",
"debug",
"(",
"'Calling {}.yatiml_savorize()'",
".",
"format",
"(",
"expected_type",
".",
"__name__",
")",
")",
"cnode",
"=",
"Node",
"(",
"node",
")",
"expected_type",
".",
"yatiml_savorize",
"(",
"cnode",
")",
"node",
"=",
"cnode",
".",
"yaml_node",
"return",
"node"
] | Removes syntactic sugar from the node.
This calls yatiml_savorize(), first on the class's base \
classes, then on the class itself.
Args:
node: The node to modify.
expected_type: The type to assume this type is. | [
"Removes",
"syntactic",
"sugar",
"from",
"the",
"node",
"."
] | python | train |
cbrand/vpnchooser | src/vpnchooser/connection/client.py | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L82-L99 | def _parse(data: str) -> list:
"""
Parses the given data string and returns
a list of rule objects.
"""
if isinstance(data, bytes):
data = data.decode('utf-8')
lines = (
item for item in
(item.strip() for item in data.split('\n'))
if len(item) and not item.startswith('#')
)
rules = []
for line in lines:
rules.append(
Rule.parse(line)
)
return rules | [
"def",
"_parse",
"(",
"data",
":",
"str",
")",
"->",
"list",
":",
"if",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"lines",
"=",
"(",
"item",
"for",
"item",
"in",
"(",
"item",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"data",
".",
"split",
"(",
"'\\n'",
")",
")",
"if",
"len",
"(",
"item",
")",
"and",
"not",
"item",
".",
"startswith",
"(",
"'#'",
")",
")",
"rules",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"rules",
".",
"append",
"(",
"Rule",
".",
"parse",
"(",
"line",
")",
")",
"return",
"rules"
] | Parses the given data string and returns
a list of rule objects. | [
"Parses",
"the",
"given",
"data",
"string",
"and",
"returns",
"a",
"list",
"of",
"rule",
"objects",
"."
] | python | train |
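The decode-strip-filter pipeline is reusable on its own; a sketch with plain strings standing in for vpnchooser's Rule objects:

def parse_lines(data):
    # Accept bytes or str; drop blank lines and '#' comments.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    stripped = (line.strip() for line in data.split('\n'))
    return [line for line in stripped if line and not line.startswith('#')]

print(parse_lines(b"# header\nrule-one\n\n  rule-two\n"))
# ['rule-one', 'rule-two']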
ajdavis/GreenletProfiler | _vendorized_yappi/yappi.py | https://github.com/ajdavis/GreenletProfiler/blob/700349864a4f368a8a73a2a60f048c2e818d7cea/_vendorized_yappi/yappi.py#L342-L369 | def print_all(self, out=sys.stdout):
"""
Prints all of the child function profiler results to a given file. (stdout by default)
"""
if self.empty():
return
FUNC_NAME_LEN = 38
CALLCOUNT_LEN = 9
out.write(CRLF)
out.write("name #n tsub ttot tavg")
out.write(CRLF)
for stat in self:
out.write(StatString(stat.full_name).ltrim(FUNC_NAME_LEN))
out.write(" " * COLUMN_GAP)
# the function is recursive?
if stat.is_recursive():
out.write(StatString("%d/%d" % (stat.ncall, stat.nactualcall)).rtrim(CALLCOUNT_LEN))
else:
out.write(StatString(stat.ncall).rtrim(CALLCOUNT_LEN))
out.write(" " * COLUMN_GAP)
out.write(StatString(_fft(stat.tsub)).rtrim(TIME_COLUMN_LEN))
out.write(" " * COLUMN_GAP)
out.write(StatString(_fft(stat.ttot)).rtrim(TIME_COLUMN_LEN))
out.write(" " * COLUMN_GAP)
out.write(StatString(_fft(stat.tavg)).rtrim(TIME_COLUMN_LEN))
out.write(CRLF) | [
"def",
"print_all",
"(",
"self",
",",
"out",
"=",
"sys",
".",
"stdout",
")",
":",
"if",
"self",
".",
"empty",
"(",
")",
":",
"return",
"FUNC_NAME_LEN",
"=",
"38",
"CALLCOUNT_LEN",
"=",
"9",
"out",
".",
"write",
"(",
"CRLF",
")",
"out",
".",
"write",
"(",
"\"name #n tsub ttot tavg\"",
")",
"out",
".",
"write",
"(",
"CRLF",
")",
"for",
"stat",
"in",
"self",
":",
"out",
".",
"write",
"(",
"StatString",
"(",
"stat",
".",
"full_name",
")",
".",
"ltrim",
"(",
"FUNC_NAME_LEN",
")",
")",
"out",
".",
"write",
"(",
"\" \"",
"*",
"COLUMN_GAP",
")",
"# the function is recursive?",
"if",
"stat",
".",
"is_recursive",
"(",
")",
":",
"out",
".",
"write",
"(",
"StatString",
"(",
"\"%d/%d\"",
"%",
"(",
"stat",
".",
"ncall",
",",
"stat",
".",
"nactualcall",
")",
")",
".",
"rtrim",
"(",
"CALLCOUNT_LEN",
")",
")",
"else",
":",
"out",
".",
"write",
"(",
"StatString",
"(",
"stat",
".",
"ncall",
")",
".",
"rtrim",
"(",
"CALLCOUNT_LEN",
")",
")",
"out",
".",
"write",
"(",
"\" \"",
"*",
"COLUMN_GAP",
")",
"out",
".",
"write",
"(",
"StatString",
"(",
"_fft",
"(",
"stat",
".",
"tsub",
")",
")",
".",
"rtrim",
"(",
"TIME_COLUMN_LEN",
")",
")",
"out",
".",
"write",
"(",
"\" \"",
"*",
"COLUMN_GAP",
")",
"out",
".",
"write",
"(",
"StatString",
"(",
"_fft",
"(",
"stat",
".",
"ttot",
")",
")",
".",
"rtrim",
"(",
"TIME_COLUMN_LEN",
")",
")",
"out",
".",
"write",
"(",
"\" \"",
"*",
"COLUMN_GAP",
")",
"out",
".",
"write",
"(",
"StatString",
"(",
"_fft",
"(",
"stat",
".",
"tavg",
")",
")",
".",
"rtrim",
"(",
"TIME_COLUMN_LEN",
")",
")",
"out",
".",
"write",
"(",
"CRLF",
")"
] | Prints all of the child function profiler results to a given file. (stdout by default) | [
"Prints",
"all",
"of",
"the",
"child",
"function",
"profiler",
"results",
"to",
"a",
"given",
"file",
".",
"(",
"stdout",
"by",
"default",
")"
] | python | train |
pudo/banal | banal/cache.py | https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/cache.py#L11-L40 | def bytes_iter(obj):
"""Turn a complex object into an iterator of byte strings.
The resulting iterator can be used for caching.
"""
if obj is None:
return
elif isinstance(obj, six.binary_type):
yield obj
elif isinstance(obj, six.string_types):
yield obj
elif isinstance(obj, (date, datetime)):
yield obj.isoformat()
elif is_mapping(obj):
for key in sorted(obj.keys()):
for out in chain(bytes_iter(key), bytes_iter(obj[key])):
yield out
elif is_sequence(obj):
if isinstance(obj, (list, set)):
try:
obj = sorted(obj)
except Exception:
pass
for item in obj:
for out in bytes_iter(item):
yield out
elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)):
yield getattr(obj, 'func_name', '')
else:
yield six.text_type(obj) | [
"def",
"bytes_iter",
"(",
"obj",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"elif",
"isinstance",
"(",
"obj",
",",
"six",
".",
"binary_type",
")",
":",
"yield",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
":",
"yield",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"date",
",",
"datetime",
")",
")",
":",
"yield",
"obj",
".",
"isoformat",
"(",
")",
"elif",
"is_mapping",
"(",
"obj",
")",
":",
"for",
"key",
"in",
"sorted",
"(",
"obj",
".",
"keys",
"(",
")",
")",
":",
"for",
"out",
"in",
"chain",
"(",
"bytes_iter",
"(",
"key",
")",
",",
"bytes_iter",
"(",
"obj",
"[",
"key",
"]",
")",
")",
":",
"yield",
"out",
"elif",
"is_sequence",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"set",
")",
")",
":",
"try",
":",
"obj",
"=",
"sorted",
"(",
"obj",
")",
"except",
"Exception",
":",
"pass",
"for",
"item",
"in",
"obj",
":",
"for",
"out",
"in",
"bytes_iter",
"(",
"item",
")",
":",
"yield",
"out",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"types",
".",
"FunctionType",
",",
"types",
".",
"BuiltinFunctionType",
",",
"types",
".",
"MethodType",
",",
"types",
".",
"BuiltinMethodType",
")",
")",
":",
"yield",
"getattr",
"(",
"obj",
",",
"'func_name'",
",",
"''",
")",
"else",
":",
"yield",
"six",
".",
"text_type",
"(",
"obj",
")"
] | Turn a complex object into an iterator of byte strings.
The resulting iterator can be used for caching. | [
"Turn",
"a",
"complex",
"object",
"into",
"an",
"iterator",
"of",
"byte",
"strings",
".",
"The",
"resulting",
"iterator",
"can",
"be",
"used",
"for",
"caching",
"."
] | python | train |
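The usual consumer of such an iterator is a hash-based cache key. A Python 3 sketch with a simplified iterator (the record above also targets Python 2 via six, which this version drops, and the set-ordering fallback is replaced by a repr sort):

import hashlib
from datetime import date

def bytes_iter(obj):
    # Deterministically flatten nested structures into hashable chunks.
    if obj is None:
        return
    elif isinstance(obj, (bytes, str)):
        yield obj
    elif isinstance(obj, date):
        yield obj.isoformat()
    elif isinstance(obj, dict):
        for key in sorted(obj):
            yield from bytes_iter(key)
            yield from bytes_iter(obj[key])
    elif isinstance(obj, (list, tuple, set)):
        for item in sorted(obj, key=repr):
            yield from bytes_iter(item)
    else:
        yield str(obj)

def cache_key(obj):
    digest = hashlib.sha1()
    for chunk in bytes_iter(obj):
        digest.update(chunk if isinstance(chunk, bytes) else chunk.encode('utf-8'))
    return digest.hexdigest()

# Equal structures hash equally regardless of key order.
print(cache_key({'a': 1, 'b': [2, 3]}) == cache_key({'b': [2, 3], 'a': 1}))  # True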
TAPPGuild/sqlalchemy-models | sqlalchemy_models/__init__.py | https://github.com/TAPPGuild/sqlalchemy-models/blob/75988a23bdd98e79af8b8b0711c657c79b2f8eac/sqlalchemy_models/__init__.py#L65-L83 | def generate_signature_class(cls):
"""
Generate a declarative model for storing signatures related to the given
cls parameter.
:param class cls: The declarative model to generate a signature class for.
:return: The signature class, as a declarative derived from Base.
"""
return type("%sSigs" % cls.__name__, (Base,),
{'__tablename__': "%s_sigs" % cls.__tablename__,
'id': sa.Column(sa.Integer,
sa.Sequence('%s_id_seq' % cls.__tablename__),
primary_key=True,
doc="primary key"),
'data': sa.Column(sa.Text(), nullable=False,
doc="The signed data"),
'%s_id' % cls.__tablename__: sa.Column(sa.Integer,
sa.ForeignKey("%s.id" % cls.__tablename__),
nullable=False)}) | [
"def",
"generate_signature_class",
"(",
"cls",
")",
":",
"return",
"type",
"(",
"\"%sSigs\"",
"%",
"cls",
".",
"__name__",
",",
"(",
"Base",
",",
")",
",",
"{",
"'__tablename__'",
":",
"\"%s_sigs\"",
"%",
"cls",
".",
"__tablename__",
",",
"'id'",
":",
"sa",
".",
"Column",
"(",
"sa",
".",
"Integer",
",",
"sa",
".",
"Sequence",
"(",
"'%s_id_seq'",
"%",
"cls",
".",
"__tablename__",
")",
",",
"primary_key",
"=",
"True",
",",
"doc",
"=",
"\"primary key\"",
")",
",",
"'data'",
":",
"sa",
".",
"Column",
"(",
"sa",
".",
"Text",
"(",
")",
",",
"nullable",
"=",
"False",
",",
"doc",
"=",
"\"The signed data\"",
")",
",",
"'%s_id'",
"%",
"cls",
".",
"__tablename__",
":",
"sa",
".",
"Column",
"(",
"sa",
".",
"Integer",
",",
"sa",
".",
"ForeignKey",
"(",
"\"%s.id\"",
"%",
"cls",
".",
"__tablename__",
")",
",",
"nullable",
"=",
"False",
")",
"}",
")"
] | Generate a declarative model for storing signatures related to the given
cls parameter.
:param class cls: The declarative model to generate a signature class for.
:return: The signature class, as a declarative derived from Base. | [
"Generate",
"a",
"declarative",
"model",
"for",
"storing",
"signatures",
"related",
"to",
"the",
"given",
"cls",
"parameter",
"."
] | python | train |
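Building declarative models with type() works the same in vanilla SQLAlchemy; a self-contained sketch (the Document model is invented, and the declarative_base import path follows SQLAlchemy 1.4+):

import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Document(Base):
    __tablename__ = 'document'
    id = sa.Column(sa.Integer, primary_key=True)

def generate_signature_class(cls):
    # Build a companion *_sigs model pointing back at cls via a foreign key.
    return type('%sSigs' % cls.__name__, (Base,), {
        '__tablename__': '%s_sigs' % cls.__tablename__,
        'id': sa.Column(sa.Integer, primary_key=True),
        'data': sa.Column(sa.Text(), nullable=False),
        '%s_id' % cls.__tablename__: sa.Column(
            sa.Integer, sa.ForeignKey('%s.id' % cls.__tablename__), nullable=False),
    })

DocumentSigs = generate_signature_class(Document)
print(DocumentSigs.__tablename__)  # document_sigs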
secure-systems-lab/securesystemslib | securesystemslib/ecdsa_keys.py | https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/ecdsa_keys.py#L67-L155 | def generate_public_and_private(scheme='ecdsa-sha2-nistp256'):
"""
<Purpose>
Generate a pair of ECDSA public and private keys with one of the supported,
external cryptography libraries. The public and private keys returned
conform to 'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
The ECDSA public key has the PEM format:
TODO: should we encrypt the private keys returned here? Should the
create_signature() accept encrypted keys?
'-----BEGIN PUBLIC KEY-----
...
'-----END PUBLIC KEY-----'
The ECDSA private key has the PEM format:
'-----BEGIN EC PRIVATE KEY-----
...
-----END EC PRIVATE KEY-----'
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
scheme:
A string indicating which algorithm to use for the generation of the
public and private ECDSA keys. 'ecdsa-sha2-nistp256' is the only
currently supported ECDSA algorithm, which is supported by OpenSSH and
specified in RFC 5656 (https://tools.ietf.org/html/rfc5656).
<Exceptions>
securesystemslib.exceptions.FormatError, if 'scheme' is improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is an
unsupported algorithm.
<Side Effects>
None.
<Returns>
A (public, private) tuple that conform to
'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
"""
# Does 'scheme' have the correct format?
# Verify that 'scheme' is of the correct type, and that it's one of the
# supported ECDSA . It must conform to
# 'securesystemslib.formats.ECDSA_SCHEME_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
public_key = None
private_key = None
# An if-clause is strictly not needed, since 'ecdsa_sha2-nistp256' is the
# only currently supported ECDSA signature scheme. Nevertheless, include the
# conditional statement to accommodate any schemes that might be added.
if scheme == 'ecdsa-sha2-nistp256':
private_key = ec.generate_private_key(ec.SECP256R1, default_backend())
public_key = private_key.public_key()
# The ECDSA_SCHEME_SCHEMA.check_match() above should have detected any
# invalid 'scheme'. This is a defensive check.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('An unsupported'
' scheme specified: ' + repr(scheme) + '.\n Supported'
' algorithms: ' + repr(_SUPPORTED_ECDSA_SCHEMES))
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
return public_pem.decode('utf-8'), private_pem.decode('utf-8') | [
"def",
"generate_public_and_private",
"(",
"scheme",
"=",
"'ecdsa-sha2-nistp256'",
")",
":",
"# Does 'scheme' have the correct format?",
"# Verify that 'scheme' is of the correct type, and that it's one of the",
"# supported ECDSA . It must conform to",
"# 'securesystemslib.formats.ECDSA_SCHEME_SCHEMA'. Raise",
"# 'securesystemslib.exceptions.FormatError' if the check fails.",
"securesystemslib",
".",
"formats",
".",
"ECDSA_SCHEME_SCHEMA",
".",
"check_match",
"(",
"scheme",
")",
"public_key",
"=",
"None",
"private_key",
"=",
"None",
"# An if-clause is strictly not needed, since 'ecdsa_sha2-nistp256' is the",
"# only currently supported ECDSA signature scheme. Nevertheness, include the",
"# conditional statement to accomodate any schemes that might be added.",
"if",
"scheme",
"==",
"'ecdsa-sha2-nistp256'",
":",
"private_key",
"=",
"ec",
".",
"generate_private_key",
"(",
"ec",
".",
"SECP256R1",
",",
"default_backend",
"(",
")",
")",
"public_key",
"=",
"private_key",
".",
"public_key",
"(",
")",
"# The ECDSA_SCHEME_SCHEMA.check_match() above should have detected any",
"# invalid 'scheme'. This is a defensive check.",
"else",
":",
"#pragma: no cover",
"raise",
"securesystemslib",
".",
"exceptions",
".",
"UnsupportedAlgorithmError",
"(",
"'An unsupported'",
"' scheme specified: '",
"+",
"repr",
"(",
"scheme",
")",
"+",
"'.\\n Supported'",
"' algorithms: '",
"+",
"repr",
"(",
"_SUPPORTED_ECDSA_SCHEMES",
")",
")",
"private_pem",
"=",
"private_key",
".",
"private_bytes",
"(",
"encoding",
"=",
"serialization",
".",
"Encoding",
".",
"PEM",
",",
"format",
"=",
"serialization",
".",
"PrivateFormat",
".",
"TraditionalOpenSSL",
",",
"encryption_algorithm",
"=",
"serialization",
".",
"NoEncryption",
"(",
")",
")",
"public_pem",
"=",
"public_key",
".",
"public_bytes",
"(",
"encoding",
"=",
"serialization",
".",
"Encoding",
".",
"PEM",
",",
"format",
"=",
"serialization",
".",
"PublicFormat",
".",
"SubjectPublicKeyInfo",
")",
"return",
"public_pem",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"private_pem",
".",
"decode",
"(",
"'utf-8'",
")"
] | <Purpose>
Generate a pair of ECDSA public and private keys with one of the supported,
external cryptography libraries. The public and private keys returned
conform to 'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
The ECDSA public key has the PEM format:
TODO: should we encrypt the private keys returned here? Should the
create_signature() accept encrypted keys?
'-----BEGIN PUBLIC KEY-----
...
'-----END PUBLIC KEY-----'
The ECDSA private key has the PEM format:
'-----BEGIN EC PRIVATE KEY-----
...
-----END EC PRIVATE KEY-----'
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
scheme:
A string indicating which algorithm to use for the generation of the
public and private ECDSA keys. 'ecdsa-sha2-nistp256' is the only
currently supported ECDSA algorithm, which is supported by OpenSSH and
specified in RFC 5656 (https://tools.ietf.org/html/rfc5656).
<Exceptions>
securesystemslib.exceptions.FormatError, if 'scheme' is improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is an
unsupported algorithm.
<Side Effects>
None.
<Returns>
A (public, private) tuple that conform to
'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively. | [
"<Purpose",
">",
"Generate",
"a",
"pair",
"of",
"ECDSA",
"public",
"and",
"private",
"keys",
"with",
"one",
"of",
"the",
"supported",
"external",
"cryptography",
"libraries",
".",
"The",
"public",
"and",
"private",
"keys",
"returned",
"conform",
"to",
"securesystemslib",
".",
"formats",
".",
"PEMECDSA_SCHEMA",
"and",
"securesystemslib",
".",
"formats",
".",
"PEMECDSA_SCHEMA",
"respectively",
"."
] | python | train |
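The key-generation core is pyca/cryptography's EC interface; a trimmed sketch of the same PEM round-trip (newer cryptography releases take a curve instance and no backend argument, unlike the older call in the record):

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

private_key = ec.generate_private_key(ec.SECP256R1())
public_pem = private_key.public_key().public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo)
private_pem = private_key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption())
print(public_pem.decode().splitlines()[0])  # -----BEGIN PUBLIC KEY-----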
wuher/devil | devil/fields/representation.py | https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/fields/representation.py#L76-L92 | def get_declared_fields(bases, attrs):
""" Find all fields and return them as a dictionary.
note:: this function is copied and modified
from django.forms.get_declared_fields
"""
def is_field(prop):
return isinstance(prop, forms.Field) or \
isinstance(prop, BaseRepresentation)
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if is_field(obj)]
# add fields from base classes:
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = base.base_fields.items() + fields
return dict(fields) | [
"def",
"get_declared_fields",
"(",
"bases",
",",
"attrs",
")",
":",
"def",
"is_field",
"(",
"prop",
")",
":",
"return",
"isinstance",
"(",
"prop",
",",
"forms",
".",
"Field",
")",
"or",
"isinstance",
"(",
"prop",
",",
"BaseRepresentation",
")",
"fields",
"=",
"[",
"(",
"field_name",
",",
"attrs",
".",
"pop",
"(",
"field_name",
")",
")",
"for",
"field_name",
",",
"obj",
"in",
"attrs",
".",
"items",
"(",
")",
"if",
"is_field",
"(",
"obj",
")",
"]",
"# add fields from base classes:",
"for",
"base",
"in",
"bases",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"hasattr",
"(",
"base",
",",
"'base_fields'",
")",
":",
"fields",
"=",
"base",
".",
"base_fields",
".",
"items",
"(",
")",
"+",
"fields",
"return",
"dict",
"(",
"fields",
")"
] | Find all fields and return them as a dictionary.
note:: this function is copied and modified
from django.forms.get_declared_fields | [
"Find",
"all",
"fields",
"and",
"return",
"them",
"as",
"a",
"dictionary",
"."
] | python | train |
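The collector is normally driven by a metaclass; a framework-free Python 3 sketch with a stand-in Field class (the record's items() + fields concatenation is Python 2, so lists are materialized explicitly here):

class Field(object):
    pass

def get_declared_fields(bases, attrs):
    # Pop Field instances out of the class namespace, oldest base first.
    fields = [(name, attrs.pop(name)) for name, obj in list(attrs.items())
              if isinstance(obj, Field)]
    for base in bases[::-1]:
        if hasattr(base, 'base_fields'):
            fields = list(base.base_fields.items()) + fields
    return dict(fields)

class DeclarativeMeta(type):
    def __new__(mcs, name, bases, attrs):
        attrs['base_fields'] = get_declared_fields(bases, attrs)
        return super().__new__(mcs, name, bases, attrs)

class Rep(metaclass=DeclarativeMeta):
    title = Field()

class SubRep(Rep):
    body = Field()

print(sorted(SubRep.base_fields))  # ['body', 'title']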
DLR-RM/RAFCON | source/rafcon/gui/controllers/graphical_editor_gaphas.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/graphical_editor_gaphas.py#L322-L370 | def meta_changed_notify_after(self, state_machine_m, _, info):
"""Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value
"""
meta_signal_message = info['arg']
if meta_signal_message.origin == "graphical_editor_gaphas": # Ignore changes caused by ourself
return
if meta_signal_message.origin == "load_meta_data": # Meta data can't be applied, as the view has not yet
return # been created
notification = meta_signal_message.notification
if not notification: # For changes applied to the root state, there are always two notifications
return # Ignore the one with less information
if self.model.ongoing_complex_actions:
return
model = notification.model
view = self.canvas.get_view_for_model(model)
if meta_signal_message.change == 'show_content':
library_state_m = model
library_state_v = view
if library_state_m.meta['gui']['show_content'] is not library_state_m.show_content():
logger.warning("The content of the LibraryState won't be shown, because "
"MAX_VISIBLE_LIBRARY_HIERARCHY is 1.")
if library_state_m.show_content():
if not library_state_m.state_copy_initialized:
logger.warning("Show library content without initialized state copy does not work {0}"
"".format(library_state_m))
logger.debug("Show content of {}".format(library_state_m.state))
gui_helper_meta_data.scale_library_content(library_state_m)
self.add_state_view_for_model(library_state_m.state_copy, view,
hierarchy_level=library_state_v.hierarchy_level + 1)
else:
logger.debug("Hide content of {}".format(library_state_m.state))
state_copy_v = self.canvas.get_view_for_model(library_state_m.state_copy)
if state_copy_v:
state_copy_v.remove()
else:
if isinstance(view, StateView):
view.apply_meta_data(recursive=meta_signal_message.affects_children)
else:
view.apply_meta_data()
self.canvas.request_update(view, matrix=True)
self.canvas.wait_for_update() | [
"def",
"meta_changed_notify_after",
"(",
"self",
",",
"state_machine_m",
",",
"_",
",",
"info",
")",
":",
"meta_signal_message",
"=",
"info",
"[",
"'arg'",
"]",
"if",
"meta_signal_message",
".",
"origin",
"==",
"\"graphical_editor_gaphas\"",
":",
"# Ignore changes caused by ourself",
"return",
"if",
"meta_signal_message",
".",
"origin",
"==",
"\"load_meta_data\"",
":",
"# Meta data can't be applied, as the view has not yet",
"return",
"# been created",
"notification",
"=",
"meta_signal_message",
".",
"notification",
"if",
"not",
"notification",
":",
"# For changes applied to the root state, there are always two notifications",
"return",
"# Ignore the one with less information",
"if",
"self",
".",
"model",
".",
"ongoing_complex_actions",
":",
"return",
"model",
"=",
"notification",
".",
"model",
"view",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"model",
")",
"if",
"meta_signal_message",
".",
"change",
"==",
"'show_content'",
":",
"library_state_m",
"=",
"model",
"library_state_v",
"=",
"view",
"if",
"library_state_m",
".",
"meta",
"[",
"'gui'",
"]",
"[",
"'show_content'",
"]",
"is",
"not",
"library_state_m",
".",
"show_content",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"The content of the LibraryState won't be shown, because \"",
"\"MAX_VISIBLE_LIBRARY_HIERARCHY is 1.\"",
")",
"if",
"library_state_m",
".",
"show_content",
"(",
")",
":",
"if",
"not",
"library_state_m",
".",
"state_copy_initialized",
":",
"logger",
".",
"warning",
"(",
"\"Show library content without initialized state copy does not work {0}\"",
"\"\"",
".",
"format",
"(",
"library_state_m",
")",
")",
"logger",
".",
"debug",
"(",
"\"Show content of {}\"",
".",
"format",
"(",
"library_state_m",
".",
"state",
")",
")",
"gui_helper_meta_data",
".",
"scale_library_content",
"(",
"library_state_m",
")",
"self",
".",
"add_state_view_for_model",
"(",
"library_state_m",
".",
"state_copy",
",",
"view",
",",
"hierarchy_level",
"=",
"library_state_v",
".",
"hierarchy_level",
"+",
"1",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Hide content of {}\"",
".",
"format",
"(",
"library_state_m",
".",
"state",
")",
")",
"state_copy_v",
"=",
"self",
".",
"canvas",
".",
"get_view_for_model",
"(",
"library_state_m",
".",
"state_copy",
")",
"if",
"state_copy_v",
":",
"state_copy_v",
".",
"remove",
"(",
")",
"else",
":",
"if",
"isinstance",
"(",
"view",
",",
"StateView",
")",
":",
"view",
".",
"apply_meta_data",
"(",
"recursive",
"=",
"meta_signal_message",
".",
"affects_children",
")",
"else",
":",
"view",
".",
"apply_meta_data",
"(",
")",
"self",
".",
"canvas",
".",
"request_update",
"(",
"view",
",",
"matrix",
"=",
"True",
")",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")"
] | Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value | [
"Handle",
"notification",
"about",
"the",
"change",
"of",
"a",
"state",
"s",
"meta",
"data"
] | python | train |
ssato/python-anyconfig | src/anyconfig/template.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/template.py#L65-L89 | def make_template_paths(template_file, paths=None):
"""
Make up a list of template search paths from given 'template_file'
(absolute or relative path to the template file) and/or 'paths' (a list of
template search paths given by user).
NOTE: User-given 'paths' will take higher priority over a dir of
template_file.
:param template_file: Absolute or relative path to the template file
:param paths: A list of template search paths
:return: List of template paths ([str])
>>> make_template_paths("/path/to/a/template")
['/path/to/a']
>>> make_template_paths("/path/to/a/template", ["/tmp"])
['/tmp', '/path/to/a']
>>> os.chdir("/tmp")
>>> make_template_paths("./path/to/a/template")
['/tmp/path/to/a']
>>> make_template_paths("./path/to/a/template", ["/tmp"])
['/tmp', '/tmp/path/to/a']
"""
tmpldir = os.path.abspath(os.path.dirname(template_file))
return [tmpldir] if paths is None else paths + [tmpldir] | [
"def",
"make_template_paths",
"(",
"template_file",
",",
"paths",
"=",
"None",
")",
":",
"tmpldir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"template_file",
")",
")",
"return",
"[",
"tmpldir",
"]",
"if",
"paths",
"is",
"None",
"else",
"paths",
"+",
"[",
"tmpldir",
"]"
] | Make up a list of template search paths from given 'template_file'
(absolute or relative path to the template file) and/or 'paths' (a list of
template search paths given by user).
NOTE: User-given 'paths' will take higher priority over a dir of
template_file.
:param template_file: Absolute or relative path to the template file
:param paths: A list of template search paths
:return: List of template paths ([str])
>>> make_template_paths("/path/to/a/template")
['/path/to/a']
>>> make_template_paths("/path/to/a/template", ["/tmp"])
['/tmp', '/path/to/a']
>>> os.chdir("/tmp")
>>> make_template_paths("./path/to/a/template")
['/tmp/path/to/a']
>>> make_template_paths("./path/to/a/template", ["/tmp"])
['/tmp', '/tmp/path/to/a'] | [
"Make",
"up",
"a",
"list",
"of",
"template",
"search",
"paths",
"from",
"given",
"template_file",
"(",
"absolute",
"or",
"relative",
"path",
"to",
"the",
"template",
"file",
")",
"and",
"/",
"or",
"paths",
"(",
"a",
"list",
"of",
"template",
"search",
"paths",
"given",
"by",
"user",
")",
"."
] | python | train |
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/autoscaling_v2beta1_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/autoscaling_v2beta1_api.py#L623-L652 | def list_namespaced_horizontal_pod_autoscaler(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_horizontal_pod_autoscaler # noqa: E501
list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_horizontal_pod_autoscaler(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2beta1HorizontalPodAutoscalerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.list_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, **kwargs) # noqa: E501
return data | [
"def",
"list_namespaced_horizontal_pod_autoscaler",
"(",
"self",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"list_namespaced_horizontal_pod_autoscaler_with_http_info",
"(",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_namespaced_horizontal_pod_autoscaler_with_http_info",
"(",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | list_namespaced_horizontal_pod_autoscaler # noqa: E501
list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_horizontal_pod_autoscaler(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2beta1HorizontalPodAutoscalerList
If the method is called asynchronously,
returns the request thread. | [
"list_namespaced_horizontal_pod_autoscaler",
"#",
"noqa",
":",
"E501"
] | python | train |
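A minimal usage sketch for the record above, assuming a configured `kubernetes` Python client of a version that still ships the autoscaling/v2beta1 API; the namespace name and kubeconfig loading are illustrative, not part of the record:

```python
# Hypothetical driver for list_namespaced_horizontal_pod_autoscaler.
# Assumes a reachable cluster and a client version exposing v2beta1.
from kubernetes import client, config

config.load_kube_config()                 # reads the default kubeconfig
api = client.AutoscalingV2beta1Api()

# Synchronous call: returns a V2beta1HorizontalPodAutoscalerList directly.
hpa_list = api.list_namespaced_horizontal_pod_autoscaler('default')
for hpa in hpa_list.items:
    print(hpa.metadata.name)

# Asynchronous call, as in the record's docstring: returns a thread-like
# handle whose .get() blocks until the result is available.
thread = api.list_namespaced_horizontal_pod_autoscaler('default', async_req=True)
hpa_list = thread.get()
```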
faucamp/python-gsmmodem | tools/gsmtermlib/terminal.py | https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/tools/gsmtermlib/terminal.py#L297-L303 | def _handleEsc(self):
""" Handler for CTRL+Z keypresses """
if self._typingSms:
self.serial.write(self.ESC_CHARACTER)
self._typingSms = False
self.inputBuffer = []
self.cursorPos = 0 | [
"def",
"_handleEsc",
"(",
"self",
")",
":",
"if",
"self",
".",
"_typingSms",
":",
"self",
".",
"serial",
".",
"write",
"(",
"self",
".",
"ESC_CHARACTER",
")",
"self",
".",
"_typingSms",
"=",
"False",
"self",
".",
"inputBuffer",
"=",
"[",
"]",
"self",
".",
"cursorPos",
"=",
"0"
] | Handler for ESC keypresses | [
"Handler",
"for",
"CTRL",
"+",
"Z",
"keypresses"
] | python | train |
aws/sagemaker-python-sdk | src/sagemaker/amazon/knn.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/amazon/knn.py#L102-L113 | def create_model(self, vpc_config_override=VPC_CONFIG_DEFAULT):
"""Return a :class:`~sagemaker.amazon.KNNModel` referencing the latest
s3 model data produced by this Estimator.
Args:
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
"""
return KNNModel(self.model_data, self.role, sagemaker_session=self.sagemaker_session,
vpc_config=self.get_vpc_config(vpc_config_override)) | [
"def",
"create_model",
"(",
"self",
",",
"vpc_config_override",
"=",
"VPC_CONFIG_DEFAULT",
")",
":",
"return",
"KNNModel",
"(",
"self",
".",
"model_data",
",",
"self",
".",
"role",
",",
"sagemaker_session",
"=",
"self",
".",
"sagemaker_session",
",",
"vpc_config",
"=",
"self",
".",
"get_vpc_config",
"(",
"vpc_config_override",
")",
")"
] | Return a :class:`~sagemaker.amazon.KNNModel` referencing the latest
s3 model data produced by this Estimator.
Args:
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids. | [
"Return",
"a",
":",
"class",
":",
"~sagemaker",
".",
"amazon",
".",
"KNNModel",
"referencing",
"the",
"latest",
"s3",
"model",
"data",
"produced",
"by",
"this",
"Estimator",
"."
] | python | train |
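A hedged sketch of how `create_model`'s `vpc_config_override` is typically exercised; the role ARN, instance settings, and subnet/security-group IDs below are placeholders, not values from the record:

```python
# Hypothetical KNN training + model creation with overridden VPC config.
# All identifiers are placeholders; knn.fit(...) is elided.
from sagemaker import KNN  # exported from sagemaker.amazon.knn in SDK 1.x

knn = KNN(role='arn:aws:iam::123456789012:role/SageMakerRole',  # placeholder
          train_instance_count=1,
          train_instance_type='ml.m5.xlarge',
          k=10, sample_size=500, predictor_type='classifier')
# ... knn.fit(record_set) would run the training job here ...

model = knn.create_model(vpc_config_override={
    'Subnets': ['subnet-0abc1234'],          # placeholder subnet id
    'SecurityGroupIds': ['sg-0def5678'],     # placeholder security group id
})
```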
cltl/KafNafParserPy | KafNafParserPy/causal_data.py | https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/causal_data.py#L156-L165 | def remove_this_clink(self,clink_id):
"""
Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed
"""
for clink in self.get_clinks():
if clink.get_id() == clink_id:
self.node.remove(clink.get_node())
break | [
"def",
"remove_this_clink",
"(",
"self",
",",
"clink_id",
")",
":",
"for",
"clink",
"in",
"self",
".",
"get_clinks",
"(",
")",
":",
"if",
"clink",
".",
"get_id",
"(",
")",
"==",
"clink_id",
":",
"self",
".",
"node",
".",
"remove",
"(",
"clink",
".",
"get_node",
"(",
")",
")",
"break"
] | Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed | [
"Removes",
"the",
"clink",
"for",
"the",
"given",
"clink",
"identifier"
] | python | train |
PBR/MQ2 | MQ2/mapchart.py | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/mapchart.py#L124-L228 | def generate_map_chart_file(qtl_matrix, lod_threshold,
map_chart_file='MapChart.map'):
""" This function converts our QTL matrix file into a MapChart input
file.
:arg qtl_matrix: the path to the QTL matrix file generated by
the plugin.
:arg lod_threshold: threshold used to determine if a given LOD value
is reflective of the presence of a QTL.
:kwarg map_chart_file: name of the output file containing the
MapChart information.
"""
qtl_matrix = read_input_file(qtl_matrix, sep=',')
tmp_dic = {}
cnt = 1
tmp = {}
block = {}
for row in qtl_matrix[1:]:
linkgrp = qtl_matrix[cnt - 1][1]
if cnt == 1:
linkgrp = qtl_matrix[cnt][1]
if not linkgrp in tmp_dic:
tmp_dic[linkgrp] = [[], []]
infos = row[0:3]
if qtl_matrix[cnt][1] != linkgrp:
if tmp:
qtls = _extrac_qtl(tmp, block, qtl_matrix[0])
tmp_dic[linkgrp][1] = qtls
linkgrp = qtl_matrix[cnt][1]
tmp_dic[linkgrp] = [[], []]
tmp = {}
block = {}
tmp_dic[linkgrp][0].append([row[0], row[2]])
colcnt = 3
for cel in row[3:-1]:
blockrow = infos[:]
blockrow.extend([qtl_matrix[0][colcnt], cel])
if colcnt in block:
block[colcnt].append(blockrow)
else:
block[colcnt] = [blockrow]
if cel.strip() != '' and float(cel) >= float(lod_threshold):
temp = infos[:]
if not tmp\
or (qtl_matrix[0][colcnt] in tmp
and float(cel) >= float(
tmp[qtl_matrix[0][colcnt]][-1])
) \
or qtl_matrix[0][colcnt] not in tmp:
temp.extend([qtl_matrix[0][colcnt], cel])
tmp[qtl_matrix[0][colcnt]] = temp
colcnt = colcnt + 1
cnt = cnt + 1
qtl_info = {}
try:
stream = open(map_chart_file, 'w')
keys = list(tmp_dic.keys())
## Remove unknown group, reason:
# The unlinked markers, if present, are always put in group U by
# MapQTL. If you don't omit them and there are many (often), then
# their names take so much space that it is difficult to fit them
# on the page.
if 'U' in keys:
keys.remove('U')
# Try to convert all the groups to int, which would result in
# a better sorting. If that fails, fail silently.
try:
keys = [int(key) for key in keys]
except ValueError:
pass
keys.sort()
for key in keys:
key = str(key) # Needed since we might have converted them to int
if tmp_dic[key]:
if key == 'U': # pragma: no cover
# We removed the key before, we should not be here
continue
stream.write('group %s\n' % key)
for entry in _order_linkage_group(tmp_dic[key][0]):
stream.write(' '.join(entry) + '\n')
if tmp_dic[key][1]:
stream.write('\n')
stream.write('qtls\n')
for qtl in tmp_dic[key][1]:
qtl_info[qtl.peak_mk] = qtl.get_flanking_markers()
stream.write('%s \n' % qtl.to_string())
stream.write('\n')
stream.write('\n')
except IOError as err: # pragma: no cover
LOG.info('An error occurred while writing the map chart map '
'to the file %s' % map_chart_file)
LOG.debug("Error: %s" % err)
finally:
stream.close()
LOG.info('Wrote MapChart map in file %s' % map_chart_file)
return qtl_info | [
"def",
"generate_map_chart_file",
"(",
"qtl_matrix",
",",
"lod_threshold",
",",
"map_chart_file",
"=",
"'MapChart.map'",
")",
":",
"qtl_matrix",
"=",
"read_input_file",
"(",
"qtl_matrix",
",",
"sep",
"=",
"','",
")",
"tmp_dic",
"=",
"{",
"}",
"cnt",
"=",
"1",
"tmp",
"=",
"{",
"}",
"block",
"=",
"{",
"}",
"for",
"row",
"in",
"qtl_matrix",
"[",
"1",
":",
"]",
":",
"linkgrp",
"=",
"qtl_matrix",
"[",
"cnt",
"-",
"1",
"]",
"[",
"1",
"]",
"if",
"cnt",
"==",
"1",
":",
"linkgrp",
"=",
"qtl_matrix",
"[",
"cnt",
"]",
"[",
"1",
"]",
"if",
"not",
"linkgrp",
"in",
"tmp_dic",
":",
"tmp_dic",
"[",
"linkgrp",
"]",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"infos",
"=",
"row",
"[",
"0",
":",
"3",
"]",
"if",
"qtl_matrix",
"[",
"cnt",
"]",
"[",
"1",
"]",
"!=",
"linkgrp",
":",
"if",
"tmp",
":",
"qtls",
"=",
"_extrac_qtl",
"(",
"tmp",
",",
"block",
",",
"qtl_matrix",
"[",
"0",
"]",
")",
"tmp_dic",
"[",
"linkgrp",
"]",
"[",
"1",
"]",
"=",
"qtls",
"linkgrp",
"=",
"qtl_matrix",
"[",
"cnt",
"]",
"[",
"1",
"]",
"tmp_dic",
"[",
"linkgrp",
"]",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"tmp",
"=",
"{",
"}",
"block",
"=",
"{",
"}",
"tmp_dic",
"[",
"linkgrp",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"[",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"2",
"]",
"]",
")",
"colcnt",
"=",
"3",
"for",
"cel",
"in",
"row",
"[",
"3",
":",
"-",
"1",
"]",
":",
"blockrow",
"=",
"infos",
"[",
":",
"]",
"blockrow",
".",
"extend",
"(",
"[",
"qtl_matrix",
"[",
"0",
"]",
"[",
"colcnt",
"]",
",",
"cel",
"]",
")",
"if",
"colcnt",
"in",
"block",
":",
"block",
"[",
"colcnt",
"]",
".",
"append",
"(",
"blockrow",
")",
"else",
":",
"block",
"[",
"colcnt",
"]",
"=",
"[",
"blockrow",
"]",
"if",
"cel",
".",
"strip",
"(",
")",
"!=",
"''",
"and",
"float",
"(",
"cel",
")",
">=",
"float",
"(",
"lod_threshold",
")",
":",
"temp",
"=",
"infos",
"[",
":",
"]",
"if",
"not",
"tmp",
"or",
"(",
"qtl_matrix",
"[",
"0",
"]",
"[",
"colcnt",
"]",
"in",
"tmp",
"and",
"float",
"(",
"cel",
")",
">=",
"float",
"(",
"tmp",
"[",
"qtl_matrix",
"[",
"0",
"]",
"[",
"colcnt",
"]",
"]",
"[",
"-",
"1",
"]",
")",
")",
"or",
"qtl_matrix",
"[",
"0",
"]",
"[",
"colcnt",
"]",
"not",
"in",
"tmp",
":",
"temp",
".",
"extend",
"(",
"[",
"qtl_matrix",
"[",
"0",
"]",
"[",
"colcnt",
"]",
",",
"cel",
"]",
")",
"tmp",
"[",
"qtl_matrix",
"[",
"0",
"]",
"[",
"colcnt",
"]",
"]",
"=",
"temp",
"colcnt",
"=",
"colcnt",
"+",
"1",
"cnt",
"=",
"cnt",
"+",
"1",
"qtl_info",
"=",
"{",
"}",
"try",
":",
"stream",
"=",
"open",
"(",
"map_chart_file",
",",
"'w'",
")",
"keys",
"=",
"list",
"(",
"tmp_dic",
".",
"keys",
"(",
")",
")",
"## Remove unknown group, reason:",
"# The unlinked markers, if present, are always put in group U by",
"# MapQTL. If you don't omit them and there are many (often), then",
"# their names take so much space that it is difficult to fit them",
"# on the page.",
"if",
"'U'",
"in",
"keys",
":",
"keys",
".",
"remove",
"(",
"'U'",
")",
"# Try to convert all the groups to int, which would result in",
"# a better sorting. If that fails, fail silently.",
"try",
":",
"keys",
"=",
"[",
"int",
"(",
"key",
")",
"for",
"key",
"in",
"keys",
"]",
"except",
"ValueError",
":",
"pass",
"keys",
".",
"sort",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"key",
"=",
"str",
"(",
"key",
")",
"# Needed since we might have converted them to int",
"if",
"tmp_dic",
"[",
"key",
"]",
":",
"if",
"key",
"==",
"'U'",
":",
"# pragma: no cover",
"# We removed the key before, we should not be here",
"continue",
"stream",
".",
"write",
"(",
"'group %s\\n'",
"%",
"key",
")",
"for",
"entry",
"in",
"_order_linkage_group",
"(",
"tmp_dic",
"[",
"key",
"]",
"[",
"0",
"]",
")",
":",
"stream",
".",
"write",
"(",
"' '",
".",
"join",
"(",
"entry",
")",
"+",
"'\\n'",
")",
"if",
"tmp_dic",
"[",
"key",
"]",
"[",
"1",
"]",
":",
"stream",
".",
"write",
"(",
"'\\n'",
")",
"stream",
".",
"write",
"(",
"'qtls\\n'",
")",
"for",
"qtl",
"in",
"tmp_dic",
"[",
"key",
"]",
"[",
"1",
"]",
":",
"qtl_info",
"[",
"qtl",
".",
"peak_mk",
"]",
"=",
"qtl",
".",
"get_flanking_markers",
"(",
")",
"stream",
".",
"write",
"(",
"'%s \\n'",
"%",
"qtl",
".",
"to_string",
"(",
")",
")",
"stream",
".",
"write",
"(",
"'\\n'",
")",
"stream",
".",
"write",
"(",
"'\\n'",
")",
"except",
"IOError",
"as",
"err",
":",
"# pragma: no cover",
"LOG",
".",
"info",
"(",
"'An error occured while writing the map chart map '",
"'to the file %s'",
"%",
"map_chart_file",
")",
"LOG",
".",
"debug",
"(",
"\"Error: %s\"",
"%",
"err",
")",
"finally",
":",
"stream",
".",
"close",
"(",
")",
"LOG",
".",
"info",
"(",
"'Wrote MapChart map in file %s'",
"%",
"map_chart_file",
")",
"return",
"qtl_info"
] | This function converts our QTL matrix file into a MapChart input
file.
:arg qtl_matrix: the path to the QTL matrix file generated by
the plugin.
:arg lod_threshold: threshold used to determine if a given LOD value
is reflective of the presence of a QTL.
:kwarg map_chart_file: name of the output file containing the
MapChart information. | [
"This",
"function",
"converts",
"our",
"QTL",
"matrix",
"file",
"into",
"a",
"MapChart",
"input",
"file",
"."
] | python | train |
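A hedged driver for the converter above; the CSV filename and the LOD threshold of 3.0 are invented for illustration:

```python
# Hypothetical usage: turn a plugin-produced QTL matrix into MapChart input.
from MQ2.mapchart import generate_map_chart_file

qtl_info = generate_map_chart_file(
    'qtl_matrix.csv',              # assumed to exist; produced by an MQ2 plugin
    lod_threshold=3.0,             # LOD at or above which a value counts as a QTL
    map_chart_file='MapChart.map')

# The return value maps each QTL peak marker to its flanking markers.
for peak, flanking in qtl_info.items():
    print(peak, flanking)
```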
alkivi-sas/python-alkivi-logger | alkivi/logger/logger.py | https://github.com/alkivi-sas/python-alkivi-logger/blob/e96d5a987a5c8789c51d4fa7541709e05b1f51e1/alkivi/logger/logger.py#L249-L252 | def _update_handler(self, handler_class, level):
"""Update the level of an handler."""
handler = self._get_handler(handler_class)
handler.setLevel(level) | [
"def",
"_update_handler",
"(",
"self",
",",
"handler_class",
",",
"level",
")",
":",
"handler",
"=",
"self",
".",
"_get_handler",
"(",
"handler_class",
")",
"handler",
".",
"setLevel",
"(",
"level",
")"
] | Update the level of an handler. | [
"Update",
"the",
"level",
"of",
"an",
"handler",
"."
] | python | train |
pyca/pyopenssl | src/OpenSSL/SSL.py | https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L1444-L1463 | def set_alpn_protos(self, protos):
"""
Specify the protocols that the client is prepared to speak after the
TLS connection has been negotiated using Application Layer Protocol
Negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new("unsigned char[]", protostr)
_lib.SSL_CTX_set_alpn_protos(self._context, input_str, len(protostr)) | [
"def",
"set_alpn_protos",
"(",
"self",
",",
"protos",
")",
":",
"# Take the list of protocols and join them together, prefixing them",
"# with their lengths.",
"protostr",
"=",
"b''",
".",
"join",
"(",
"chain",
".",
"from_iterable",
"(",
"(",
"int2byte",
"(",
"len",
"(",
"p",
")",
")",
",",
"p",
")",
"for",
"p",
"in",
"protos",
")",
")",
"# Build a C string from the list. We don't need to save this off",
"# because OpenSSL immediately copies the data out.",
"input_str",
"=",
"_ffi",
".",
"new",
"(",
"\"unsigned char[]\"",
",",
"protostr",
")",
"_lib",
".",
"SSL_CTX_set_alpn_protos",
"(",
"self",
".",
"_context",
",",
"input_str",
",",
"len",
"(",
"protostr",
")",
")"
] | Specify the protocols that the client is prepared to speak after the
TLS connection has been negotiated using Application Layer Protocol
Negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``. | [
"Specify",
"the",
"protocols",
"that",
"the",
"client",
"is",
"prepared",
"to",
"speak",
"after",
"the",
"TLS",
"connection",
"has",
"been",
"negotiated",
"using",
"Application",
"Layer",
"Protocol",
"Negotiation",
"."
] | python | test |
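The length-prefixed wire format that `set_alpn_protos` builds can be reproduced standalone; this sketch only restates the byte layout (it is not pyOpenSSL API):

```python
# Standalone illustration of the ALPN protocol-list encoding:
# each protocol name is preceded by a single length byte.
from itertools import chain

def encode_alpn(protos):
    """Return the length-prefixed byte string OpenSSL expects."""
    return b''.join(chain.from_iterable(
        (bytes([len(p)]), p) for p in protos))

assert encode_alpn([b'http/1.1', b'spdy/2']) == b'\x08http/1.1\x06spdy/2'
```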
log2timeline/plaso | plaso/serializer/json_serializer.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/serializer/json_serializer.py#L419-L429 | def WriteSerialized(cls, attribute_container):
"""Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
str: A JSON string containing the serialized form.
"""
json_dict = cls.WriteSerializedDict(attribute_container)
return json.dumps(json_dict) | [
"def",
"WriteSerialized",
"(",
"cls",
",",
"attribute_container",
")",
":",
"json_dict",
"=",
"cls",
".",
"WriteSerializedDict",
"(",
"attribute_container",
")",
"return",
"json",
".",
"dumps",
"(",
"json_dict",
")"
] | Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
str: A JSON string containing the serialized form. | [
"Writes",
"an",
"attribute",
"container",
"to",
"serialized",
"form",
"."
] | python | train |
LCAV/pylocus | pylocus/algorithms.py | https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/algorithms.py#L326-L375 | def reconstruct_dwmds(edm, X0, W=None, n=None, r=None, X_bar=None, print_out=False, tol=1e-10, sweeps=100):
""" Reconstruct point set using d(istributed)w(eighted) MDS.
Refer to paper "Distributed Weighted-Multidimensional Scaling for Node Localization in Sensor Networks" for
implementation details (doi.org/10.1145/1138127.1138129)
:param X0: Nxd matrix of starting points.
:param n: Number of points of unknown position. The first n points in X0 and edm are considered unknown.
:param tol: Stopping criterion: when the change in cost between sweeps is below this level, we stop.
:param sweeps: Maximum number of sweeps.
"""
from .basics import get_edm
from .distributed_mds import get_b, get_Si
N, d = X0.shape
if r is None and n is None:
raise ValueError('either r or n have to be given.')
elif n is None:
n = r.shape[0]
if W is None:
W = np.ones((N, N)) - np.eye(N)
X_k = X0.copy()
costs = []
# don't have to ignore i=j, because W[i,i] is zero.
a = np.sum(W[:n, :n], axis=1).flatten() + 2 * \
np.sum(W[:n, n:], axis=1).flatten()
if r is not None:
a += r.flatten()
for k in range(sweeps):
S = 0
for i in range(n):
edm_estimated = get_edm(X_k)
bi = get_b(i, edm_estimated, W, edm, n)
if r is not None and X_bar is not None:
X_k[i] = 1 / a[i] * (r[i] * X_bar[i, :] +
X_k.T.dot(bi).flatten())
Si = get_Si(i, edm_estimated, edm, W, n, r, X_bar[i], X_k[i])
else:
X_k[i] = 1 / a[i] * X_k.T.dot(bi).flatten()
Si = get_Si(i, edm_estimated, edm, W, n)
S += Si
costs.append(S)
if k > 1 and abs(costs[-1] - costs[-2]) < tol:
if (print_out):
print('dwMDS: converged after', k)
break
return X_k, costs | [
"def",
"reconstruct_dwmds",
"(",
"edm",
",",
"X0",
",",
"W",
"=",
"None",
",",
"n",
"=",
"None",
",",
"r",
"=",
"None",
",",
"X_bar",
"=",
"None",
",",
"print_out",
"=",
"False",
",",
"tol",
"=",
"1e-10",
",",
"sweeps",
"=",
"100",
")",
":",
"from",
".",
"basics",
"import",
"get_edm",
"from",
".",
"distributed_mds",
"import",
"get_b",
",",
"get_Si",
"N",
",",
"d",
"=",
"X0",
".",
"shape",
"if",
"r",
"is",
"None",
"and",
"n",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'either r or n have to be given.'",
")",
"elif",
"n",
"is",
"None",
":",
"n",
"=",
"r",
".",
"shape",
"[",
"0",
"]",
"if",
"W",
"is",
"None",
":",
"W",
"=",
"np",
".",
"ones",
"(",
"(",
"N",
",",
"N",
")",
")",
"-",
"np",
".",
"eye",
"(",
"N",
")",
"X_k",
"=",
"X0",
".",
"copy",
"(",
")",
"costs",
"=",
"[",
"]",
"# don't have to ignore i=j, because W[i,i] is zero.",
"a",
"=",
"np",
".",
"sum",
"(",
"W",
"[",
":",
"n",
",",
":",
"n",
"]",
",",
"axis",
"=",
"1",
")",
".",
"flatten",
"(",
")",
"+",
"2",
"*",
"np",
".",
"sum",
"(",
"W",
"[",
":",
"n",
",",
"n",
":",
"]",
",",
"axis",
"=",
"1",
")",
".",
"flatten",
"(",
")",
"if",
"r",
"is",
"not",
"None",
":",
"a",
"+=",
"r",
".",
"flatten",
"(",
")",
"for",
"k",
"in",
"range",
"(",
"sweeps",
")",
":",
"S",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"edm_estimated",
"=",
"get_edm",
"(",
"X_k",
")",
"bi",
"=",
"get_b",
"(",
"i",
",",
"edm_estimated",
",",
"W",
",",
"edm",
",",
"n",
")",
"if",
"r",
"is",
"not",
"None",
"and",
"X_bar",
"is",
"not",
"None",
":",
"X_k",
"[",
"i",
"]",
"=",
"1",
"/",
"a",
"[",
"i",
"]",
"*",
"(",
"r",
"[",
"i",
"]",
"*",
"X_bar",
"[",
"i",
",",
":",
"]",
"+",
"X_k",
".",
"T",
".",
"dot",
"(",
"bi",
")",
".",
"flatten",
"(",
")",
")",
"Si",
"=",
"get_Si",
"(",
"i",
",",
"edm_estimated",
",",
"edm",
",",
"W",
",",
"n",
",",
"r",
",",
"X_bar",
"[",
"i",
"]",
",",
"X_k",
"[",
"i",
"]",
")",
"else",
":",
"X_k",
"[",
"i",
"]",
"=",
"1",
"/",
"a",
"[",
"i",
"]",
"*",
"X_k",
".",
"T",
".",
"dot",
"(",
"bi",
")",
".",
"flatten",
"(",
")",
"Si",
"=",
"get_Si",
"(",
"i",
",",
"edm_estimated",
",",
"edm",
",",
"W",
",",
"n",
")",
"S",
"+=",
"Si",
"costs",
".",
"append",
"(",
"S",
")",
"if",
"k",
">",
"1",
"and",
"abs",
"(",
"costs",
"[",
"-",
"1",
"]",
"-",
"costs",
"[",
"-",
"2",
"]",
")",
"<",
"tol",
":",
"if",
"(",
"print_out",
")",
":",
"print",
"(",
"'dwMDS: converged after'",
",",
"k",
")",
"break",
"return",
"X_k",
",",
"costs"
] | Reconstruct point set using d(istributed)w(eighted) MDS.
Refer to paper "Distributed Weighted-Multidimensional Scaling for Node Localization in Sensor Networks" for
implementation details (doi.org/10.1145/1138127.1138129)
:param X0: Nxd matrix of starting points.
:param n: Number of points of unknown position. The first n points in X0 and edm are considered unknown.
:param tol: Stopping criterion: when the change in cost between sweeps is below this level, we stop.
:param sweeps: Maximum number of sweeps. | [
"Reconstruct",
"point",
"set",
"using",
"d",
"(",
"istributed",
")",
"w",
"(",
"eighted",
")",
"MDS",
"."
] | python | train |
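A hedged smoke test for `reconstruct_dwmds`; the six-point cloud and jitter level are invented, and the EDM is built with pylocus' own `get_edm` so the distance convention matches:

```python
# Hypothetical smoke test: recover a small 2-D point set from its exact
# Euclidean distance matrix, starting from a jittered initial guess.
import numpy as np
from pylocus.basics import get_edm
from pylocus.algorithms import reconstruct_dwmds

rng = np.random.RandomState(0)
X_true = rng.rand(6, 2)                  # 6 points in 2-D (invented)
edm = get_edm(X_true)                    # distance matrix, pylocus convention
X0 = X_true + 0.05 * rng.randn(6, 2)     # perturbed starting positions

X_hat, costs = reconstruct_dwmds(edm, X0, n=6)
print('final cost:', costs[-1])          # should be near zero on exact data
```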
praekelt/panya-music | music/models.py | https://github.com/praekelt/panya-music/blob/9300b1866bc33178e721b6de4771ba866bfc4b11/music/models.py#L77-L97 | def get_primary_contributors(self, permitted=True):
"""
Returns a list of primary contributors, with primary being defined as those contributors that have the highest role assigned (in terms of priority). When permitted is set to True, only permitted contributors are returned.
"""
primary_credits = []
credits = self.credits.exclude(role=None).order_by('role')
if credits:
primary_role = credits[0].role
for credit in credits:
if credit.role == primary_role:
primary_credits.append(credit)
contributors = []
for credit in primary_credits:
contributor = credit.contributor
if not permitted or contributor.is_permitted:
contributors.append(contributor)
return contributors
"def",
"get_primary_contributors",
"(",
"self",
",",
"permitted",
"=",
"True",
")",
":",
"primary_credits",
"=",
"[",
"]",
"credits",
"=",
"self",
".",
"credits",
".",
"exclude",
"(",
"role",
"=",
"None",
")",
".",
"order_by",
"(",
"'role'",
")",
"if",
"credits",
":",
"primary_role",
"=",
"credits",
"[",
"0",
"]",
".",
"role",
"for",
"credit",
"in",
"credits",
":",
"if",
"credit",
".",
"role",
"==",
"primary_role",
":",
"primary_credits",
".",
"append",
"(",
"credit",
")",
"contributors",
"=",
"[",
"]",
"for",
"credit",
"in",
"primary_credits",
":",
"contributor",
"=",
"credit",
".",
"contributor",
"if",
"permitted",
"and",
"contributor",
".",
"is_permitted",
":",
"contributors",
".",
"append",
"(",
"contributor",
")",
"else",
":",
"contributors",
".",
"append",
"(",
"contributor",
")",
"return",
"contributors"
] | Returns a list of primary contributors, with primary being defined as those contributors that have the highest role assigned (in terms of priority). When permitted is set to True, only permitted contributors are returned. | [
"Returns",
"a",
"list",
"of",
"primary",
"contributors",
"with",
"primary",
"being",
"defined",
"as",
"those",
"contributors",
"that",
"have",
"the",
"highest",
"role",
"assigned",
"(",
"in",
"terms",
"of",
"priority",
")",
".",
"When",
"permitted",
"is",
"set",
"to",
"True",
"only",
"permitted",
"contributors",
"are",
"returned",
"."
] | python | train |
mila-iqia/fuel | fuel/converters/mnist.py | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/mnist.py#L111-L159 | def read_mnist_images(filename, dtype=None):
"""Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0.
"""
with gzip.open(filename, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError("Wrong magic number reading MNIST image file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape((number, 1, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
if dtype.kind == 'b':
# If the user wants Booleans, threshold at half the range.
array = array >= 128
elif dtype.kind == 'f':
# Otherwise, just convert.
array = array.astype(dtype)
array /= 255.
else:
raise ValueError("Unknown dtype to convert MNIST to")
return array | [
"def",
"read_mnist_images",
"(",
"filename",
",",
"dtype",
"=",
"None",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"magic",
",",
"number",
",",
"rows",
",",
"cols",
"=",
"struct",
".",
"unpack",
"(",
"'>iiii'",
",",
"f",
".",
"read",
"(",
"16",
")",
")",
"if",
"magic",
"!=",
"MNIST_IMAGE_MAGIC",
":",
"raise",
"ValueError",
"(",
"\"Wrong magic number reading MNIST image file\"",
")",
"array",
"=",
"numpy",
".",
"frombuffer",
"(",
"f",
".",
"read",
"(",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"array",
"=",
"array",
".",
"reshape",
"(",
"(",
"number",
",",
"1",
",",
"rows",
",",
"cols",
")",
")",
"if",
"dtype",
":",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"dtype",
")",
"if",
"dtype",
".",
"kind",
"==",
"'b'",
":",
"# If the user wants Booleans, threshold at half the range.",
"array",
"=",
"array",
">=",
"128",
"elif",
"dtype",
".",
"kind",
"==",
"'f'",
":",
"# Otherwise, just convert.",
"array",
"=",
"array",
".",
"astype",
"(",
"dtype",
")",
"array",
"/=",
"255.",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown dtype to convert MNIST to\"",
")",
"return",
"array"
] | Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0. | [
"Read",
"MNIST",
"images",
"from",
"the",
"original",
"ubyte",
"file",
"format",
"."
] | python | train |
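The matching label files use a near-identical IDX layout (magic, count, raw bytes); a hedged companion reader, with the label magic 2049 taken from the IDX specification rather than from this record:

```python
# Companion sketch: read MNIST labels from the gzipped ubyte format.
# MNIST_LABEL_MAGIC = 2049 comes from the IDX spec; it is not defined above.
import gzip
import struct
import numpy

MNIST_LABEL_MAGIC = 2049

def read_mnist_labels(filename):
    """Return a (n_labels, 1) uint8 array of class labels."""
    with gzip.open(filename, 'rb') as f:
        magic, number = struct.unpack('>ii', f.read(8))
        if magic != MNIST_LABEL_MAGIC:
            raise ValueError("Wrong magic number reading MNIST label file")
        array = numpy.frombuffer(f.read(), dtype='uint8')
    return array.reshape((number, 1))
```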
polyaxon/polyaxon-cli | polyaxon_cli/managers/ignore.py | https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/managers/ignore.py#L105-L109 | def _remove_trailing_spaces(line):
"""Remove trailing spaces unless they are quoted with a backslash."""
while line.endswith(' ') and not line.endswith('\\ '):
line = line[:-1]
return line.replace('\\ ', ' ') | [
"def",
"_remove_trailing_spaces",
"(",
"line",
")",
":",
"while",
"line",
".",
"endswith",
"(",
"' '",
")",
"and",
"not",
"line",
".",
"endswith",
"(",
"'\\\\ '",
")",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"return",
"line",
".",
"replace",
"(",
"'\\\\ '",
",",
"' '",
")"
] | Remove trailing spaces unless they are quoted with a backslash. | [
"Remove",
"trailing",
"spaces",
"unless",
"they",
"are",
"quoted",
"with",
"a",
"backslash",
"."
] | python | valid |
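The backslash-quoting rule is easiest to see on concrete inputs; a self-contained behaviour check (the function body is copied verbatim from the record):

```python
# Behaviour check for the gitignore-style trailing-space rule:
# unquoted trailing spaces are stripped; a backslash preserves one space.
def _remove_trailing_spaces(line):
    while line.endswith(' ') and not line.endswith('\\ '):
        line = line[:-1]
    return line.replace('\\ ', ' ')

assert _remove_trailing_spaces('build/   ') == 'build/'
assert _remove_trailing_spaces('name\\ ') == 'name '   # escaped space kept
assert _remove_trailing_spaces('a\\  ') == 'a '        # unescaped one dropped
```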
heuer/cablemap | cablemap.tm/cablemap/tm/handler.py | https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.tm/cablemap/tm/handler.py#L117-L124 | def create_ctm_handler(fileobj, title=u'Cablegate Topic Map', comment=u'Generated by Cablemap - https://github.com/heuer/cablemap', detect_prefixes=False):
"""\
Returns a `ICableHandler` instance which writes Compact Topic Maps syntax (CTM).
`fileobj`
A file-like object.
"""
return MIOCableHandler(create_ctm_miohandler(fileobj, title, comment, detect_prefixes=detect_prefixes)) | [
"def",
"create_ctm_handler",
"(",
"fileobj",
",",
"title",
"=",
"u'Cablegate Topic Map'",
",",
"comment",
"=",
"u'Generated by Cablemap - https://github.com/heuer/cablemap'",
",",
"detect_prefixes",
"=",
"False",
")",
":",
"return",
"MIOCableHandler",
"(",
"create_ctm_miohandler",
"(",
"fileobj",
",",
"title",
",",
"comment",
",",
"detect_prefixes",
"=",
"detect_prefixes",
")",
")"
] | \
Returns a `ICableHandler` instance which writes Compact Topic Maps syntax (CTM).
`fileobj`
A file-like object. | [
"\\",
"Returns",
"a",
"ICableHandler",
"instance",
"which",
"writes",
"Compact",
"Topic",
"Maps",
"syntax",
"(",
"CTM",
")",
"."
] | python | train |
twisted/mantissa | xmantissa/people.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L1658-L1669 | def getEditPerson(self, name):
"""
Get an L{EditPersonView} for editing the person named C{name}.
@param name: A person name.
@type name: C{unicode}
@rtype: L{EditPersonView}
"""
view = EditPersonView(self.organizer.personByName(name))
view.setFragmentParent(self)
return view | [
"def",
"getEditPerson",
"(",
"self",
",",
"name",
")",
":",
"view",
"=",
"EditPersonView",
"(",
"self",
".",
"organizer",
".",
"personByName",
"(",
"name",
")",
")",
"view",
".",
"setFragmentParent",
"(",
"self",
")",
"return",
"view"
] | Get an L{EditPersonView} for editing the person named C{name}.
@param name: A person name.
@type name: C{unicode}
@rtype: L{EditPersonView} | [
"Get",
"an",
"L",
"{",
"EditPersonView",
"}",
"for",
"editing",
"the",
"person",
"named",
"C",
"{",
"name",
"}",
"."
] | python | train |
jrief/django-websocket-redis | ws4redis/websocket.py | https://github.com/jrief/django-websocket-redis/blob/abcddaad2f579d71dbf375e5e34bc35eef795a81/ws4redis/websocket.py#L384-L425 | def encode_header(cls, fin, opcode, mask, length, flags):
"""
Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header.
"""
first_byte = opcode
second_byte = 0
if six.PY2:
extra = ''
else:
extra = b''
if fin:
first_byte |= cls.FIN_MASK
if flags & cls.RSV0_MASK:
first_byte |= cls.RSV0_MASK
if flags & cls.RSV1_MASK:
first_byte |= cls.RSV1_MASK
if flags & cls.RSV2_MASK:
first_byte |= cls.RSV2_MASK
# now deal with length complexities
if length < 126:
second_byte += length
elif length <= 0xffff:
second_byte += 126
extra = struct.pack('!H', length)
elif length <= 0xffffffffffffffff:
second_byte += 127
extra = struct.pack('!Q', length)
else:
raise FrameTooLargeException
if mask:
second_byte |= cls.MASK_MASK
extra += mask
if six.PY3:
return bytes([first_byte, second_byte]) + extra
return chr(first_byte) + chr(second_byte) + extra | [
"def",
"encode_header",
"(",
"cls",
",",
"fin",
",",
"opcode",
",",
"mask",
",",
"length",
",",
"flags",
")",
":",
"first_byte",
"=",
"opcode",
"second_byte",
"=",
"0",
"if",
"six",
".",
"PY2",
":",
"extra",
"=",
"''",
"else",
":",
"extra",
"=",
"b''",
"if",
"fin",
":",
"first_byte",
"|=",
"cls",
".",
"FIN_MASK",
"if",
"flags",
"&",
"cls",
".",
"RSV0_MASK",
":",
"first_byte",
"|=",
"cls",
".",
"RSV0_MASK",
"if",
"flags",
"&",
"cls",
".",
"RSV1_MASK",
":",
"first_byte",
"|=",
"cls",
".",
"RSV1_MASK",
"if",
"flags",
"&",
"cls",
".",
"RSV2_MASK",
":",
"first_byte",
"|=",
"cls",
".",
"RSV2_MASK",
"# now deal with length complexities",
"if",
"length",
"<",
"126",
":",
"second_byte",
"+=",
"length",
"elif",
"length",
"<=",
"0xffff",
":",
"second_byte",
"+=",
"126",
"extra",
"=",
"struct",
".",
"pack",
"(",
"'!H'",
",",
"length",
")",
"elif",
"length",
"<=",
"0xffffffffffffffff",
":",
"second_byte",
"+=",
"127",
"extra",
"=",
"struct",
".",
"pack",
"(",
"'!Q'",
",",
"length",
")",
"else",
":",
"raise",
"FrameTooLargeException",
"if",
"mask",
":",
"second_byte",
"|=",
"cls",
".",
"MASK_MASK",
"extra",
"+=",
"mask",
"if",
"six",
".",
"PY3",
":",
"return",
"bytes",
"(",
"[",
"first_byte",
",",
"second_byte",
"]",
")",
"+",
"extra",
"return",
"chr",
"(",
"first_byte",
")",
"+",
"chr",
"(",
"second_byte",
")",
"+",
"extra"
] | Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header. | [
"Encodes",
"a",
"WebSocket",
"header",
"."
] | python | train |
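A hedged sanity check of the header layout above; the FIN/MASK bit positions are the standard RFC 6455 values, restated here because the class constants sit outside this record:

```python
# Sanity-check sketch for the WebSocket frame-header layout (RFC 6455).
import struct

FIN_MASK, MASK_MASK = 0x80, 0x80   # assumed standard bit masks
OPCODE_TEXT = 0x1

def encode_header(fin, opcode, mask, length):
    first = opcode | (FIN_MASK if fin else 0)
    second, extra = 0, b''
    if length < 126:                      # 7-bit length fits inline
        second = length
    elif length <= 0xffff:                # 16-bit extended length
        second, extra = 126, struct.pack('!H', length)
    else:                                 # 64-bit extended length
        second, extra = 127, struct.pack('!Q', length)
    if mask:
        second |= MASK_MASK
        extra += mask
    return bytes([first, second]) + extra

# Final text frame, unmasked, 5-byte payload -> b'\x81\x05'
assert encode_header(True, OPCODE_TEXT, b'', 5) == b'\x81\x05'
# A 300-byte payload needs the 16-bit extended length field.
assert encode_header(True, OPCODE_TEXT, b'', 300) == b'\x81\x7e\x01\x2c'
```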
watson-developer-cloud/python-sdk | ibm_watson/natural_language_understanding_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L2230-L2239 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'entities'",
")",
"and",
"self",
".",
"entities",
"is",
"not",
"None",
":",
"_dict",
"[",
"'entities'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"entities",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'location'",
")",
"and",
"self",
".",
"location",
"is",
"not",
"None",
":",
"_dict",
"[",
"'location'",
"]",
"=",
"self",
".",
"location",
"if",
"hasattr",
"(",
"self",
",",
"'text'",
")",
"and",
"self",
".",
"text",
"is",
"not",
"None",
":",
"_dict",
"[",
"'text'",
"]",
"=",
"self",
".",
"text",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
JarryShaw/DictDumper | src/tree.py | https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/tree.py#L221-L264 | def _append_branch(self, value, _file):
"""Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
return
# return self._append_none(None, _file)
self._tctr += 1
_vlen = len(value)
for (_vctr, (_item, _text)) in enumerate(value.items()):
_text = self.object_hook(_text)
_type = type(_text).__name__
flag_dict = (_type == 'dict')
flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_bytes = (_type == 'bytes' and len(_text) > 16)
if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
_pref = '\n'
else:
_pref = ' ->'
_labs = ''
for _ in range(self._tctr):
_labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
_file.write(_keys)
if _vctr == _vlen - 1:
self._bctr[self._tctr] = 1
_MAGIC_TYPES[_type](self, _text, _file)
_suff = '' if _type == 'dict' else '\n'
_file.write(_suff)
self._bctr[self._tctr] = 0
self._tctr -= 1 | [
"def",
"_append_branch",
"(",
"self",
",",
"value",
",",
"_file",
")",
":",
"if",
"not",
"value",
":",
"return",
"# return self._append_none(None, _file)",
"self",
".",
"_tctr",
"+=",
"1",
"_vlen",
"=",
"len",
"(",
"value",
")",
"for",
"(",
"_vctr",
",",
"(",
"_item",
",",
"_text",
")",
")",
"in",
"enumerate",
"(",
"value",
".",
"items",
"(",
")",
")",
":",
"_text",
"=",
"self",
".",
"object_hook",
"(",
"_text",
")",
"_type",
"=",
"type",
"(",
"_text",
")",
".",
"__name__",
"flag_dict",
"=",
"(",
"_type",
"==",
"'dict'",
")",
"flag_list",
"=",
"(",
"_type",
"==",
"'list'",
"and",
"(",
"len",
"(",
"_text",
")",
">",
"1",
"or",
"(",
"len",
"(",
"_text",
")",
"==",
"1",
"and",
"type",
"(",
"_text",
"[",
"0",
"]",
")",
".",
"__name__",
"==",
"'dict'",
")",
")",
")",
"# noqa pylint: disable=line-too-long",
"flag_tuple",
"=",
"(",
"_type",
"==",
"'tuple'",
"and",
"(",
"len",
"(",
"_text",
")",
">",
"1",
"or",
"(",
"len",
"(",
"_text",
")",
"==",
"1",
"and",
"type",
"(",
"_text",
"[",
"0",
"]",
")",
".",
"__name__",
"==",
"'dict'",
")",
")",
")",
"# noqa pylint: disable=line-too-long",
"flag_bytes",
"=",
"(",
"_type",
"==",
"'bytes'",
"and",
"len",
"(",
"_text",
")",
">",
"16",
")",
"if",
"any",
"(",
"(",
"flag_dict",
",",
"flag_list",
",",
"flag_tuple",
",",
"flag_bytes",
")",
")",
":",
"_pref",
"=",
"'\\n'",
"else",
":",
"_pref",
"=",
"' ->'",
"_labs",
"=",
"''",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"_tctr",
")",
":",
"_labs",
"+=",
"_TEMP_SPACES",
"if",
"self",
".",
"_bctr",
"[",
"_",
"]",
"else",
"_TEMP_BRANCH",
"_keys",
"=",
"'{labs} |-- {item}{pref}'",
".",
"format",
"(",
"labs",
"=",
"_labs",
",",
"item",
"=",
"_item",
",",
"pref",
"=",
"_pref",
")",
"_file",
".",
"write",
"(",
"_keys",
")",
"if",
"_vctr",
"==",
"_vlen",
"-",
"1",
":",
"self",
".",
"_bctr",
"[",
"self",
".",
"_tctr",
"]",
"=",
"1",
"_MAGIC_TYPES",
"[",
"_type",
"]",
"(",
"self",
",",
"_text",
",",
"_file",
")",
"_suff",
"=",
"''",
"if",
"_type",
"==",
"'dict'",
"else",
"'\\n'",
"_file",
".",
"write",
"(",
"_suff",
")",
"self",
".",
"_bctr",
"[",
"self",
".",
"_tctr",
"]",
"=",
"0",
"self",
".",
"_tctr",
"-=",
"1"
] | Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file | [
"Call",
"this",
"function",
"to",
"write",
"branch",
"contents",
"."
] | python | train |
veltzer/pytconf | pytconf/config.py | https://github.com/veltzer/pytconf/blob/8dee43ace35d0dd2ab1105fb94057f650393360f/pytconf/config.py#L639-L654 | def create_list_int(help_string=NO_HELP, default=NO_DEFAULT):
# type: (str, Union[List[int], NO_DEFAULT_TYPE]) -> List[int]
"""
Create a List[int] parameter
:param help_string:
:param default:
:return:
"""
# noinspection PyTypeChecker
return ParamFunctions(
help_string=help_string,
default=default,
type_name="List[int]",
function_s2t=convert_string_to_list_int,
function_t2s=convert_list_int_to_string,
) | [
"def",
"create_list_int",
"(",
"help_string",
"=",
"NO_HELP",
",",
"default",
"=",
"NO_DEFAULT",
")",
":",
"# type: (str, Union[List[int], NO_DEFAULT_TYPE]) -> List[int]",
"# noinspection PyTypeChecker",
"return",
"ParamFunctions",
"(",
"help_string",
"=",
"help_string",
",",
"default",
"=",
"default",
",",
"type_name",
"=",
"\"List[int]\"",
",",
"function_s2t",
"=",
"convert_string_to_list_int",
",",
"function_t2s",
"=",
"convert_list_int_to_string",
",",
")"
] | Create a List[int] parameter
:param help_string:
:param default:
:return: | [
"Create",
"a",
"List",
"[",
"int",
"]",
"parameter",
":",
"param",
"help_string",
":",
":",
"param",
"default",
":",
":",
"return",
":"
] | python | train |
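Plausible shapes for the two converter callbacks named above; their real definitions live elsewhere in pytconf, so the comma separator here is an assumption:

```python
# Hypothetical converter pair matching the names used in the record;
# the real pytconf implementations may handle separators differently.
from typing import List

def convert_string_to_list_int(s):
    # type: (str) -> List[int]
    return [int(part) for part in s.split(',') if part.strip()]

def convert_list_int_to_string(values):
    # type: (List[int]) -> str
    return ','.join(str(v) for v in values)

assert convert_string_to_list_int('1,2,3') == [1, 2, 3]
assert convert_list_int_to_string([1, 2, 3]) == '1,2,3'
```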
lpantano/seqcluster | seqcluster/libs/thinkbayes.py | https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1585-L1599 | def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in numpy.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf | [
"def",
"MakeExponentialPmf",
"(",
"lam",
",",
"high",
",",
"n",
"=",
"200",
")",
":",
"pmf",
"=",
"Pmf",
"(",
")",
"for",
"x",
"in",
"numpy",
".",
"linspace",
"(",
"0",
",",
"high",
",",
"n",
")",
":",
"p",
"=",
"EvalExponentialPdf",
"(",
"x",
",",
"lam",
")",
"pmf",
".",
"Set",
"(",
"x",
",",
"p",
")",
"pmf",
".",
"Normalize",
"(",
")",
"return",
"pmf"
] | Makes a discrete PMF approximation of an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf | [
"Makes",
"a",
"PMF",
"discrete",
"approx",
"to",
"an",
"exponential",
"distribution",
"."
] | python | train |
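The helper it samples is just the exponential density; a hedged restatement (the real `EvalExponentialPdf` body is outside this record) plus a usage note:

```python
# The density sampled above: f(x) = lam * exp(-lam * x) for x >= 0.
import math

def EvalExponentialPdf(x, lam):
    """Exponential pdf at x for rate parameter lam (assumed form)."""
    return lam * math.exp(-lam * x)

# With lam=2, a grid out to high=5 captures essentially all of the mass:
#   pmf = MakeExponentialPmf(lam=2, high=5, n=200)
print(EvalExponentialPdf(0.0, 2.0))   # 2.0, the density at the origin
```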
noxdafox/vminspect | vminspect/usnjrnl.py | https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/usnjrnl.py#L51-L71 | def parse_journal_file(journal_file):
"""Iterates over the journal's file taking care of paddings."""
counter = count()
for block in read_next_block(journal_file):
block = remove_nullchars(block)
while len(block) > MIN_RECORD_SIZE:
header = RECORD_HEADER.unpack_from(block)
size = header[0]
try:
yield parse_record(header, block[:size])
next(counter)
except RuntimeError:
yield CorruptedUsnRecord(next(counter))
finally:
block = remove_nullchars(block[size:])
journal_file.seek(- len(block), 1) | [
"def",
"parse_journal_file",
"(",
"journal_file",
")",
":",
"counter",
"=",
"count",
"(",
")",
"for",
"block",
"in",
"read_next_block",
"(",
"journal_file",
")",
":",
"block",
"=",
"remove_nullchars",
"(",
"block",
")",
"while",
"len",
"(",
"block",
")",
">",
"MIN_RECORD_SIZE",
":",
"header",
"=",
"RECORD_HEADER",
".",
"unpack_from",
"(",
"block",
")",
"size",
"=",
"header",
"[",
"0",
"]",
"try",
":",
"yield",
"parse_record",
"(",
"header",
",",
"block",
"[",
":",
"size",
"]",
")",
"next",
"(",
"counter",
")",
"except",
"RuntimeError",
":",
"yield",
"CorruptedUsnRecord",
"(",
"next",
"(",
"counter",
")",
")",
"finally",
":",
"block",
"=",
"remove_nullchars",
"(",
"block",
"[",
"size",
":",
"]",
")",
"journal_file",
".",
"seek",
"(",
"-",
"len",
"(",
"block",
")",
",",
"1",
")"
] | Iterates over the journal file, taking care of padding. | [
"Iterates",
"over",
"the",
"journal",
"s",
"file",
"taking",
"care",
"of",
"paddings",
"."
] | python | train |
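The two helpers the parser leans on are not shown in the record; plausible minimal forms, with the block size and header layout marked as assumptions:

```python
# Hypothetical minimal helpers for the parser above; the vminspect
# originals may buffer differently. Layout and sizes are assumptions.
import struct

RECORD_HEADER = struct.Struct('<IHH')   # assumed: record length, major, minor
BLOCK_SIZE = 1024 * 1024                # assumed read granularity

def read_next_block(journal_file):
    """Yield fixed-size chunks of the $UsnJrnl:$J stream until EOF."""
    block = journal_file.read(BLOCK_SIZE)
    while block:
        yield block
        block = journal_file.read(BLOCK_SIZE)

def remove_nullchars(block):
    """Skip the sparse NUL padding that precedes each run of records."""
    return block.lstrip(b'\x00')
```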
glue-viz/glue-vispy-viewers | glue_vispy_viewers/scatter/layer_artist.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/scatter/layer_artist.py#L110-L122 | def remove(self):
"""
Remove the layer artist from the visualization
"""
if self._multiscat is None:
return
self._multiscat.deallocate(self.id)
self._multiscat = None
self._viewer_state.remove_global_callback(self._update_scatter)
self.state.remove_global_callback(self._update_scatter) | [
"def",
"remove",
"(",
"self",
")",
":",
"if",
"self",
".",
"_multiscat",
"is",
"None",
":",
"return",
"self",
".",
"_multiscat",
".",
"deallocate",
"(",
"self",
".",
"id",
")",
"self",
".",
"_multiscat",
"=",
"None",
"self",
".",
"_viewer_state",
".",
"remove_global_callback",
"(",
"self",
".",
"_update_scatter",
")",
"self",
".",
"state",
".",
"remove_global_callback",
"(",
"self",
".",
"_update_scatter",
")"
] | Remove the layer artist from the visualization | [
"Remove",
"the",
"layer",
"artist",
"from",
"the",
"visualization"
] | python | train |
dw/mitogen | mitogen/core.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L803-L837 | def unpickle(self, throw=True, throw_dead=True):
"""
Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set.
"""
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
self._throw_dead()
obj = self._unpickled
if obj is Message._unpickled:
fp = BytesIO(self.data)
unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
unpickler.find_global = self._find_global
try:
# Must occur off the broker thread.
obj = unpickler.load()
self._unpickled = obj
except (TypeError, ValueError):
e = sys.exc_info()[1]
raise StreamError('invalid message: %s', e)
if throw:
if isinstance(obj, CallError):
raise obj
return obj | [
"def",
"unpickle",
"(",
"self",
",",
"throw",
"=",
"True",
",",
"throw_dead",
"=",
"True",
")",
":",
"_vv",
"and",
"IOLOG",
".",
"debug",
"(",
"'%r.unpickle()'",
",",
"self",
")",
"if",
"throw_dead",
"and",
"self",
".",
"is_dead",
":",
"self",
".",
"_throw_dead",
"(",
")",
"obj",
"=",
"self",
".",
"_unpickled",
"if",
"obj",
"is",
"Message",
".",
"_unpickled",
":",
"fp",
"=",
"BytesIO",
"(",
"self",
".",
"data",
")",
"unpickler",
"=",
"_Unpickler",
"(",
"fp",
",",
"*",
"*",
"self",
".",
"UNPICKLER_KWARGS",
")",
"unpickler",
".",
"find_global",
"=",
"self",
".",
"_find_global",
"try",
":",
"# Must occur off the broker thread.",
"obj",
"=",
"unpickler",
".",
"load",
"(",
")",
"self",
".",
"_unpickled",
"=",
"obj",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"raise",
"StreamError",
"(",
"'invalid message: %s'",
",",
"e",
")",
"if",
"throw",
":",
"if",
"isinstance",
"(",
"obj",
",",
"CallError",
")",
":",
"raise",
"obj",
"return",
"obj"
] | Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set. | [
"Unpickle",
":",
"attr",
":",
"data",
"optionally",
"raising",
"any",
"exceptions",
"present",
"."
] | python | train |
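The `find_global` hook (the Python 2-era name for what Python 3 calls `find_class`) is the safety valve: only approved globals may be resolved during load. A standalone sketch of the same allow-list pattern, with an invented permitted set:

```python
# Standalone allow-list unpickling sketch; the permitted set is invented.
import builtins
import io
import pickle

_SAFE_BUILTINS = {'dict', 'list', 'set', 'tuple'}

class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Resolve only a small allow-list of builtins; refuse the rest
        # instead of importing arbitrary attacker-chosen globals.
        if module == 'builtins' and name in _SAFE_BUILTINS:
            return getattr(builtins, name)
        raise pickle.UnpicklingError(
            'disallowed global %s.%s' % (module, name))

def restricted_loads(data):
    return RestrictedUnpickler(io.BytesIO(data)).load()

print(restricted_loads(pickle.dumps({'ok': [1, 2]})))  # plain containers pass
```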
azogue/esiosdata | esiosdata/__main__.py | https://github.com/azogue/esiosdata/blob/680c7918955bc6ceee5bded92b3a4485f5ea8151/esiosdata/__main__.py#L26-L96 | def main_cli():
"""
Updates the local PVPC/DEMANDA database stored as a dataframe,
creating a new one if it does not exist or if any problem arises. The recorded data are stored in HDF5
"""
def _get_parser_args():
p = argparse.ArgumentParser(description='Gestor de DB de PVPC/DEMANDA (esios.ree.es)')
p.add_argument('-d', '--dem', action='store_true', help='Selecciona BD de demanda (BD de PVPC por defecto)')
p.add_argument('-i', '--info', action='store', nargs='*',
help="Muestra información de la BD seleccionada. "
"* Puede usar intervalos temporales y nombres de columnas, "
"como '-i gen noc 2017-01-24 2017-01-26'")
p.add_argument('-fu', '-FU', '--forceupdate', action='store_true',
help="Fuerza la reconstrucción total de la BD seleccionada")
p.add_argument('-u', '-U', '--update', action='store_true',
help="Actualiza la información de la BD seleccionada hasta el instante actual")
p.add_argument('-p', '--plot', action='store_true', help="Genera plots de la información filtrada de la BD")
p.add_argument('-v', '--verbose', action='store_true', help='Muestra información extra')
arguments = p.parse_args()
return arguments, p
def _parse_date(string, columns):
try:
ts = pd.Timestamp(string)
print_cyan('{} es timestamp: {:%c} --> {}'.format(string, ts, ts.date()))
columns.remove(string)
return ts.date().isoformat()
except ValueError:
pass
args, parser = _get_parser_args()
print_secc('ESIOS PVPC/DEMANDA')
if args.dem:
db_web = DatosREE(update=args.update, force_update=args.forceupdate, verbose=args.verbose)
else:
db_web = PVPC(update=args.update, force_update=args.forceupdate, verbose=args.verbose)
data = db_web.data['data']
if args.info is not None:
if len(args.info) > 0:
cols = args.info.copy()
dates = [d for d in [_parse_date(s, cols) for s in args.info] if d]
if len(dates) == 2:
data = data.loc[dates[0]:dates[1]]
elif len(dates) == 1:
data = data.loc[dates[0]]
if len(cols) > 0:
try:
data = data[[c.upper() for c in cols]]
except KeyError as e:
print_red('NO SE PUEDE FILTRAR LA COLUMNA (Exception: {})\nLAS COLUMNAS DISPONIBLES SON:\n{}'
.format(e, data.columns))
print_info(data)
else:
print_secc('LAST 24h in DB:')
print_info(data.iloc[-24:])
print_cyan(data.columns)
if args.plot:
if args.dem:
from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora
print_red('IMPLEMENTAR PLOTS DEM')
else:
from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora
if len(data) < 750:
pvpcplot_grid_hora(data)
# pvpcplot_tarifas_hora(data)
else:
print_red('La selección para plot es excesiva: {} samples de {} a {}\nSe hace plot de las últimas 24h'.
format(len(data), data.index[0], data.index[-1]))
pvpcplot_grid_hora(db_web.data['data'].iloc[-24:])
pvpcplot_tarifas_hora(db_web.data['data'].iloc[-24:]) | [
"def",
"main_cli",
"(",
")",
":",
"def",
"_get_parser_args",
"(",
")",
":",
"p",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Gestor de DB de PVPC/DEMANDA (esios.ree.es)'",
")",
"p",
".",
"add_argument",
"(",
"'-d'",
",",
"'--dem'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Selecciona BD de demanda (BD de PVPC por defecto)'",
")",
"p",
".",
"add_argument",
"(",
"'-i'",
",",
"'--info'",
",",
"action",
"=",
"'store'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"\"Muestra información de la BD seleccionada. \"",
"\"* Puede usar intervalos temporales y nombres de columnas, \"",
"\"como '-i gen noc 2017-01-24 2017-01-26'\"",
")",
"p",
".",
"add_argument",
"(",
"'-fu'",
",",
"'-FU'",
",",
"'--forceupdate'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Fuerza la reconstrucción total de la BD seleccionada\")",
"",
"p",
".",
"add_argument",
"(",
"'-u'",
",",
"'-U'",
",",
"'--update'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Actualiza la información de la BD seleccionada hasta el instante actual\")",
"",
"p",
".",
"add_argument",
"(",
"'-p'",
",",
"'--plot'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Genera plots de la información filtrada de la BD\")",
"",
"p",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Muestra información extra')",
"",
"arguments",
"=",
"p",
".",
"parse_args",
"(",
")",
"return",
"arguments",
",",
"p",
"def",
"_parse_date",
"(",
"string",
",",
"columns",
")",
":",
"try",
":",
"ts",
"=",
"pd",
".",
"Timestamp",
"(",
"string",
")",
"print_cyan",
"(",
"'{} es timestamp: {:%c} --> {}'",
".",
"format",
"(",
"string",
",",
"ts",
",",
"ts",
".",
"date",
"(",
")",
")",
")",
"columns",
".",
"remove",
"(",
"string",
")",
"return",
"ts",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
"except",
"ValueError",
":",
"pass",
"args",
",",
"parser",
"=",
"_get_parser_args",
"(",
")",
"print_secc",
"(",
"'ESIOS PVPC/DEMANDA'",
")",
"if",
"args",
".",
"dem",
":",
"db_web",
"=",
"DatosREE",
"(",
"update",
"=",
"args",
".",
"update",
",",
"force_update",
"=",
"args",
".",
"forceupdate",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"else",
":",
"db_web",
"=",
"PVPC",
"(",
"update",
"=",
"args",
".",
"update",
",",
"force_update",
"=",
"args",
".",
"forceupdate",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"data",
"=",
"db_web",
".",
"data",
"[",
"'data'",
"]",
"if",
"args",
".",
"info",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"args",
".",
"info",
")",
">",
"0",
":",
"cols",
"=",
"args",
".",
"info",
".",
"copy",
"(",
")",
"dates",
"=",
"[",
"d",
"for",
"d",
"in",
"[",
"_parse_date",
"(",
"s",
",",
"cols",
")",
"for",
"s",
"in",
"args",
".",
"info",
"]",
"if",
"d",
"]",
"if",
"len",
"(",
"dates",
")",
"==",
"2",
":",
"data",
"=",
"data",
".",
"loc",
"[",
"dates",
"[",
"0",
"]",
":",
"dates",
"[",
"1",
"]",
"]",
"elif",
"len",
"(",
"dates",
")",
"==",
"1",
":",
"data",
"=",
"data",
".",
"loc",
"[",
"dates",
"[",
"0",
"]",
"]",
"if",
"len",
"(",
"cols",
")",
">",
"0",
":",
"try",
":",
"data",
"=",
"data",
"[",
"[",
"c",
".",
"upper",
"(",
")",
"for",
"c",
"in",
"cols",
"]",
"]",
"except",
"KeyError",
"as",
"e",
":",
"print_red",
"(",
"'NO SE PUEDE FILTRAR LA COLUMNA (Exception: {})\\nLAS COLUMNAS DISPONIBLES SON:\\n{}'",
".",
"format",
"(",
"e",
",",
"data",
".",
"columns",
")",
")",
"print_info",
"(",
"data",
")",
"else",
":",
"print_secc",
"(",
"'LAST 24h in DB:'",
")",
"print_info",
"(",
"data",
".",
"iloc",
"[",
"-",
"24",
":",
"]",
")",
"print_cyan",
"(",
"data",
".",
"columns",
")",
"if",
"args",
".",
"plot",
":",
"if",
"args",
".",
"dem",
":",
"from",
"esiosdata",
".",
"pvpcplot",
"import",
"pvpcplot_tarifas_hora",
",",
"pvpcplot_grid_hora",
"print_red",
"(",
"'IMPLEMENTAR PLOTS DEM'",
")",
"else",
":",
"from",
"esiosdata",
".",
"pvpcplot",
"import",
"pvpcplot_tarifas_hora",
",",
"pvpcplot_grid_hora",
"if",
"len",
"(",
"data",
")",
"<",
"750",
":",
"pvpcplot_grid_hora",
"(",
"data",
")",
"# pvpcplot_tarifas_hora(data)",
"else",
":",
"print_red",
"(",
"'La selección para plot es excesiva: {} samples de {} a {}\\nSe hace plot de las últimas 24h'.",
"",
"format",
"(",
"len",
"(",
"data",
")",
",",
"data",
".",
"index",
"[",
"0",
"]",
",",
"data",
".",
"index",
"[",
"-",
"1",
"]",
")",
")",
"pvpcplot_grid_hora",
"(",
"db_web",
".",
"data",
"[",
"'data'",
"]",
".",
"iloc",
"[",
"-",
"24",
":",
"]",
")",
"pvpcplot_tarifas_hora",
"(",
"db_web",
".",
"data",
"[",
"'data'",
"]",
".",
"iloc",
"[",
"-",
"24",
":",
"]",
")"
] | Updates the local PVPC/DEMANDA database stored as a dataframe,
creating a new one if it does not exist or if any problem arises. The recorded data are stored in HDF5 | [
"Actualiza",
"la",
"base",
"de",
"datos",
"de",
"PVPC",
"/",
"DEMANDA",
"almacenados",
"como",
"dataframe",
"en",
"local",
"creando",
"una",
"nueva",
"si",
"no",
"existe",
"o",
"hubiere",
"algún",
"problema",
".",
"Los",
"datos",
"registrados",
"se",
"guardan",
"en",
"HDF5"
] | python | valid |
dlecocq/nsq-py | nsq/sockets/base.py | https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/sockets/base.py#L31-L38 | def sendall(self, data, flags=0):
'''Same as socket.sendall'''
count = len(data)
while count:
sent = self.send(data, flags)
# This could probably be a buffer object
data = data[sent:]
count -= sent | [
"def",
"sendall",
"(",
"self",
",",
"data",
",",
"flags",
"=",
"0",
")",
":",
"count",
"=",
"len",
"(",
"data",
")",
"while",
"count",
":",
"sent",
"=",
"self",
".",
"send",
"(",
"data",
",",
"flags",
")",
"# This could probably be a buffer object",
"data",
"=",
"data",
"[",
"sent",
":",
"]",
"count",
"-=",
"sent"
] | Same as socket.sendall | [
"Same",
"as",
"socket",
".",
"sendall"
] | python | train |
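The partial-send loop is the standard pattern for transports whose `send` may accept fewer bytes than offered; a self-contained check with a stub transport (the 3-byte cap is invented):

```python
# Self-contained check of the sendall loop with a throttled stub transport.
class StubTransport(object):
    def __init__(self):
        self.received = b''

    def send(self, data, flags=0):
        chunk = data[:3]              # pretend the kernel accepted 3 bytes
        self.received += chunk
        return len(chunk)

    def sendall(self, data, flags=0):  # same loop as the record above
        count = len(data)
        while count:
            sent = self.send(data, flags)
            data = data[sent:]
            count -= sent

t = StubTransport()
t.sendall(b'hello world')
assert t.received == b'hello world'
```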
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L231-L259 | def _read_elem_elements(self, fid):
"""Read all FE elements from the file stream. Elements are stored in
the self.element_data dict. The keys refer to the element types:
* 3: Triangular grid (three nodes)
* 8: Quadrangular grid (four nodes)
* 11: Mixed boundary element
* 12: Neumann (no-flow) boundary element
"""
elements = {}
# read elements
for element_type in range(0, self.header['nr_element_types']):
element_list = []
for element_coordinates in range(
0, self.header['element_infos'][element_type, 1]):
element_coordinates_line = fid.readline().lstrip()
tmp_element = self.element()
tmp_element.nodes = np.fromstring(element_coordinates_line,
dtype=int, sep=' ')
tmp_element.xcoords = self.nodes['presort'][tmp_element.nodes -
1, 1]
tmp_element.zcoords = self.nodes['presort'][tmp_element.nodes -
1, 2]
element_list.append(tmp_element)
element_type_number = self.header['element_infos'][element_type, 0]
elements[element_type_number] = element_list
self.element_data = elements | [
"def",
"_read_elem_elements",
"(",
"self",
",",
"fid",
")",
":",
"elements",
"=",
"{",
"}",
"# read elements",
"for",
"element_type",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'nr_element_types'",
"]",
")",
":",
"element_list",
"=",
"[",
"]",
"for",
"element_coordinates",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'element_infos'",
"]",
"[",
"element_type",
",",
"1",
"]",
")",
":",
"element_coordinates_line",
"=",
"fid",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"tmp_element",
"=",
"self",
".",
"element",
"(",
")",
"tmp_element",
".",
"nodes",
"=",
"np",
".",
"fromstring",
"(",
"element_coordinates_line",
",",
"dtype",
"=",
"int",
",",
"sep",
"=",
"' '",
")",
"tmp_element",
".",
"xcoords",
"=",
"self",
".",
"nodes",
"[",
"'presort'",
"]",
"[",
"tmp_element",
".",
"nodes",
"-",
"1",
",",
"1",
"]",
"tmp_element",
".",
"zcoords",
"=",
"self",
".",
"nodes",
"[",
"'presort'",
"]",
"[",
"tmp_element",
".",
"nodes",
"-",
"1",
",",
"2",
"]",
"element_list",
".",
"append",
"(",
"tmp_element",
")",
"element_type_number",
"=",
"self",
".",
"header",
"[",
"'element_infos'",
"]",
"[",
"element_type",
",",
"0",
"]",
"elements",
"[",
"element_type_number",
"]",
"=",
"element_list",
"self",
".",
"element_data",
"=",
"elements"
] | Read all FE elements from the file stream. Elements are stored in
the self.element_data dict. The keys refer to the element types:
* 3: Triangular grid (three nodes)
* 8: Quadrangular grid (four nodes)
* 11: Mixed boundary element
* 12: Neumann (no-flow) boundary element | [
"Read",
"all",
"FE",
"elements",
"from",
"the",
"file",
"stream",
".",
"Elements",
"are",
"stored",
"in",
"the",
"self",
".",
"element_data",
"dict",
".",
"The",
"keys",
"refer",
"to",
"the",
"element",
"types",
":"
] | python | train |
Alignak-monitoring/alignak | alignak/basemodule.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/basemodule.py#L212-L232 | def clear_queues(self, manager):
"""Release the resources associated to the queues of this instance
:param manager: Manager() object
:type manager: None | object
:return: None
"""
for queue in (self.to_q, self.from_q):
if queue is None:
continue
# If we got no manager, we directly call the clean
if not manager:
try:
queue.close()
queue.join_thread()
except AttributeError:
pass
# else:
# q._callmethod('close')
# q._callmethod('join_thread')
self.to_q = self.from_q = None | [
"def",
"clear_queues",
"(",
"self",
",",
"manager",
")",
":",
"for",
"queue",
"in",
"(",
"self",
".",
"to_q",
",",
"self",
".",
"from_q",
")",
":",
"if",
"queue",
"is",
"None",
":",
"continue",
"# If we got no manager, we directly call the clean",
"if",
"not",
"manager",
":",
"try",
":",
"queue",
".",
"close",
"(",
")",
"queue",
".",
"join_thread",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"# else:",
"# q._callmethod('close')",
"# q._callmethod('join_thread')",
"self",
".",
"to_q",
"=",
"self",
".",
"from_q",
"=",
"None"
] | Release the resources associated to the queues of this instance
:param manager: Manager() object
:type manager: None | object
:return: None | [
"Release",
"the",
"resources",
"associated",
"to",
"the",
"queues",
"of",
"this",
"instance"
] | python | train |
TaurusOlson/fntools | fntools/fntools.py | https://github.com/TaurusOlson/fntools/blob/316080c7b5bfdd88c9f3fac4a67deb5be3c319e5/fntools/fntools.py#L595-L608 | def find(fn, record):
"""Apply a function on the record and return the corresponding new record
:param fn: a function
:param record: a dictionary
:returns: a dictionary
>>> find(max, {'Terry': 30, 'Graham': 35, 'John': 27})
{'Graham': 35}
"""
values_result = fn(record.values())
keys_result = [k for k, v in record.items() if v == values_result]
return {keys_result[0]: values_result} | [
"def",
"find",
"(",
"fn",
",",
"record",
")",
":",
"values_result",
"=",
"fn",
"(",
"record",
".",
"values",
"(",
")",
")",
"keys_result",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"record",
".",
"items",
"(",
")",
"if",
"v",
"==",
"values_result",
"]",
"return",
"{",
"keys_result",
"[",
"0",
"]",
":",
"values_result",
"}"
] | Apply a function on the record and return the corresponding new record
:param fn: a function
:param record: a dictionary
:returns: a dictionary
>>> find(max, {'Terry': 30, 'Graham': 35, 'John': 27})
{'Graham': 35} | [
"Apply",
"a",
"function",
"on",
"the",
"record",
"and",
"return",
"the",
"corresponding",
"new",
"record"
] | python | train |
belbio/bel | bel/lang/migrate_1_2.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L128-L144 | def convert_activity(ast):
"""Convert BEL1 activities to BEL2 act()"""
if len(ast.args) > 1:
log.error(f"Activity should not have more than 1 argument {ast.to_string()}")
p_arg = ast.args[0] # protein argument
print("p_arg", p_arg)
ma_arg = Function("ma", bo.spec)
ma_arg.add_argument(StrArg(ast.name, ma_arg))
p_arg.change_parent_fn(ma_arg)
ast = Function("activity", bo.spec)
p_arg.change_parent_fn(ast)
ast.add_argument(p_arg)
ast.add_argument(ma_arg)
return ast | [
"def",
"convert_activity",
"(",
"ast",
")",
":",
"if",
"len",
"(",
"ast",
".",
"args",
")",
">",
"1",
":",
"log",
".",
"error",
"(",
"f\"Activity should not have more than 1 argument {ast.to_string()}\"",
")",
"p_arg",
"=",
"ast",
".",
"args",
"[",
"0",
"]",
"# protein argument",
"print",
"(",
"\"p_arg\"",
",",
"p_arg",
")",
"ma_arg",
"=",
"Function",
"(",
"\"ma\"",
",",
"bo",
".",
"spec",
")",
"ma_arg",
".",
"add_argument",
"(",
"StrArg",
"(",
"ast",
".",
"name",
",",
"ma_arg",
")",
")",
"p_arg",
".",
"change_parent_fn",
"(",
"ma_arg",
")",
"ast",
"=",
"Function",
"(",
"\"activity\"",
",",
"bo",
".",
"spec",
")",
"p_arg",
".",
"change_parent_fn",
"(",
"ast",
")",
"ast",
".",
"add_argument",
"(",
"p_arg",
")",
"ast",
".",
"add_argument",
"(",
"ma_arg",
")",
"return",
"ast"
] | Convert BEL1 activities to BEL2 act() | [
"Convert",
"BEL1",
"activities",
"to",
"BEL2",
"act",
"()"
] | python | train |
lsst-epo/vela | astropixie-widgets/astropixie_widgets/visual.py | https://github.com/lsst-epo/vela/blob/8e17ebec509be5c3cc2063f4645dfe9e26b49c18/astropixie-widgets/astropixie_widgets/visual.py#L164-L172 | def hr_diagram(cluster_name, output=None):
"""Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
"""
cluster = get_hr_data(cluster_name)
pf = hr_diagram_figure(cluster)
show_with_bokeh_server(pf) | [
"def",
"hr_diagram",
"(",
"cluster_name",
",",
"output",
"=",
"None",
")",
":",
"cluster",
"=",
"get_hr_data",
"(",
"cluster_name",
")",
"pf",
"=",
"hr_diagram_figure",
"(",
"cluster",
")",
"show_with_bokeh_server",
"(",
"pf",
")"
] | Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it. | [
"Create",
"a",
":",
"class",
":",
"~bokeh",
".",
"plotting",
".",
"figure",
".",
"Figure",
"to",
"create",
"an",
"H",
"-",
"R",
"diagram",
"using",
"the",
"cluster_name",
";",
"then",
"show",
"it",
"."
] | python | valid |
kwikteam/phy | phy/gui/actions.py | https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/actions.py#L285-L289 | def remove(self, name):
"""Remove an action."""
self.gui.removeAction(self._actions_dict[name].qaction)
del self._actions_dict[name]
delattr(self, name) | [
"def",
"remove",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"gui",
".",
"removeAction",
"(",
"self",
".",
"_actions_dict",
"[",
"name",
"]",
".",
"qaction",
")",
"del",
"self",
".",
"_actions_dict",
"[",
"name",
"]",
"delattr",
"(",
"self",
",",
"name",
")"
] | Remove an action. | [
"Remove",
"an",
"action",
"."
] | python | train |
rwl/pylon | pylon/io/psat.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/psat.py#L245-L278 | def _get_demand_array_construct(self):
""" Returns a construct for an array of power demand data.
"""
bus_no = integer.setResultsName("bus_no")
s_rating = real.setResultsName("s_rating") # MVA
p_direction = real.setResultsName("p_direction") # p.u.
q_direction = real.setResultsName("q_direction") # p.u.
p_bid_max = real.setResultsName("p_bid_max") # p.u.
p_bid_min = real.setResultsName("p_bid_min") # p.u.
p_optimal_bid = Optional(real).setResultsName("p_optimal_bid")
p_fixed = real.setResultsName("p_fixed") # $/hr
p_proportional = real.setResultsName("p_proportional") # $/MWh
p_quadratic = real.setResultsName("p_quadratic") # $/MW^2h
q_fixed = real.setResultsName("q_fixed") # $/hr
q_proportional = real.setResultsName("q_proportional") # $/MVArh
q_quadratic = real.setResultsName("q_quadratic") # $/MVAr^2h
commitment = boolean.setResultsName("commitment")
cost_tie_break = real.setResultsName("cost_tie_break") # $/MWh
cost_cong_up = real.setResultsName("cost_cong_up") # $/h
cost_cong_down = real.setResultsName("cost_cong_down") # $/h
status = Optional(boolean).setResultsName("status")
demand_data = bus_no + s_rating + p_direction + q_direction + \
p_bid_max + p_bid_min + p_optimal_bid + p_fixed + \
p_proportional + p_quadratic + q_fixed + q_proportional + \
q_quadratic + commitment + cost_tie_break + cost_cong_up + \
cost_cong_down + status + scolon
demand_data.setParseAction(self.push_demand)
demand_array = Literal("Demand.con") + "=" + "[" + "..." + \
ZeroOrMore(demand_data + Optional("]" + scolon))
return demand_array | [
"def",
"_get_demand_array_construct",
"(",
"self",
")",
":",
"bus_no",
"=",
"integer",
".",
"setResultsName",
"(",
"\"bus_no\"",
")",
"s_rating",
"=",
"real",
".",
"setResultsName",
"(",
"\"s_rating\"",
")",
"# MVA",
"p_direction",
"=",
"real",
".",
"setResultsName",
"(",
"\"p_direction\"",
")",
"# p.u.",
"q_direction",
"=",
"real",
".",
"setResultsName",
"(",
"\"q_direction\"",
")",
"# p.u.",
"p_bid_max",
"=",
"real",
".",
"setResultsName",
"(",
"\"p_bid_max\"",
")",
"# p.u.",
"p_bid_min",
"=",
"real",
".",
"setResultsName",
"(",
"\"p_bid_min\"",
")",
"# p.u.",
"p_optimal_bid",
"=",
"Optional",
"(",
"real",
")",
".",
"setResultsName",
"(",
"\"p_optimal_bid\"",
")",
"p_fixed",
"=",
"real",
".",
"setResultsName",
"(",
"\"p_fixed\"",
")",
"# $/hr",
"p_proportional",
"=",
"real",
".",
"setResultsName",
"(",
"\"p_proportional\"",
")",
"# $/MWh",
"p_quadratic",
"=",
"real",
".",
"setResultsName",
"(",
"\"p_quadratic\"",
")",
"# $/MW^2h",
"q_fixed",
"=",
"real",
".",
"setResultsName",
"(",
"\"q_fixed\"",
")",
"# $/hr",
"q_proportional",
"=",
"real",
".",
"setResultsName",
"(",
"\"q_proportional\"",
")",
"# $/MVArh",
"q_quadratic",
"=",
"real",
".",
"setResultsName",
"(",
"\"q_quadratic\"",
")",
"# $/MVAr^2h",
"commitment",
"=",
"boolean",
".",
"setResultsName",
"(",
"\"commitment\"",
")",
"cost_tie_break",
"=",
"real",
".",
"setResultsName",
"(",
"\"cost_tie_break\"",
")",
"# $/MWh",
"cost_cong_up",
"=",
"real",
".",
"setResultsName",
"(",
"\"cost_cong_up\"",
")",
"# $/h",
"cost_cong_down",
"=",
"real",
".",
"setResultsName",
"(",
"\"cost_cong_down\"",
")",
"# $/h",
"status",
"=",
"Optional",
"(",
"boolean",
")",
".",
"setResultsName",
"(",
"\"status\"",
")",
"demand_data",
"=",
"bus_no",
"+",
"s_rating",
"+",
"p_direction",
"+",
"q_direction",
"+",
"p_bid_max",
"+",
"p_bid_min",
"+",
"p_optimal_bid",
"+",
"p_fixed",
"+",
"p_proportional",
"+",
"p_quadratic",
"+",
"q_fixed",
"+",
"q_proportional",
"+",
"q_quadratic",
"+",
"commitment",
"+",
"cost_tie_break",
"+",
"cost_cong_up",
"+",
"cost_cong_down",
"+",
"status",
"+",
"scolon",
"demand_data",
".",
"setParseAction",
"(",
"self",
".",
"push_demand",
")",
"demand_array",
"=",
"Literal",
"(",
"\"Demand.con\"",
")",
"+",
"\"=\"",
"+",
"\"[\"",
"+",
"\"...\"",
"+",
"ZeroOrMore",
"(",
"demand_data",
"+",
"Optional",
"(",
"\"]\"",
"+",
"scolon",
")",
")",
"return",
"demand_array"
] | Returns a construct for an array of power demand data. | [
"Returns",
"a",
"construct",
"for",
"an",
"array",
"of",
"power",
"demand",
"data",
"."
] | python | train |
nwilming/ocupy | ocupy/stimuli.py | https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/stimuli.py#L219-L278 | def DirectoryStimuliFactory(loader):
"""
Takes an input path to the images folder of an experiment and generates
automatically the category - filenumber list needed to construct an
appropriate _categories object.
Parameters :
loader : Loader object which contains
impath : string
path to the input, i.e. image-, files of the experiment. All
subfolders in that path will be treated as categories. If no
subfolders are present, category 1 will be assigned and all
files in the folder are considered input images.
Images have to end in '.png'.
ftrpath : string
path to the feature folder. It is expected that the folder
structure corresponds to the structure in impath, i.e.
ftrpath/category/featurefolder/featuremap.mat
Furthermore, features are assumed to be the same for all
categories.
"""
impath = loader.impath
ftrpath = loader.ftrpath
# checks whether user has reading permission for the path
assert os.access(impath, os.R_OK)
assert os.access(ftrpath, os.R_OK)
# EXTRACTING IMAGE NAMES
img_per_cat = {}
# extract only directories in the given folder
subfolders = [name for name in os.listdir(impath) if os.path.isdir(
os.path.join(impath, name))]
# if there are no subfolders, walk through files. Take 1 as key for the
# categories object
if not subfolders:
[_, _, files] = next(os.walk(os.path.join(impath)))
# this only takes entries that end with '.png'
entries = {1:
[int(cur_file[cur_file.find('_')+1:-4]) for cur_file
in files if cur_file.endswith('.png')]}
img_per_cat.update(entries)
subfolders = ['']
# if there are subfolders, walk through them
else:
for directory in subfolders:
[_, _, files] = next(os.walk(os.path.join(impath, directory)))
# this only takes entries that end with '.png'. Strips ending and
# considers everything after the first '_' as the imagenumber
imagenumbers = [int(cur_file[cur_file.find('_')+1:-4])
for cur_file in files
if (cur_file.endswith('.png') & (len(cur_file) > 4))]
entries = {int(directory): imagenumbers}
img_per_cat.update(entries)
del directory
del imagenumbers
# in case subfolders do not exist, '' is appended here.
_, features, files = next(os.walk(os.path.join(ftrpath,
subfolders[0])))
return Categories(loader, img_per_cat = img_per_cat, features = features) | [
"def",
"DirectoryStimuliFactory",
"(",
"loader",
")",
":",
"impath",
"=",
"loader",
".",
"impath",
"ftrpath",
"=",
"loader",
".",
"ftrpath",
"# checks whether user has reading permission for the path",
"assert",
"os",
".",
"access",
"(",
"impath",
",",
"os",
".",
"R_OK",
")",
"assert",
"os",
".",
"access",
"(",
"ftrpath",
",",
"os",
".",
"R_OK",
")",
"# EXTRACTING IMAGE NAMES",
"img_per_cat",
"=",
"{",
"}",
"# extract only directories in the given folder",
"subfolders",
"=",
"[",
"name",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"impath",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"impath",
",",
"name",
")",
")",
"]",
"# if there are no subfolders, walk through files. Take 1 as key for the ",
"# categories object",
"if",
"not",
"subfolders",
":",
"[",
"_",
",",
"_",
",",
"files",
"]",
"=",
"next",
"(",
"os",
".",
"walk",
"(",
"os",
".",
"path",
".",
"join",
"(",
"impath",
")",
")",
")",
"# this only takes entries that end with '.png'",
"entries",
"=",
"{",
"1",
":",
"[",
"int",
"(",
"cur_file",
"[",
"cur_file",
".",
"find",
"(",
"'_'",
")",
"+",
"1",
":",
"-",
"4",
"]",
")",
"for",
"cur_file",
"in",
"files",
"if",
"cur_file",
".",
"endswith",
"(",
"'.png'",
")",
"]",
"}",
"img_per_cat",
".",
"update",
"(",
"entries",
")",
"subfolders",
"=",
"[",
"''",
"]",
"# if there are subfolders, walk through them",
"else",
":",
"for",
"directory",
"in",
"subfolders",
":",
"[",
"_",
",",
"_",
",",
"files",
"]",
"=",
"next",
"(",
"os",
".",
"walk",
"(",
"os",
".",
"path",
".",
"join",
"(",
"impath",
",",
"directory",
")",
")",
")",
"# this only takes entries that end with '.png'. Strips ending and",
"# considers everything after the first '_' as the imagenumber",
"imagenumbers",
"=",
"[",
"int",
"(",
"cur_file",
"[",
"cur_file",
".",
"find",
"(",
"'_'",
")",
"+",
"1",
":",
"-",
"4",
"]",
")",
"for",
"cur_file",
"in",
"files",
"if",
"(",
"cur_file",
".",
"endswith",
"(",
"'.png'",
")",
"&",
"(",
"len",
"(",
"cur_file",
")",
">",
"4",
")",
")",
"]",
"entries",
"=",
"{",
"int",
"(",
"directory",
")",
":",
"imagenumbers",
"}",
"img_per_cat",
".",
"update",
"(",
"entries",
")",
"del",
"directory",
"del",
"imagenumbers",
"# in case subfolders do not exist, '' is appended here.",
"_",
",",
"features",
",",
"files",
"=",
"next",
"(",
"os",
".",
"walk",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ftrpath",
",",
"subfolders",
"[",
"0",
"]",
")",
")",
")",
"return",
"Categories",
"(",
"loader",
",",
"img_per_cat",
"=",
"img_per_cat",
",",
"features",
"=",
"features",
")"
] | Takes an input path to the images folder of an experiment and generates
automatically the category - filenumber list needed to construct an
appropriate _categories object.
Parameters :
loader : Loader object which contains
impath : string
path to the input, i.e. image-, files of the experiment. All
subfolders in that path will be treated as categories. If no
subfolders are present, category 1 will be assigned and all
files in the folder are considered input images.
Images have to end in '.png'.
ftrpath : string
path to the feature folder. It is expected that the folder
structure corresponds to the structure in impath, i.e.
ftrpath/category/featurefolder/featuremap.mat
Furthermore, features are assumed to be the same for all
categories. | [
"Takes",
"an",
"input",
"path",
"to",
"the",
"images",
"folder",
"of",
"an",
"experiment",
"and",
"generates",
"automatically",
"the",
"category",
"-",
"filenumber",
"list",
"needed",
"to",
"construct",
"an",
"appropriate",
"_categories",
"object",
".",
"Parameters",
":",
"loader",
":",
"Loader",
"object",
"which",
"contains",
"impath",
":",
"string",
"path",
"to",
"the",
"input",
"i",
".",
"e",
".",
"image",
"-",
"files",
"of",
"the",
"experiment",
".",
"All",
"subfolders",
"in",
"that",
"path",
"will",
"be",
"treated",
"as",
"categories",
".",
"If",
"no",
"subfolders",
"are",
"present",
"category",
"1",
"will",
"be",
"assigned",
"and",
"all",
"files",
"in",
"the",
"folder",
"are",
"considered",
"input",
"images",
".",
"Images",
"have",
"to",
"end",
"in",
".",
"png",
".",
"ftrpath",
":",
"string",
"path",
"to",
"the",
"feature",
"folder",
".",
"It",
"is",
"expected",
"that",
"the",
"folder",
"structure",
"corresponds",
"to",
"the",
"structure",
"in",
"impath",
"i",
".",
"e",
".",
"ftrpath",
"/",
"category",
"/",
"featurefolder",
"/",
"featuremap",
".",
"mat",
"Furthermore",
"features",
"are",
"assumed",
"to",
"be",
"the",
"same",
"for",
"all",
"categories",
"."
] | python | train |
influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/client.py#L454-L514 | def write_points(self,
points,
time_precision=None,
database=None,
retention_policy=None,
tags=None,
batch_size=None,
protocol='json',
consistency=None
):
"""Write to multiple time series names.
:param points: the list of points to be written in the database
:type points: list of dictionaries, each dictionary represents a point
:type points: (if protocol is 'json') list of dicts, where each dict
represents a point.
(if protocol is 'line') sequence of line protocol strings.
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
:param database: the database to write the points to. Defaults to
the client's current database
:type database: str
:param tags: a set of key-value pairs associated with each point. Both
keys and values must be strings. These are shared tags and will be
merged with point-specific tags, defaults to None
:type tags: dict
:param retention_policy: the retention policy for the points. Defaults
to None
:type retention_policy: str
:param batch_size: value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation,
defaults to None
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:type protocol: str
:param consistency: Consistency for the points.
One of {'any','one','quorum','all'}.
:type consistency: str
:returns: True, if the operation is successful
:rtype: bool
.. note:: if no retention policy is specified, the default retention
policy for the database is used
"""
if batch_size and batch_size > 0:
for batch in self._batches(points, batch_size):
self._write_points(points=batch,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags, protocol=protocol,
consistency=consistency)
return True
return self._write_points(points=points,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags, protocol=protocol,
consistency=consistency) | [
"def",
"write_points",
"(",
"self",
",",
"points",
",",
"time_precision",
"=",
"None",
",",
"database",
"=",
"None",
",",
"retention_policy",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"protocol",
"=",
"'json'",
",",
"consistency",
"=",
"None",
")",
":",
"if",
"batch_size",
"and",
"batch_size",
">",
"0",
":",
"for",
"batch",
"in",
"self",
".",
"_batches",
"(",
"points",
",",
"batch_size",
")",
":",
"self",
".",
"_write_points",
"(",
"points",
"=",
"batch",
",",
"time_precision",
"=",
"time_precision",
",",
"database",
"=",
"database",
",",
"retention_policy",
"=",
"retention_policy",
",",
"tags",
"=",
"tags",
",",
"protocol",
"=",
"protocol",
",",
"consistency",
"=",
"consistency",
")",
"return",
"True",
"return",
"self",
".",
"_write_points",
"(",
"points",
"=",
"points",
",",
"time_precision",
"=",
"time_precision",
",",
"database",
"=",
"database",
",",
"retention_policy",
"=",
"retention_policy",
",",
"tags",
"=",
"tags",
",",
"protocol",
"=",
"protocol",
",",
"consistency",
"=",
"consistency",
")"
] | Write to multiple time series names.
:param points: the list of points to be written in the database
:type points: list of dictionaries, each dictionary represents a point
:type points: (if protocol is 'json') list of dicts, where each dict
represents a point.
(if protocol is 'line') sequence of line protocol strings.
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
:param database: the database to write the points to. Defaults to
the client's current database
:type database: str
:param tags: a set of key-value pairs associated with each point. Both
keys and values must be strings. These are shared tags and will be
merged with point-specific tags, defaults to None
:type tags: dict
:param retention_policy: the retention policy for the points. Defaults
to None
:type retention_policy: str
:param batch_size: value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation,
defaults to None
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:type protocol: str
:param consistency: Consistency for the points.
One of {'any','one','quorum','all'}.
:type consistency: str
:returns: True, if the operation is successful
:rtype: bool
.. note:: if no retention policy is specified, the default retention
policy for the database is used | [
"Write",
"to",
"multiple",
"time",
"series",
"names",
"."
] | python | train |
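
A minimal usage sketch for write_points, assuming a reachable InfluxDB at localhost:8086 and a database named 'mydb' (both placeholders):

    from influxdb import InfluxDBClient

    client = InfluxDBClient(host='localhost', port=8086, database='mydb')
    points = [{
        'measurement': 'cpu_load',
        'tags': {'host': 'server01'},
        'fields': {'value': 0.64},
    }]
    # Shared tags are merged into every point; batch_size chunks large writes.
    client.write_points(points, tags={'region': 'eu'}, batch_size=1000)
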
earwig/mwparserfromhell | mwparserfromhell/wikicode.py | https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/wikicode.py#L282-L298 | def set(self, index, value):
"""Set the ``Node`` at *index* to *value*.
Raises :exc:`IndexError` if *index* is out of range, or
:exc:`ValueError` if *value* cannot be coerced into one :class:`.Node`.
To insert multiple nodes at an index, use :meth:`get` with either
:meth:`remove` and :meth:`insert` or :meth:`replace`.
"""
nodes = parse_anything(value).nodes
if len(nodes) > 1:
raise ValueError("Cannot coerce multiple nodes into one index")
if index >= len(self.nodes) or -1 * index > len(self.nodes):
raise IndexError("List assignment index out of range")
if nodes:
self.nodes[index] = nodes[0]
else:
self.nodes.pop(index) | [
"def",
"set",
"(",
"self",
",",
"index",
",",
"value",
")",
":",
"nodes",
"=",
"parse_anything",
"(",
"value",
")",
".",
"nodes",
"if",
"len",
"(",
"nodes",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Cannot coerce multiple nodes into one index\"",
")",
"if",
"index",
">=",
"len",
"(",
"self",
".",
"nodes",
")",
"or",
"-",
"1",
"*",
"index",
">",
"len",
"(",
"self",
".",
"nodes",
")",
":",
"raise",
"IndexError",
"(",
"\"List assignment index out of range\"",
")",
"if",
"nodes",
":",
"self",
".",
"nodes",
"[",
"index",
"]",
"=",
"nodes",
"[",
"0",
"]",
"else",
":",
"self",
".",
"nodes",
".",
"pop",
"(",
"index",
")"
] | Set the ``Node`` at *index* to *value*.
Raises :exc:`IndexError` if *index* is out of range, or
:exc:`ValueError` if *value* cannot be coerced into one :class:`.Node`.
To insert multiple nodes at an index, use :meth:`get` with either
:meth:`remove` and :meth:`insert` or :meth:`replace`. | [
"Set",
"the",
"Node",
"at",
"*",
"index",
"*",
"to",
"*",
"value",
"*",
"."
] | python | train |
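
The single-node constraint in set() is easy to see in practice (a short sketch, assuming mwparserfromhell is installed):

    import mwparserfromhell

    code = mwparserfromhell.parse('Hello {{tmpl}}')
    code.set(0, 'Goodbye')              # one replacement node: allowed
    assert str(code) == 'Goodbye{{tmpl}}'
    try:
        code.set(0, 'two {{nodes}}')    # text + template parse to two nodes
    except ValueError:
        pass                            # cannot coerce multiple nodes into one index
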
kata198/QueryableList | QueryableList/Base.py | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Base.py#L22-L86 | def getFiltersFromArgs(kwargs):
'''
getFiltersFromArgs - Returns a dictionary of each filter type, and the corresponding field/value
@param kwargs <dict> - Dictionary of filter arguments
@return - Dictionary of each filter type (minus the ones that are optimized into others), each containing a list of tuples, (fieldName, matchingValue)
'''
# Create a copy of each possible filter in FILTER_TYPES and link to empty list.
# This object will be filled with all of the filters requested
ret = { filterType : list() for filterType in FILTER_TYPES }
for key, value in kwargs.items():
matchObj = FILTER_PARAM_RE.match(key)
if not matchObj:
# Default ( no __$oper) is eq
filterType = 'eq'
field = key
else:
# We have an operation defined, extract it, and optimize if possible
# (like if op is a case-insensitive, lowercase the value here)
groupDict = matchObj.groupdict()
filterType = groupDict['filterType']
field = groupDict['field']
if filterType not in FILTER_TYPES:
raise ValueError('Unknown filter type: %s. Choices are: (%s)' %(filterType, ', '.join(FILTER_TYPES)))
if filterType == 'isnull':
# Convert "isnull" to one of the "is" or "isnot" filters against None
if type(value) is not bool:
raise ValueError('Filter type "isnull" requires True/False.')
if value is True:
filterType = "is"
else:
filterType = "isnot"
value = None
elif filterType in ('in', 'notin'):
# Try to make more efficient by making a set. Fallback to just using what they provide, could be an object implementing "in"
try:
value = set(value)
except:
pass
# Optimization - if case-insensitive, lowercase the comparison value here
elif filterType in ('ieq', 'ine', 'icontains', 'noticontains'):
value = value.lower()
elif filterType.startswith('split'):
if (not issubclass(type(value), tuple) and not issubclass(type(value), list)) or len(value) != 2:
raise ValueError('Filter type %s expects a tuple of two params. (splitBy, matchPortion)' %(filterType,))
ret[filterType].append( (field, value) )
return ret | [
"def",
"getFiltersFromArgs",
"(",
"kwargs",
")",
":",
"# Create a copy of each possible filter in FILTER_TYPES and link to empty list.",
"# This object will be filled with all of the filters requested",
"ret",
"=",
"{",
"filterType",
":",
"list",
"(",
")",
"for",
"filterType",
"in",
"FILTER_TYPES",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"matchObj",
"=",
"FILTER_PARAM_RE",
".",
"match",
"(",
"key",
")",
"if",
"not",
"matchObj",
":",
"# Default ( no __$oper) is eq",
"filterType",
"=",
"'eq'",
"field",
"=",
"key",
"else",
":",
"# We have an operation defined, extract it, and optimize if possible",
"# (like if op is a case-insensitive, lowercase the value here)",
"groupDict",
"=",
"matchObj",
".",
"groupdict",
"(",
")",
"filterType",
"=",
"groupDict",
"[",
"'filterType'",
"]",
"field",
"=",
"groupDict",
"[",
"'field'",
"]",
"if",
"filterType",
"not",
"in",
"FILTER_TYPES",
":",
"raise",
"ValueError",
"(",
"'Unknown filter type: %s. Choices are: (%s)'",
"%",
"(",
"filterType",
",",
"', '",
".",
"join",
"(",
"FILTER_TYPES",
")",
")",
")",
"if",
"filterType",
"==",
"'isnull'",
":",
"# Convert \"isnull\" to one of the \"is\" or \"isnot\" filters against None",
"if",
"type",
"(",
"value",
")",
"is",
"not",
"bool",
":",
"raise",
"ValueError",
"(",
"'Filter type \"isnull\" requires True/False.'",
")",
"if",
"value",
"is",
"True",
":",
"filterType",
"=",
"\"is\"",
"else",
":",
"filterType",
"=",
"\"isnot\"",
"value",
"=",
"None",
"elif",
"filterType",
"in",
"(",
"'in'",
",",
"'notin'",
")",
":",
"# Try to make more efficient by making a set. Fallback to just using what they provide, could be an object implementing \"in\"",
"try",
":",
"value",
"=",
"set",
"(",
"value",
")",
"except",
":",
"pass",
"# Optimization - if case-insensitive, lowercase the comparison value here",
"elif",
"filterType",
"in",
"(",
"'ieq'",
",",
"'ine'",
",",
"'icontains'",
",",
"'noticontains'",
")",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"elif",
"filterType",
".",
"startswith",
"(",
"'split'",
")",
":",
"if",
"(",
"not",
"issubclass",
"(",
"type",
"(",
"value",
")",
",",
"tuple",
")",
"and",
"not",
"issubclass",
"(",
"type",
"(",
"value",
")",
",",
"list",
")",
")",
"or",
"len",
"(",
"value",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Filter type %s expects a tuple of two params. (splitBy, matchPortion)'",
"%",
"(",
"filterType",
",",
")",
")",
"ret",
"[",
"filterType",
"]",
".",
"append",
"(",
"(",
"field",
",",
"value",
")",
")",
"return",
"ret"
] | getFiltersFromArgs - Returns a dictionary of each filter type, and the corresponding field/value
@param kwargs <dict> - Dictionary of filter arguments
@return - Dictionary of each filter type (minus the ones that are optimized into others), each containing a list of tuples, (fieldName, matchingValue) | [
"getFiltersFromArgs",
"-",
"Returns",
"a",
"dictionary",
"of",
"each",
"filter",
"type",
"and",
"the",
"corrosponding",
"field",
"/",
"value"
] | python | train |
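
A hedged example of the keyword parsing above; the field names and values are invented, and it assumes 'gt' is among the supported FILTER_TYPES:

    from QueryableList.Base import getFiltersFromArgs

    filters = getFiltersFromArgs({'name': 'bob', 'age__gt': 21, 'email__isnull': True})
    # filters['eq'] -> [('name', 'bob')]    (no __oper suffix defaults to eq)
    # filters['gt'] -> [('age', 21)]
    # filters['is'] -> [('email', None)]    (isnull=True is rewritten to "is None")
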
MisterY/gnucash-portfolio | gnucash_portfolio/accounts.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/accounts.py#L197-L217 | def get_transactions(self, date_from: datetime, date_to: datetime) -> List[Transaction]:
""" Returns account transactions """
assert isinstance(date_from, datetime)
assert isinstance(date_to, datetime)
# fix up the parameters as we need datetime
dt_from = Datum()
dt_from.from_datetime(date_from)
dt_from.start_of_day()
dt_to = Datum()
dt_to.from_datetime(date_to)
dt_to.end_of_day()
query = (
self.book.session.query(Transaction)
.join(Split)
.filter(Split.account_guid == self.account.guid)
.filter(Transaction.post_date >= dt_from.date, Transaction.post_date <= dt_to.date)
.order_by(Transaction.post_date)
)
return query.all() | [
"def",
"get_transactions",
"(",
"self",
",",
"date_from",
":",
"datetime",
",",
"date_to",
":",
"datetime",
")",
"->",
"List",
"[",
"Transaction",
"]",
":",
"assert",
"isinstance",
"(",
"date_from",
",",
"datetime",
")",
"assert",
"isinstance",
"(",
"date_to",
",",
"datetime",
")",
"# fix up the parameters as we need datetime",
"dt_from",
"=",
"Datum",
"(",
")",
"dt_from",
".",
"from_datetime",
"(",
"date_from",
")",
"dt_from",
".",
"start_of_day",
"(",
")",
"dt_to",
"=",
"Datum",
"(",
")",
"dt_to",
".",
"from_datetime",
"(",
"date_to",
")",
"dt_to",
".",
"end_of_day",
"(",
")",
"query",
"=",
"(",
"self",
".",
"book",
".",
"session",
".",
"query",
"(",
"Transaction",
")",
".",
"join",
"(",
"Split",
")",
".",
"filter",
"(",
"Split",
".",
"account_guid",
"==",
"self",
".",
"account",
".",
"guid",
")",
".",
"filter",
"(",
"Transaction",
".",
"post_date",
">=",
"dt_from",
".",
"date",
",",
"Transaction",
".",
"post_date",
"<=",
"dt_to",
".",
"date",
")",
".",
"order_by",
"(",
"Transaction",
".",
"post_date",
")",
")",
"return",
"query",
".",
"all",
"(",
")"
] | Returns account transactions | [
"Returns",
"account",
"transactions"
] | python | train |
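
A sketch of querying a month of transactions; `account_agg` stands in for the aggregate object that owns this method, and the attribute names on the result follow piecash's Transaction model:

    from datetime import datetime

    txs = account_agg.get_transactions(datetime(2019, 1, 1), datetime(2019, 1, 31))
    for tx in txs:
        print(tx.post_date, tx.description)
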
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_db.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L863-L868 | def set_verbose_logging(verbose: bool) -> None:
"""Chooses basic or verbose logging."""
if verbose:
set_loglevel(logging.DEBUG)
else:
set_loglevel(logging.INFO) | [
"def",
"set_verbose_logging",
"(",
"verbose",
":",
"bool",
")",
"->",
"None",
":",
"if",
"verbose",
":",
"set_loglevel",
"(",
"logging",
".",
"DEBUG",
")",
"else",
":",
"set_loglevel",
"(",
"logging",
".",
"INFO",
")"
] | Chooses basic or verbose logging. | [
"Chooses",
"basic",
"or",
"verbose",
"logging",
"."
] | python | train |
splitkeycoffee/pyhottop | pyhottop/pyhottop.py | https://github.com/splitkeycoffee/pyhottop/blob/2986bbb2d848f7e41fa3ece5ebb1b33c8882219c/pyhottop/pyhottop.py#L668-L702 | def add_roast_event(self, event):
"""Add an event to the roast log.
This method should be used for registering events that may be worth
tracking like first crack, second crack and the dropping of coffee.
Similar to the standard reading output from the roaster, manually
created events will include the current configuration reading, time and
metadata passed in.
:param event: Details describing what happened
:type event: dict
:returns: dict
"""
event_time = self.get_roast_time()
def get_valid_config():
"""Keep grabbing configs until we have a valid one.
In rare cases, the configuration will be invalid when the user
registers an event. This malformation can occur across several
events, so we use this helper to find a valid config to associate
to the event while preserving the original time. Due to fast
interval checking, this is not liable to skew data that much and
it's better than extreme false data.
"""
config = self.get_roast_properties()['last']['config']
if not config['valid']:
self._log.debug("Invalid config at event time, retrying...")
self.get_valid_config()
return config
event.update({'time': event_time,
'config': get_valid_config()})
self._roast['events'].append(event)
return self.get_roast_properties() | [
"def",
"add_roast_event",
"(",
"self",
",",
"event",
")",
":",
"event_time",
"=",
"self",
".",
"get_roast_time",
"(",
")",
"def",
"get_valid_config",
"(",
")",
":",
"\"\"\"Keep grabbing configs until we have a valid one.\n\n In rare cases, the configuration will be invalid when the user\n registers an event. This malformation can occur across several\n events, so we use this helper to find a valid config to associate\n to the event while preserving the original time. Due to fast\n interval checking, this is not liable to skew data that much and\n it's better than extreme false data.\n \"\"\"",
"config",
"=",
"self",
".",
"get_roast_properties",
"(",
")",
"[",
"'last'",
"]",
"[",
"'config'",
"]",
"if",
"not",
"config",
"[",
"'valid'",
"]",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Invalid config at event time, retrying...\"",
")",
"self",
".",
"get_valid_config",
"(",
")",
"return",
"config",
"event",
".",
"update",
"(",
"{",
"'time'",
":",
"event_time",
",",
"'config'",
":",
"get_valid_config",
"(",
")",
"}",
")",
"self",
".",
"_roast",
"[",
"'events'",
"]",
".",
"append",
"(",
"event",
")",
"return",
"self",
".",
"get_roast_properties",
"(",
")"
] | Add an event to the roast log.
This method should be used for registering events that may be worth
tracking like first crack, second crack and the dropping of coffee.
Similar to the standard reading output from the roaster, manually
created events will include the current configuration reading, time and
metadata passed in.
:param event: Details describing what happened
:type event: dict
:returns: dict | [
"Add",
"an",
"event",
"to",
"the",
"roast",
"log",
"."
] | python | train |
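
Registering a manual milestone, assuming `roaster` is a connected pyhottop instance (the variable name and event keys are illustrative):

    roaster.add_roast_event({'event': 'First Crack', 'notes': 'rolling'})
    props = roaster.get_roast_properties()   # the event now appears under 'events'
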
trailofbits/manticore | manticore/platforms/evm.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L1101-L1105 | def _store(self, offset, value, size=1):
"""Stores value in memory as a big endian"""
self.memory.write_BE(offset, value, size)
for i in range(size):
self._publish('did_evm_write_memory', offset + i, Operators.EXTRACT(value, (size - i - 1) * 8, 8)) | [
"def",
"_store",
"(",
"self",
",",
"offset",
",",
"value",
",",
"size",
"=",
"1",
")",
":",
"self",
".",
"memory",
".",
"write_BE",
"(",
"offset",
",",
"value",
",",
"size",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"self",
".",
"_publish",
"(",
"'did_evm_write_memory'",
",",
"offset",
"+",
"i",
",",
"Operators",
".",
"EXTRACT",
"(",
"value",
",",
"(",
"size",
"-",
"i",
"-",
"1",
")",
"*",
"8",
",",
"8",
")",
")"
] | Stores value in memory as a big endian | [
"Stores",
"value",
"in",
"memory",
"as",
"a",
"big",
"endian"
] | python | valid |
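
The EXTRACT call above peels off one byte per position, most significant first; the same arithmetic in plain Python:

    def split_big_endian(value, size):
        # Mirrors EXTRACT(value, (size - i - 1) * 8, 8) for each position i.
        return [(value >> ((size - i - 1) * 8)) & 0xFF for i in range(size)]

    assert split_big_endian(0x1234, 2) == [0x12, 0x34]
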
pallets/flask-sqlalchemy | flask_sqlalchemy/__init__.py | https://github.com/pallets/flask-sqlalchemy/blob/3d3261f4fc6d28f5bf407cf7d523e36a09a8c144/flask_sqlalchemy/__init__.py#L758-L772 | def create_session(self, options):
"""Create the session factory used by :meth:`create_scoped_session`.
The factory **must** return an object that SQLAlchemy recognizes as a session,
or registering session events may raise an exception.
Valid factories include a :class:`~sqlalchemy.orm.session.Session`
class or a :class:`~sqlalchemy.orm.session.sessionmaker`.
The default implementation creates a ``sessionmaker`` for :class:`SignallingSession`.
:param options: dict of keyword arguments passed to session class
"""
return orm.sessionmaker(class_=SignallingSession, db=self, **options) | [
"def",
"create_session",
"(",
"self",
",",
"options",
")",
":",
"return",
"orm",
".",
"sessionmaker",
"(",
"class_",
"=",
"SignallingSession",
",",
"db",
"=",
"self",
",",
"*",
"*",
"options",
")"
] | Create the session factory used by :meth:`create_scoped_session`.
The factory **must** return an object that SQLAlchemy recognizes as a session,
or registering session events may raise an exception.
Valid factories include a :class:`~sqlalchemy.orm.session.Session`
class or a :class:`~sqlalchemy.orm.session.sessionmaker`.
The default implementation creates a ``sessionmaker`` for :class:`SignallingSession`.
:param options: dict of keyword arguments passed to session class | [
"Create",
"the",
"session",
"factory",
"used",
"by",
":",
"meth",
":",
"create_scoped_session",
"."
] | python | train |
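
One way to use the override point the docstring describes: subclass and return a sessionmaker with tweaked defaults (a sketch against Flask-SQLAlchemy 2.x, where SignallingSession is importable):

    from flask_sqlalchemy import SQLAlchemy, SignallingSession
    from sqlalchemy import orm

    class MySQLAlchemy(SQLAlchemy):
        def create_session(self, options):
            # Still returns a sessionmaker, as the contract above requires.
            options.setdefault('expire_on_commit', False)
            return orm.sessionmaker(class_=SignallingSession, db=self, **options)
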
zyga/call | call/__init__.py | https://github.com/zyga/call/blob/dcef9a5aac7f9085bd4829dd6bcedc5fc2945d87/call/__init__.py#L46-L62 | def bind(self, args, kwargs):
"""
Bind arguments and keyword arguments to the encapsulated function.
Returns a dictionary of parameters (named according to function
parameters) with the values that were bound to each name.
"""
spec = self._spec
resolution = self.resolve(args, kwargs)
params = dict(zip(spec.args, resolution.slots))
if spec.varargs:
params[spec.varargs] = resolution.varargs
if spec.varkw:
params[spec.varkw] = resolution.varkw
if spec.kwonlyargs:
params.update(resolution.kwonlyargs)
return params | [
"def",
"bind",
"(",
"self",
",",
"args",
",",
"kwargs",
")",
":",
"spec",
"=",
"self",
".",
"_spec",
"resolution",
"=",
"self",
".",
"resolve",
"(",
"args",
",",
"kwargs",
")",
"params",
"=",
"dict",
"(",
"zip",
"(",
"spec",
".",
"args",
",",
"resolution",
".",
"slots",
")",
")",
"if",
"spec",
".",
"varargs",
":",
"params",
"[",
"spec",
".",
"varargs",
"]",
"=",
"resolution",
".",
"varargs",
"if",
"spec",
".",
"varkw",
":",
"params",
"[",
"spec",
".",
"varkw",
"]",
"=",
"resolution",
".",
"varkw",
"if",
"spec",
".",
"kwonlyargs",
":",
"params",
".",
"update",
"(",
"resolution",
".",
"kwonlyargs",
")",
"return",
"params"
] | Bind arguments and keyword arguments to the encapsulated function.
Returns a dictionary of parameters (named according to function
parameters) with the values that were bound to each name. | [
"Bind",
"arguments",
"and",
"keyword",
"arguments",
"to",
"the",
"encapsulated",
"function",
"."
] | python | train |
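
How bind() lays arguments out by parameter name; the wrapper class name `Call` is an assumption here, only the bind() contract comes from the code above:

    def greet(name, *args, punct='!', **kw):
        pass

    c = Call(greet)                      # hypothetical wrapper exposing bind()
    params = c.bind(('world', 1, 2), {'punct': '?', 'x': 3})
    # Expected shape, per the code above:
    # {'name': 'world', 'args': (1, 2), 'punct': '?', 'kw': {'x': 3}}
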
dw/mitogen | ansible_mitogen/connection.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L259-L274 | def _connect_su(spec):
"""
Return ContextService arguments for su as a become method.
"""
return {
'method': 'su',
'enable_lru': True,
'kwargs': {
'username': spec.become_user(),
'password': spec.become_pass(),
'python_path': spec.python_path(),
'su_path': spec.become_exe(),
'connect_timeout': spec.timeout(),
'remote_name': get_remote_name(spec),
}
} | [
"def",
"_connect_su",
"(",
"spec",
")",
":",
"return",
"{",
"'method'",
":",
"'su'",
",",
"'enable_lru'",
":",
"True",
",",
"'kwargs'",
":",
"{",
"'username'",
":",
"spec",
".",
"become_user",
"(",
")",
",",
"'password'",
":",
"spec",
".",
"become_pass",
"(",
")",
",",
"'python_path'",
":",
"spec",
".",
"python_path",
"(",
")",
",",
"'su_path'",
":",
"spec",
".",
"become_exe",
"(",
")",
",",
"'connect_timeout'",
":",
"spec",
".",
"timeout",
"(",
")",
",",
"'remote_name'",
":",
"get_remote_name",
"(",
"spec",
")",
",",
"}",
"}"
] | Return ContextService arguments for su as a become method. | [
"Return",
"ContextService",
"arguments",
"for",
"su",
"as",
"a",
"become",
"method",
"."
] | python | train |
proycon/pynlpl | pynlpl/formats/folia.py | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3263-L3266 | def layers(self, annotationtype=None,set=None):
"""Returns a list of annotation layers found *directly* under this element, does not include alternative layers"""
if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE
return [ x for x in self.select(AbstractAnnotationLayer,set,False,True) if annotationtype is None or x.ANNOTATIONTYPE == annotationtype ] | [
"def",
"layers",
"(",
"self",
",",
"annotationtype",
"=",
"None",
",",
"set",
"=",
"None",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"annotationtype",
")",
":",
"annotationtype",
"=",
"annotationtype",
".",
"ANNOTATIONTYPE",
"return",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"select",
"(",
"AbstractAnnotationLayer",
",",
"set",
",",
"False",
",",
"True",
")",
"if",
"annotationtype",
"is",
"None",
"or",
"x",
".",
"ANNOTATIONTYPE",
"==",
"annotationtype",
"]"
] | Returns a list of annotation layers found *directly* under this element, does not include alternative layers | [
"Returns",
"a",
"list",
"of",
"annotation",
"layers",
"found",
"*",
"directly",
"*",
"under",
"this",
"element",
"does",
"not",
"include",
"alternative",
"layers"
] | python | train |
pinterest/pymemcache | pymemcache/client/base.py | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L504-L526 | def delete(self, key, noreply=None):
"""
The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'delete ' + self.check_key(key)
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'delete', noreply)
if noreply:
return True
return results[0] == b'DELETED' | [
"def",
"delete",
"(",
"self",
",",
"key",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"cmd",
"=",
"b'delete '",
"+",
"self",
".",
"check_key",
"(",
"key",
")",
"if",
"noreply",
":",
"cmd",
"+=",
"b' noreply'",
"cmd",
"+=",
"b'\\r\\n'",
"results",
"=",
"self",
".",
"_misc_cmd",
"(",
"[",
"cmd",
"]",
",",
"b'delete'",
",",
"noreply",
")",
"if",
"noreply",
":",
"return",
"True",
"return",
"results",
"[",
"0",
"]",
"==",
"b'DELETED'"
] | The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found. | [
"The",
"memcached",
"delete",
"command",
"."
] | python | train |
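
A usage sketch, assuming a memcached server on localhost; default_noreply is disabled so delete() reports whether the key existed:

    from pymemcache.client.base import Client

    client = Client(('localhost', 11211), default_noreply=False)
    client.set('greeting', 'hello')
    assert client.delete('greeting') is True    # key existed
    assert client.delete('greeting') is False   # already gone
    client.delete('greeting', noreply=True)     # fire-and-forget: always True
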
senaite/senaite.core.supermodel | src/senaite/core/supermodel/model.py | https://github.com/senaite/senaite.core.supermodel/blob/1819154332b8776f187aa98a2e299701983a0119/src/senaite/core/supermodel/model.py#L243-L249 | def brain(self):
"""Catalog brain of the wrapped object
"""
if self._brain is None:
logger.debug("SuperModel::brain: *Fetch catalog brain*")
self._brain = self.get_brain_by_uid(self.uid)
return self._brain | [
"def",
"brain",
"(",
"self",
")",
":",
"if",
"self",
".",
"_brain",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"SuperModel::brain: *Fetch catalog brain*\"",
")",
"self",
".",
"_brain",
"=",
"self",
".",
"get_brain_by_uid",
"(",
"self",
".",
"uid",
")",
"return",
"self",
".",
"_brain"
] | Catalog brain of the wrapped object | [
"Catalog",
"brain",
"of",
"the",
"wrapped",
"object"
] | python | train |
selik/xport | xport/v56.py | https://github.com/selik/xport/blob/fafd15a24ccd102fc92d0c0123b9877a0c752182/xport/v56.py#L421-L426 | def header_match(cls, data):
'''
Parse a member namestrs header (1 line, 80 bytes).
'''
mo = cls.header_re.match(data)
return int(mo['n_variables']) | [
"def",
"header_match",
"(",
"cls",
",",
"data",
")",
":",
"mo",
"=",
"cls",
".",
"header_re",
".",
"match",
"(",
"data",
")",
"return",
"int",
"(",
"mo",
"[",
"'n_variables'",
"]",
")"
] | Parse a member namestrs header (1 line, 80 bytes). | [
"Parse",
"a",
"member",
"namestrs",
"header",
"(",
"1",
"line",
"80",
"bytes",
")",
"."
] | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_bin_lib.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_lib.py#L144-L230 | def install_libs(self):
"""Install Required Libraries using pip."""
# default or current python version
lib_data = [{'python_executable': sys.executable, 'lib_dir': self.lib_directory}]
# check for requirements.txt
if not os.path.isfile(self.requirements_file):
self.handle_error('A requirements.txt file is required to install modules.')
# if branch arg is provide use git branch instead of pypi
if self.args.branch is not None:
self._create_temp_requirements()
# overwrite default with config data
if self.tcex_json.get('lib_versions'):
lib_data = self.tcex_json.get('lib_versions')
print('{}Using "lib" directories defined in tcex.json file.'.format(c.Style.BRIGHT))
# configure proxy settings
proxy_enabled = self._configure_proxy()
# install all requested lib directories
for data in lib_data:
# pattern to match env vars in data
env_var = re.compile(r'\$env\.([a-zA-Z0-9]+)')
lib_dir = data.get('lib_dir')
# replace env vars with env val in the lib dir
matches = re.findall(env_var, lib_dir)
if matches:
env_val = os.environ.get(matches[0])
if env_val is None:
self.handle_error(
'"{}" env variable set in tcex.json, but could not be resolved.'.format(
matches[0]
)
)
lib_dir = re.sub(env_var, env_val, lib_dir)
lib_dir_fq = os.path.join(self.app_path, lib_dir)
if os.access(lib_dir_fq, os.W_OK):
# remove lib directory from previous runs
shutil.rmtree(lib_dir_fq)
# replace env vars with env val in the python executable
python_executable = data.get('python_executable')
matches = re.findall(env_var, python_executable)
if matches:
env_val = os.environ.get(matches[0])
python_executable = re.sub(env_var, env_val, python_executable)
print('Building Lib Dir: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, lib_dir_fq))
exe_command = self._build_command(python_executable, lib_dir_fq, proxy_enabled)
print('Running: {}{}{}'.format(c.Style.BRIGHT, c.Fore.GREEN, ' '.join(exe_command)))
p = subprocess.Popen(
exe_command,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate() # pylint: disable=W0612
if p.returncode != 0:
print('{}{}FAIL'.format(c.Style.BRIGHT, c.Fore.RED))
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err.decode('utf-8')))
sys.exit('ERROR: {}'.format(err.decode('utf-8')))
# version comparison
try:
python_version = lib_dir.split('_', 1)[1]
except IndexError:
self.handle_error('Could not determine version from lib string.')
# track the latest Python version
if self.latest_version is None:
self.latest_version = python_version
elif StrictVersion(python_version) > StrictVersion(self.latest_version):
self.latest_version = python_version
# cleanup temp file if required
if self.use_temp_requirements_file:
os.remove(self.requirements_file)
# create lib_latest
self._create_lib_latest() | [
"def",
"install_libs",
"(",
"self",
")",
":",
"# default or current python version",
"lib_data",
"=",
"[",
"{",
"'python_executable'",
":",
"sys",
".",
"executable",
",",
"'lib_dir'",
":",
"self",
".",
"lib_directory",
"}",
"]",
"# check for requirements.txt",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"requirements_file",
")",
":",
"self",
".",
"handle_error",
"(",
"'A requirements.txt file is required to install modules.'",
")",
"# if branch arg is provide use git branch instead of pypi",
"if",
"self",
".",
"args",
".",
"branch",
"is",
"not",
"None",
":",
"self",
".",
"_create_temp_requirements",
"(",
")",
"# overwrite default with config data",
"if",
"self",
".",
"tcex_json",
".",
"get",
"(",
"'lib_versions'",
")",
":",
"lib_data",
"=",
"self",
".",
"tcex_json",
".",
"get",
"(",
"'lib_versions'",
")",
"print",
"(",
"'{}Using \"lib\" directories defined in tcex.json file.'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
")",
")",
"# configure proxy settings",
"proxy_enabled",
"=",
"self",
".",
"_configure_proxy",
"(",
")",
"# install all requested lib directories",
"for",
"data",
"in",
"lib_data",
":",
"# pattern to match env vars in data",
"env_var",
"=",
"re",
".",
"compile",
"(",
"r'\\$env\\.([a-zA-Z0-9]+)'",
")",
"lib_dir",
"=",
"data",
".",
"get",
"(",
"'lib_dir'",
")",
"# replace env vars with env val in the lib dir",
"matches",
"=",
"re",
".",
"findall",
"(",
"env_var",
",",
"lib_dir",
")",
"if",
"matches",
":",
"env_val",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"matches",
"[",
"0",
"]",
")",
"if",
"env_val",
"is",
"None",
":",
"self",
".",
"handle_error",
"(",
"'\"{}\" env variable set in tcex.json, but could not be resolved.'",
".",
"format",
"(",
"matches",
"[",
"0",
"]",
")",
")",
"lib_dir",
"=",
"re",
".",
"sub",
"(",
"env_var",
",",
"env_val",
",",
"lib_dir",
")",
"lib_dir_fq",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"app_path",
",",
"lib_dir",
")",
"if",
"os",
".",
"access",
"(",
"lib_dir_fq",
",",
"os",
".",
"W_OK",
")",
":",
"# remove lib directory from previous runs",
"shutil",
".",
"rmtree",
"(",
"lib_dir_fq",
")",
"# replace env vars with env val in the python executable",
"python_executable",
"=",
"data",
".",
"get",
"(",
"'python_executable'",
")",
"matches",
"=",
"re",
".",
"findall",
"(",
"env_var",
",",
"python_executable",
")",
"if",
"matches",
":",
"env_val",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"matches",
"[",
"0",
"]",
")",
"python_executable",
"=",
"re",
".",
"sub",
"(",
"env_var",
",",
"env_val",
",",
"python_executable",
")",
"print",
"(",
"'Building Lib Dir: {}{}{}'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"CYAN",
",",
"lib_dir_fq",
")",
")",
"exe_command",
"=",
"self",
".",
"_build_command",
"(",
"python_executable",
",",
"lib_dir_fq",
",",
"proxy_enabled",
")",
"print",
"(",
"'Running: {}{}{}'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"GREEN",
",",
"' '",
".",
"join",
"(",
"exe_command",
")",
")",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"exe_command",
",",
"shell",
"=",
"False",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"# pylint: disable=W0612",
"if",
"p",
".",
"returncode",
"!=",
"0",
":",
"print",
"(",
"'{}{}FAIL'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"RED",
")",
")",
"print",
"(",
"'{}{}{}'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"RED",
",",
"err",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"sys",
".",
"exit",
"(",
"'ERROR: {}'",
".",
"format",
"(",
"err",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"# version comparison",
"try",
":",
"python_version",
"=",
"lib_dir",
".",
"split",
"(",
"'_'",
",",
"1",
")",
"[",
"1",
"]",
"except",
"IndexError",
":",
"self",
".",
"handle_error",
"(",
"'Could not determine version from lib string.'",
")",
"# track the latest Python version",
"if",
"self",
".",
"latest_version",
"is",
"None",
":",
"self",
".",
"latest_version",
"=",
"python_version",
"elif",
"StrictVersion",
"(",
"python_version",
")",
">",
"StrictVersion",
"(",
"self",
".",
"latest_version",
")",
":",
"self",
".",
"latest_version",
"=",
"python_version",
"# cleanup temp file if required",
"if",
"self",
".",
"use_temp_requirements_file",
":",
"os",
".",
"remove",
"(",
"self",
".",
"requirements_file",
")",
"# create lib_latest",
"self",
".",
"_create_lib_latest",
"(",
")"
] | Install Required Libraries using pip. | [
"Install",
"Required",
"Libraries",
"using",
"pip",
"."
] | python | train |
megacool/flask-canonical | flask_canonical/canonical_logger.py | https://github.com/megacool/flask-canonical/blob/384c10205a1f5eefe859b3ae3c3152327bd4e7b7/flask_canonical/canonical_logger.py#L160-L182 | def get_view_function(app, url, method):
"""Match a url and return the view and arguments
it will be called with, or None if there is no view.
Creds: http://stackoverflow.com/a/38488506
"""
# pylint: disable=too-many-return-statements
adapter = app.create_url_adapter(request)
try:
match = adapter.match(url, method=method)
except RequestRedirect as ex:
# recursively match redirects
return get_view_function(app, ex.new_url, method)
except (MethodNotAllowed, NotFound):
# no match
return None
try:
return app.view_functions[match[0]]
except KeyError:
# no view is associated with the endpoint
return None | [
"def",
"get_view_function",
"(",
"app",
",",
"url",
",",
"method",
")",
":",
"# pylint: disable=too-many-return-statements",
"adapter",
"=",
"app",
".",
"create_url_adapter",
"(",
"request",
")",
"try",
":",
"match",
"=",
"adapter",
".",
"match",
"(",
"url",
",",
"method",
"=",
"method",
")",
"except",
"RequestRedirect",
"as",
"ex",
":",
"# recursively match redirects",
"return",
"get_view_function",
"(",
"app",
",",
"ex",
".",
"new_url",
",",
"method",
")",
"except",
"(",
"MethodNotAllowed",
",",
"NotFound",
")",
":",
"# no match",
"return",
"None",
"try",
":",
"return",
"app",
".",
"view_functions",
"[",
"match",
"[",
"0",
"]",
"]",
"except",
"KeyError",
":",
"# no view is associated with the endpoint",
"return",
"None"
] | Match a url and return the view and arguments
it will be called with, or None if there is no view.
Creds: http://stackoverflow.com/a/38488506 | [
"Match",
"a",
"url",
"and",
"return",
"the",
"view",
"and",
"arguments",
"it",
"will",
"be",
"called",
"with",
"or",
"None",
"if",
"there",
"is",
"no",
"view",
".",
"Creds",
":",
"http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"38488506"
] | python | valid |
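
Resolving a URL inside a request context, since the adapter is built from the current request (a sketch; the import path mirrors this module):

    from flask import Flask
    from flask_canonical.canonical_logger import get_view_function

    app = Flask(__name__)

    @app.route('/ping')
    def ping():
        return 'pong'

    with app.test_request_context():
        assert get_view_function(app, '/ping', 'GET') is ping
        assert get_view_function(app, '/missing', 'GET') is None
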
fabiobatalha/crossrefapi | crossref/restful.py | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1418-L1427 | def works(self, member_id):
"""
This method retrieves an iterable of Works of the given member.
args: Member ID (Integer)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(member_id))
return Works(context=context) | [
"def",
"works",
"(",
"self",
",",
"member_id",
")",
":",
"context",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"ENDPOINT",
",",
"str",
"(",
"member_id",
")",
")",
"return",
"Works",
"(",
"context",
"=",
"context",
")"
] | This method retrieves an iterable of Works of the given member.
args: Member ID (Integer)
return: Works() | [
"This",
"method",
"retrieve",
"a",
"iterable",
"of",
"Works",
"of",
"the",
"given",
"member",
"."
] | python | train |
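
A hedged usage sketch, assuming this method sits on the library's Members endpoint class as its README suggests; the member ID and query string are placeholders, and each iteration performs live HTTP requests against the Crossref API:

from crossref.restful import Members

members = Members()
for item in members.works(98).query('machine learning'):
    print(item.get('title'))   # each item is a metadata dict
    break                      # stop after the first result
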
openstack/proliantutils | proliantutils/hpssa/objects.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L113-L124 | def _convert_to_dict(stdout):
"""Wrapper function for parsing hpssacli/ssacli command.
This function gets the output from the hpssacli/ssacli command
and calls the recursive function _get_dict to return
the complete dictionary containing the RAID information.
"""
lines = stdout.split("\n")
lines = list(filter(None, lines))
info_dict, j = _get_dict(lines, 0, 0, 0)
return info_dict | [
"def",
"_convert_to_dict",
"(",
"stdout",
")",
":",
"lines",
"=",
"stdout",
".",
"split",
"(",
"\"\\n\"",
")",
"lines",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"lines",
")",
")",
"info_dict",
",",
"j",
"=",
"_get_dict",
"(",
"lines",
",",
"0",
",",
"0",
",",
"0",
")",
"return",
"info_dict"
] | Wrapper function for parsing hpssacli/ssacli command.
This function gets the output from the hpssacli/ssacli command
and calls the recursive function _get_dict to return
the complete dictionary containing the RAID information. | [
"Wrapper",
"function",
"for",
"parsing",
"hpssacli",
"/",
"ssacli",
"command",
"."
] | python | train |
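
_get_dict itself is not part of this record, but the wrapper implies it recursively nests the tool's indentation-based output (and returns a counter, discarded here as j). A toy stand-in that captures just the nesting step — the real parser also splits 'key: value' pairs, so treat this purely as an illustration:

def _nest_by_indent(lines, start=0, indent=0):
    """Group lines into nested dicts by 3-space indentation levels."""
    result = {}
    i = start
    while i < len(lines):
        depth = (len(lines[i]) - len(lines[i].lstrip())) // 3
        if depth < indent:
            break                      # this block is finished
        key = lines[i].strip()
        nxt = -1
        if i + 1 < len(lines):
            nxt = (len(lines[i + 1]) - len(lines[i + 1].lstrip())) // 3
        if nxt > depth:                # deeper line: recurse into a child block
            result[key], i = _nest_by_indent(lines, i + 1, nxt)
        else:
            result[key] = {}
            i += 1
    return result, i

stdout = ("Smart Array in Slot 0\n"
          "   Serial Number: ABC123\n"
          "   Array A\n"
          "      Logical Drive 1\n")
lines = list(filter(None, stdout.split("\n")))   # same pre-filter as the wrapper
info, _ = _nest_by_indent(lines)
print(info)
# {'Smart Array in Slot 0': {'Serial Number: ABC123': {},
#                            'Array A': {'Logical Drive 1': {}}}}
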
chimera0/accel-brain-code | Reinforcement-Learning/pyqlearning/q_learning.py | https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/q_learning.py#L255-L305 | def learn(self, state_key, limit=1000):
'''
Learning and searching the optimal solution.
Args:
state_key: Initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
'''
self.t = 1
while self.t <= limit:
next_action_list = self.extract_possible_actions(state_key)
if len(next_action_list):
action_key = self.select_action(
state_key=state_key,
next_action_list=next_action_list
)
reward_value = self.observe_reward_value(state_key, action_key)
if len(next_action_list):
# Max-Q-Value in next action time.
next_state_key = self.update_state(
state_key=state_key,
action_key=action_key
)
next_next_action_list = self.extract_possible_actions(next_state_key)
next_action_key = self.predict_next_action(next_state_key, next_next_action_list)
next_max_q = self.extract_q_df(next_state_key, next_action_key)
# Update Q-Value.
self.update_q(
state_key=state_key,
action_key=action_key,
reward_value=reward_value,
next_max_q=next_max_q
)
# Update State.
state_key = next_state_key
# Normalize.
self.normalize_q_value()
self.normalize_r_value()
# Vis.
self.visualize_learning_result(state_key)
# Check.
if self.check_the_end_flag(state_key) is True:
break
# Episode.
self.t += 1 | [
"def",
"learn",
"(",
"self",
",",
"state_key",
",",
"limit",
"=",
"1000",
")",
":",
"self",
".",
"t",
"=",
"1",
"while",
"self",
".",
"t",
"<=",
"limit",
":",
"next_action_list",
"=",
"self",
".",
"extract_possible_actions",
"(",
"state_key",
")",
"if",
"len",
"(",
"next_action_list",
")",
":",
"action_key",
"=",
"self",
".",
"select_action",
"(",
"state_key",
"=",
"state_key",
",",
"next_action_list",
"=",
"next_action_list",
")",
"reward_value",
"=",
"self",
".",
"observe_reward_value",
"(",
"state_key",
",",
"action_key",
")",
"if",
"len",
"(",
"next_action_list",
")",
":",
"# Max-Q-Value in next action time.",
"next_state_key",
"=",
"self",
".",
"update_state",
"(",
"state_key",
"=",
"state_key",
",",
"action_key",
"=",
"action_key",
")",
"next_next_action_list",
"=",
"self",
".",
"extract_possible_actions",
"(",
"next_state_key",
")",
"next_action_key",
"=",
"self",
".",
"predict_next_action",
"(",
"next_state_key",
",",
"next_next_action_list",
")",
"next_max_q",
"=",
"self",
".",
"extract_q_df",
"(",
"next_state_key",
",",
"next_action_key",
")",
"# Update Q-Value.",
"self",
".",
"update_q",
"(",
"state_key",
"=",
"state_key",
",",
"action_key",
"=",
"action_key",
",",
"reward_value",
"=",
"reward_value",
",",
"next_max_q",
"=",
"next_max_q",
")",
"# Update State.",
"state_key",
"=",
"next_state_key",
"# Normalize.",
"self",
".",
"normalize_q_value",
"(",
")",
"self",
".",
"normalize_r_value",
"(",
")",
"# Vis.",
"self",
".",
"visualize_learning_result",
"(",
"state_key",
")",
"# Check.",
"if",
"self",
".",
"check_the_end_flag",
"(",
"state_key",
")",
"is",
"True",
":",
"break",
"# Epsode.",
"self",
".",
"t",
"+=",
"1"
] | Learning and searching the optimal solution.
Args:
state_key: Initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. | [
"Learning",
"and",
"searching",
"the",
"optimal",
"solution",
".",
"Args",
":",
"state_key",
":",
"Initial",
"state",
".",
"limit",
":",
"The",
"maximum",
"number",
"of",
"iterative",
"updates",
"based",
"on",
"value",
"iteration",
"algorithms",
"."
] | python | train |
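
learn() is a template method: the loop is fixed while extract_possible_actions, select_action, observe_reward_value, update_state and the Q-table accessors are deferred to subclasses. The core update it delegates to update_q is the standard Q-learning rule; spelled out without the class plumbing (alpha and gamma are assumed hyperparameters hidden inside update_q):

# Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))
alpha, gamma = 0.1, 0.9          # learning rate / discount factor (assumed values)
q = {('s0', 'right'): 0.0}       # Q-table keyed by (state, action)

state, action, reward, next_max_q = 's0', 'right', 1.0, 0.5
q[(state, action)] += alpha * (reward + gamma * next_max_q - q[(state, action)])
print(q[(state, action)])        # ~0.145
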
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L372-L389 | def overlay_gateway_site_bfd_params_interval_min_tx(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
site = ET.SubElement(overlay_gateway, "site")
name_key = ET.SubElement(site, "name")
name_key.text = kwargs.pop('name')
bfd = ET.SubElement(site, "bfd")
params = ET.SubElement(bfd, "params")
interval = ET.SubElement(params, "interval")
min_tx = ET.SubElement(interval, "min-tx")
min_tx.text = kwargs.pop('min_tx')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"overlay_gateway_site_bfd_params_interval_min_tx",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"overlay_gateway",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"overlay-gateway\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-tunnels\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"overlay_gateway",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"site",
"=",
"ET",
".",
"SubElement",
"(",
"overlay_gateway",
",",
"\"site\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"site",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"bfd",
"=",
"ET",
".",
"SubElement",
"(",
"site",
",",
"\"bfd\"",
")",
"params",
"=",
"ET",
".",
"SubElement",
"(",
"bfd",
",",
"\"params\"",
")",
"interval",
"=",
"ET",
".",
"SubElement",
"(",
"params",
",",
"\"interval\"",
")",
"min_tx",
"=",
"ET",
".",
"SubElement",
"(",
"interval",
",",
"\"min-tx\"",
")",
"min_tx",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'min_tx'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
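
Two observations on this auto-generated binding: it appears to pop 'name' from kwargs twice (the second pop would raise KeyError unless the generated code is patched or the caller's kwargs are rebuilt), and the value it ultimately hands to the callback is a plain NETCONF-style XML tree. A standalone reconstruction of that tree, with placeholder values:

import xml.etree.ElementTree as ET

config = ET.Element("config")
gw = ET.SubElement(config, "overlay-gateway",
                   xmlns="urn:brocade.com:mgmt:brocade-tunnels")
ET.SubElement(gw, "name").text = "gw1"
site = ET.SubElement(gw, "site")
ET.SubElement(site, "name").text = "site1"
params = ET.SubElement(ET.SubElement(site, "bfd"), "params")
ET.SubElement(ET.SubElement(params, "interval"), "min-tx").text = "300"
print(ET.tostring(config).decode())
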
yymao/generic-catalog-reader | GCR/query.py | https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/query.py#L28-L33 | def check_scalar(self, scalar_dict):
"""
check if `scalar_dict` satisfies the query
"""
table = {k: np.array([v]) for k, v in scalar_dict.items()}
return self.mask(table)[0] | [
"def",
"check_scalar",
"(",
"self",
",",
"scalar_dict",
")",
":",
"table",
"=",
"{",
"k",
":",
"np",
".",
"array",
"(",
"[",
"v",
"]",
")",
"for",
"k",
",",
"v",
"in",
"scalar_dict",
".",
"items",
"(",
")",
"}",
"return",
"self",
".",
"mask",
"(",
"table",
")",
"[",
"0",
"]"
] | check if `scalar_dict` satisfies the query | [
"check",
"if",
"scalar_dict",
"satisfy",
"query"
] | python | train |
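
A hedged usage sketch — GCRQuery builds on easyquery, which accepts numexpr-style filter strings, so wrapping the scalar dict in length-1 arrays lets the ordinary mask logic answer a yes/no question (the column name and threshold are placeholders):

from GCR import GCRQuery   # assumed top-level export, per the package docs

q = GCRQuery('mass > 1e10')
print(q.check_scalar({'mass': 2e10}))   # True
print(q.check_scalar({'mass': 5e9}))    # False
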
gem/oq-engine | openquake/baselib/sap.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/sap.py#L116-L128 | def _add(self, name, *args, **kw):
"""
Add an argument to the underlying parser and grow the list
.all_arguments and the list .names
"""
argname = list(self.argdict)[self._argno]
if argname != name:
raise NameError(
'Setting argument %s, but it should be %s' % (name, argname))
self._group.add_argument(*args, **kw)
self.all_arguments.append((args, kw))
self.names.append(name)
self._argno += 1 | [
"def",
"_add",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"argname",
"=",
"list",
"(",
"self",
".",
"argdict",
")",
"[",
"self",
".",
"_argno",
"]",
"if",
"argname",
"!=",
"name",
":",
"raise",
"NameError",
"(",
"'Setting argument %s, but it should be %s'",
"%",
"(",
"name",
",",
"argname",
")",
")",
"self",
".",
"_group",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"self",
".",
"all_arguments",
".",
"append",
"(",
"(",
"args",
",",
"kw",
")",
")",
"self",
".",
"names",
".",
"append",
"(",
"name",
")",
"self",
".",
"_argno",
"+=",
"1"
] | Add an argument to the underlying parser and grow the list
.all_arguments and the list .names | [
"Add",
"an",
"argument",
"to",
"the",
"underlying",
"parser",
"and",
"grow",
"the",
"list",
".",
"all_arguments",
"and",
"the",
"set",
".",
"names"
] | python | train |
NiklasRosenstein/py-bundler | bundler/nativedeps/windll.py | https://github.com/NiklasRosenstein/py-bundler/blob/80dd6dc971667ba015f7f67481417c45cc757231/bundler/nativedeps/windll.py#L60-L94 | def get_dependency_walker():
"""
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable.
"""
for dirname in os.getenv('PATH', '').split(os.pathsep):
filename = os.path.join(dirname, 'depends.exe')
if os.path.isfile(filename):
logger.info('Dependency Walker found at "{}"'.format(filename))
return filename
temp_exe = os.path.join(tempfile.gettempdir(), 'depends.exe')
temp_dll = os.path.join(tempfile.gettempdir(), 'depends.dll')
if os.path.isfile(temp_exe):
logger.info('Dependency Walker found at "{}"'.format(temp_exe))
return temp_exe
logger.info('Dependency Walker not found. Downloading ...')
with urlopen('http://dependencywalker.com/depends22_x64.zip') as fp:
data = fp.read()
logger.info('Extracting Dependency Walker to "{}"'.format(temp_exe))
with zipfile.ZipFile(io.BytesIO(data)) as fp:
with fp.open('depends.exe') as src:
with open(temp_exe, 'wb') as dst:
shutil.copyfileobj(src, dst)
with fp.open('depends.dll') as src:
with open(temp_dll, 'wb') as dst:
shutil.copyfileobj(src, dst)
return temp_exe | [
"def",
"get_dependency_walker",
"(",
")",
":",
"for",
"dirname",
"in",
"os",
".",
"getenv",
"(",
"'PATH'",
",",
"''",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'depends.exe'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"logger",
".",
"info",
"(",
"'Dependency Walker found at \"{}\"'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"filename",
"temp_exe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'depends.exe'",
")",
"temp_dll",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'depends.dll'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"temp_exe",
")",
":",
"logger",
".",
"info",
"(",
"'Dependency Walker found at \"{}\"'",
".",
"format",
"(",
"temp_exe",
")",
")",
"return",
"temp_exe",
"logger",
".",
"info",
"(",
"'Dependency Walker not found. Downloading ...'",
")",
"with",
"urlopen",
"(",
"'http://dependencywalker.com/depends22_x64.zip'",
")",
"as",
"fp",
":",
"data",
"=",
"fp",
".",
"read",
"(",
")",
"logger",
".",
"info",
"(",
"'Extracting Dependency Walker to \"{}\"'",
".",
"format",
"(",
"temp_exe",
")",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"io",
".",
"BytesIO",
"(",
"data",
")",
")",
"as",
"fp",
":",
"with",
"fp",
".",
"open",
"(",
"'depends.exe'",
")",
"as",
"src",
":",
"with",
"open",
"(",
"temp_exe",
",",
"'wb'",
")",
"as",
"dst",
":",
"shutil",
".",
"copyfileobj",
"(",
"src",
",",
"dst",
")",
"with",
"fp",
".",
"open",
"(",
"'depends.dll'",
")",
"as",
"src",
":",
"with",
"open",
"(",
"temp_dll",
",",
"'wb'",
")",
"as",
"dst",
":",
"shutil",
".",
"copyfileobj",
"(",
"src",
",",
"dst",
")",
"return",
"temp_exe"
] | Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable. | [
"Checks",
"if",
"depends",
".",
"exe",
"is",
"in",
"the",
"system",
"PATH",
".",
"If",
"not",
"it",
"will",
"be",
"downloaded",
"and",
"extracted",
"to",
"a",
"temporary",
"directory",
".",
"Note",
"that",
"the",
"file",
"will",
"not",
"be",
"deleted",
"afterwards",
"."
] | python | train |
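
The download branch leans on two stdlib idioms: treating fetched bytes as an in-memory zip via io.BytesIO, and streaming a single archive member to disk with shutil.copyfileobj. A network-free reproduction of that extraction step (the archive here is fabricated in memory):

import io
import shutil
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:        # stand-in for the downloaded archive
    zf.writestr('depends.exe', b'not a real executable')

with zipfile.ZipFile(io.BytesIO(buf.getvalue())) as zf:
    with zf.open('depends.exe') as src, open('depends.exe.demo', 'wb') as dst:
        shutil.copyfileobj(src, dst)         # stream one member out of the zip
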
LettError/MutatorMath | Lib/mutatorMath/ufo/document.py | https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/document.py#L231-L238 | def endInstance(self):
"""
Finalise the instance definition started by startInstance().
"""
if self.currentInstance is None:
return
allInstances = self.root.findall('.instances')[0].append(self.currentInstance)
self.currentInstance = None | [
"def",
"endInstance",
"(",
"self",
")",
":",
"if",
"self",
".",
"currentInstance",
"is",
"None",
":",
"return",
"allInstances",
"=",
"self",
".",
"root",
".",
"findall",
"(",
"'.instances'",
")",
"[",
"0",
"]",
".",
"append",
"(",
"self",
".",
"currentInstance",
")",
"self",
".",
"currentInstance",
"=",
"None"
] | Finalise the instance definition started by startInstance(). | [
"Finalise",
"the",
"instance",
"definition",
"started",
"by",
"startInstance",
"()",
"."
] | python | train |
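
One readability note on this record: Element.append() mutates in place and returns None, so the allInstances binding in the method is always None and never used. A standalone demonstration of the same move-into-container step (the XML shape is a simplified stand-in for a designspace document):

import xml.etree.ElementTree as ET

root = ET.fromstring('<designspace><instances/></designspace>')
current = ET.Element('instance')

container = root.findall('./instances')[0]   # ET parses the snippet's '.instances' the same way
container.append(current)                    # returns None; the mutation is the point
print(ET.tostring(root).decode())
# <designspace><instances><instance /></instances></designspace>
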
saltstack/salt | salt/modules/neutron.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutron.py#L643-L668 | def update_router(router,
name=None,
admin_state_up=None,
profile=None,
**kwargs):
'''
Updates a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router router_id name=new-router-name
admin_state_up=True
:param router: ID or name of router to update
:param name: Name of this router
:param ext_network: ID or name of the external network for the gateway (Optional)
:param admin_state_up: Set admin state up to true or false,
default: true (Optional)
:param profile: Profile to build on (Optional)
:param kwargs:
:return: Value of updated router information
'''
conn = _auth(profile)
return conn.update_router(router, name, admin_state_up, **kwargs) | [
"def",
"update_router",
"(",
"router",
",",
"name",
"=",
"None",
",",
"admin_state_up",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
")",
"return",
"conn",
".",
"update_router",
"(",
"router",
",",
"name",
",",
"admin_state_up",
",",
"*",
"*",
"kwargs",
")"
] | Updates a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router router_id name=new-router-name
admin_state_up=True
:param router: ID or name of router to update
:param name: Name of this router
:param ext_network: ID or name of the external network for the gateway (Optional)
:param admin_state_up: Set admin state up to true or false,
default: true (Optional)
:param profile: Profile to build on (Optional)
:param kwargs:
:return: Value of updated router information | [
"Updates",
"a",
"router"
] | python | train |
pycontribs/pyrax | pyrax/cloudloadbalancers.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudloadbalancers.py#L1052-L1061 | def _get_lb(self, lb_or_id):
"""
Accepts either a loadbalancer or the ID of a loadbalancer, and returns
the CloudLoadBalancer instance.
"""
if isinstance(lb_or_id, CloudLoadBalancer):
ret = lb_or_id
else:
ret = self.get(lb_or_id)
return ret | [
"def",
"_get_lb",
"(",
"self",
",",
"lb_or_id",
")",
":",
"if",
"isinstance",
"(",
"lb_or_id",
",",
"CloudLoadBalancer",
")",
":",
"ret",
"=",
"lb_or_id",
"else",
":",
"ret",
"=",
"self",
".",
"get",
"(",
"lb_or_id",
")",
"return",
"ret"
] | Accepts either a loadbalancer or the ID of a loadbalancer, and returns
the CloudLoadBalancer instance. | [
"Accepts",
"either",
"a",
"loadbalancer",
"or",
"the",
"ID",
"of",
"a",
"loadbalancer",
"and",
"returns",
"the",
"CloudLoadBalancer",
"instance",
"."
] | python | train |
arokem/python-matlab-bridge | pymatbridge/messenger/make.py | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/messenger/make.py#L88-L118 | def which(filename):
'''This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license.
'''
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '' and is_executable_file(filename):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if pty:
if is_executable_file(ff):
return ff
else:
pathext = os.environ.get('Pathext', '.exe;.com;.bat;.cmd')
pathext = pathext.split(os.pathsep) + ['']
for ext in pathext:
if os.access(ff + ext, os.X_OK):
return ff + ext
return None | [
"def",
"which",
"(",
"filename",
")",
":",
"# Special case where filename contains an explicit path.",
"if",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"!=",
"''",
"and",
"is_executable_file",
"(",
"filename",
")",
":",
"return",
"filename",
"if",
"'PATH'",
"not",
"in",
"os",
".",
"environ",
"or",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"==",
"''",
":",
"p",
"=",
"os",
".",
"defpath",
"else",
":",
"p",
"=",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"pathlist",
"=",
"p",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"for",
"path",
"in",
"pathlist",
":",
"ff",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"if",
"pty",
":",
"if",
"is_executable_file",
"(",
"ff",
")",
":",
"return",
"ff",
"else",
":",
"pathext",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'Pathext'",
",",
"'.exe;.com;.bat;.cmd'",
")",
"pathext",
"=",
"pathext",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"+",
"[",
"''",
"]",
"for",
"ext",
"in",
"pathext",
":",
"if",
"os",
".",
"access",
"(",
"ff",
"+",
"ext",
",",
"os",
".",
"X_OK",
")",
":",
"return",
"ff",
"+",
"ext",
"return",
"None"
] | This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license. | [
"This",
"takes",
"a",
"given",
"filename",
";",
"tries",
"to",
"find",
"it",
"in",
"the",
"environment",
"path",
";",
"then",
"checks",
"if",
"it",
"is",
"executable",
".",
"This",
"returns",
"the",
"full",
"path",
"to",
"the",
"filename",
"if",
"found",
"and",
"executable",
".",
"Otherwise",
"this",
"returns",
"None",
"."
] | python | train |
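
A POSIX-oriented distillation of the same lookup; the pexpect original additionally handles the explicit-path special case and, off-pty, the Windows Pathext extension list:

import os

def find_on_path(filename):
    """Return the first executable match for filename on PATH, else None."""
    for dirname in os.environ.get('PATH', os.defpath).split(os.pathsep):
        candidate = os.path.join(dirname, filename)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

print(find_on_path('ls'))   # e.g. /bin/ls on most Unix systems
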
eandersson/amqpstorm | examples/flask_threaded_rpc_client.py | https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/examples/flask_threaded_rpc_client.py#L38-L44 | def _create_process_thread(self):
"""Create a thread responsible for consuming messages in response
to RPC requests.
"""
thread = threading.Thread(target=self._process_data_events)
thread.setDaemon(True)
thread.start() | [
"def",
"_create_process_thread",
"(",
"self",
")",
":",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_process_data_events",
")",
"thread",
".",
"setDaemon",
"(",
"True",
")",
"thread",
".",
"start",
"(",
")"
] | Create a thread responsible for consuming messages in response
to RPC requests. | [
"Create",
"a",
"thread",
"responsible",
"for",
"consuming",
"messages",
"in",
"response",
"to",
"RPC",
"requests",
"."
] | python | train |
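
The same pattern in isolation: a daemon consumer thread that dies with the main process. The daemon= constructor argument (or thread.daemon = True) is the modern spelling of setDaemon(True); the body below is a stand-in for the real process_data_events loop:

import threading
import time

def _process_data_events():
    while True:
        time.sleep(0.1)   # placeholder for consuming RPC responses

thread = threading.Thread(target=_process_data_events, daemon=True)
thread.start()
print(thread.daemon)   # True -> the thread won't block interpreter exit
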
idlesign/django-admirarchy | admirarchy/utils.py | https://github.com/idlesign/django-admirarchy/blob/723e4fd212fdebcc156492cb16b9d65356f5ca73/admirarchy/utils.py#L56-L92 | def hierarchy_nav(self, obj):
"""Renders hierarchy navigation elements (folders)."""
result_repr = '' # For items without children.
ch_count = getattr(obj, Hierarchy.CHILD_COUNT_MODEL_ATTR, 0)
is_parent_link = getattr(obj, Hierarchy.UPPER_LEVEL_MODEL_ATTR, False)
if is_parent_link or ch_count: # For items with children and parent links.
icon = 'icon icon-folder'
title = _('Objects inside: %s') % ch_count
if is_parent_link:
icon = 'icon icon-folder-up'
title = _('Upper level')
url = './'
if obj.pk:
url = '?%s=%s' % (Hierarchy.PARENT_ID_QS_PARAM, obj.pk)
if self._current_changelist.is_popup:
qs_get = copy(self._current_changelist._request.GET)
try:
del qs_get[Hierarchy.PARENT_ID_QS_PARAM]
except KeyError:
pass
qs_get = qs_get.urlencode()
url = ('%s&%s' if '?' in url else '%s?%s') % (url, qs_get)
result_repr = format_html('<a href="{0}" class="{1}" title="{2}"></a>', url, icon, force_text(title))
return result_repr | [
"def",
"hierarchy_nav",
"(",
"self",
",",
"obj",
")",
":",
"result_repr",
"=",
"''",
"# For items without children.",
"ch_count",
"=",
"getattr",
"(",
"obj",
",",
"Hierarchy",
".",
"CHILD_COUNT_MODEL_ATTR",
",",
"0",
")",
"is_parent_link",
"=",
"getattr",
"(",
"obj",
",",
"Hierarchy",
".",
"UPPER_LEVEL_MODEL_ATTR",
",",
"False",
")",
"if",
"is_parent_link",
"or",
"ch_count",
":",
"# For items with children and parent links.",
"icon",
"=",
"'icon icon-folder'",
"title",
"=",
"_",
"(",
"'Objects inside: %s'",
")",
"%",
"ch_count",
"if",
"is_parent_link",
":",
"icon",
"=",
"'icon icon-folder-up'",
"title",
"=",
"_",
"(",
"'Upper level'",
")",
"url",
"=",
"'./'",
"if",
"obj",
".",
"pk",
":",
"url",
"=",
"'?%s=%s'",
"%",
"(",
"Hierarchy",
".",
"PARENT_ID_QS_PARAM",
",",
"obj",
".",
"pk",
")",
"if",
"self",
".",
"_current_changelist",
".",
"is_popup",
":",
"qs_get",
"=",
"copy",
"(",
"self",
".",
"_current_changelist",
".",
"_request",
".",
"GET",
")",
"try",
":",
"del",
"qs_get",
"[",
"Hierarchy",
".",
"PARENT_ID_QS_PARAM",
"]",
"except",
"KeyError",
":",
"pass",
"qs_get",
"=",
"qs_get",
".",
"urlencode",
"(",
")",
"url",
"=",
"(",
"'%s&%s'",
"if",
"'?'",
"in",
"url",
"else",
"'%s?%s'",
")",
"%",
"(",
"url",
",",
"qs_get",
")",
"result_repr",
"=",
"format_html",
"(",
"'<a href=\"{0}\" class=\"{1}\" title=\"{2}\"></a>'",
",",
"url",
",",
"icon",
",",
"force_text",
"(",
"title",
")",
")",
"return",
"result_repr"
] | Renders hierarchy navigation elements (folders). | [
"Renders",
"hierarchy",
"navigation",
"elements",
"(",
"folders",
")",
"."
] | python | train |
peri-source/peri | peri/util.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/util.py#L34-L59 | def listify(a):
"""
Convert a scalar ``a`` to a list and all iterables to list as well.
Examples
--------
>>> listify(0)
[0]
>>> listify([1,2,3])
[1, 2, 3]
>>> listify('a')
['a']
>>> listify(np.array([1,2,3]))
[1, 2, 3]
>>> listify('string')
['string']
"""
if a is None:
return []
elif not isinstance(a, (tuple, list, np.ndarray)):
return [a]
return list(a) | [
"def",
"listify",
"(",
"a",
")",
":",
"if",
"a",
"is",
"None",
":",
"return",
"[",
"]",
"elif",
"not",
"isinstance",
"(",
"a",
",",
"(",
"tuple",
",",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"return",
"[",
"a",
"]",
"return",
"list",
"(",
"a",
")"
] | Convert a scalar ``a`` to a list and all iterables to list as well.
Examples
--------
>>> listify(0)
[0]
>>> listify([1,2,3])
[1, 2, 3]
>>> listify('a')
['a']
>>> listify(np.array([1,2,3]))
[1, 2, 3]
>>> listify('string')
['string'] | [
"Convert",
"a",
"scalar",
"a",
"to",
"a",
"list",
"and",
"all",
"iterables",
"to",
"list",
"as",
"well",
"."
] | python | valid |