repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 class) | partition (string, 3 classes)
---|---|---|---|---|---|---|---|---
pantsbuild/pex | pex/vendor/_vendored/wheel/wheel/signatures/__init__.py | https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/vendor/_vendored/wheel/wheel/signatures/__init__.py#L29-L52

```python
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()
    #
    header = {
        "alg": ALG,
        "jwk": {
            "kty": ALG,  # alg -> kty in jwk-08.
            "vk": native(urlsafe_b64encode(keypair.vk))
        }
    }
    encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
    encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
    secured_input = b".".join((encoded_header, encoded_payload))
    sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
    signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
    encoded_signature = urlsafe_b64encode(signature)
    return {"recipients":
            [{"header": native(encoded_header),
              "signature": native(encoded_signature)}],
            "payload": native(encoded_payload)}
```
"def",
"sign",
"(",
"payload",
",",
"keypair",
")",
":",
"get_ed25519ll",
"(",
")",
"#",
"header",
"=",
"{",
"\"alg\"",
":",
"ALG",
",",
"\"jwk\"",
":",
"{",
"\"kty\"",
":",
"ALG",
",",
"# alg -> kty in jwk-08.",
"\"vk\"",
":",
"native",
"(",
"urlsafe_b64encode",
"(",
"keypair",
".",
"vk",
")",
")",
"}",
"}",
"encoded_header",
"=",
"urlsafe_b64encode",
"(",
"binary",
"(",
"json",
".",
"dumps",
"(",
"header",
",",
"sort_keys",
"=",
"True",
")",
")",
")",
"encoded_payload",
"=",
"urlsafe_b64encode",
"(",
"binary",
"(",
"json",
".",
"dumps",
"(",
"payload",
",",
"sort_keys",
"=",
"True",
")",
")",
")",
"secured_input",
"=",
"b\".\"",
".",
"join",
"(",
"(",
"encoded_header",
",",
"encoded_payload",
")",
")",
"sig_msg",
"=",
"ed25519ll",
".",
"crypto_sign",
"(",
"secured_input",
",",
"keypair",
".",
"sk",
")",
"signature",
"=",
"sig_msg",
"[",
":",
"ed25519ll",
".",
"SIGNATUREBYTES",
"]",
"encoded_signature",
"=",
"urlsafe_b64encode",
"(",
"signature",
")",
"return",
"{",
"\"recipients\"",
":",
"[",
"{",
"\"header\"",
":",
"native",
"(",
"encoded_header",
")",
",",
"\"signature\"",
":",
"native",
"(",
"encoded_signature",
")",
"}",
"]",
",",
"\"payload\"",
":",
"native",
"(",
"encoded_payload",
")",
"}"
Docstring: "Return a JWS-JS format signature given a JSON-serializable payload and an Ed25519 keypair." | language: python | partition: train
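A minimal sketch (not part of the wheel/pex sources) of how a consumer might rebuild the signed byte string from the structure returned above; `keypair` is assumed to be the same ed25519ll keypair the function expects:

```python
# Illustrative only: reconstruct the signed input from sign()'s return value.
result = sign({"key": "value"}, keypair)          # keypair: assumed ed25519ll Keypair
recipient = result["recipients"][0]
secured_input = ".".join((recipient["header"], result["payload"])).encode("ascii")
# secured_input now matches the b".".join((encoded_header, encoded_payload))
# value that was passed to ed25519ll.crypto_sign() inside sign().
```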
ionelmc/python-cogen | examples/static-serve.py | https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/examples/static-serve.py#L76-L87

```python
def get_entries(path):
    """Return sorted lists of directories and files in the given path."""
    dirs, files = [], []
    for entry in os.listdir(path):
        # Categorize entry as directory or file.
        if os.path.isdir(os.path.join(path, entry)):
            dirs.append(entry)
        else:
            files.append(entry)
    dirs.sort()
    files.sort()
    return dirs, files
```
"def",
"get_entries",
"(",
"path",
")",
":",
"dirs",
",",
"files",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"# Categorize entry as directory or file.\r",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"entry",
")",
")",
":",
"dirs",
".",
"append",
"(",
"entry",
")",
"else",
":",
"files",
".",
"append",
"(",
"entry",
")",
"dirs",
".",
"sort",
"(",
")",
"files",
".",
"sort",
"(",
")",
"return",
"dirs",
",",
"files"
Docstring: "Return sorted lists of directories and files in the given path." | language: python | partition: train
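A brief usage sketch for the helper above (the directory path is only an example):

```python
import os

# List a directory: sub-directories first, both groups alphabetically sorted.
dirs, files = get_entries(os.path.expanduser("~"))
for name in dirs:
    print(name + "/")
for name in files:
    print(name)
```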
thomasballinger/trellocardupdate | trellocardupdate/trelloupdate.py | https://github.com/thomasballinger/trellocardupdate/blob/16a648fa15efef144c07cd56fcdb1d8920fac889/trellocardupdate/trelloupdate.py#L110-L116

```python
def ask_for_board_id(self):
    """Factored out in case interface isn't keyboard"""
    board_id = raw_input("paste in board id or url: ").strip()
    m = re.search(r"(?:https?://)?(?:trello.com)?/?b?/?([a-zA-Z]{8})/(?:.*)", board_id)
    if m:
        board_id = m.group(1)
    return board_id
```
"def",
"ask_for_board_id",
"(",
"self",
")",
":",
"board_id",
"=",
"raw_input",
"(",
"\"paste in board id or url: \"",
")",
".",
"strip",
"(",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r\"(?:https?://)?(?:trello.com)?/?b?/?([a-zA-Z]{8})/(?:.*)\"",
",",
"board_id",
")",
"if",
"m",
":",
"board_id",
"=",
"m",
".",
"group",
"(",
"1",
")",
"return",
"board_id"
Docstring: "Factored out in case interface isn't keyboard" | language: python | partition: train
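For illustration, the regular expression in the method above pulls the 8-letter board id out of a pasted Trello URL (the URL below is made up):

```python
import re

pattern = r"(?:https?://)?(?:trello.com)?/?b?/?([a-zA-Z]{8})/(?:.*)"
m = re.search(pattern, "https://trello.com/b/AbCdEfGh/my-board")
print(m.group(1))  # -> AbCdEfGh
```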
marshallward/f90nml | f90nml/namelist.py | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L705-L708

```python
def _f90complex(self, value):
    """Return a Fortran 90 representation of a complex number."""
    return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
                                           fmt=self.float_format)
```
"def",
"_f90complex",
"(",
"self",
",",
"value",
")",
":",
"return",
"'({0:{fmt}}, {1:{fmt}})'",
".",
"format",
"(",
"value",
".",
"real",
",",
"value",
".",
"imag",
",",
"fmt",
"=",
"self",
".",
"float_format",
")"
Docstring: "Return a Fortran 90 representation of a complex number." | language: python | partition: train
duniter/duniter-python-api | duniterpy/api/bma/blockchain.py | https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/blockchain.py#L429-L437

```python
async def hardship(client: Client, pubkey: str) -> dict:
    """
    GET hardship level for given member's public key for writing next block

    :param client: Client to connect to the api
    :param pubkey: Public key of the member
    :return:
    """
    return await client.get(MODULE + '/hardship/%s' % pubkey, schema=HARDSHIP_SCHEMA)
```
"async",
"def",
"hardship",
"(",
"client",
":",
"Client",
",",
"pubkey",
":",
"str",
")",
"->",
"dict",
":",
"return",
"await",
"client",
".",
"get",
"(",
"MODULE",
"+",
"'/hardship/%s'",
"%",
"pubkey",
",",
"schema",
"=",
"HARDSHIP_SCHEMA",
")"
Docstring: "GET hardship level for given member's public key for writing next block" | language: python | partition: train
summa-tx/riemann | riemann/tx/tx_builder.py | https://github.com/summa-tx/riemann/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/riemann/tx/tx_builder.py#L35-L59

```python
def make_pkh_output_script(pubkey, witness=False):
    '''
    bytearray -> bytearray
    '''
    if witness and not riemann.network.SEGWIT:
        raise ValueError(
            'Network {} does not support witness scripts.'
            .format(riemann.get_current_network_name()))
    output_script = bytearray()
    if type(pubkey) is not bytearray and type(pubkey) is not bytes:
        raise ValueError('Unknown pubkey format. '
                         'Expected bytes. Got: {}'.format(type(pubkey)))
    pubkey_hash = utils.hash160(pubkey)
    if witness:
        output_script.extend(riemann.network.P2WPKH_PREFIX)
        output_script.extend(pubkey_hash)
    else:
        output_script.extend(b'\x76\xa9\x14')  # OP_DUP OP_HASH160 PUSH14
        output_script.extend(pubkey_hash)
        output_script.extend(b'\x88\xac')  # OP_EQUALVERIFY OP_CHECKSIG
    return output_script
```
"def",
"make_pkh_output_script",
"(",
"pubkey",
",",
"witness",
"=",
"False",
")",
":",
"if",
"witness",
"and",
"not",
"riemann",
".",
"network",
".",
"SEGWIT",
":",
"raise",
"ValueError",
"(",
"'Network {} does not support witness scripts.'",
".",
"format",
"(",
"riemann",
".",
"get_current_network_name",
"(",
")",
")",
")",
"output_script",
"=",
"bytearray",
"(",
")",
"if",
"type",
"(",
"pubkey",
")",
"is",
"not",
"bytearray",
"and",
"type",
"(",
"pubkey",
")",
"is",
"not",
"bytes",
":",
"raise",
"ValueError",
"(",
"'Unknown pubkey format. '",
"'Expected bytes. Got: {}'",
".",
"format",
"(",
"type",
"(",
"pubkey",
")",
")",
")",
"pubkey_hash",
"=",
"utils",
".",
"hash160",
"(",
"pubkey",
")",
"if",
"witness",
":",
"output_script",
".",
"extend",
"(",
"riemann",
".",
"network",
".",
"P2WPKH_PREFIX",
")",
"output_script",
".",
"extend",
"(",
"pubkey_hash",
")",
"else",
":",
"output_script",
".",
"extend",
"(",
"b'\\x76\\xa9\\x14'",
")",
"# OP_DUP OP_HASH160 PUSH14",
"output_script",
".",
"extend",
"(",
"pubkey_hash",
")",
"output_script",
".",
"extend",
"(",
"b'\\x88\\xac'",
")",
"# OP_EQUALVERIFY OP_CHECKSIG",
"return",
"output_script"
Docstring: "bytearray -> bytearray" | language: python | partition: train
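A standalone sketch of the 25-byte pay-to-pubkey-hash layout built by the non-witness branch above. It uses hashlib rather than riemann's own `utils.hash160`, which is assumed here to be RIPEMD160(SHA256(pubkey)), so treat it as illustrative rather than a drop-in equivalent:

```python
import hashlib

def hash160(pubkey_bytes):
    # Assumed definition: RIPEMD160(SHA256(x)); needs ripemd160 support in OpenSSL.
    return hashlib.new('ripemd160', hashlib.sha256(pubkey_bytes).digest()).digest()

pubkey = bytes.fromhex(
    '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798')
script = b'\x76\xa9\x14' + hash160(pubkey) + b'\x88\xac'
assert len(script) == 25  # OP_DUP OP_HASH160 <push 20 bytes> <hash> OP_EQUALVERIFY OP_CHECKSIG
```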
tango-controls/pytango | tango/databaseds/database.py | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L914-L929

```python
def DbDeleteServer(self, argin):
    """ Delete server from the database but dont delete device properties

    :param argin: Device server name
    :type: tango.DevString
    :return:
    :rtype: tango.DevVoid """
    self._log.debug("In DbDeleteServer()")
    if '*' in argin or '%' in argin or not '/' in argin:
        self.warn_stream("DataBase::db_delete_server(): server name " + argin + " incorrect ")
        th_exc(DB_IncorrectServerName,
               "failed to delete server, server name incorrect",
               "DataBase::DeleteServer()")
    self.db.delete_server(argin)
```
"def",
"DbDeleteServer",
"(",
"self",
",",
"argin",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"In DbDeleteServer()\"",
")",
"if",
"'*'",
"in",
"argin",
"or",
"'%'",
"in",
"argin",
"or",
"not",
"'/'",
"in",
"argin",
":",
"self",
".",
"warn_stream",
"(",
"\"DataBase::db_delete_server(): server name \"",
"+",
"argin",
"+",
"\" incorrect \"",
")",
"th_exc",
"(",
"DB_IncorrectServerName",
",",
"\"failed to delete server, server name incorrect\"",
",",
"\"DataBase::DeleteServer()\"",
")",
"self",
".",
"db",
".",
"delete_server",
"(",
"argin",
")"
Docstring: "Delete server from the database but dont delete device properties" | language: python | partition: train
ttu/ruuvitag-sensor | ruuvitag_sensor/decoder.py | https://github.com/ttu/ruuvitag-sensor/blob/b5d1367c26844ae5875b2964c68e7b2f4e1cb082/ruuvitag_sensor/decoder.py#L60-L66

```python
def _get_temperature(self, decoded):
    '''Return temperature in celsius'''
    temp = (decoded[2] & 127) + decoded[3] / 100
    sign = (decoded[2] >> 7) & 1
    if sign == 0:
        return round(temp, 2)
    return round(-1 * temp, 2)
```
"def",
"_get_temperature",
"(",
"self",
",",
"decoded",
")",
":",
"temp",
"=",
"(",
"decoded",
"[",
"2",
"]",
"&",
"127",
")",
"+",
"decoded",
"[",
"3",
"]",
"/",
"100",
"sign",
"=",
"(",
"decoded",
"[",
"2",
"]",
">>",
"7",
")",
"&",
"1",
"if",
"sign",
"==",
"0",
":",
"return",
"round",
"(",
"temp",
",",
"2",
")",
"return",
"round",
"(",
"-",
"1",
"*",
"temp",
",",
"2",
")"
Docstring: "Return temperature in celsius" | language: python | partition: train
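The temperature field is sign-and-magnitude encoded; a tiny worked example with made-up payload bytes:

```python
# Hypothetical bytes: decoded[2] = 0x81 (sign bit set, magnitude 1), decoded[3] = 0x2d (45).
decoded = [0x03, 0x29, 0x81, 0x2d]
temp = (decoded[2] & 127) + decoded[3] / 100   # 1 + 0.45
sign = (decoded[2] >> 7) & 1                   # 1 -> negative
print(round(-temp if sign else temp, 2))       # -> -1.45
```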
odlgroup/odl | odl/operator/operator.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L1384-L1410

```python
def derivative(self, x):
    """Return the operator derivative.

    The derivative of the operator composition follows the chain
    rule:

        ``OperatorComp(left, right).derivative(y) ==
        OperatorComp(left.derivative(right(y)), right.derivative(y))``

    Parameters
    ----------
    x : `domain` `element-like`
        Evaluation point of the derivative. Needs to be usable as
        input for the ``right`` operator.
    """
    if self.is_linear:
        return self
    else:
        if self.left.is_linear:
            left_deriv = self.left
        else:
            left_deriv = self.left.derivative(self.right(x))
        right_deriv = self.right.derivative(x)
        return OperatorComp(left_deriv, right_deriv,
                            self.__tmp)
```
"def",
"derivative",
"(",
"self",
",",
"x",
")",
":",
"if",
"self",
".",
"is_linear",
":",
"return",
"self",
"else",
":",
"if",
"self",
".",
"left",
".",
"is_linear",
":",
"left_deriv",
"=",
"self",
".",
"left",
"else",
":",
"left_deriv",
"=",
"self",
".",
"left",
".",
"derivative",
"(",
"self",
".",
"right",
"(",
"x",
")",
")",
"right_deriv",
"=",
"self",
".",
"right",
".",
"derivative",
"(",
"x",
")",
"return",
"OperatorComp",
"(",
"left_deriv",
",",
"right_deriv",
",",
"self",
".",
"__tmp",
")"
Docstring: "Return the operator derivative." | language: python | partition: train
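For reference, this mirrors the usual chain rule for a composition: if C = left o right, then C'(x) = left'(right(x)) o right'(x), with a linear operator serving as its own derivative, which is exactly what the two branches above assemble.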
h2oai/h2o-3 | h2o-bindings/bin/bindings.py | https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/bindings.py#L199-L295

```python
def endpoints(raw=False):
    """
    Return the list of REST API endpoints. The data is enriched with the following fields:
        class_name:   which back-end class handles this endpoint (the class is derived from the URL);
        ischema:      input schema object (input_schema is the schema's name)
        oschema:      output schema object (output_schema is the schema's name)
        algo:         for special-cased calls (ModelBuilders/train and Grid/train) -- name of the ML algo requested
        input_params: list of all input parameters (first path parameters, then all the others). The parameters are
                      given as objects, not just names. There is a flag "is_path_param" on each field.
    Additionally certain buggy/deprecated endpoints are removed.
    For Grid/train and ModelBuilders/train endpoints we fix the method name and parameters info (there is some mangling
    of those on the server side).

    :param raw: if True, then the complete untouched response to .../endpoints is returned (including the metadata)
    """
    json = _request_or_exit("/3/Metadata/endpoints")
    if raw: return json

    schmap = schemas_map()
    apinames = {}  # Used for checking for api name duplicates
    assert "routes" in json, "Unexpected result from /3/Metadata/endpoints call"
    re_api_name = re.compile(r"^\w+$")

    def gen_rich_route():
        for e in json["routes"]:
            path = e["url_pattern"]
            method = e["handler_method"]
            apiname = e["api_name"]
            assert apiname not in apinames, "Duplicate api name %s (for %s and %s)" % (apiname, apinames[apiname], path)
            assert re_api_name.match(apiname), "Bad api name %s" % apiname
            apinames[apiname] = path

            # These redundant paths cause conflicts, remove them
            if path == "/3/NodePersistentStorage/categories/{category}/exists": continue
            if path == "/3/ModelMetrics/frames/{frame}/models/{model}": continue
            if path == "/3/ModelMetrics/frames/{frame}": continue
            if path == "/3/ModelMetrics/models/{model}": continue
            if path == "/3/ModelMetrics": continue
            if "AutoML" in path: continue  # Generation code doesn't know how to deal with defaults for complex objects yet
            if apiname.endswith("_deprecated"): continue

            # Resolve one name conflict
            if path == "/3/DKV": e["handler_method"] = "removeAll"

            # Find the class_name (first part of the URL after the version: "/3/About" => "About")
            mm = classname_pattern.match(path)
            assert mm, "Cannot determine class name in URL " + path
            e["class_name"] = mm.group(1)
            if e["class_name"].islower():
                e["class_name"] = e["class_name"].capitalize()

            # Resolve input/output schemas into actual objects
            assert e["input_schema"] in schmap, "Encountered unknown schema %s in %s" % (e["input_schema"], path)
            assert e["output_schema"] in schmap, "Encountered unknown schema %s in %s" % (e["output_schema"], path)
            e["ischema"] = schmap[e["input_schema"]]
            e["oschema"] = schmap[e["output_schema"]]

            # For these special cases, the actual input schema is not the one reported by the endpoint, but the schema
            # of the 'parameters' field (which is fake).
            if (e["class_name"], method) in {("Grid", "train"), ("ModelBuilders", "train"),
                                             ("ModelBuilders", "validate_parameters")}:
                pieces = path.split("/")
                assert len(pieces) >= 4, "Expected to see algo name in the path: " + path
                e["algo"] = pieces[3]
                method = method + e["algo"].capitalize()  # e.g. trainGlm()
                e["handler_method"] = method
                for field in e["ischema"]["fields"]:
                    if field["name"] == "parameters":
                        e["input_schema"] = field["schema_name"]
                        e["ischema"] = schmap[e["input_schema"]]
                        break

            # Create the list of input_params (as objects, not just names)
            e["input_params"] = []
            for parm in e["path_params"]:
                # find the metadata for the field from the input schema:
                fields = [field for field in e["ischema"]["fields"] if field["name"] == parm]
                assert len(fields) == 1, \
                    "Failed to find parameter: %s for endpoint: %s in the input schema %s" \
                    % (parm, e["url_pattern"], e["ischema"]["name"])
                field = fields[0].copy()
                schema = field["schema_name"] or ""  # {schema} is null for primitive types
                ftype = field["type"]
                assert ftype == "string" or ftype == "int" or schema.endswith("KeyV3") or schema == "ColSpecifierV3", \
                    "Unexpected param %s of type %s (schema %s)" % (field["name"], ftype, schema)
                assert field["direction"] != "OUTPUT", "A path param %s cannot be of type OUTPUT" % field["name"]
                field["is_path_param"] = True
                field["required"] = True
                e["input_params"].append(field)
            for parm in e["ischema"]["fields"]:
                if parm["direction"] == "OUTPUT" or parm["name"] in e["path_params"]: continue
                field = parm.copy()
                field["is_path_param"] = False
                e["input_params"].append(field)
            yield e

    return list(gen_rich_route())
```
"def",
"endpoints",
"(",
"raw",
"=",
"False",
")",
":",
"json",
"=",
"_request_or_exit",
"(",
"\"/3/Metadata/endpoints\"",
")",
"if",
"raw",
":",
"return",
"json",
"schmap",
"=",
"schemas_map",
"(",
")",
"apinames",
"=",
"{",
"}",
"# Used for checking for api name duplicates",
"assert",
"\"routes\"",
"in",
"json",
",",
"\"Unexpected result from /3/Metadata/endpoints call\"",
"re_api_name",
"=",
"re",
".",
"compile",
"(",
"r\"^\\w+$\"",
")",
"def",
"gen_rich_route",
"(",
")",
":",
"for",
"e",
"in",
"json",
"[",
"\"routes\"",
"]",
":",
"path",
"=",
"e",
"[",
"\"url_pattern\"",
"]",
"method",
"=",
"e",
"[",
"\"handler_method\"",
"]",
"apiname",
"=",
"e",
"[",
"\"api_name\"",
"]",
"assert",
"apiname",
"not",
"in",
"apinames",
",",
"\"Duplicate api name %s (for %s and %s)\"",
"%",
"(",
"apiname",
",",
"apinames",
"[",
"apiname",
"]",
",",
"path",
")",
"assert",
"re_api_name",
".",
"match",
"(",
"apiname",
")",
",",
"\"Bad api name %s\"",
"%",
"apiname",
"apinames",
"[",
"apiname",
"]",
"=",
"path",
"# These redundant paths cause conflicts, remove them",
"if",
"path",
"==",
"\"/3/NodePersistentStorage/categories/{category}/exists\"",
":",
"continue",
"if",
"path",
"==",
"\"/3/ModelMetrics/frames/{frame}/models/{model}\"",
":",
"continue",
"if",
"path",
"==",
"\"/3/ModelMetrics/frames/{frame}\"",
":",
"continue",
"if",
"path",
"==",
"\"/3/ModelMetrics/models/{model}\"",
":",
"continue",
"if",
"path",
"==",
"\"/3/ModelMetrics\"",
":",
"continue",
"if",
"\"AutoML\"",
"in",
"path",
":",
"continue",
"# Generation code doesn't know how to deal with defaults for complex objects yet",
"if",
"apiname",
".",
"endswith",
"(",
"\"_deprecated\"",
")",
":",
"continue",
"# Resolve one name conflict",
"if",
"path",
"==",
"\"/3/DKV\"",
":",
"e",
"[",
"\"handler_method\"",
"]",
"=",
"\"removeAll\"",
"# Find the class_name (first part of the URL after the version: \"/3/About\" => \"About\")",
"mm",
"=",
"classname_pattern",
".",
"match",
"(",
"path",
")",
"assert",
"mm",
",",
"\"Cannot determine class name in URL \"",
"+",
"path",
"e",
"[",
"\"class_name\"",
"]",
"=",
"mm",
".",
"group",
"(",
"1",
")",
"if",
"e",
"[",
"\"class_name\"",
"]",
".",
"islower",
"(",
")",
":",
"e",
"[",
"\"class_name\"",
"]",
"=",
"e",
"[",
"\"class_name\"",
"]",
".",
"capitalize",
"(",
")",
"# Resolve input/output schemas into actual objects",
"assert",
"e",
"[",
"\"input_schema\"",
"]",
"in",
"schmap",
",",
"\"Encountered unknown schema %s in %s\"",
"%",
"(",
"e",
"[",
"\"input_schema\"",
"]",
",",
"path",
")",
"assert",
"e",
"[",
"\"output_schema\"",
"]",
"in",
"schmap",
",",
"\"Encountered unknown schema %s in %s\"",
"%",
"(",
"e",
"[",
"\"output_schema\"",
"]",
",",
"path",
")",
"e",
"[",
"\"ischema\"",
"]",
"=",
"schmap",
"[",
"e",
"[",
"\"input_schema\"",
"]",
"]",
"e",
"[",
"\"oschema\"",
"]",
"=",
"schmap",
"[",
"e",
"[",
"\"output_schema\"",
"]",
"]",
"# For these special cases, the actual input schema is not the one reported by the endpoint, but the schema",
"# of the 'parameters' field (which is fake).",
"if",
"(",
"e",
"[",
"\"class_name\"",
"]",
",",
"method",
")",
"in",
"{",
"(",
"\"Grid\"",
",",
"\"train\"",
")",
",",
"(",
"\"ModelBuilders\"",
",",
"\"train\"",
")",
",",
"(",
"\"ModelBuilders\"",
",",
"\"validate_parameters\"",
")",
"}",
":",
"pieces",
"=",
"path",
".",
"split",
"(",
"\"/\"",
")",
"assert",
"len",
"(",
"pieces",
")",
">=",
"4",
",",
"\"Expected to see algo name in the path: \"",
"+",
"path",
"e",
"[",
"\"algo\"",
"]",
"=",
"pieces",
"[",
"3",
"]",
"method",
"=",
"method",
"+",
"e",
"[",
"\"algo\"",
"]",
".",
"capitalize",
"(",
")",
"# e.g. trainGlm()",
"e",
"[",
"\"handler_method\"",
"]",
"=",
"method",
"for",
"field",
"in",
"e",
"[",
"\"ischema\"",
"]",
"[",
"\"fields\"",
"]",
":",
"if",
"field",
"[",
"\"name\"",
"]",
"==",
"\"parameters\"",
":",
"e",
"[",
"\"input_schema\"",
"]",
"=",
"field",
"[",
"\"schema_name\"",
"]",
"e",
"[",
"\"ischema\"",
"]",
"=",
"schmap",
"[",
"e",
"[",
"\"input_schema\"",
"]",
"]",
"break",
"# Create the list of input_params (as objects, not just names)",
"e",
"[",
"\"input_params\"",
"]",
"=",
"[",
"]",
"for",
"parm",
"in",
"e",
"[",
"\"path_params\"",
"]",
":",
"# find the metadata for the field from the input schema:",
"fields",
"=",
"[",
"field",
"for",
"field",
"in",
"e",
"[",
"\"ischema\"",
"]",
"[",
"\"fields\"",
"]",
"if",
"field",
"[",
"\"name\"",
"]",
"==",
"parm",
"]",
"assert",
"len",
"(",
"fields",
")",
"==",
"1",
",",
"\"Failed to find parameter: %s for endpoint: %s in the input schema %s\"",
"%",
"(",
"parm",
",",
"e",
"[",
"\"url_pattern\"",
"]",
",",
"e",
"[",
"\"ischema\"",
"]",
"[",
"\"name\"",
"]",
")",
"field",
"=",
"fields",
"[",
"0",
"]",
".",
"copy",
"(",
")",
"schema",
"=",
"field",
"[",
"\"schema_name\"",
"]",
"or",
"\"\"",
"# {schema} is null for primitive types",
"ftype",
"=",
"field",
"[",
"\"type\"",
"]",
"assert",
"ftype",
"==",
"\"string\"",
"or",
"ftype",
"==",
"\"int\"",
"or",
"schema",
".",
"endswith",
"(",
"\"KeyV3\"",
")",
"or",
"schema",
"==",
"\"ColSpecifierV3\"",
",",
"\"Unexpected param %s of type %s (schema %s)\"",
"%",
"(",
"field",
"[",
"\"name\"",
"]",
",",
"ftype",
",",
"schema",
")",
"assert",
"field",
"[",
"\"direction\"",
"]",
"!=",
"\"OUTPUT\"",
",",
"\"A path param %s cannot be of type OUTPUT\"",
"%",
"field",
"[",
"\"name\"",
"]",
"field",
"[",
"\"is_path_param\"",
"]",
"=",
"True",
"field",
"[",
"\"required\"",
"]",
"=",
"True",
"e",
"[",
"\"input_params\"",
"]",
".",
"append",
"(",
"field",
")",
"for",
"parm",
"in",
"e",
"[",
"\"ischema\"",
"]",
"[",
"\"fields\"",
"]",
":",
"if",
"parm",
"[",
"\"direction\"",
"]",
"==",
"\"OUTPUT\"",
"or",
"parm",
"[",
"\"name\"",
"]",
"in",
"e",
"[",
"\"path_params\"",
"]",
":",
"continue",
"field",
"=",
"parm",
".",
"copy",
"(",
")",
"field",
"[",
"\"is_path_param\"",
"]",
"=",
"False",
"e",
"[",
"\"input_params\"",
"]",
".",
"append",
"(",
"field",
")",
"yield",
"e",
"return",
"list",
"(",
"gen_rich_route",
"(",
")",
")"
Docstring: "Return the list of REST API endpoints." | language: python | partition: test
manns/pyspread | pyspread/src/gui/_toolbars.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_toolbars.py#L344-L353

```python
def OnUpdate(self, event):
    """Updates the toolbar states"""
    attributes = event.attr
    self._update_buttoncell(attributes["button_cell"])
    self.Refresh()
    event.Skip()
```
"def",
"OnUpdate",
"(",
"self",
",",
"event",
")",
":",
"attributes",
"=",
"event",
".",
"attr",
"self",
".",
"_update_buttoncell",
"(",
"attributes",
"[",
"\"button_cell\"",
"]",
")",
"self",
".",
"Refresh",
"(",
")",
"event",
".",
"Skip",
"(",
")"
Docstring: "Updates the toolbar states" | language: python | partition: train
wonambi-python/wonambi | wonambi/widgets/notes.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1406-L1409

```python
def remove_event(self, name=None, time=None, chan=None):
    """Action: remove single event."""
    self.annot.remove_event(name=name, time=time, chan=chan)
    self.update_annotations()
```
"def",
"remove_event",
"(",
"self",
",",
"name",
"=",
"None",
",",
"time",
"=",
"None",
",",
"chan",
"=",
"None",
")",
":",
"self",
".",
"annot",
".",
"remove_event",
"(",
"name",
"=",
"name",
",",
"time",
"=",
"time",
",",
"chan",
"=",
"chan",
")",
"self",
".",
"update_annotations",
"(",
")"
Docstring: "Action: remove single event." | language: python | partition: train
KeplerGO/K2fov | K2fov/fov.py | https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/fov.py#L618-L634

```python
def plotSpacecraftYAxis(self, maptype=None):
    """Plot a line pointing in the direction of the spacecraft
    y-axis (i.e normal to the solar panel
    """
    if maptype is None:
        maptype = self.defaultMap
    # Plot direction of spacecraft +y axis. The subtraction of
    # 90 degrees accounts for the different defintions of where
    # zero roll is.
    yAngle_deg = getSpacecraftRollAngleFromFovAngle(self.roll0_deg)
    yAngle_deg -= 90
    a, d = gcircle.sphericalAngDestination(self.ra0_deg, self.dec0_deg, -yAngle_deg, 12.0)
    x0, y0 = maptype.skyToPix(self.ra0_deg, self.dec0_deg)
    x1, y1 = maptype.skyToPix(a, d)
    mp.plot([x0, x1], [y0, y1], 'k-')
```
"def",
"plotSpacecraftYAxis",
"(",
"self",
",",
"maptype",
"=",
"None",
")",
":",
"if",
"maptype",
"is",
"None",
":",
"maptype",
"=",
"self",
".",
"defaultMap",
"#Plot direction of spacecraft +y axis. The subtraction of",
"#90 degrees accounts for the different defintions of where",
"#zero roll is.",
"yAngle_deg",
"=",
"getSpacecraftRollAngleFromFovAngle",
"(",
"self",
".",
"roll0_deg",
")",
"yAngle_deg",
"-=",
"90",
"a",
",",
"d",
"=",
"gcircle",
".",
"sphericalAngDestination",
"(",
"self",
".",
"ra0_deg",
",",
"self",
".",
"dec0_deg",
",",
"-",
"yAngle_deg",
",",
"12.0",
")",
"x0",
",",
"y0",
"=",
"maptype",
".",
"skyToPix",
"(",
"self",
".",
"ra0_deg",
",",
"self",
".",
"dec0_deg",
")",
"x1",
",",
"y1",
"=",
"maptype",
".",
"skyToPix",
"(",
"a",
",",
"d",
")",
"mp",
".",
"plot",
"(",
"[",
"x0",
",",
"x1",
"]",
",",
"[",
"y0",
",",
"y1",
"]",
",",
"'k-'",
")"
Docstring: "Plot a line pointing in the direction of the spacecraft y-axis (i.e normal to the solar panel" | language: python | partition: train
raphaelgyory/django-rest-messaging | rest_messaging/models.py | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L230-L250

```python
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
    """ Returns the last message in each thread """
    # we get the last message for each thread
    # we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
    threads = Thread.managers.\
        get_threads_where_participant_is_active(participant_id).\
        annotate(last_message_id=Max('message__id'))
    messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
        order_by('-id').\
        distinct().\
        select_related('thread', 'sender')
    if check_who_read is True:
        messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
        messages = self.check_who_read(messages)
    else:
        messages = messages.prefetch_related('thread__participants')
    if check_is_notification is True:
        messages = self.check_is_notification(participant_id, messages)
    return messages
```
"def",
"get_lasts_messages_of_threads",
"(",
"self",
",",
"participant_id",
",",
"check_who_read",
"=",
"True",
",",
"check_is_notification",
"=",
"True",
")",
":",
"# we get the last message for each thread",
"# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')",
"threads",
"=",
"Thread",
".",
"managers",
".",
"get_threads_where_participant_is_active",
"(",
"participant_id",
")",
".",
"annotate",
"(",
"last_message_id",
"=",
"Max",
"(",
"'message__id'",
")",
")",
"messages",
"=",
"Message",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"[",
"thread",
".",
"last_message_id",
"for",
"thread",
"in",
"threads",
"]",
")",
".",
"order_by",
"(",
"'-id'",
")",
".",
"distinct",
"(",
")",
".",
"select_related",
"(",
"'thread'",
",",
"'sender'",
")",
"if",
"check_who_read",
"is",
"True",
":",
"messages",
"=",
"messages",
".",
"prefetch_related",
"(",
"'thread__participation_set'",
",",
"'thread__participation_set__participant'",
")",
"messages",
"=",
"self",
".",
"check_who_read",
"(",
"messages",
")",
"else",
":",
"messages",
"=",
"messages",
".",
"prefetch_related",
"(",
"'thread__participants'",
")",
"if",
"check_is_notification",
"is",
"True",
":",
"messages",
"=",
"self",
".",
"check_is_notification",
"(",
"participant_id",
",",
"messages",
")",
"return",
"messages"
Docstring: "Returns the last message in each thread" | language: python | partition: train
ioam/lancet | lancet/core.py | https://github.com/ioam/lancet/blob/1fbbf88fa0e8974ff9ed462e3cb11722ddebdd6e/lancet/core.py#L54-L63

```python
def pprint_args(self, pos_args, keyword_args, infix_operator=None, extra_params={}):
    """
    Method to define the positional arguments and keyword order
    for pretty printing.
    """
    if infix_operator and not (len(pos_args)==2 and keyword_args==[]):
        raise Exception('Infix format requires exactly two'
                        ' positional arguments and no keywords')
    (kwargs,_,_,_) = self._pprint_args
    self._pprint_args = (keyword_args + kwargs, pos_args, infix_operator, extra_params)
```
"def",
"pprint_args",
"(",
"self",
",",
"pos_args",
",",
"keyword_args",
",",
"infix_operator",
"=",
"None",
",",
"extra_params",
"=",
"{",
"}",
")",
":",
"if",
"infix_operator",
"and",
"not",
"(",
"len",
"(",
"pos_args",
")",
"==",
"2",
"and",
"keyword_args",
"==",
"[",
"]",
")",
":",
"raise",
"Exception",
"(",
"'Infix format requires exactly two'",
"' positional arguments and no keywords'",
")",
"(",
"kwargs",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_pprint_args",
"self",
".",
"_pprint_args",
"=",
"(",
"keyword_args",
"+",
"kwargs",
",",
"pos_args",
",",
"infix_operator",
",",
"extra_params",
")"
Docstring: "Method to define the positional arguments and keyword order for pretty printing." | language: python | partition: valid
callowayproject/Calloway | calloway/apps/custom_registration/backends/email/__init__.py | https://github.com/callowayproject/Calloway/blob/d22e98d41fbd298ab6393ba7bd84a75528be9f81/calloway/apps/custom_registration/backends/email/__init__.py#L147-L169

```python
def send_activation_email(self, user, profile, password, site):
    """
    Custom send email method to supplied the activation link and
    new generated password.
    """
    ctx_dict = {'password': password,
                'site': site,
                'activation_key': profile.activation_key,
                'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS}
    subject = render_to_string(
        'registration/email/emails/password_subject.txt',
        ctx_dict)
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    message = render_to_string('registration/email/emails/password.txt',
                               ctx_dict)
    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except:
        pass
```
"def",
"send_activation_email",
"(",
"self",
",",
"user",
",",
"profile",
",",
"password",
",",
"site",
")",
":",
"ctx_dict",
"=",
"{",
"'password'",
":",
"password",
",",
"'site'",
":",
"site",
",",
"'activation_key'",
":",
"profile",
".",
"activation_key",
",",
"'expiration_days'",
":",
"settings",
".",
"ACCOUNT_ACTIVATION_DAYS",
"}",
"subject",
"=",
"render_to_string",
"(",
"'registration/email/emails/password_subject.txt'",
",",
"ctx_dict",
")",
"# Email subject *must not* contain newlines",
"subject",
"=",
"''",
".",
"join",
"(",
"subject",
".",
"splitlines",
"(",
")",
")",
"message",
"=",
"render_to_string",
"(",
"'registration/email/emails/password.txt'",
",",
"ctx_dict",
")",
"try",
":",
"user",
".",
"email_user",
"(",
"subject",
",",
"message",
",",
"settings",
".",
"DEFAULT_FROM_EMAIL",
")",
"except",
":",
"pass"
Docstring: "Custom send email method to supplied the activation link and new generated password." | language: python | partition: train
osrg/ryu | ryu/lib/ovs/vsctl.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/vsctl.py#L1167-L1190

```python
def _do_main(self, commands):
    """
    :type commands: list of VSCtlCommand
    """
    self._reset()
    self._init_schema_helper()
    self._run_prerequisites(commands)
    idl_ = idl.Idl(self.remote, self.schema_helper)
    seqno = idl_.change_seqno
    while True:
        self._idl_wait(idl_, seqno)
        seqno = idl_.change_seqno
        if self._do_vsctl(idl_, commands):
            break
        if self.txn:
            self.txn.abort()
            self.txn = None
        # TODO:XXX
        # ovsdb_symbol_table_destroy(symtab)
    idl_.close()
```
"def",
"_do_main",
"(",
"self",
",",
"commands",
")",
":",
"self",
".",
"_reset",
"(",
")",
"self",
".",
"_init_schema_helper",
"(",
")",
"self",
".",
"_run_prerequisites",
"(",
"commands",
")",
"idl_",
"=",
"idl",
".",
"Idl",
"(",
"self",
".",
"remote",
",",
"self",
".",
"schema_helper",
")",
"seqno",
"=",
"idl_",
".",
"change_seqno",
"while",
"True",
":",
"self",
".",
"_idl_wait",
"(",
"idl_",
",",
"seqno",
")",
"seqno",
"=",
"idl_",
".",
"change_seqno",
"if",
"self",
".",
"_do_vsctl",
"(",
"idl_",
",",
"commands",
")",
":",
"break",
"if",
"self",
".",
"txn",
":",
"self",
".",
"txn",
".",
"abort",
"(",
")",
"self",
".",
"txn",
"=",
"None",
"# TODO:XXX",
"# ovsdb_symbol_table_destroy(symtab)",
"idl_",
".",
"close",
"(",
")"
Docstring: ":type commands: list of VSCtlCommand" | language: python | partition: train
arista-eosplus/pyeapi | pyeapi/api/interfaces.py | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/interfaces.py#L447-L467

```python
def set_flowcontrol_send(self, name, value=None, default=False,
                         disable=False):
    """Configures the interface flowcontrol send value

    Args:
        name (string): The interface identifier. It must be a full
            interface name (ie Ethernet, not Et)
        value (boolean): True if the interface should enable sending flow
            control packets, otherwise False
        default (boolean): Specifies to default the interface flow
            control send value
        disable (boolean): Specifies to disable the interface flow
            control send value

    Returns:
        True if the operation succeeds otherwise False is returned
    """
    return self.set_flowcontrol(name, 'send', value, default, disable)
```
"def",
"set_flowcontrol_send",
"(",
"self",
",",
"name",
",",
"value",
"=",
"None",
",",
"default",
"=",
"False",
",",
"disable",
"=",
"False",
")",
":",
"return",
"self",
".",
"set_flowcontrol",
"(",
"name",
",",
"'send'",
",",
"value",
",",
"default",
",",
"disable",
")"
Docstring: "Configures the interface flowcontrol send value" | language: python | partition: train
"""
Checks if the device is in configuration mode or not.
Cisco IOS devices abbreviate the prompt at 20 chars in config mode
"""
return super(CiscoBaseConnection, self).check_config_mode(
check_string=check_string, pattern=pattern
) | [
"def",
"check_config_mode",
"(",
"self",
",",
"check_string",
"=",
"\")#\"",
",",
"pattern",
"=",
"\"\"",
")",
":",
"return",
"super",
"(",
"CiscoBaseConnection",
",",
"self",
")",
".",
"check_config_mode",
"(",
"check_string",
"=",
"check_string",
",",
"pattern",
"=",
"pattern",
")"
] | Checks if the device is in configuration mode or not.
Cisco IOS devices abbreviate the prompt at 20 chars in config mode | [
"Checks",
"if",
"the",
"device",
"is",
"in",
"configuration",
"mode",
"or",
"not",
"."
] | python | train |
bloomreach/s4cmd | s4cmd.py | https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1734-L1737

```python
def du_handler(self, args):
    '''Handler for size command'''
    for src, size in self.s3handler().size(args[1:]):
        message('%s\t%s' % (size, src))
```
"def",
"du_handler",
"(",
"self",
",",
"args",
")",
":",
"for",
"src",
",",
"size",
"in",
"self",
".",
"s3handler",
"(",
")",
".",
"size",
"(",
"args",
"[",
"1",
":",
"]",
")",
":",
"message",
"(",
"'%s\\t%s'",
"%",
"(",
"size",
",",
"src",
")",
")"
Docstring: "Handler for size command" | language: python | partition: test
KoffeinFlummi/Chronyk | chronyk/chronyk.py | https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L64-L72

```python
def _gmtime(timestamp):
    """Custom gmtime because yada yada.
    """
    try:
        return time.gmtime(timestamp)
    except OSError:
        dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        dst = int(_isdst(dt))
        return time.struct_time(dt.timetuple()[:8] + tuple([dst]))
```
"def",
"_gmtime",
"(",
"timestamp",
")",
":",
"try",
":",
"return",
"time",
".",
"gmtime",
"(",
"timestamp",
")",
"except",
"OSError",
":",
"dt",
"=",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"timestamp",
")",
"dst",
"=",
"int",
"(",
"_isdst",
"(",
"dt",
")",
")",
"return",
"time",
".",
"struct_time",
"(",
"dt",
".",
"timetuple",
"(",
")",
"[",
":",
"8",
"]",
"+",
"tuple",
"(",
"[",
"dst",
"]",
")",
")"
Docstring: "Custom gmtime because yada yada." | language: python | partition: train
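A hedged usage note: on platforms whose `time.gmtime()` rejects pre-epoch timestamps with OSError, the fallback branch above computes the calendar fields via datetime arithmetic, so a call like the following should give the same answer either way:

```python
t = _gmtime(-86400)                    # one day before the Unix epoch
print(t.tm_year, t.tm_mon, t.tm_mday)  # -> 1969 12 31
```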
Calysto/calysto | calysto/ai/conx.py | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L1973-L2039

```python
def propagate(self, **args):
    """
    Propagates activation through the network. Optionally, takes input layer names
    as keywords, and their associated activations. If input layer(s) are given, then
    propagate() will return the output layer's activation. If there is more than
    one output layer, then a dictionary is returned.

    Examples:
        >>> net = Network() # doctest: +ELLIPSIS
        Conx using seed: ...
        >>> net.addLayers(2, 5, 1)
        >>> len(net.propagate(input = [0, .5]))
        1
    """
    self.prePropagate(**args)
    for key in args:
        layer = self.getLayer(key)
        if layer.kind == 'Input':
            if self[key].verify and not self[key].activationSet == 0:
                raise AttributeError("attempt to set activations on input layer '%s' without reset" % key)
            self.copyActivations(layer, args[key])
        elif layer.kind == 'Context':
            self.copyActivations(layer, args[key])
        elif layer.kind == 'Output' and len(args[key]) == layer.size:  # in case you expect propagate to handle the outputs
            self.copyTargets(layer, args[key])
    self.verifyInputs()  # better have inputs set
    if self.verbosity > 2: print("Propagate Network '" + self.name + "':")
    # initialize netinput:
    for layer in self.layers:
        if layer.type != 'Input' and layer.active:
            layer.netinput = (layer.weight).copy()
    # for each connection, in order:
    for layer in self.layers:
        if layer.active:
            for connection in self.connections:
                if (connection.toLayer.name == layer.name
                        and connection.fromLayer.active
                        and connection.active):
                    a = connection.fromLayer.activation
                    w = connection.weight
                    m = Numeric.matrixmultiply(a, w)
                    ni = m + connection.toLayer.netinput
                    connection.toLayer.netinput = ni
                    # connection.toLayer.netinput = \
                    #     (connection.toLayer.netinput +
                    #      Numeric.matrixmultiply(connection.fromLayer.activation,
                    #                             connection.weight)) # propagate!
            if layer.type != 'Input':
                layer.activation = self.activationFunction(layer.netinput)
    for layer in self.layers:
        if layer.log and layer.active:
            layer.writeLog(self)
    self.count += 1  # counts number of times propagate() is called
    if len(args) != 0:
        dict = {}
        for layer in self.layers:
            if layer.type == "Output":
                dict[layer.name] = layer.activation.copy()
        if len(dict) == 1:
            return dict[list(dict.keys())[0]]
        else:
            return dict
```
"def",
"propagate",
"(",
"self",
",",
"*",
"*",
"args",
")",
":",
"self",
".",
"prePropagate",
"(",
"*",
"*",
"args",
")",
"for",
"key",
"in",
"args",
":",
"layer",
"=",
"self",
".",
"getLayer",
"(",
"key",
")",
"if",
"layer",
".",
"kind",
"==",
"'Input'",
":",
"if",
"self",
"[",
"key",
"]",
".",
"verify",
"and",
"not",
"self",
"[",
"key",
"]",
".",
"activationSet",
"==",
"0",
":",
"raise",
"AttributeError",
"(",
"\"attempt to set activations on input layer '%s' without reset\"",
"%",
"key",
")",
"self",
".",
"copyActivations",
"(",
"layer",
",",
"args",
"[",
"key",
"]",
")",
"elif",
"layer",
".",
"kind",
"==",
"'Context'",
":",
"self",
".",
"copyActivations",
"(",
"layer",
",",
"args",
"[",
"key",
"]",
")",
"elif",
"layer",
".",
"kind",
"==",
"'Output'",
"and",
"len",
"(",
"args",
"[",
"key",
"]",
")",
"==",
"layer",
".",
"size",
":",
"# in case you expect propagate to handle the outputs",
"self",
".",
"copyTargets",
"(",
"layer",
",",
"args",
"[",
"key",
"]",
")",
"self",
".",
"verifyInputs",
"(",
")",
"# better have inputs set",
"if",
"self",
".",
"verbosity",
">",
"2",
":",
"print",
"(",
"\"Propagate Network '\"",
"+",
"self",
".",
"name",
"+",
"\"':\"",
")",
"# initialize netinput:",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"if",
"layer",
".",
"type",
"!=",
"'Input'",
"and",
"layer",
".",
"active",
":",
"layer",
".",
"netinput",
"=",
"(",
"layer",
".",
"weight",
")",
".",
"copy",
"(",
")",
"# for each connection, in order:",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"if",
"layer",
".",
"active",
":",
"for",
"connection",
"in",
"self",
".",
"connections",
":",
"if",
"(",
"connection",
".",
"toLayer",
".",
"name",
"==",
"layer",
".",
"name",
"and",
"connection",
".",
"fromLayer",
".",
"active",
"and",
"connection",
".",
"active",
")",
":",
"a",
"=",
"connection",
".",
"fromLayer",
".",
"activation",
"w",
"=",
"connection",
".",
"weight",
"m",
"=",
"Numeric",
".",
"matrixmultiply",
"(",
"a",
",",
"w",
")",
"ni",
"=",
"m",
"+",
"connection",
".",
"toLayer",
".",
"netinput",
"connection",
".",
"toLayer",
".",
"netinput",
"=",
"ni",
"#connection.toLayer.netinput = \\",
"# (connection.toLayer.netinput + ",
"# Numeric.matrixmultiply(connection.fromLayer.activation,",
"# connection.weight)) # propagate!",
"if",
"layer",
".",
"type",
"!=",
"'Input'",
":",
"layer",
".",
"activation",
"=",
"self",
".",
"activationFunction",
"(",
"layer",
".",
"netinput",
")",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"if",
"layer",
".",
"log",
"and",
"layer",
".",
"active",
":",
"layer",
".",
"writeLog",
"(",
"self",
")",
"self",
".",
"count",
"+=",
"1",
"# counts number of times propagate() is called",
"if",
"len",
"(",
"args",
")",
"!=",
"0",
":",
"dict",
"=",
"{",
"}",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"if",
"layer",
".",
"type",
"==",
"\"Output\"",
":",
"dict",
"[",
"layer",
".",
"name",
"]",
"=",
"layer",
".",
"activation",
".",
"copy",
"(",
")",
"if",
"len",
"(",
"dict",
")",
"==",
"1",
":",
"return",
"dict",
"[",
"list",
"(",
"dict",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
"else",
":",
"return",
"dict"
Docstring: "Propagates activation through the network." | language: python | partition: train
gem/oq-engine | openquake/hmtk/sources/point_source.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/sources/point_source.py#L115-L141

```python
def create_geometry(self, input_geometry, upper_depth, lower_depth):
    '''
    If geometry is defined as a numpy array then create instance of
    nhlib.geo.point.Point class, otherwise if already instance of class
    accept class

    :param input_geometry:
        Input geometry (point) as either
        i) instance of nhlib.geo.point.Point class
        ii) numpy.ndarray [Longitude, Latitude]

    :param float upper_depth:
        Upper seismogenic depth (km)

    :param float lower_depth:
        Lower seismogenic depth (km)
    '''
    self._check_seismogenic_depths(upper_depth, lower_depth)
    # Check/create the geometry class
    if not isinstance(input_geometry, Point):
        if not isinstance(input_geometry, np.ndarray):
            raise ValueError('Unrecognised or unsupported geometry '
                             'definition')
        self.geometry = Point(input_geometry[0], input_geometry[1])
    else:
        self.geometry = input_geometry
```
"def",
"create_geometry",
"(",
"self",
",",
"input_geometry",
",",
"upper_depth",
",",
"lower_depth",
")",
":",
"self",
".",
"_check_seismogenic_depths",
"(",
"upper_depth",
",",
"lower_depth",
")",
"# Check/create the geometry class",
"if",
"not",
"isinstance",
"(",
"input_geometry",
",",
"Point",
")",
":",
"if",
"not",
"isinstance",
"(",
"input_geometry",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"'Unrecognised or unsupported geometry '",
"'definition'",
")",
"self",
".",
"geometry",
"=",
"Point",
"(",
"input_geometry",
"[",
"0",
"]",
",",
"input_geometry",
"[",
"1",
"]",
")",
"else",
":",
"self",
".",
"geometry",
"=",
"input_geometry"
Docstring: "If geometry is defined as a numpy array then create instance of nhlib.geo.point.Point class, otherwise if already instance of class accept class" | language: python | partition: train
Alignak-monitoring/alignak | alignak/daemons/schedulerdaemon.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L311-L501

```python
def setup_new_conf(self):
    # pylint: disable=too-many-statements, too-many-branches, too-many-locals
    """Setup new conf received for scheduler

    :return: None
    """
    # Execute the base class treatment...
    super(Alignak, self).setup_new_conf()

    # ...then our own specific treatment!
    with self.conf_lock:
        # self_conf is our own configuration from the alignak environment
        # self_conf = self.cur_conf['self_conf']
        logger.debug("Got config: %s", self.cur_conf)
        if 'conf_part' not in self.cur_conf:
            self.cur_conf['conf_part'] = None
        conf_part = self.cur_conf['conf_part']

        # Ok now we can save the retention data
        if self.sched.pushed_conf is not None:
            self.sched.update_retention()

        # Get the monitored objects configuration
        t00 = time.time()
        received_conf_part = None
        try:
            received_conf_part = unserialize(conf_part)
            assert received_conf_part is not None
        except AssertionError as exp:
            # This to indicate that no configuration is managed by this scheduler...
            logger.warning("No managed configuration received from arbiter")
        except AlignakClassLookupException as exp:  # pragma: no cover
            # This to indicate that the new configuration is not managed...
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter",
                "_error": str(exp)
            }
            logger.error(self.new_conf)
            logger.error("Back trace of the error:\n%s", traceback.format_exc())
            return
        except Exception as exp:  # pylint: disable=broad-except
            # This to indicate that the new configuration is not managed...
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter",
                "_error": str(exp)
            }
            logger.error(self.new_conf)
            self.exit_on_exception(exp, str(self.new_conf))
        # if not received_conf_part:
        #     return
        logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
                    received_conf_part, t00, time.time() - t00)
        logger.info("Scheduler received configuration : %s", received_conf_part)

        # Now we create our pollers, reactionners and brokers
        for link_type in ['pollers', 'reactionners', 'brokers']:
            if link_type not in self.cur_conf['satellites']:
                logger.error("Missing %s in the configuration!", link_type)
                continue

            my_satellites = getattr(self, link_type, {})
            received_satellites = self.cur_conf['satellites'][link_type]
            for link_uuid in received_satellites:
                rs_conf = received_satellites[link_uuid]
                logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                             rs_conf['type'], rs_conf['name'])

                # Must look if we already had a configuration and save our broks
                already_got = rs_conf['instance_id'] in my_satellites
                broks = []
                actions = {}
                wait_homerun = {}
                external_commands = {}
                running_id = 0
                if already_got:
                    logger.warning("I already got: %s", rs_conf['instance_id'])
                    # Save some information
                    running_id = my_satellites[link_uuid].running_id
                    (broks, actions,
                     wait_homerun, external_commands) = \
                        my_satellites[link_uuid].get_and_clear_context()
                    # Delete the former link
                    del my_satellites[link_uuid]

                # My new satellite link...
                new_link = SatelliteLink.get_a_satellite_link(link_type[:-1],
                                                              rs_conf)
                my_satellites[new_link.uuid] = new_link
                logger.info("I got a new %s satellite: %s", link_type[:-1], new_link)

                new_link.running_id = running_id
                new_link.external_commands = external_commands
                new_link.broks = broks
                new_link.wait_homerun = wait_homerun
                new_link.actions = actions

                # Replacing the satellite address and port by those defined in satellite_map
                if new_link.name in self.cur_conf['override_conf'].get('satellite_map', {}):
                    override_conf = self.cur_conf['override_conf']
                    overriding = override_conf.get('satellite_map')[new_link.name]
                    logger.warning("Do not override the configuration for: %s, with: %s. "
                                   "Please check whether this is necessary!",
                                   new_link.name, overriding)

        # First mix conf and override_conf to have our definitive conf
        for prop in getattr(self.cur_conf, 'override_conf', []):
            logger.debug("Overriden: %s / %s ", prop, getattr(received_conf_part, prop, None))
            logger.debug("Overriding: %s / %s ", prop, self.cur_conf['override_conf'])
            setattr(received_conf_part, prop, self.cur_conf['override_conf'].get(prop, None))

        # Scheduler modules
        if not self.have_modules:
            try:
                logger.debug("Modules configuration: %s", self.cur_conf['modules'])
                self.modules = unserialize(self.cur_conf['modules'], no_load=True)
            except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                logger.error('Cannot un-serialize modules configuration '
                             'received from arbiter: %s', exp)
            if self.modules:
                logger.debug("I received some modules configuration: %s", self.modules)
                self.have_modules = True

                self.do_load_modules(self.modules)
                # and start external modules too
                self.modules_manager.start_external_instances()
            else:
                logger.info("I do not have modules")

        if received_conf_part:
            logger.info("Loading configuration...")

            # Propagate the global parameters to the configuration items
            received_conf_part.explode_global_conf()

            # We give the configuration to our scheduler
            self.sched.reset()
            self.sched.load_conf(self.cur_conf['instance_id'],
                                 self.cur_conf['instance_name'],
                                 received_conf_part)

            # Once loaded, the scheduler has an inner pushed_conf object
            logger.info("Loaded: %s", self.sched.pushed_conf)

            # Update the scheduler ticks according to the daemon configuration
            self.sched.update_recurrent_works_tick(self)

            # We must update our pushed configuration macros with correct values
            # from the configuration parameters
            # self.sched.pushed_conf.fill_resource_macros_names_macros()

            # Creating the Macroresolver Class & unique instance
            m_solver = MacroResolver()
            m_solver.init(received_conf_part)

            # Now create the external commands manager
            # We are an applyer: our role is not to dispatch commands, but to apply them
            ecm = ExternalCommandManager(
                received_conf_part, 'applyer', self.sched,
                received_conf_part.accept_passive_unknown_check_results,
                received_conf_part.log_external_commands)

            # Scheduler needs to know about this external command manager to use it if necessary
            self.sched.external_commands_manager = ecm

            # Ok now we can load the retention data
            self.sched.retention_load()

            # Log hosts/services initial states
            self.sched.log_initial_states()

        # Create brok new conf
        brok = Brok({'type': 'new_conf', 'data': {}})
        self.sched.add_brok(brok)

        # Initialize connection with all our satellites
        logger.info("Initializing connection with my satellites:")
        my_satellites = self.get_links_of_type(s_type='')
        for satellite in list(my_satellites.values()):
            logger.info("- : %s/%s", satellite.type, satellite.name)
            if not self.daemon_connection_init(satellite):
                logger.error("Satellite connection failed: %s", satellite)

        if received_conf_part:
            # Enable the scheduling process
            logger.info("Loaded: %s", self.sched.pushed_conf)
            self.sched.start_scheduling()

        # Now I have a configuration!
        self.have_conf = True
```
"def",
"setup_new_conf",
"(",
"self",
")",
":",
"# pylint: disable=too-many-statements, too-many-branches, too-many-locals",
"# Execute the base class treatment...",
"super",
"(",
"Alignak",
",",
"self",
")",
".",
"setup_new_conf",
"(",
")",
"# ...then our own specific treatment!",
"with",
"self",
".",
"conf_lock",
":",
"# self_conf is our own configuration from the alignak environment",
"# self_conf = self.cur_conf['self_conf']",
"logger",
".",
"debug",
"(",
"\"Got config: %s\"",
",",
"self",
".",
"cur_conf",
")",
"if",
"'conf_part'",
"not",
"in",
"self",
".",
"cur_conf",
":",
"self",
".",
"cur_conf",
"[",
"'conf_part'",
"]",
"=",
"None",
"conf_part",
"=",
"self",
".",
"cur_conf",
"[",
"'conf_part'",
"]",
"# Ok now we can save the retention data",
"if",
"self",
".",
"sched",
".",
"pushed_conf",
"is",
"not",
"None",
":",
"self",
".",
"sched",
".",
"update_retention",
"(",
")",
"# Get the monitored objects configuration",
"t00",
"=",
"time",
".",
"time",
"(",
")",
"received_conf_part",
"=",
"None",
"try",
":",
"received_conf_part",
"=",
"unserialize",
"(",
"conf_part",
")",
"assert",
"received_conf_part",
"is",
"not",
"None",
"except",
"AssertionError",
"as",
"exp",
":",
"# This to indicate that no configuration is managed by this scheduler...",
"logger",
".",
"warning",
"(",
"\"No managed configuration received from arbiter\"",
")",
"except",
"AlignakClassLookupException",
"as",
"exp",
":",
"# pragma: no cover",
"# This to indicate that the new configuration is not managed...",
"self",
".",
"new_conf",
"=",
"{",
"\"_status\"",
":",
"\"Cannot un-serialize configuration received from arbiter\"",
",",
"\"_error\"",
":",
"str",
"(",
"exp",
")",
"}",
"logger",
".",
"error",
"(",
"self",
".",
"new_conf",
")",
"logger",
".",
"error",
"(",
"\"Back trace of the error:\\n%s\"",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"# This to indicate that the new configuration is not managed...",
"self",
".",
"new_conf",
"=",
"{",
"\"_status\"",
":",
"\"Cannot un-serialize configuration received from arbiter\"",
",",
"\"_error\"",
":",
"str",
"(",
"exp",
")",
"}",
"logger",
".",
"error",
"(",
"self",
".",
"new_conf",
")",
"self",
".",
"exit_on_exception",
"(",
"exp",
",",
"str",
"(",
"self",
".",
"new_conf",
")",
")",
"# if not received_conf_part:",
"# return",
"logger",
".",
"info",
"(",
"\"Monitored configuration %s received at %d. Un-serialized in %d secs\"",
",",
"received_conf_part",
",",
"t00",
",",
"time",
".",
"time",
"(",
")",
"-",
"t00",
")",
"logger",
".",
"info",
"(",
"\"Scheduler received configuration : %s\"",
",",
"received_conf_part",
")",
"# Now we create our pollers, reactionners and brokers",
"for",
"link_type",
"in",
"[",
"'pollers'",
",",
"'reactionners'",
",",
"'brokers'",
"]",
":",
"if",
"link_type",
"not",
"in",
"self",
".",
"cur_conf",
"[",
"'satellites'",
"]",
":",
"logger",
".",
"error",
"(",
"\"Missing %s in the configuration!\"",
",",
"link_type",
")",
"continue",
"my_satellites",
"=",
"getattr",
"(",
"self",
",",
"link_type",
",",
"{",
"}",
")",
"received_satellites",
"=",
"self",
".",
"cur_conf",
"[",
"'satellites'",
"]",
"[",
"link_type",
"]",
"for",
"link_uuid",
"in",
"received_satellites",
":",
"rs_conf",
"=",
"received_satellites",
"[",
"link_uuid",
"]",
"logger",
".",
"debug",
"(",
"\"- received %s - %s: %s\"",
",",
"rs_conf",
"[",
"'instance_id'",
"]",
",",
"rs_conf",
"[",
"'type'",
"]",
",",
"rs_conf",
"[",
"'name'",
"]",
")",
"# Must look if we already had a configuration and save our broks",
"already_got",
"=",
"rs_conf",
"[",
"'instance_id'",
"]",
"in",
"my_satellites",
"broks",
"=",
"[",
"]",
"actions",
"=",
"{",
"}",
"wait_homerun",
"=",
"{",
"}",
"external_commands",
"=",
"{",
"}",
"running_id",
"=",
"0",
"if",
"already_got",
":",
"logger",
".",
"warning",
"(",
"\"I already got: %s\"",
",",
"rs_conf",
"[",
"'instance_id'",
"]",
")",
"# Save some information",
"running_id",
"=",
"my_satellites",
"[",
"link_uuid",
"]",
".",
"running_id",
"(",
"broks",
",",
"actions",
",",
"wait_homerun",
",",
"external_commands",
")",
"=",
"my_satellites",
"[",
"link_uuid",
"]",
".",
"get_and_clear_context",
"(",
")",
"# Delete the former link",
"del",
"my_satellites",
"[",
"link_uuid",
"]",
"# My new satellite link...",
"new_link",
"=",
"SatelliteLink",
".",
"get_a_satellite_link",
"(",
"link_type",
"[",
":",
"-",
"1",
"]",
",",
"rs_conf",
")",
"my_satellites",
"[",
"new_link",
".",
"uuid",
"]",
"=",
"new_link",
"logger",
".",
"info",
"(",
"\"I got a new %s satellite: %s\"",
",",
"link_type",
"[",
":",
"-",
"1",
"]",
",",
"new_link",
")",
"new_link",
".",
"running_id",
"=",
"running_id",
"new_link",
".",
"external_commands",
"=",
"external_commands",
"new_link",
".",
"broks",
"=",
"broks",
"new_link",
".",
"wait_homerun",
"=",
"wait_homerun",
"new_link",
".",
"actions",
"=",
"actions",
"# Replacing the satellite address and port by those defined in satellite_map",
"if",
"new_link",
".",
"name",
"in",
"self",
".",
"cur_conf",
"[",
"'override_conf'",
"]",
".",
"get",
"(",
"'satellite_map'",
",",
"{",
"}",
")",
":",
"override_conf",
"=",
"self",
".",
"cur_conf",
"[",
"'override_conf'",
"]",
"overriding",
"=",
"override_conf",
".",
"get",
"(",
"'satellite_map'",
")",
"[",
"new_link",
".",
"name",
"]",
"logger",
".",
"warning",
"(",
"\"Do not override the configuration for: %s, with: %s. \"",
"\"Please check whether this is necessary!\"",
",",
"new_link",
".",
"name",
",",
"overriding",
")",
"# First mix conf and override_conf to have our definitive conf",
"for",
"prop",
"in",
"getattr",
"(",
"self",
".",
"cur_conf",
",",
"'override_conf'",
",",
"[",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"\"Overriden: %s / %s \"",
",",
"prop",
",",
"getattr",
"(",
"received_conf_part",
",",
"prop",
",",
"None",
")",
")",
"logger",
".",
"debug",
"(",
"\"Overriding: %s / %s \"",
",",
"prop",
",",
"self",
".",
"cur_conf",
"[",
"'override_conf'",
"]",
")",
"setattr",
"(",
"received_conf_part",
",",
"prop",
",",
"self",
".",
"cur_conf",
"[",
"'override_conf'",
"]",
".",
"get",
"(",
"prop",
",",
"None",
")",
")",
"# Scheduler modules",
"if",
"not",
"self",
".",
"have_modules",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Modules configuration: %s\"",
",",
"self",
".",
"cur_conf",
"[",
"'modules'",
"]",
")",
"self",
".",
"modules",
"=",
"unserialize",
"(",
"self",
".",
"cur_conf",
"[",
"'modules'",
"]",
",",
"no_load",
"=",
"True",
")",
"except",
"AlignakClassLookupException",
"as",
"exp",
":",
"# pragma: no cover, simple protection",
"logger",
".",
"error",
"(",
"'Cannot un-serialize modules configuration '",
"'received from arbiter: %s'",
",",
"exp",
")",
"if",
"self",
".",
"modules",
":",
"logger",
".",
"debug",
"(",
"\"I received some modules configuration: %s\"",
",",
"self",
".",
"modules",
")",
"self",
".",
"have_modules",
"=",
"True",
"self",
".",
"do_load_modules",
"(",
"self",
".",
"modules",
")",
"# and start external modules too",
"self",
".",
"modules_manager",
".",
"start_external_instances",
"(",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"I do not have modules\"",
")",
"if",
"received_conf_part",
":",
"logger",
".",
"info",
"(",
"\"Loading configuration...\"",
")",
"# Propagate the global parameters to the configuration items",
"received_conf_part",
".",
"explode_global_conf",
"(",
")",
"# We give the configuration to our scheduler",
"self",
".",
"sched",
".",
"reset",
"(",
")",
"self",
".",
"sched",
".",
"load_conf",
"(",
"self",
".",
"cur_conf",
"[",
"'instance_id'",
"]",
",",
"self",
".",
"cur_conf",
"[",
"'instance_name'",
"]",
",",
"received_conf_part",
")",
"# Once loaded, the scheduler has an inner pushed_conf object",
"logger",
".",
"info",
"(",
"\"Loaded: %s\"",
",",
"self",
".",
"sched",
".",
"pushed_conf",
")",
"# Update the scheduler ticks according to the daemon configuration",
"self",
".",
"sched",
".",
"update_recurrent_works_tick",
"(",
"self",
")",
"# We must update our pushed configuration macros with correct values",
"# from the configuration parameters",
"# self.sched.pushed_conf.fill_resource_macros_names_macros()",
"# Creating the Macroresolver Class & unique instance",
"m_solver",
"=",
"MacroResolver",
"(",
")",
"m_solver",
".",
"init",
"(",
"received_conf_part",
")",
"# Now create the external commands manager",
"# We are an applyer: our role is not to dispatch commands, but to apply them",
"ecm",
"=",
"ExternalCommandManager",
"(",
"received_conf_part",
",",
"'applyer'",
",",
"self",
".",
"sched",
",",
"received_conf_part",
".",
"accept_passive_unknown_check_results",
",",
"received_conf_part",
".",
"log_external_commands",
")",
"# Scheduler needs to know about this external command manager to use it if necessary",
"self",
".",
"sched",
".",
"external_commands_manager",
"=",
"ecm",
"# Ok now we can load the retention data",
"self",
".",
"sched",
".",
"retention_load",
"(",
")",
"# Log hosts/services initial states",
"self",
".",
"sched",
".",
"log_initial_states",
"(",
")",
"# Create brok new conf",
"brok",
"=",
"Brok",
"(",
"{",
"'type'",
":",
"'new_conf'",
",",
"'data'",
":",
"{",
"}",
"}",
")",
"self",
".",
"sched",
".",
"add_brok",
"(",
"brok",
")",
"# Initialize connection with all our satellites",
"logger",
".",
"info",
"(",
"\"Initializing connection with my satellites:\"",
")",
"my_satellites",
"=",
"self",
".",
"get_links_of_type",
"(",
"s_type",
"=",
"''",
")",
"for",
"satellite",
"in",
"list",
"(",
"my_satellites",
".",
"values",
"(",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"- : %s/%s\"",
",",
"satellite",
".",
"type",
",",
"satellite",
".",
"name",
")",
"if",
"not",
"self",
".",
"daemon_connection_init",
"(",
"satellite",
")",
":",
"logger",
".",
"error",
"(",
"\"Satellite connection failed: %s\"",
",",
"satellite",
")",
"if",
"received_conf_part",
":",
"# Enable the scheduling process",
"logger",
".",
"info",
"(",
"\"Loaded: %s\"",
",",
"self",
".",
"sched",
".",
"pushed_conf",
")",
"self",
".",
"sched",
".",
"start_scheduling",
"(",
")",
"# Now I have a configuration!",
"self",
".",
"have_conf",
"=",
"True"
] | Setup new conf received for scheduler
:return: None | [
"Setup",
"new",
"conf",
"received",
"for",
"scheduler"
] | python | train |
ANTsX/ANTsPy | ants/utils/scalar_rgb_vector.py | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/scalar_rgb_vector.py#L78-L106 | def rgb_to_vector(image):
"""
Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb()
"""
if image.pixeltype != 'unsigned char':
image = image.clone('unsigned char')
idim = image.dimension
libfn = utils.get_lib_fn('RgbToVector%i' % idim)
new_ptr = libfn(image.pointer)
new_img = iio.ANTsImage(pixeltype=image.pixeltype, dimension=image.dimension,
components=3, pointer=new_ptr, is_rgb=False)
return new_img | [
"def",
"rgb_to_vector",
"(",
"image",
")",
":",
"if",
"image",
".",
"pixeltype",
"!=",
"'unsigned char'",
":",
"image",
"=",
"image",
".",
"clone",
"(",
"'unsigned char'",
")",
"idim",
"=",
"image",
".",
"dimension",
"libfn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'RgbToVector%i'",
"%",
"idim",
")",
"new_ptr",
"=",
"libfn",
"(",
"image",
".",
"pointer",
")",
"new_img",
"=",
"iio",
".",
"ANTsImage",
"(",
"pixeltype",
"=",
"image",
".",
"pixeltype",
",",
"dimension",
"=",
"image",
".",
"dimension",
",",
"components",
"=",
"3",
",",
"pointer",
"=",
"new_ptr",
",",
"is_rgb",
"=",
"False",
")",
"return",
"new_img"
] | Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb() | [
"Convert",
"an",
"RGB",
"ANTsImage",
"to",
"a",
"Vector",
"ANTsImage"
] | python | train |
portfors-lab/sparkle | sparkle/run/acquisition_manager.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/acquisition_manager.py#L421-L432 | def calibration_template(self):
"""Gets the template documentation for the both the tone curve calibration and noise calibration
:returns: dict -- all information necessary to recreate calibration objects
"""
temp = {}
temp['tone_doc'] = self.tone_calibrator.stimulus.templateDoc()
comp_doc = []
for calstim in self.bs_calibrator.get_stims():
comp_doc.append(calstim.stateDict())
temp['noise_doc'] = comp_doc
return temp | [
"def",
"calibration_template",
"(",
"self",
")",
":",
"temp",
"=",
"{",
"}",
"temp",
"[",
"'tone_doc'",
"]",
"=",
"self",
".",
"tone_calibrator",
".",
"stimulus",
".",
"templateDoc",
"(",
")",
"comp_doc",
"=",
"[",
"]",
"for",
"calstim",
"in",
"self",
".",
"bs_calibrator",
".",
"get_stims",
"(",
")",
":",
"comp_doc",
".",
"append",
"(",
"calstim",
".",
"stateDict",
"(",
")",
")",
"temp",
"[",
"'noise_doc'",
"]",
"=",
"comp_doc",
"return",
"temp"
] | Gets the template documentation for the both the tone curve calibration and noise calibration
:returns: dict -- all information necessary to recreate calibration objects | [
"Gets",
"the",
"template",
"documentation",
"for",
"the",
"both",
"the",
"tone",
"curve",
"calibration",
"and",
"noise",
"calibration"
] | python | train |
pallets/werkzeug | src/werkzeug/routing.py | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/routing.py#L2083-L2200 | def build(
self,
endpoint,
values=None,
method=None,
force_external=False,
append_unknown=True,
):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if you set it to `True` will force external URLs. Per default
external URLs (include the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
Passing a ``MultiDict`` will also add multiple values:
>>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
'/?p=z&q=a&q=b'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have an URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
temp_values = {}
# iteritems(dict, values) is like `values.lists()`
# without the call or `list()` coercion overhead.
for key, value in iteritems(dict, values):
if not value:
continue
if len(value) == 1: # flatten single item lists
value = value[0]
if value is None: # drop None
continue
temp_values[key] = value
values = temp_values
else:
# drop None
values = dict(i for i in iteritems(values) if i[1] is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name)
or (not self.map.host_matching and domain_part == self.subdomain)
):
return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))
return str(
"%s//%s%s/%s"
% (
self.url_scheme + ":" if self.url_scheme else "",
host,
self.script_name[:-1],
path.lstrip("/"),
)
) | [
"def",
"build",
"(",
"self",
",",
"endpoint",
",",
"values",
"=",
"None",
",",
"method",
"=",
"None",
",",
"force_external",
"=",
"False",
",",
"append_unknown",
"=",
"True",
",",
")",
":",
"self",
".",
"map",
".",
"update",
"(",
")",
"if",
"values",
":",
"if",
"isinstance",
"(",
"values",
",",
"MultiDict",
")",
":",
"temp_values",
"=",
"{",
"}",
"# iteritems(dict, values) is like `values.lists()`",
"# without the call or `list()` coercion overhead.",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"dict",
",",
"values",
")",
":",
"if",
"not",
"value",
":",
"continue",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"# flatten single item lists",
"value",
"=",
"value",
"[",
"0",
"]",
"if",
"value",
"is",
"None",
":",
"# drop None",
"continue",
"temp_values",
"[",
"key",
"]",
"=",
"value",
"values",
"=",
"temp_values",
"else",
":",
"# drop None",
"values",
"=",
"dict",
"(",
"i",
"for",
"i",
"in",
"iteritems",
"(",
"values",
")",
"if",
"i",
"[",
"1",
"]",
"is",
"not",
"None",
")",
"else",
":",
"values",
"=",
"{",
"}",
"rv",
"=",
"self",
".",
"_partial_build",
"(",
"endpoint",
",",
"values",
",",
"method",
",",
"append_unknown",
")",
"if",
"rv",
"is",
"None",
":",
"raise",
"BuildError",
"(",
"endpoint",
",",
"values",
",",
"method",
",",
"self",
")",
"domain_part",
",",
"path",
"=",
"rv",
"host",
"=",
"self",
".",
"get_host",
"(",
"domain_part",
")",
"# shortcut this.",
"if",
"not",
"force_external",
"and",
"(",
"(",
"self",
".",
"map",
".",
"host_matching",
"and",
"host",
"==",
"self",
".",
"server_name",
")",
"or",
"(",
"not",
"self",
".",
"map",
".",
"host_matching",
"and",
"domain_part",
"==",
"self",
".",
"subdomain",
")",
")",
":",
"return",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"script_name",
".",
"rstrip",
"(",
"\"/\"",
")",
",",
"path",
".",
"lstrip",
"(",
"\"/\"",
")",
")",
"return",
"str",
"(",
"\"%s//%s%s/%s\"",
"%",
"(",
"self",
".",
"url_scheme",
"+",
"\":\"",
"if",
"self",
".",
"url_scheme",
"else",
"\"\"",
",",
"host",
",",
"self",
".",
"script_name",
"[",
":",
"-",
"1",
"]",
",",
"path",
".",
"lstrip",
"(",
"\"/\"",
")",
",",
")",
")"
] | Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if you set it to `True` will force external URLs. Per default
external URLs (include the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
Passing a ``MultiDict`` will also add multiple values:
>>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
'/?p=z&q=a&q=b'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have an URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those. | [
"Building",
"URLs",
"works",
"pretty",
"much",
"the",
"other",
"way",
"round",
".",
"Instead",
"of",
"match",
"you",
"call",
"build",
"and",
"pass",
"it",
"the",
"endpoint",
"and",
"a",
"dict",
"of",
"arguments",
"for",
"the",
"placeholders",
"."
] | python | train |
tensorflow/cleverhans | cleverhans/attacks/carlini_wagner_l2.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/carlini_wagner_l2.py#L87-L143 | def parse_params(self,
y=None,
y_target=None,
batch_size=1,
confidence=0,
learning_rate=5e-3,
binary_search_steps=5,
max_iterations=1000,
abort_early=True,
initial_const=1e-2,
clip_min=0,
clip_max=1):
"""
:param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the purturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# ignore the y and y_target argument
self.batch_size = batch_size
self.confidence = confidence
self.learning_rate = learning_rate
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.abort_early = abort_early
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max | [
"def",
"parse_params",
"(",
"self",
",",
"y",
"=",
"None",
",",
"y_target",
"=",
"None",
",",
"batch_size",
"=",
"1",
",",
"confidence",
"=",
"0",
",",
"learning_rate",
"=",
"5e-3",
",",
"binary_search_steps",
"=",
"5",
",",
"max_iterations",
"=",
"1000",
",",
"abort_early",
"=",
"True",
",",
"initial_const",
"=",
"1e-2",
",",
"clip_min",
"=",
"0",
",",
"clip_max",
"=",
"1",
")",
":",
"# ignore the y and y_target argument",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"confidence",
"=",
"confidence",
"self",
".",
"learning_rate",
"=",
"learning_rate",
"self",
".",
"binary_search_steps",
"=",
"binary_search_steps",
"self",
".",
"max_iterations",
"=",
"max_iterations",
"self",
".",
"abort_early",
"=",
"abort_early",
"self",
".",
"initial_const",
"=",
"initial_const",
"self",
".",
"clip_min",
"=",
"clip_min",
"self",
".",
"clip_max",
"=",
"clip_max"
] | :param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the purturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value | [
":",
"param",
"y",
":",
"(",
"optional",
")",
"A",
"tensor",
"with",
"the",
"true",
"labels",
"for",
"an",
"untargeted",
"attack",
".",
"If",
"None",
"(",
"and",
"y_target",
"is",
"None",
")",
"then",
"use",
"the",
"original",
"labels",
"the",
"classifier",
"assigns",
".",
":",
"param",
"y_target",
":",
"(",
"optional",
")",
"A",
"tensor",
"with",
"the",
"target",
"labels",
"for",
"a",
"targeted",
"attack",
".",
":",
"param",
"confidence",
":",
"Confidence",
"of",
"adversarial",
"examples",
":",
"higher",
"produces",
"examples",
"with",
"larger",
"l2",
"distortion",
"but",
"more",
"strongly",
"classified",
"as",
"adversarial",
".",
":",
"param",
"batch_size",
":",
"Number",
"of",
"attacks",
"to",
"run",
"simultaneously",
".",
":",
"param",
"learning_rate",
":",
"The",
"learning",
"rate",
"for",
"the",
"attack",
"algorithm",
".",
"Smaller",
"values",
"produce",
"better",
"results",
"but",
"are",
"slower",
"to",
"converge",
".",
":",
"param",
"binary_search_steps",
":",
"The",
"number",
"of",
"times",
"we",
"perform",
"binary",
"search",
"to",
"find",
"the",
"optimal",
"tradeoff",
"-",
"constant",
"between",
"norm",
"of",
"the",
"purturbation",
"and",
"confidence",
"of",
"the",
"classification",
".",
":",
"param",
"max_iterations",
":",
"The",
"maximum",
"number",
"of",
"iterations",
".",
"Setting",
"this",
"to",
"a",
"larger",
"value",
"will",
"produce",
"lower",
"distortion",
"results",
".",
"Using",
"only",
"a",
"few",
"iterations",
"requires",
"a",
"larger",
"learning",
"rate",
"and",
"will",
"produce",
"larger",
"distortion",
"results",
".",
":",
"param",
"abort_early",
":",
"If",
"true",
"allows",
"early",
"aborts",
"if",
"gradient",
"descent",
"is",
"unable",
"to",
"make",
"progress",
"(",
"i",
".",
"e",
".",
"gets",
"stuck",
"in",
"a",
"local",
"minimum",
")",
".",
":",
"param",
"initial_const",
":",
"The",
"initial",
"tradeoff",
"-",
"constant",
"to",
"use",
"to",
"tune",
"the",
"relative",
"importance",
"of",
"size",
"of",
"the",
"perturbation",
"and",
"confidence",
"of",
"classification",
".",
"If",
"binary_search_steps",
"is",
"large",
"the",
"initial",
"constant",
"is",
"not",
"important",
".",
"A",
"smaller",
"value",
"of",
"this",
"constant",
"gives",
"lower",
"distortion",
"results",
".",
":",
"param",
"clip_min",
":",
"(",
"optional",
"float",
")",
"Minimum",
"input",
"component",
"value",
":",
"param",
"clip_max",
":",
"(",
"optional",
"float",
")",
"Maximum",
"input",
"component",
"value"
] | python | train |
pandas-dev/pandas | pandas/core/internals/blocks.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1564-L1597 | def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block, the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
new_values, new = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)] | [
"def",
"putmask",
"(",
"self",
",",
"mask",
",",
"new",
",",
"align",
"=",
"True",
",",
"inplace",
"=",
"False",
",",
"axis",
"=",
"0",
",",
"transpose",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"# use block's copy logic.",
"# .values may be an Index which does shallow copy by default",
"new_values",
"=",
"self",
".",
"values",
"if",
"inplace",
"else",
"self",
".",
"copy",
"(",
")",
".",
"values",
"new_values",
",",
"new",
"=",
"self",
".",
"_try_coerce_args",
"(",
"new_values",
",",
"new",
")",
"if",
"isinstance",
"(",
"new",
",",
"np",
".",
"ndarray",
")",
"and",
"len",
"(",
"new",
")",
"==",
"len",
"(",
"mask",
")",
":",
"new",
"=",
"new",
"[",
"mask",
"]",
"mask",
"=",
"_safe_reshape",
"(",
"mask",
",",
"new_values",
".",
"shape",
")",
"new_values",
"[",
"mask",
"]",
"=",
"new",
"new_values",
"=",
"self",
".",
"_try_coerce_result",
"(",
"new_values",
")",
"return",
"[",
"self",
".",
"make_block",
"(",
"values",
"=",
"new_values",
")",
"]"
] | putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block, the result of the putmask | [
"putmask",
"the",
"data",
"to",
"the",
"block",
";",
"we",
"must",
"be",
"a",
"single",
"block",
"and",
"not",
"generate",
"other",
"blocks"
] | python | train |
pyrogram/pyrogram | pyrogram/client/client.py | https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L357-L398 | def stop(self):
"""Use this method to manually stop the Client.
Requires no parameters.
Raises:
``ConnectionError`` in case you try to stop an already stopped Client.
"""
if not self.is_started:
raise ConnectionError("Client is already stopped")
if self.takeout_id:
self.send(functions.account.FinishTakeoutSession())
log.warning("Takeout session {} finished".format(self.takeout_id))
Syncer.remove(self)
self.dispatcher.stop()
for _ in range(self.DOWNLOAD_WORKERS):
self.download_queue.put(None)
for i in self.download_workers_list:
i.join()
self.download_workers_list.clear()
for _ in range(self.UPDATES_WORKERS):
self.updates_queue.put(None)
for i in self.updates_workers_list:
i.join()
self.updates_workers_list.clear()
for i in self.media_sessions.values():
i.stop()
self.media_sessions.clear()
self.is_started = False
self.session.stop()
return self | [
"def",
"stop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_started",
":",
"raise",
"ConnectionError",
"(",
"\"Client is already stopped\"",
")",
"if",
"self",
".",
"takeout_id",
":",
"self",
".",
"send",
"(",
"functions",
".",
"account",
".",
"FinishTakeoutSession",
"(",
")",
")",
"log",
".",
"warning",
"(",
"\"Takeout session {} finished\"",
".",
"format",
"(",
"self",
".",
"takeout_id",
")",
")",
"Syncer",
".",
"remove",
"(",
"self",
")",
"self",
".",
"dispatcher",
".",
"stop",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"DOWNLOAD_WORKERS",
")",
":",
"self",
".",
"download_queue",
".",
"put",
"(",
"None",
")",
"for",
"i",
"in",
"self",
".",
"download_workers_list",
":",
"i",
".",
"join",
"(",
")",
"self",
".",
"download_workers_list",
".",
"clear",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"UPDATES_WORKERS",
")",
":",
"self",
".",
"updates_queue",
".",
"put",
"(",
"None",
")",
"for",
"i",
"in",
"self",
".",
"updates_workers_list",
":",
"i",
".",
"join",
"(",
")",
"self",
".",
"updates_workers_list",
".",
"clear",
"(",
")",
"for",
"i",
"in",
"self",
".",
"media_sessions",
".",
"values",
"(",
")",
":",
"i",
".",
"stop",
"(",
")",
"self",
".",
"media_sessions",
".",
"clear",
"(",
")",
"self",
".",
"is_started",
"=",
"False",
"self",
".",
"session",
".",
"stop",
"(",
")",
"return",
"self"
] | Use this method to manually stop the Client.
Requires no parameters.
Raises:
``ConnectionError`` in case you try to stop an already stopped Client. | [
"Use",
"this",
"method",
"to",
"manually",
"stop",
"the",
"Client",
".",
"Requires",
"no",
"parameters",
"."
] | python | train |
PyCQA/pylint | pylint/pyreverse/diadefslib.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/diadefslib.py#L40-L45 | def get_title(self, node):
"""get title for objects"""
title = node.name
if self.module_names:
title = "%s.%s" % (node.root().name, title)
return title | [
"def",
"get_title",
"(",
"self",
",",
"node",
")",
":",
"title",
"=",
"node",
".",
"name",
"if",
"self",
".",
"module_names",
":",
"title",
"=",
"\"%s.%s\"",
"%",
"(",
"node",
".",
"root",
"(",
")",
".",
"name",
",",
"title",
")",
"return",
"title"
] | get title for objects | [
"get",
"title",
"for",
"objects"
] | python | test |
shoebot/shoebot | shoebot/data/bezier.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/data/bezier.py#L92-L98 | def _append_element(self, render_func, pe):
'''
Append a render function and the parameters to pass
an equivilent PathElement, or the PathElement itself.
'''
self._render_funcs.append(render_func)
self._elements.append(pe) | [
"def",
"_append_element",
"(",
"self",
",",
"render_func",
",",
"pe",
")",
":",
"self",
".",
"_render_funcs",
".",
"append",
"(",
"render_func",
")",
"self",
".",
"_elements",
".",
"append",
"(",
"pe",
")"
] | Append a render function and the parameters to pass
an equivilent PathElement, or the PathElement itself. | [
"Append",
"a",
"render",
"function",
"and",
"the",
"parameters",
"to",
"pass",
"an",
"equivilent",
"PathElement",
"or",
"the",
"PathElement",
"itself",
"."
] | python | valid |
chop-dbhi/varify-data-warehouse | vdw/variants/migrations/0009_rename_evs_maf_datafields.py | https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/variants/migrations/0009_rename_evs_maf_datafields.py#L9-L15 | def forwards(self, orm):
"Write your forwards methods here."
fields = orm['avocado.DataField'].objects.filter(app_name='variants',
model_name='evs', field_name__in=('all_maf', 'aa_maf', 'ea_maf'))
for f in fields:
f.field_name = f.field_name.replace('maf', 'af')
f.save() | [
"def",
"forwards",
"(",
"self",
",",
"orm",
")",
":",
"fields",
"=",
"orm",
"[",
"'avocado.DataField'",
"]",
".",
"objects",
".",
"filter",
"(",
"app_name",
"=",
"'variants'",
",",
"model_name",
"=",
"'evs'",
",",
"field_name__in",
"=",
"(",
"'all_maf'",
",",
"'aa_maf'",
",",
"'ea_maf'",
")",
")",
"for",
"f",
"in",
"fields",
":",
"f",
".",
"field_name",
"=",
"f",
".",
"field_name",
".",
"replace",
"(",
"'maf'",
",",
"'af'",
")",
"f",
".",
"save",
"(",
")"
] | Write your forwards methods here. | [
"Write",
"your",
"forwards",
"methods",
"here",
"."
] | python | train |
mitsei/dlkit | dlkit/records/osid/base_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L52-L97 | def _get_asset_content(self, asset_id, asset_content_type_str=None, asset_content_id=None):
"""stub"""
rm = self.my_osid_object._get_provider_manager('REPOSITORY')
if 'assignedBankIds' in self.my_osid_object._my_map:
if self.my_osid_object._proxy is not None:
als = rm.get_asset_lookup_session_for_repository(
Id(self.my_osid_object._my_map['assignedBankIds'][0]),
self.my_osid_object._proxy)
else:
als = rm.get_asset_lookup_session_for_repository(
Id(self.my_osid_object._my_map['assignedBankIds'][0]))
elif 'assignedBookIds' in self.my_osid_object._my_map:
if self.my_osid_object._proxy is not None:
als = rm.get_asset_lookup_session_for_repository(
Id(self.my_osid_object._my_map['assignedBookIds'][0]),
self.my_osid_object._proxy)
else:
als = rm.get_asset_lookup_session_for_repository(
Id(self.my_osid_object._my_map['assignedBookIds'][0]))
elif 'assignedRepositoryIds' in self.my_osid_object._my_map:
if self.my_osid_object._proxy is not None:
als = rm.get_asset_lookup_session_for_repository(
Id(self.my_osid_object._my_map['assignedRepositoryIds'][0]),
self.my_osid_object._proxy)
else:
als = rm.get_asset_lookup_session_for_repository(
Id(self.my_osid_object._my_map['assignedRepositoryIds'][0]))
else:
raise KeyError
if asset_content_id is not None:
ac_list = als.get_asset(asset_id).get_asset_contents()
for ac in ac_list:
if str(ac.ident) == str(asset_content_id):
return ac
if not asset_content_type_str:
return next(als.get_asset(asset_id).get_asset_contents()) # Just return first one
else:
if isinstance(asset_content_type_str, Type):
asset_content_type_str = str(asset_content_type_str)
for ac in als.get_asset(asset_id).get_asset_contents():
if ac.get_genus_type() == Type(asset_content_type_str):
return ac
raise NotFound() | [
"def",
"_get_asset_content",
"(",
"self",
",",
"asset_id",
",",
"asset_content_type_str",
"=",
"None",
",",
"asset_content_id",
"=",
"None",
")",
":",
"rm",
"=",
"self",
".",
"my_osid_object",
".",
"_get_provider_manager",
"(",
"'REPOSITORY'",
")",
"if",
"'assignedBankIds'",
"in",
"self",
".",
"my_osid_object",
".",
"_my_map",
":",
"if",
"self",
".",
"my_osid_object",
".",
"_proxy",
"is",
"not",
"None",
":",
"als",
"=",
"rm",
".",
"get_asset_lookup_session_for_repository",
"(",
"Id",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'assignedBankIds'",
"]",
"[",
"0",
"]",
")",
",",
"self",
".",
"my_osid_object",
".",
"_proxy",
")",
"else",
":",
"als",
"=",
"rm",
".",
"get_asset_lookup_session_for_repository",
"(",
"Id",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'assignedBankIds'",
"]",
"[",
"0",
"]",
")",
")",
"elif",
"'assignedBookIds'",
"in",
"self",
".",
"my_osid_object",
".",
"_my_map",
":",
"if",
"self",
".",
"my_osid_object",
".",
"_proxy",
"is",
"not",
"None",
":",
"als",
"=",
"rm",
".",
"get_asset_lookup_session_for_repository",
"(",
"Id",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'assignedBookIds'",
"]",
"[",
"0",
"]",
")",
",",
"self",
".",
"my_osid_object",
".",
"_proxy",
")",
"else",
":",
"als",
"=",
"rm",
".",
"get_asset_lookup_session_for_repository",
"(",
"Id",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'assignedBookIds'",
"]",
"[",
"0",
"]",
")",
")",
"elif",
"'assignedRepositoryIds'",
"in",
"self",
".",
"my_osid_object",
".",
"_my_map",
":",
"if",
"self",
".",
"my_osid_object",
".",
"_proxy",
"is",
"not",
"None",
":",
"als",
"=",
"rm",
".",
"get_asset_lookup_session_for_repository",
"(",
"Id",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'assignedRepositoryIds'",
"]",
"[",
"0",
"]",
")",
",",
"self",
".",
"my_osid_object",
".",
"_proxy",
")",
"else",
":",
"als",
"=",
"rm",
".",
"get_asset_lookup_session_for_repository",
"(",
"Id",
"(",
"self",
".",
"my_osid_object",
".",
"_my_map",
"[",
"'assignedRepositoryIds'",
"]",
"[",
"0",
"]",
")",
")",
"else",
":",
"raise",
"KeyError",
"if",
"asset_content_id",
"is",
"not",
"None",
":",
"ac_list",
"=",
"als",
".",
"get_asset",
"(",
"asset_id",
")",
".",
"get_asset_contents",
"(",
")",
"for",
"ac",
"in",
"ac_list",
":",
"if",
"str",
"(",
"ac",
".",
"ident",
")",
"==",
"str",
"(",
"asset_content_id",
")",
":",
"return",
"ac",
"if",
"not",
"asset_content_type_str",
":",
"return",
"next",
"(",
"als",
".",
"get_asset",
"(",
"asset_id",
")",
".",
"get_asset_contents",
"(",
")",
")",
"# Just return first one",
"else",
":",
"if",
"isinstance",
"(",
"asset_content_type_str",
",",
"Type",
")",
":",
"asset_content_type_str",
"=",
"str",
"(",
"asset_content_type_str",
")",
"for",
"ac",
"in",
"als",
".",
"get_asset",
"(",
"asset_id",
")",
".",
"get_asset_contents",
"(",
")",
":",
"if",
"ac",
".",
"get_genus_type",
"(",
")",
"==",
"Type",
"(",
"asset_content_type_str",
")",
":",
"return",
"ac",
"raise",
"NotFound",
"(",
")"
] | stub | [
"stub"
] | python | train |
SoCo/SoCo | soco/music_library.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/music_library.py#L336-L398 | def browse(self, ml_item=None, start=0, max_items=100,
full_album_art_uri=False, search_term=None, subcategories=None):
"""Browse (get sub-elements from) a music library item.
Args:
ml_item (`DidlItem`): the item to browse, if left out or
`None`, items at the root level will be searched.
start (int): the starting index of the results.
max_items (int): the maximum number of items to return.
full_album_art_uri (bool): whether the album art URI should be
fully qualified with the relevant IP address.
search_term (str): A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed on the
subcategory. Note: Searching will not work if ``ml_item`` is
`None`.
subcategories (list): A list of strings that indicate one or more
subcategories to descend into. Note: Providing sub categories
will not work if ``ml_item`` is `None`.
Returns:
A `SearchResult` instance.
Raises:
AttributeError: if ``ml_item`` has no ``item_id`` attribute.
SoCoUPnPException: with ``error_code='701'`` if the item cannot be
browsed.
"""
if ml_item is None:
search = 'A:'
else:
search = ml_item.item_id
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
try:
response, metadata = \
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], 'browse', 0, 0, None)
else:
raise exception
metadata['search_type'] = 'browse'
# Parse the results
containers = from_didl_string(response['Result'])
item_list = []
for container in containers:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(container)
item_list.append(container)
# pylint: disable=star-args
return SearchResult(item_list, **metadata) | [
"def",
"browse",
"(",
"self",
",",
"ml_item",
"=",
"None",
",",
"start",
"=",
"0",
",",
"max_items",
"=",
"100",
",",
"full_album_art_uri",
"=",
"False",
",",
"search_term",
"=",
"None",
",",
"subcategories",
"=",
"None",
")",
":",
"if",
"ml_item",
"is",
"None",
":",
"search",
"=",
"'A:'",
"else",
":",
"search",
"=",
"ml_item",
".",
"item_id",
"# Add sub categories",
"if",
"subcategories",
"is",
"not",
"None",
":",
"for",
"category",
"in",
"subcategories",
":",
"search",
"+=",
"'/'",
"+",
"url_escape_path",
"(",
"really_unicode",
"(",
"category",
")",
")",
"# Add fuzzy search",
"if",
"search_term",
"is",
"not",
"None",
":",
"search",
"+=",
"':'",
"+",
"url_escape_path",
"(",
"really_unicode",
"(",
"search_term",
")",
")",
"try",
":",
"response",
",",
"metadata",
"=",
"self",
".",
"_music_lib_search",
"(",
"search",
",",
"start",
",",
"max_items",
")",
"except",
"SoCoUPnPException",
"as",
"exception",
":",
"# 'No such object' UPnP errors",
"if",
"exception",
".",
"error_code",
"==",
"'701'",
":",
"return",
"SearchResult",
"(",
"[",
"]",
",",
"'browse'",
",",
"0",
",",
"0",
",",
"None",
")",
"else",
":",
"raise",
"exception",
"metadata",
"[",
"'search_type'",
"]",
"=",
"'browse'",
"# Parse the results",
"containers",
"=",
"from_didl_string",
"(",
"response",
"[",
"'Result'",
"]",
")",
"item_list",
"=",
"[",
"]",
"for",
"container",
"in",
"containers",
":",
"# Check if the album art URI should be fully qualified",
"if",
"full_album_art_uri",
":",
"self",
".",
"_update_album_art_to_full_uri",
"(",
"container",
")",
"item_list",
".",
"append",
"(",
"container",
")",
"# pylint: disable=star-args",
"return",
"SearchResult",
"(",
"item_list",
",",
"*",
"*",
"metadata",
")"
] | Browse (get sub-elements from) a music library item.
Args:
ml_item (`DidlItem`): the item to browse, if left out or
`None`, items at the root level will be searched.
start (int): the starting index of the results.
max_items (int): the maximum number of items to return.
full_album_art_uri (bool): whether the album art URI should be
fully qualified with the relevant IP address.
search_term (str): A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed on the
subcategory. Note: Searching will not work if ``ml_item`` is
`None`.
subcategories (list): A list of strings that indicate one or more
subcategories to descend into. Note: Providing sub categories
will not work if ``ml_item`` is `None`.
Returns:
A `SearchResult` instance.
Raises:
AttributeError: if ``ml_item`` has no ``item_id`` attribute.
SoCoUPnPException: with ``error_code='701'`` if the item cannot be
browsed. | [
"Browse",
"(",
"get",
"sub",
"-",
"elements",
"from",
")",
"a",
"music",
"library",
"item",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/lib/pretty.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/pretty.py#L568-L575 | def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__self_class__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>') | [
"def",
"_super_pprint",
"(",
"obj",
",",
"p",
",",
"cycle",
")",
":",
"p",
".",
"begin_group",
"(",
"8",
",",
"'<super: '",
")",
"p",
".",
"pretty",
"(",
"obj",
".",
"__self_class__",
")",
"p",
".",
"text",
"(",
"','",
")",
"p",
".",
"breakable",
"(",
")",
"p",
".",
"pretty",
"(",
"obj",
".",
"__self__",
")",
"p",
".",
"end_group",
"(",
"8",
",",
"'>'",
")"
] | The pprint for the super type. | [
"The",
"pprint",
"for",
"the",
"super",
"type",
"."
] | python | test |
ladybug-tools/ladybug | ladybug/header.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/header.py#L105-L109 | def duplicate(self):
"""Return a copy of the header."""
a_per = self.analysis_period.duplicate() if self.analysis_period else None
return self.__class__(self.data_type, self.unit,
a_per, deepcopy(self.metadata)) | [
"def",
"duplicate",
"(",
"self",
")",
":",
"a_per",
"=",
"self",
".",
"analysis_period",
".",
"duplicate",
"(",
")",
"if",
"self",
".",
"analysis_period",
"else",
"None",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"data_type",
",",
"self",
".",
"unit",
",",
"a_per",
",",
"deepcopy",
"(",
"self",
".",
"metadata",
")",
")"
] | Return a copy of the header. | [
"Return",
"a",
"copy",
"of",
"the",
"header",
"."
] | python | train |
jurismarches/chopper | chopper/html/extractor.py | https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/html/extractor.py#L40-L68 | def parse(self):
"""
Returns a cleaned lxml ElementTree
:returns: Whether the cleaned HTML has matches or not
:rtype: bool
"""
# Create the element tree
self.tree = self._build_tree(self.html_contents)
# Get explicits elements to keep and discard
self.elts_to_keep = self._get_elements_to_keep()
self.elts_to_discard = self._get_elements_to_discard()
# Init an empty list of Elements to remove
self.elts_to_remove = []
# Check if the root is a match or if there is any matches
is_root = self._is_keep(self.tree)
has_descendant = self._has_keep_elt_in_descendants(self.tree)
if not(is_root or has_descendant):
return False
# Parse and clean the ElementTree
self._parse_element(self.tree, parent_is_keep=is_root)
self._remove_elements(self.elts_to_remove)
return True | [
"def",
"parse",
"(",
"self",
")",
":",
"# Create the element tree",
"self",
".",
"tree",
"=",
"self",
".",
"_build_tree",
"(",
"self",
".",
"html_contents",
")",
"# Get explicits elements to keep and discard",
"self",
".",
"elts_to_keep",
"=",
"self",
".",
"_get_elements_to_keep",
"(",
")",
"self",
".",
"elts_to_discard",
"=",
"self",
".",
"_get_elements_to_discard",
"(",
")",
"# Init an empty list of Elements to remove",
"self",
".",
"elts_to_remove",
"=",
"[",
"]",
"# Check if the root is a match or if there is any matches",
"is_root",
"=",
"self",
".",
"_is_keep",
"(",
"self",
".",
"tree",
")",
"has_descendant",
"=",
"self",
".",
"_has_keep_elt_in_descendants",
"(",
"self",
".",
"tree",
")",
"if",
"not",
"(",
"is_root",
"or",
"has_descendant",
")",
":",
"return",
"False",
"# Parse and clean the ElementTree",
"self",
".",
"_parse_element",
"(",
"self",
".",
"tree",
",",
"parent_is_keep",
"=",
"is_root",
")",
"self",
".",
"_remove_elements",
"(",
"self",
".",
"elts_to_remove",
")",
"return",
"True"
] | Returns a cleaned lxml ElementTree
:returns: Whether the cleaned HTML has matches or not
:rtype: bool | [
"Returns",
"a",
"cleaned",
"lxml",
"ElementTree"
] | python | train |
wonambi-python/wonambi | wonambi/widgets/analysis.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/analysis.py#L1043-L1098 | def toggle_pac(self):
"""Enable and disable PAC options."""
if Pac is not None:
pac_on = self.pac['pac_on'].get_value()
self.pac['prep'].setEnabled(pac_on)
self.pac['box_metric'].setEnabled(pac_on)
self.pac['box_complex'].setEnabled(pac_on)
self.pac['box_surro'].setEnabled(pac_on)
self.pac['box_opts'].setEnabled(pac_on)
if not pac_on:
self.pac['prep'].set_value(False)
if Pac is not None and pac_on:
pac = self.pac
hilb_on = pac['hilbert_on'].isChecked()
wav_on = pac['wavelet_on'].isChecked()
for button in pac['hilbert'].values():
button[0].setEnabled(hilb_on)
if button[1] is not None:
button[1].setEnabled(hilb_on)
pac['wav_width'][0].setEnabled(wav_on)
pac['wav_width'][1].setEnabled(wav_on)
if pac['metric'].get_value() in [
'Kullback-Leibler Distance',
'Heights ratio']:
pac['nbin'][0].setEnabled(True)
pac['nbin'][1].setEnabled(True)
else:
pac['nbin'][0].setEnabled(False)
pac['nbin'][1].setEnabled(False)
if pac['metric'] == 'ndPac':
for button in pac['surro'].values():
button[0].setEnabled(False)
if button[1] is not None:
button[1].setEnabled(False)
pac['surro']['pval'][0].setEnabled(True)
ndpac_on = pac['metric'].get_value() == 'ndPac'
surro_on = logical_and(pac['surro_method'].get_value() != ''
'No surrogates', not ndpac_on)
norm_on = pac['surro_norm'].get_value() != 'No normalization'
blocks_on = 'across time' in pac['surro_method'].get_value()
pac['surro_method'].setEnabled(not ndpac_on)
for button in pac['surro'].values():
button[0].setEnabled(surro_on and norm_on)
if button[1] is not None:
button[1].setEnabled(surro_on and norm_on)
pac['surro']['nblocks'][0].setEnabled(blocks_on)
pac['surro']['nblocks'][1].setEnabled(blocks_on)
if ndpac_on:
pac['surro_method'].set_value('No surrogates')
pac['surro']['pval'][0].setEnabled(True) | [
"def",
"toggle_pac",
"(",
"self",
")",
":",
"if",
"Pac",
"is",
"not",
"None",
":",
"pac_on",
"=",
"self",
".",
"pac",
"[",
"'pac_on'",
"]",
".",
"get_value",
"(",
")",
"self",
".",
"pac",
"[",
"'prep'",
"]",
".",
"setEnabled",
"(",
"pac_on",
")",
"self",
".",
"pac",
"[",
"'box_metric'",
"]",
".",
"setEnabled",
"(",
"pac_on",
")",
"self",
".",
"pac",
"[",
"'box_complex'",
"]",
".",
"setEnabled",
"(",
"pac_on",
")",
"self",
".",
"pac",
"[",
"'box_surro'",
"]",
".",
"setEnabled",
"(",
"pac_on",
")",
"self",
".",
"pac",
"[",
"'box_opts'",
"]",
".",
"setEnabled",
"(",
"pac_on",
")",
"if",
"not",
"pac_on",
":",
"self",
".",
"pac",
"[",
"'prep'",
"]",
".",
"set_value",
"(",
"False",
")",
"if",
"Pac",
"is",
"not",
"None",
"and",
"pac_on",
":",
"pac",
"=",
"self",
".",
"pac",
"hilb_on",
"=",
"pac",
"[",
"'hilbert_on'",
"]",
".",
"isChecked",
"(",
")",
"wav_on",
"=",
"pac",
"[",
"'wavelet_on'",
"]",
".",
"isChecked",
"(",
")",
"for",
"button",
"in",
"pac",
"[",
"'hilbert'",
"]",
".",
"values",
"(",
")",
":",
"button",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"hilb_on",
")",
"if",
"button",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"button",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"hilb_on",
")",
"pac",
"[",
"'wav_width'",
"]",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"wav_on",
")",
"pac",
"[",
"'wav_width'",
"]",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"wav_on",
")",
"if",
"pac",
"[",
"'metric'",
"]",
".",
"get_value",
"(",
")",
"in",
"[",
"'Kullback-Leibler Distance'",
",",
"'Heights ratio'",
"]",
":",
"pac",
"[",
"'nbin'",
"]",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"True",
")",
"pac",
"[",
"'nbin'",
"]",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"True",
")",
"else",
":",
"pac",
"[",
"'nbin'",
"]",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"False",
")",
"pac",
"[",
"'nbin'",
"]",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"False",
")",
"if",
"pac",
"[",
"'metric'",
"]",
"==",
"'ndPac'",
":",
"for",
"button",
"in",
"pac",
"[",
"'surro'",
"]",
".",
"values",
"(",
")",
":",
"button",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"False",
")",
"if",
"button",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"button",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"False",
")",
"pac",
"[",
"'surro'",
"]",
"[",
"'pval'",
"]",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"True",
")",
"ndpac_on",
"=",
"pac",
"[",
"'metric'",
"]",
".",
"get_value",
"(",
")",
"==",
"'ndPac'",
"surro_on",
"=",
"logical_and",
"(",
"pac",
"[",
"'surro_method'",
"]",
".",
"get_value",
"(",
")",
"!=",
"''",
"'No surrogates'",
",",
"not",
"ndpac_on",
")",
"norm_on",
"=",
"pac",
"[",
"'surro_norm'",
"]",
".",
"get_value",
"(",
")",
"!=",
"'No normalization'",
"blocks_on",
"=",
"'across time'",
"in",
"pac",
"[",
"'surro_method'",
"]",
".",
"get_value",
"(",
")",
"pac",
"[",
"'surro_method'",
"]",
".",
"setEnabled",
"(",
"not",
"ndpac_on",
")",
"for",
"button",
"in",
"pac",
"[",
"'surro'",
"]",
".",
"values",
"(",
")",
":",
"button",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"surro_on",
"and",
"norm_on",
")",
"if",
"button",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"button",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"surro_on",
"and",
"norm_on",
")",
"pac",
"[",
"'surro'",
"]",
"[",
"'nblocks'",
"]",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"blocks_on",
")",
"pac",
"[",
"'surro'",
"]",
"[",
"'nblocks'",
"]",
"[",
"1",
"]",
".",
"setEnabled",
"(",
"blocks_on",
")",
"if",
"ndpac_on",
":",
"pac",
"[",
"'surro_method'",
"]",
".",
"set_value",
"(",
"'No surrogates'",
")",
"pac",
"[",
"'surro'",
"]",
"[",
"'pval'",
"]",
"[",
"0",
"]",
".",
"setEnabled",
"(",
"True",
")"
] | Enable and disable PAC options. | [
"Enable",
"and",
"disable",
"PAC",
"options",
"."
] | python | train |
hardbyte/python-can | can/interfaces/systec/ucan.py | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucan.py#L855-L866 | def check_support_user_port(cls, hw_info_ex):
"""
Checks whether the module supports a user I/O port.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module supports a user I/O port, otherwise False.
:rtype: bool
"""
return ((hw_info_ex.m_dwProductCode & PRODCODE_MASK_PID) != ProductCode.PRODCODE_PID_BASIC) \
and ((hw_info_ex.m_dwProductCode & PRODCODE_MASK_PID) != ProductCode.PRODCODE_PID_RESERVED1) \
and cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 2, 16) | [
"def",
"check_support_user_port",
"(",
"cls",
",",
"hw_info_ex",
")",
":",
"return",
"(",
"(",
"hw_info_ex",
".",
"m_dwProductCode",
"&",
"PRODCODE_MASK_PID",
")",
"!=",
"ProductCode",
".",
"PRODCODE_PID_BASIC",
")",
"and",
"(",
"(",
"hw_info_ex",
".",
"m_dwProductCode",
"&",
"PRODCODE_MASK_PID",
")",
"!=",
"ProductCode",
".",
"PRODCODE_PID_RESERVED1",
")",
"and",
"cls",
".",
"check_version_is_equal_or_higher",
"(",
"hw_info_ex",
".",
"m_dwFwVersionEx",
",",
"2",
",",
"16",
")"
] | Checks whether the module supports a user I/O port.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module supports a user I/O port, otherwise False.
:rtype: bool | [
"Checks",
"whether",
"the",
"module",
"supports",
"a",
"user",
"I",
"/",
"O",
"port",
"."
] | python | train |
brian-rose/climlab | climlab/utils/thermo.py | https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/utils/thermo.py#L177-L185 | def Planck_frequency(nu, T):
'''The Planck function B(nu,T):
the flux density for blackbody radiation in frequency space
nu is frequency in 1/s
T is temperature in Kelvin
Formula (3.1) from Raymond Pierrehumbert, "Principles of Planetary Climate"
'''
return 2*hPlanck*nu**3/c_light**2/(exp(hPlanck*nu/kBoltzmann/T)-1) | [
"def",
"Planck_frequency",
"(",
"nu",
",",
"T",
")",
":",
"return",
"2",
"*",
"hPlanck",
"*",
"nu",
"**",
"3",
"/",
"c_light",
"**",
"2",
"/",
"(",
"exp",
"(",
"hPlanck",
"*",
"nu",
"/",
"kBoltzmann",
"/",
"T",
")",
"-",
"1",
")"
] | The Planck function B(nu,T):
the flux density for blackbody radiation in frequency space
nu is frequency in 1/s
T is temperature in Kelvin
Formula (3.1) from Raymond Pierrehumbert, "Principles of Planetary Climate" | [
"The",
"Planck",
"function",
"B",
"(",
"nu",
"T",
")",
":",
"the",
"flux",
"density",
"for",
"blackbody",
"radiation",
"in",
"frequency",
"space",
"nu",
"is",
"frequency",
"in",
"1",
"/",
"s",
"T",
"is",
"temperature",
"in",
"Kelvin"
] | python | train |
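For quick orientation, the Planck flux density above can be evaluated directly once the physical constants are supplied. The sketch below is self-contained and uses approximate SI constant values chosen here only for illustration; climlab's thermo module defines its own constants, so exact numbers may differ slightly.

from numpy import exp

# Approximate SI constants, supplied here only for illustration.
hPlanck = 6.62607e-34      # Planck constant, J s
kBoltzmann = 1.38065e-23   # Boltzmann constant, J / K
c_light = 2.99792e8        # speed of light, m / s

def planck_frequency(nu, T):
    """Blackbody flux density B(nu, T) in frequency space."""
    return 2 * hPlanck * nu**3 / c_light**2 / (exp(hPlanck * nu / kBoltzmann / T) - 1)

# Near the peak of the solar spectrum (~340 THz) for T ~ 5800 K
print(planck_frequency(3.4e14, 5800.0))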
ttm/socialLegacy | social/tw.py | https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/tw.py#L293-L307 | def searchTag(self,HTAG="#python"):
"""Set Twitter search or stream criteria for the selection of tweets"""
self.t = Twython(app_key =self.app_key ,
app_secret =self.app_secret ,
oauth_token =self.oauth_token ,
oauth_token_secret =self.oauth_token_secret)
search =self.t.search(q=HTAG,count=100,result_type="recent")
ss=search[:]
search = self.t.search(q=HTAG,count=150,max_id=ss[-1]['id']-1,result_type="recent")
#search = t.search(q=HTAG,count=150,since_id=ss[-1]['id'],result_type="recent")
while search:
ss+=search[:]
search = self.t.search(q=HTAG,count=150,max_id=ss[-1]['id']-1,result_type="recent")
self.ss=ss | [
"def",
"searchTag",
"(",
"self",
",",
"HTAG",
"=",
"\"#python\"",
")",
":",
"self",
".",
"t",
"=",
"Twython",
"(",
"app_key",
"=",
"self",
".",
"app_key",
",",
"app_secret",
"=",
"self",
".",
"app_secret",
",",
"oauth_token",
"=",
"self",
".",
"oauth_token",
",",
"oauth_token_secret",
"=",
"self",
".",
"oauth_token_secret",
")",
"search",
"=",
"self",
".",
"t",
".",
"search",
"(",
"q",
"=",
"HTAG",
",",
"count",
"=",
"100",
",",
"result_type",
"=",
"\"recent\"",
")",
"ss",
"=",
"search",
"[",
":",
"]",
"search",
"=",
"self",
".",
"t",
".",
"search",
"(",
"q",
"=",
"HTAG",
",",
"count",
"=",
"150",
",",
"max_id",
"=",
"ss",
"[",
"-",
"1",
"]",
"[",
"'id'",
"]",
"-",
"1",
",",
"result_type",
"=",
"\"recent\"",
")",
"#search = t.search(q=HTAG,count=150,since_id=ss[-1]['id'],result_type=\"recent\")",
"while",
"seach",
":",
"ss",
"+=",
"search",
"[",
":",
"]",
"search",
"=",
"self",
".",
"t",
".",
"search",
"(",
"q",
"=",
"HTAG",
",",
"count",
"=",
"150",
",",
"max_id",
"=",
"ss",
"[",
"-",
"1",
"]",
"[",
"'id'",
"]",
"-",
"1",
",",
"result_type",
"=",
"\"recent\"",
")",
"self",
".",
"ss",
"=",
"ss"
] | Set Twitter search or stream criteria for the selection of tweets | [
"Set",
"Twitter",
"search",
"or",
"stream",
"criteria",
"for",
"the",
"selection",
"of",
"tweets"
] | python | train |
cggh/scikit-allel | allel/stats/sf.py | https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L333-L367 | def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations, scaled such that a constant value is expected across the
spectrum for neutral variation, constant population size and unrelated
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the scaled frequency of variant
sites with a minor allele count of i in the first population and j
in the second population.
""" # noqa
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)
# compute site frequency spectrum
s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2)
# apply scaling
s = scale_joint_sfs_folded(s, n1, n2)
return s | [
"def",
"joint_sfs_folded_scaled",
"(",
"ac1",
",",
"ac2",
",",
"n1",
"=",
"None",
",",
"n2",
"=",
"None",
")",
":",
"# noqa",
"# check inputs",
"ac1",
",",
"n1",
"=",
"_check_ac_n",
"(",
"ac1",
",",
"n1",
")",
"ac2",
",",
"n2",
"=",
"_check_ac_n",
"(",
"ac2",
",",
"n2",
")",
"# compute site frequency spectrum",
"s",
"=",
"joint_sfs_folded",
"(",
"ac1",
",",
"ac2",
",",
"n1",
"=",
"n1",
",",
"n2",
"=",
"n2",
")",
"# apply scaling",
"s",
"=",
"scale_joint_sfs_folded",
"(",
"s",
",",
"n1",
",",
"n2",
")",
"return",
"s"
] | Compute the joint folded site frequency spectrum between two
populations, scaled such that a constant value is expected across the
spectrum for neutral variation, constant population size and unrelated
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the scaled frequency of variant
sites with a minor allele count of i in the first population and j
in the second population. | [
"Compute",
"the",
"joint",
"folded",
"site",
"frequency",
"spectrum",
"between",
"two",
"populations",
"scaled",
"such",
"that",
"a",
"constant",
"value",
"is",
"expected",
"across",
"the",
"spectrum",
"for",
"neutral",
"variation",
"constant",
"population",
"size",
"and",
"unrelated",
"populations",
"."
] | python | train |
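A minimal usage sketch for the function above, assuming it is re-exported at the top level of scikit-allel as allel.joint_sfs_folded_scaled; the exact import path and the availability of the n1/n2 keywords may vary between releases.

import numpy as np
import allel  # scikit-allel

# (ref, alt) allele counts for the same variants in two populations
ac1 = np.array([[4, 0], [3, 1], [2, 2], [0, 4]])
ac2 = np.array([[6, 0], [5, 1], [3, 3], [1, 5]])

s = allel.joint_sfs_folded_scaled(ac1, ac2, n1=4, n2=6)
print(s.shape)  # (n1//2 + 1, n2//2 + 1) == (3, 4)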
Basic-Components/msgpack-rpc-protocol | python/pymprpc/client/sync.py | https://github.com/Basic-Components/msgpack-rpc-protocol/blob/7983ace5d5cfd7214df6803f9b1de458df5fe3b1/python/pymprpc/client/sync.py#L171-L214 | def _status_code_check(self, response: Dict[str, Any]):
"""检查响应码并进行对不同的响应进行处理.
主要包括:
+ 编码在500~599段为服务异常,直接抛出对应异常
+ 编码在400~499段为调用异常,为对应ID的future设置异常
+ 编码在300~399段为警告,会抛出对应警告
+ 编码在200~399段为执行成功响应,将结果设置给对应ID的future.
+ 编码在100~199段为服务器响应,主要是处理验证响应和心跳响应
Parameters:
response (Dict[str, Any]): - 响应的python字典形式数据
Return:
(bool): - 如果是非服务异常类的响应,那么返回True
"""
code = response.get("CODE")
if self.debug:
print("resv:{}".format(response))
print(code)
if code >= 500:
if self.debug:
print("server error")
return self._server_error_handler(code)
elif 500 > code >= 400:
if self.debug:
print("call method error")
return self._method_error_handler(response)
elif 400 > code >= 200:
if code >= 300:
self._warning_handler(code)
if code in (200, 201, 202, 206, 300, 301):
if self.debug is True:
print("resv resp {}".format(response))
return self._method_response_handler(response)
elif 200 > code >= 100:
return self._server_response_handler(response)
else:
raise MprpcException("unknow status code {}".format(code)) | [
"def",
"_status_code_check",
"(",
"self",
",",
"response",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
":",
"code",
"=",
"response",
".",
"get",
"(",
"\"CODE\"",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"resv:{}\"",
".",
"format",
"(",
"response",
")",
")",
"print",
"(",
"code",
")",
"if",
"code",
">=",
"500",
":",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"server error\"",
")",
"return",
"self",
".",
"_server_error_handler",
"(",
"code",
")",
"elif",
"500",
">",
"code",
">=",
"400",
":",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"call method error\"",
")",
"return",
"self",
".",
"_method_error_handler",
"(",
"response",
")",
"elif",
"400",
">",
"code",
">=",
"200",
":",
"if",
"code",
">=",
"300",
":",
"self",
".",
"_warning_handler",
"(",
"code",
")",
"if",
"code",
"in",
"(",
"200",
",",
"201",
",",
"202",
",",
"206",
",",
"300",
",",
"301",
")",
":",
"if",
"self",
".",
"debug",
"is",
"True",
":",
"print",
"(",
"\"resv resp {}\"",
".",
"format",
"(",
"response",
")",
")",
"return",
"self",
".",
"_method_response_handler",
"(",
"response",
")",
"elif",
"200",
">",
"code",
">=",
"100",
":",
"return",
"self",
".",
"_server_response_handler",
"(",
"response",
")",
"else",
":",
"raise",
"MprpcException",
"(",
"\"unknow status code {}\"",
".",
"format",
"(",
"code",
")",
")"
] | Check the response code and handle the different kinds of responses.
This mainly covers:
+ codes in the 500~599 range are service errors; the corresponding exception is raised directly
+ codes in the 400~499 range are call errors; the exception is set on the future with the matching ID
+ codes in the 300~399 range are warnings; the corresponding warning is raised
+ codes in the 200~399 range are successful execution responses; the result is set on the future with the matching ID.
+ codes in the 100~199 range are server responses, mainly authentication and heartbeat responses
Parameters:
    response (Dict[str, Any]): - the response data as a Python dict
Return:
    (bool): - returns True if the response is not a service-error response | [
"检查响应码并进行对不同的响应进行处理",
"."
] | python | train |
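The status-code ranges described in the docstring can be summarised as a small standalone dispatcher. This is only an illustration of the ranges, not the library's actual handler methods.

def classify_status(code):
    # Mirrors the ranges documented above.
    if code >= 500:
        return "server error (exception raised)"
    if code >= 400:
        return "method-call error (exception set on the future)"
    if code >= 200:
        return "warning + result" if code >= 300 else "result"
    if code >= 100:
        return "server response (auth / heartbeat)"
    raise ValueError("unknown status code {}".format(code))

for c in (102, 200, 301, 404, 503):
    print(c, classify_status(c))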
senaite/senaite.core | bika/lims/decorators.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/decorators.py#L33-L42 | def XXX_REMOVEME(func):
"""Decorator for dead code removal
"""
@wraps(func)
def decorator(self, *args, **kwargs):
msg = "~~~~~~~ XXX REMOVEME marked method called: {}.{}".format(
self.__class__.__name__, func.func_name)
raise RuntimeError(msg)
return func(self, *args, **kwargs)
return decorator | [
"def",
"XXX_REMOVEME",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"\"~~~~~~~ XXX REMOVEME marked method called: {}.{}\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"func",
".",
"func_name",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator"
] | Decorator for dead code removal | [
"Decorator",
"for",
"dead",
"code",
"removal"
] | python | train |
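A small hypothetical usage sketch of the decorator above: any call to a decorated method raises immediately, which is how dead code paths get flushed out. The class and method names here are invented for illustration, and the import path is inferred from the record's module path.

from bika.lims.decorators import XXX_REMOVEME  # path taken from the record above

class LegacyView(object):

    @XXX_REMOVEME
    def old_handler(self):
        return "should never run"

LegacyView().old_handler()  # -> RuntimeError naming LegacyView.old_handler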
dahlia/sqlalchemy-imageattach | sqlalchemy_imageattach/entity.py | https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/entity.py#L457-L494 | def _original_images(self, **kwargs):
"""A list of the original images.
:returns: A list of the original images.
:rtype: :class:`typing.Sequence`\ [:class:`Image`]
"""
def test(image):
if not image.original:
return False
for filter, value in kwargs.items():
if getattr(image, filter) != value:
return False
return True
if Session.object_session(self.instance) is None:
images = []
for image, store in self._stored_images:
if test(image):
images.append(image)
state = instance_state(self.instance)
try:
added = state.committed_state[self.attr.key].added_items
except KeyError:
pass
else:
for image in added:
if test(image):
images.append(image)
if self.session:
for image in self.session.new:
if test(image):
images.append(image)
else:
query = self.filter_by(original=True, **kwargs)
images = query.all()
return images | [
"def",
"_original_images",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"test",
"(",
"image",
")",
":",
"if",
"not",
"image",
".",
"original",
":",
"return",
"False",
"for",
"filter",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"getattr",
"(",
"image",
",",
"filter",
")",
"!=",
"value",
":",
"return",
"False",
"return",
"True",
"if",
"Session",
".",
"object_session",
"(",
"self",
".",
"instance",
")",
"is",
"None",
":",
"images",
"=",
"[",
"]",
"for",
"image",
",",
"store",
"in",
"self",
".",
"_stored_images",
":",
"if",
"test",
"(",
"image",
")",
":",
"images",
".",
"append",
"(",
"image",
")",
"state",
"=",
"instance_state",
"(",
"self",
".",
"instance",
")",
"try",
":",
"added",
"=",
"state",
".",
"committed_state",
"[",
"self",
".",
"attr",
".",
"key",
"]",
".",
"added_items",
"except",
"KeyError",
":",
"pass",
"else",
":",
"for",
"image",
"in",
"added",
":",
"if",
"test",
"(",
"image",
")",
":",
"images",
".",
"append",
"(",
"image",
")",
"if",
"self",
".",
"session",
":",
"for",
"image",
"in",
"self",
".",
"session",
".",
"new",
":",
"if",
"test",
"(",
"image",
")",
":",
"images",
".",
"append",
"(",
"image",
")",
"else",
":",
"query",
"=",
"self",
".",
"filter_by",
"(",
"original",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"images",
"=",
"query",
".",
"all",
"(",
")",
"return",
"images"
] | A list of the original images.
:returns: A list of the original images.
:rtype: :class:`typing.Sequence`\ [:class:`Image`] | [
"A",
"list",
"of",
"the",
"original",
"images",
"."
] | python | train |
belbio/bel | bel/lang/partialparse.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L365-L408 | def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]:
"""Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
(parsed, errors): parsed, arguments with arg types plus error messages
"""
func_pattern = re.compile(r"\s*[a-zA-Z]+\(")
nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$")
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue
for i, arg in enumerate(parsed[span]["args"]):
nsarg_matches = nsarg_pattern.match(arg["arg"])
if func_pattern.match(arg["arg"]):
parsed[span]["args"][i].update({"type": "Function"})
elif nsarg_matches:
(start, end) = arg["span"]
ns = nsarg_matches.group(1)
ns_val = nsarg_matches.group(2)
ns_span = nsarg_matches.span(1)
ns_span = (ns_span[0] + start, ns_span[1] + start - 1)
ns_val_span = nsarg_matches.span(2)
ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1)
parsed[span]["args"][i].update(
{
"type": "NSArg",
"ns": ns,
"ns_span": ns_span,
"ns_val": ns_val,
"ns_val_span": ns_val_span,
}
)
else:
parsed[span]["args"][i].update({"type": "StrArg"})
return parsed, errors | [
"def",
"arg_types",
"(",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"Parsed",
",",
"Errors",
"]",
":",
"func_pattern",
"=",
"re",
".",
"compile",
"(",
"r\"\\s*[a-zA-Z]+\\(\"",
")",
"nsarg_pattern",
"=",
"re",
".",
"compile",
"(",
"r\"^\\s*([A-Z]+):(.*?)\\s*$\"",
")",
"for",
"span",
"in",
"parsed",
":",
"if",
"parsed",
"[",
"span",
"]",
"[",
"\"type\"",
"]",
"!=",
"\"Function\"",
"or",
"\"parens_span\"",
"not",
"in",
"parsed",
"[",
"span",
"]",
":",
"continue",
"for",
"i",
",",
"arg",
"in",
"enumerate",
"(",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
")",
":",
"nsarg_matches",
"=",
"nsarg_pattern",
".",
"match",
"(",
"arg",
"[",
"\"arg\"",
"]",
")",
"if",
"func_pattern",
".",
"match",
"(",
"arg",
"[",
"\"arg\"",
"]",
")",
":",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"[",
"i",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"Function\"",
"}",
")",
"elif",
"nsarg_matches",
":",
"(",
"start",
",",
"end",
")",
"=",
"arg",
"[",
"\"span\"",
"]",
"ns",
"=",
"nsarg_matches",
".",
"group",
"(",
"1",
")",
"ns_val",
"=",
"nsarg_matches",
".",
"group",
"(",
"2",
")",
"ns_span",
"=",
"nsarg_matches",
".",
"span",
"(",
"1",
")",
"ns_span",
"=",
"(",
"ns_span",
"[",
"0",
"]",
"+",
"start",
",",
"ns_span",
"[",
"1",
"]",
"+",
"start",
"-",
"1",
")",
"ns_val_span",
"=",
"nsarg_matches",
".",
"span",
"(",
"2",
")",
"ns_val_span",
"=",
"(",
"ns_val_span",
"[",
"0",
"]",
"+",
"start",
",",
"ns_val_span",
"[",
"1",
"]",
"+",
"start",
"-",
"1",
")",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"[",
"i",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"NSArg\"",
",",
"\"ns\"",
":",
"ns",
",",
"\"ns_span\"",
":",
"ns_span",
",",
"\"ns_val\"",
":",
"ns_val",
",",
"\"ns_val_span\"",
":",
"ns_val_span",
",",
"}",
")",
"else",
":",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"[",
"i",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"StrArg\"",
"}",
")",
"return",
"parsed",
",",
"errors"
] | Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
(parsed, errors): parsed, arguments with arg types plus error messages | [
"Add",
"argument",
"types",
"to",
"parsed",
"function",
"data",
"structure"
] | python | train |
wandb/client | wandb/vendor/prompt_toolkit/buffer.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L643-L653 | def delete(self, count=1):
"""
Delete specified number of characters and Return the deleted text.
"""
if self.cursor_position < len(self.text):
deleted = self.document.text_after_cursor[:count]
self.text = self.text[:self.cursor_position] + \
self.text[self.cursor_position + len(deleted):]
return deleted
else:
return '' | [
"def",
"delete",
"(",
"self",
",",
"count",
"=",
"1",
")",
":",
"if",
"self",
".",
"cursor_position",
"<",
"len",
"(",
"self",
".",
"text",
")",
":",
"deleted",
"=",
"self",
".",
"document",
".",
"text_after_cursor",
"[",
":",
"count",
"]",
"self",
".",
"text",
"=",
"self",
".",
"text",
"[",
":",
"self",
".",
"cursor_position",
"]",
"+",
"self",
".",
"text",
"[",
"self",
".",
"cursor_position",
"+",
"len",
"(",
"deleted",
")",
":",
"]",
"return",
"deleted",
"else",
":",
"return",
"''"
] | Delete specified number of characters and Return the deleted text. | [
"Delete",
"specified",
"number",
"of",
"characters",
"and",
"Return",
"the",
"deleted",
"text",
"."
] | python | train |
sashahart/cookies | cookies.py | https://github.com/sashahart/cookies/blob/ab8185e06f221eaf65305f15e05852393723ac95/cookies.py#L659-L678 | def _parse_response(header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True):
"""Turn one or more lines of 'Set-Cookie:' header data into a list of dicts
mapping attribute names to attribute values (as plain strings).
"""
cookie_dicts = []
for line in Definitions.EOL.split(header_data.strip()):
if not line:
break
cookie_dict = parse_one_response(
line, ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
if not cookie_dict:
continue
cookie_dicts.append(cookie_dict)
if not cookie_dicts:
if not ignore_bad_cookies:
raise InvalidCookieError(data=header_data)
_report_invalid_cookie(header_data)
return cookie_dicts | [
"def",
"_parse_response",
"(",
"header_data",
",",
"ignore_bad_cookies",
"=",
"False",
",",
"ignore_bad_attributes",
"=",
"True",
")",
":",
"cookie_dicts",
"=",
"[",
"]",
"for",
"line",
"in",
"Definitions",
".",
"EOL",
".",
"split",
"(",
"header_data",
".",
"strip",
"(",
")",
")",
":",
"if",
"not",
"line",
":",
"break",
"cookie_dict",
"=",
"parse_one_response",
"(",
"line",
",",
"ignore_bad_cookies",
"=",
"ignore_bad_cookies",
",",
"ignore_bad_attributes",
"=",
"ignore_bad_attributes",
")",
"if",
"not",
"cookie_dict",
":",
"continue",
"cookie_dicts",
".",
"append",
"(",
"cookie_dict",
")",
"if",
"not",
"cookie_dicts",
":",
"if",
"not",
"ignore_bad_cookies",
":",
"raise",
"InvalidCookieError",
"(",
"data",
"=",
"header_data",
")",
"_report_invalid_cookie",
"(",
"header_data",
")",
"return",
"cookie_dicts"
] | Turn one or more lines of 'Set-Cookie:' header data into a list of dicts
mapping attribute names to attribute values (as plain strings). | [
"Turn",
"one",
"or",
"more",
"lines",
"of",
"Set",
"-",
"Cookie",
":",
"header",
"data",
"into",
"a",
"list",
"of",
"dicts",
"mapping",
"attribute",
"names",
"to",
"attribute",
"values",
"(",
"as",
"plain",
"strings",
")",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/structural/convert.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/convert.py#L65-L80 | def to_bed(call, sample, work_dir, calls, data):
"""Create a simplified BED file from caller specific input.
"""
out_file = os.path.join(work_dir, "%s-%s-flat.bed" % (sample, call["variantcaller"]))
if call.get("vrn_file") and not utils.file_uptodate(out_file, call["vrn_file"]):
with file_transaction(data, out_file) as tx_out_file:
convert_fn = CALLER_TO_BED.get(call["variantcaller"])
if convert_fn:
vrn_file = call["vrn_file"]
if call["variantcaller"] in SUBSET_BY_SUPPORT:
ecalls = [x for x in calls if x["variantcaller"] in SUBSET_BY_SUPPORT[call["variantcaller"]]]
if len(ecalls) > 0:
vrn_file = _subset_by_support(call["vrn_file"], ecalls, data)
convert_fn(vrn_file, call["variantcaller"], tx_out_file)
if utils.file_exists(out_file):
return out_file | [
"def",
"to_bed",
"(",
"call",
",",
"sample",
",",
"work_dir",
",",
"calls",
",",
"data",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-%s-flat.bed\"",
"%",
"(",
"sample",
",",
"call",
"[",
"\"variantcaller\"",
"]",
")",
")",
"if",
"call",
".",
"get",
"(",
"\"vrn_file\"",
")",
"and",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"call",
"[",
"\"vrn_file\"",
"]",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"convert_fn",
"=",
"CALLER_TO_BED",
".",
"get",
"(",
"call",
"[",
"\"variantcaller\"",
"]",
")",
"if",
"convert_fn",
":",
"vrn_file",
"=",
"call",
"[",
"\"vrn_file\"",
"]",
"if",
"call",
"[",
"\"variantcaller\"",
"]",
"in",
"SUBSET_BY_SUPPORT",
":",
"ecalls",
"=",
"[",
"x",
"for",
"x",
"in",
"calls",
"if",
"x",
"[",
"\"variantcaller\"",
"]",
"in",
"SUBSET_BY_SUPPORT",
"[",
"call",
"[",
"\"variantcaller\"",
"]",
"]",
"]",
"if",
"len",
"(",
"ecalls",
")",
">",
"0",
":",
"vrn_file",
"=",
"_subset_by_support",
"(",
"call",
"[",
"\"vrn_file\"",
"]",
",",
"ecalls",
",",
"data",
")",
"convert_fn",
"(",
"vrn_file",
",",
"call",
"[",
"\"variantcaller\"",
"]",
",",
"tx_out_file",
")",
"if",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"return",
"out_file"
] | Create a simplified BED file from caller specific input. | [
"Create",
"a",
"simplified",
"BED",
"file",
"from",
"caller",
"specific",
"input",
"."
] | python | train |
cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/__init__.py | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L705-L710 | def run_async(self, time_limit):
''' Run this module asynchronously and return a poller. '''
self.background = time_limit
results = self.run()
return results, poller.AsyncPoller(results, self) | [
"def",
"run_async",
"(",
"self",
",",
"time_limit",
")",
":",
"self",
".",
"background",
"=",
"time_limit",
"results",
"=",
"self",
".",
"run",
"(",
")",
"return",
"results",
",",
"poller",
".",
"AsyncPoller",
"(",
"results",
",",
"self",
")"
] | Run this module asynchronously and return a poller. | [
"Run",
"this",
"module",
"asynchronously",
"and",
"return",
"a",
"poller",
"."
] | python | train |
manns/pyspread | pyspread/src/lib/_grid_cairo_renderer.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L444-L471 | def draw_bitmap(self, content):
"""Draws bitmap cell content to context"""
if content.HasAlpha():
image = wx.ImageFromBitmap(content)
image.ConvertAlphaToMask()
image.SetMask(False)
content = wx.BitmapFromImage(image)
ims = wx.lib.wxcairo.ImageSurfaceFromBitmap(content)
ims_width = ims.get_width()
ims_height = ims.get_height()
transx, transy = self._get_translation(ims_width, ims_height)
scale_x, scale_y = self._get_scalexy(ims_width, ims_height)
scale = min(scale_x, scale_y)
angle = float(self.code_array.cell_attributes[self.key]["angle"])
self.context.save()
self.context.rotate(-angle / 360 * 2 * math.pi)
self.context.translate(transx, transy)
self.context.scale(scale, scale)
self.context.set_source_surface(ims, 0, 0)
self.context.paint()
self.context.restore() | [
"def",
"draw_bitmap",
"(",
"self",
",",
"content",
")",
":",
"if",
"content",
".",
"HasAlpha",
"(",
")",
":",
"image",
"=",
"wx",
".",
"ImageFromBitmap",
"(",
"content",
")",
"image",
".",
"ConvertAlphaToMask",
"(",
")",
"image",
".",
"SetMask",
"(",
"False",
")",
"content",
"=",
"wx",
".",
"BitmapFromImage",
"(",
"image",
")",
"ims",
"=",
"wx",
".",
"lib",
".",
"wxcairo",
".",
"ImageSurfaceFromBitmap",
"(",
"content",
")",
"ims_width",
"=",
"ims",
".",
"get_width",
"(",
")",
"ims_height",
"=",
"ims",
".",
"get_height",
"(",
")",
"transx",
",",
"transy",
"=",
"self",
".",
"_get_translation",
"(",
"ims_width",
",",
"ims_height",
")",
"scale_x",
",",
"scale_y",
"=",
"self",
".",
"_get_scalexy",
"(",
"ims_width",
",",
"ims_height",
")",
"scale",
"=",
"min",
"(",
"scale_x",
",",
"scale_y",
")",
"angle",
"=",
"float",
"(",
"self",
".",
"code_array",
".",
"cell_attributes",
"[",
"self",
".",
"key",
"]",
"[",
"\"angle\"",
"]",
")",
"self",
".",
"context",
".",
"save",
"(",
")",
"self",
".",
"context",
".",
"rotate",
"(",
"-",
"angle",
"/",
"360",
"*",
"2",
"*",
"math",
".",
"pi",
")",
"self",
".",
"context",
".",
"translate",
"(",
"transx",
",",
"transy",
")",
"self",
".",
"context",
".",
"scale",
"(",
"scale",
",",
"scale",
")",
"self",
".",
"context",
".",
"set_source_surface",
"(",
"ims",
",",
"0",
",",
"0",
")",
"self",
".",
"context",
".",
"paint",
"(",
")",
"self",
".",
"context",
".",
"restore",
"(",
")"
] | Draws bitmap cell content to context | [
"Draws",
"bitmap",
"cell",
"content",
"to",
"context"
] | python | train |
HazyResearch/pdftotree | pdftotree/visual/visual_utils.py | https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L74-L141 | def get_bboxes(
img,
mask,
nb_boxes=100,
score_thresh=0.5,
iou_thresh=0.2,
prop_size=0.09,
prop_scale=1.2,
):
"""
Uses selective search to generate candidate bounding boxes and keeps the
ones that have the largest iou with the predicted mask.
:param img: original image
:param mask: predicted mask
:param nb_boxes: max number of candidate bounding boxes
:param score_thresh: score threshold to consider prediction is True
:param iou_thresh: iou threshold to consider a candidate is a correct region
:param prop_size: selective search parameter
:param prop_scale: selective search parameter, larger prop_scale favors
large bounding boxes
:return: list of bounding boxes and ious, bounding boxes are tuples (left,
top, width, height)
"""
min_size = int(img.shape[0] * prop_size * img.shape[1] * prop_size)
scale = int(img.shape[0] * prop_scale)
# TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes
img_lbl, regions = selectivesearch.selective_search(
img, scale=scale, sigma=0.8, min_size=min_size
)
rect = [None] * nb_boxes
max_iou = -1 * np.ones(nb_boxes)
mask = 1.0 * (mask > score_thresh)
# compute iou for each candidate bounding box and save top nb_bboxes
for region in regions:
left, top, width, height = region["rect"]
intersection = mask[top : top + height, left : left + width].sum()
union = height * width + mask.sum() - intersection
iou = intersection / union
idx = np.argmin(max_iou)
if iou > max_iou[idx]:
max_iou[idx] = iou
rect[idx] = region["rect"]
# Exclusive maximum
remove_indexes = max_iou == -1
bboxes = []
filtered_ious = []
for idx in np.argsort([-x for x in max_iou]):
if remove_indexes[idx]:
# no more tables bounding boxes
break
if len(bboxes) == 0:
# first candidate table bounding box
if max_iou[idx] > iou_thresh:
bboxes += [rect[idx]]
filtered_ious += [max_iou[idx]]
else:
# No tables in this document
break
else:
# If it doesn't intersect with any other bounding box
if not any(
[do_intersect(rect[idx], bboxes[k]) for k in range(len(bboxes))]
):
if max_iou[idx] > iou_thresh:
bboxes += [rect[idx]]
filtered_ious += [max_iou[idx]]
return bboxes, filtered_ious | [
"def",
"get_bboxes",
"(",
"img",
",",
"mask",
",",
"nb_boxes",
"=",
"100",
",",
"score_thresh",
"=",
"0.5",
",",
"iou_thresh",
"=",
"0.2",
",",
"prop_size",
"=",
"0.09",
",",
"prop_scale",
"=",
"1.2",
",",
")",
":",
"min_size",
"=",
"int",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
"*",
"prop_size",
"*",
"img",
".",
"shape",
"[",
"1",
"]",
"*",
"prop_size",
")",
"scale",
"=",
"int",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
"*",
"prop_scale",
")",
"# TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes",
"img_lbl",
",",
"regions",
"=",
"selectivesearch",
".",
"selective_search",
"(",
"img",
",",
"scale",
"=",
"scale",
",",
"sigma",
"=",
"0.8",
",",
"min_size",
"=",
"min_size",
")",
"rect",
"=",
"[",
"None",
"]",
"*",
"nb_boxes",
"max_iou",
"=",
"-",
"1",
"*",
"np",
".",
"ones",
"(",
"nb_boxes",
")",
"mask",
"=",
"1.0",
"*",
"(",
"mask",
">",
"score_thresh",
")",
"# compute iou for each candidate bounding box and save top nb_bboxes",
"for",
"region",
"in",
"regions",
":",
"left",
",",
"top",
",",
"width",
",",
"height",
"=",
"region",
"[",
"\"rect\"",
"]",
"intersection",
"=",
"mask",
"[",
"top",
":",
"top",
"+",
"height",
",",
"left",
":",
"left",
"+",
"width",
"]",
".",
"sum",
"(",
")",
"union",
"=",
"height",
"*",
"width",
"+",
"mask",
".",
"sum",
"(",
")",
"-",
"intersection",
"iou",
"=",
"intersection",
"/",
"union",
"idx",
"=",
"np",
".",
"argmin",
"(",
"max_iou",
")",
"if",
"iou",
">",
"max_iou",
"[",
"idx",
"]",
":",
"max_iou",
"[",
"idx",
"]",
"=",
"iou",
"rect",
"[",
"idx",
"]",
"=",
"region",
"[",
"\"rect\"",
"]",
"# Exclusive maximum",
"remove_indexes",
"=",
"max_iou",
"==",
"-",
"1",
"bboxes",
"=",
"[",
"]",
"filtered_ious",
"=",
"[",
"]",
"for",
"idx",
"in",
"np",
".",
"argsort",
"(",
"[",
"-",
"x",
"for",
"x",
"in",
"max_iou",
"]",
")",
":",
"if",
"remove_indexes",
"[",
"idx",
"]",
":",
"# no more tables bounding boxes",
"break",
"if",
"len",
"(",
"bboxes",
")",
"==",
"0",
":",
"# first candidate table bounding box",
"if",
"max_iou",
"[",
"idx",
"]",
">",
"iou_thresh",
":",
"bboxes",
"+=",
"[",
"rect",
"[",
"idx",
"]",
"]",
"filtered_ious",
"+=",
"[",
"max_iou",
"[",
"idx",
"]",
"]",
"else",
":",
"# No tables in this document",
"break",
"else",
":",
"# If it doensn't intersect with any other bounding box",
"if",
"not",
"any",
"(",
"[",
"do_intersect",
"(",
"rect",
"[",
"idx",
"]",
",",
"bboxes",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"bboxes",
")",
")",
"]",
")",
":",
"if",
"max_iou",
"[",
"idx",
"]",
">",
"iou_thresh",
":",
"bboxes",
"+=",
"[",
"rect",
"[",
"idx",
"]",
"]",
"filtered_ious",
"+=",
"[",
"max_iou",
"[",
"idx",
"]",
"]",
"return",
"bboxes",
",",
"filtered_ious"
] | Uses selective search to generate candidate bounding boxes and keeps the
ones that have the largest iou with the predicted mask.
:param img: original image
:param mask: predicted mask
:param nb_boxes: max number of candidate bounding boxes
:param score_thresh: score threshold to consider prediction is True
:param iou_thresh: iou threshold to consider a candidate is a correct region
:param prop_size: selective search parameter
:param prop_scale: selective search parameter, larger prop_scale favors
large bounding boxes
:return: list of bounding boxes and ious, bounding boxes are tuples (left,
top, width, height) | [
"Uses",
"selective",
"search",
"to",
"generate",
"candidate",
"bounding",
"boxes",
"and",
"keeps",
"the",
"ones",
"that",
"have",
"the",
"largest",
"iou",
"with",
"the",
"predicted",
"mask",
"."
] | python | train |
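A hedged usage sketch for the function above. The import path mirrors this record's module path and may differ in other versions; selectivesearch and numpy must be installed, and a real page image plus a model-predicted mask would normally replace the synthetic arrays used here.

import numpy as np
from pdftotree.visual.visual_utils import get_bboxes  # path per this record

# Synthetic stand-ins: a noise "page image" and a rectangular "table" mask
img = (255 * np.random.rand(200, 300, 3)).astype(np.uint8)
mask = np.zeros((200, 300))
mask[50:120, 40:200] = 1.0

bboxes, ious = get_bboxes(img, mask, nb_boxes=50, iou_thresh=0.2)
for (left, top, width, height), iou in zip(bboxes, ious):
    print(left, top, width, height, round(iou, 2))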
Scille/autobahn-sync | autobahn_sync/core.py | https://github.com/Scille/autobahn-sync/blob/d75fceff0d1aee61fa6dd0168eb1cd40794ad827/autobahn_sync/core.py#L55-L89 | def run_in_twisted(self, url=DEFAULT_AUTOBAHN_ROUTER, realm=DEFAULT_AUTOBAHN_REALM,
authmethods=None, authid=None, authrole=None, authextra=None,
callback=None, **kwargs):
"""
Start the WAMP connection. Given we cannot run synchronous stuff inside the
twisted thread, use this function (which returns immediately) to do the
initialization from a spawned thread.
:param callback: function that will be called inside the spawned thread.
Put the rest of your init (or your main loop if you have one) inside it
:param authmethods: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
:param authid: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
:param authrole: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
:param authextra: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
.. note::
This function must be called instead of :meth:`AutobahnSync.run`
if we are calling from twisted application (typically if we are running
our application inside crossbar as a `wsgi` component)
"""
_init_crochet(in_twisted=True)
logger.debug('run_in_crossbar, bootstraping')
# No need to go non-blocking if no callback has been provided
blocking = callback is None
def bootstrap_and_callback():
self._bootstrap(blocking, url=url, realm=realm,
authmethods=authmethods, authid=authid, authrole=authrole,
authextra=authextra, **kwargs)
if callback:
callback()
self._callbacks_runner.start()
threads.deferToThread(bootstrap_and_callback) | [
"def",
"run_in_twisted",
"(",
"self",
",",
"url",
"=",
"DEFAULT_AUTOBAHN_ROUTER",
",",
"realm",
"=",
"DEFAULT_AUTOBAHN_REALM",
",",
"authmethods",
"=",
"None",
",",
"authid",
"=",
"None",
",",
"authrole",
"=",
"None",
",",
"authextra",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_init_crochet",
"(",
"in_twisted",
"=",
"True",
")",
"logger",
".",
"debug",
"(",
"'run_in_crossbar, bootstraping'",
")",
"# No need to go non-blocking if no callback has been provided",
"blocking",
"=",
"callback",
"is",
"None",
"def",
"bootstrap_and_callback",
"(",
")",
":",
"self",
".",
"_bootstrap",
"(",
"blocking",
",",
"url",
"=",
"url",
",",
"realm",
"=",
"realm",
",",
"authmethods",
"=",
"authmethods",
",",
"authid",
"=",
"authid",
",",
"authrole",
"=",
"authrole",
",",
"authextra",
"=",
"authextra",
",",
"*",
"*",
"kwargs",
")",
"if",
"callback",
":",
"callback",
"(",
")",
"self",
".",
"_callbacks_runner",
".",
"start",
"(",
")",
"threads",
".",
"deferToThread",
"(",
"bootstrap_and_callback",
")"
] | Start the WAMP connection. Given we cannot run synchronous stuff inside the
twisted thread, use this function (which returns immediately) to do the
initialization from a spawned thread.
:param callback: function that will be called inside the spawned thread.
Put the rest of your init (or your main loop if you have one) inside it
:param authmethods: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
:param authid: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
:param authrole: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
:param authextra: Passed to :meth:`autobahn.wamp.protocol.ApplicationSession.join`
.. note::
This function must be called instead of :meth:`AutobahnSync.run`
if we are calling from twisted application (typically if we are running
our application inside crossbar as a `wsgi` component) | [
"Start",
"the",
"WAMP",
"connection",
".",
"Given",
"we",
"cannot",
"run",
"synchronous",
"stuff",
"inside",
"the",
"twisted",
"thread",
"use",
"this",
"function",
"(",
"which",
"returns",
"immediately",
")",
"to",
"do",
"the",
"initialization",
"from",
"a",
"spawned",
"thread",
"."
] | python | train |
drhagen/parsita | parsita/parsers.py | https://github.com/drhagen/parsita/blob/d97414a05541f48231381f607d1d2e6b50781d39/parsita/parsers.py#L318-L330 | def opt(parser: Union[Parser, Sequence[Input]]) -> OptionalParser:
"""Optionally match a parser.
An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it
returns a list of length one with the value returned by the parser as the
only element. If it fails, it returns an empty list.
Args:
parser: Parser or literal
"""
if isinstance(parser, str):
parser = lit(parser)
return OptionalParser(parser) | [
"def",
"opt",
"(",
"parser",
":",
"Union",
"[",
"Parser",
",",
"Sequence",
"[",
"Input",
"]",
"]",
")",
"->",
"OptionalParser",
":",
"if",
"isinstance",
"(",
"parser",
",",
"str",
")",
":",
"parser",
"=",
"lit",
"(",
"parser",
")",
"return",
"OptionalParser",
"(",
"parser",
")"
] | Optionally match a parser.
An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it
returns a list of length one with the value returned by the parser as the
only element. If it fails, it returns an empty list.
Args:
parser: Parser or literal | [
"Optionally",
"match",
"a",
"parser",
"."
] | python | test |
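A small sketch of opt in context, following the class-based parser definition style used in parsita's documentation. The exact result representations may differ between versions.

from parsita import TextParsers, lit, opt

class GreetingParsers(TextParsers):
    # The exclamation mark may or may not be present.
    greeting = lit('hello') & opt('!')

print(GreetingParsers.greeting.parse('hello!'))  # roughly Success(['hello', ['!']])
print(GreetingParsers.greeting.parse('hello'))   # roughly Success(['hello', []])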
ryanjdillon/pylleo | pylleo/utils.py | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L104-L119 | def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot)) | [
"def",
"nearest",
"(",
"items",
",",
"pivot",
")",
":",
"return",
"min",
"(",
"items",
",",
"key",
"=",
"lambda",
"x",
":",
"abs",
"(",
"x",
"-",
"pivot",
")",
")"
] | Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot` | [
"Find",
"nearest",
"value",
"in",
"array",
"including",
"datetimes"
] | python | train |
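Because the helper above only relies on subtraction and abs(), it can be exercised standalone; the sketch below re-defines it locally so the example runs without importing pylleo.

import datetime

def nearest(items, pivot):
    '''Find the value in items closest to pivot (works for datetimes too).'''
    return min(items, key=lambda x: abs(x - pivot))

print(nearest([1, 5, 9, 12], 7))  # -> 5 (ties resolve to the first minimum)
times = [datetime.datetime(2024, 1, d) for d in (1, 10, 20)]
print(nearest(times, datetime.datetime(2024, 1, 8)))  # -> 2024-01-10 00:00:00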
siemens/django-dingos | dingos/models.py | https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L946-L969 | def embedded_in(self):
"""
Used in the view for the InfoObject (in order to be able to use the standard class-based object view.
Should be removed from here and put into a proper custom view for the object.
This query only returns embedding objects of the latest revision: to change
this, the filter 'iobject__timestamp=F('iobject__identifier__latest__timestamp' must
be removed.
"""
return self._DCM['InfoObject2Fact']. \
objects. \
filter(fact__value_iobject_id=self.identifier). \
filter(iobject__timestamp=F('iobject__identifier__latest__timestamp')). \
order_by('-iobject__timestamp') \
.values_list(
'iobject',
'iobject__identifier__namespace__uri',
'iobject__identifier__uid',
'iobject__timestamp',
'iobject__name',
'fact__value_iobject_ts',
'fact__fact_term__term',
'node_id__name').distinct() | [
"def",
"embedded_in",
"(",
"self",
")",
":",
"return",
"self",
".",
"_DCM",
"[",
"'InfoObject2Fact'",
"]",
".",
"objects",
".",
"filter",
"(",
"fact__value_iobject_id",
"=",
"self",
".",
"identifier",
")",
".",
"filter",
"(",
"iobject__timestamp",
"=",
"F",
"(",
"'iobject__identifier__latest__timestamp'",
")",
")",
".",
"order_by",
"(",
"'-iobject__timestamp'",
")",
".",
"values_list",
"(",
"'iobject'",
",",
"'iobject__identifier__namespace__uri'",
",",
"'iobject__identifier__uid'",
",",
"'iobject__timestamp'",
",",
"'iobject__name'",
",",
"'fact__value_iobject_ts'",
",",
"'fact__fact_term__term'",
",",
"'node_id__name'",
")",
".",
"distinct",
"(",
")"
] | Used in the view for the InfoObject (in order to be able to use the standard class-based object view.
Should be removed from here and put into a proper custom view for the object.
This query only returns embedding objects of the latest revision: to change
this, the filter 'iobject__timestamp=F('iobject__identifier__latest__timestamp' must
be removed. | [
"Used",
"in",
"the",
"view",
"for",
"the",
"InfoObject",
"(",
"in",
"order",
"to",
"be",
"able",
"to",
"use",
"the",
"standard",
"class",
"-",
"based",
"object",
"view",
".",
"Should",
"be",
"removed",
"from",
"here",
"and",
"put",
"into",
"a",
"proper",
"custom",
"view",
"for",
"the",
"object",
"."
] | python | train |
calmjs/calmjs.parse | src/calmjs/parse/parsers/es5.py | https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L1310-L1313 | def p_with_statement(self, p):
"""with_statement : WITH LPAREN expr RPAREN statement"""
p[0] = self.asttypes.With(expr=p[3], statement=p[5])
p[0].setpos(p) | [
"def",
"p_with_statement",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"With",
"(",
"expr",
"=",
"p",
"[",
"3",
"]",
",",
"statement",
"=",
"p",
"[",
"5",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
] | with_statement : WITH LPAREN expr RPAREN statement | [
"with_statement",
":",
"WITH",
"LPAREN",
"expr",
"RPAREN",
"statement"
] | python | train |
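The grammar rule above is exercised whenever a with-statement is parsed. A hedged end-to-end sketch using the package's convenience entry point follows; the exact helper name and output formatting may differ between calmjs.parse versions.

from calmjs.parse import es5

tree = es5("with (obj) { total = total + obj.x; }")
print(tree)  # prints the parsed program back out, including the With node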
EasyPost/pystalk | pystalk/client.py | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L482-L493 | def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED') | [
"def",
"bury_job",
"(",
"self",
",",
"job_id",
",",
"pri",
"=",
"65536",
")",
":",
"if",
"hasattr",
"(",
"job_id",
",",
"'job_id'",
")",
":",
"job_id",
"=",
"job_id",
".",
"job_id",
"with",
"self",
".",
"_sock_ctx",
"(",
")",
"as",
"socket",
":",
"self",
".",
"_send_message",
"(",
"'bury {0} {1}'",
".",
"format",
"(",
"job_id",
",",
"pri",
")",
",",
"socket",
")",
"return",
"self",
".",
"_receive_word",
"(",
"socket",
",",
"b'BURIED'",
")"
] | Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int | [
"Mark",
"the",
"given",
"job_id",
"as",
"buried",
".",
"The",
"job",
"must",
"have",
"been",
"previously",
"reserved",
"by",
"this",
"connection"
] | python | train |
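A brief usage sketch for bury_job. The surrounding client calls (put_job, reserve_job) follow pystalk's README from memory and their exact signatures may differ, and a beanstalkd server must be listening on the given host and port.

from pystalk import BeanstalkClient

client = BeanstalkClient('127.0.0.1', 11300)
client.put_job('resize-image-42')

job = client.reserve_job(5)   # must be reserved on this same connection
client.bury_job(job)          # accepts a Job object or a raw job id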
onelogin/python-saml | src/onelogin/saml2/auth.py | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/auth.py#L444-L457 | def build_request_signature(self, saml_request, relay_state, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
"""
Builds the Signature of the SAML Request.
:param saml_request: The SAML Request
:type saml_request: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
"""
return self.__build_signature(saml_request, relay_state, 'SAMLRequest', sign_algorithm) | [
"def",
"build_request_signature",
"(",
"self",
",",
"saml_request",
",",
"relay_state",
",",
"sign_algorithm",
"=",
"OneLogin_Saml2_Constants",
".",
"RSA_SHA1",
")",
":",
"return",
"self",
".",
"__build_signature",
"(",
"saml_request",
",",
"relay_state",
",",
"'SAMLRequest'",
",",
"sign_algorithm",
")"
] | Builds the Signature of the SAML Request.
:param saml_request: The SAML Request
:type saml_request: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string | [
"Builds",
"the",
"Signature",
"of",
"the",
"SAML",
"Request",
"."
] | python | train |
fhcrc/taxtastic | taxtastic/taxonomy.py | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L457-L481 | def verify_rank_integrity(self, tax_id, rank, parent_id, children):
"""Confirm that for each node the parent ranks and children ranks are
coherent
"""
def _lower(n1, n2):
return self.ranks.index(n1) < self.ranks.index(n2)
if rank not in self.ranks:
raise TaxonIntegrityError('rank "{}" is undefined'.format(rank))
parent_rank = self.rank(parent_id)
# undefined ranks can be placed anywhere in a lineage
if not _lower(rank, parent_rank) and rank != self.NO_RANK:
msg = ('New node "{}", rank "{}" has same or '
'higher rank than parent node "{}", rank "{}"')
msg = msg.format(tax_id, rank, parent_id, parent_rank)
raise TaxonIntegrityError(msg)
for child in children:
if not _lower(self.rank(child), rank):
msg = 'Child node {} has same or lower rank as new node {}'
msg = msg.format(tax_id, child)
raise TaxonIntegrityError(msg)
return True | [
"def",
"verify_rank_integrity",
"(",
"self",
",",
"tax_id",
",",
"rank",
",",
"parent_id",
",",
"children",
")",
":",
"def",
"_lower",
"(",
"n1",
",",
"n2",
")",
":",
"return",
"self",
".",
"ranks",
".",
"index",
"(",
"n1",
")",
"<",
"self",
".",
"ranks",
".",
"index",
"(",
"n2",
")",
"if",
"rank",
"not",
"in",
"self",
".",
"ranks",
":",
"raise",
"TaxonIntegrityError",
"(",
"'rank \"{}\" is undefined'",
".",
"format",
"(",
"rank",
")",
")",
"parent_rank",
"=",
"self",
".",
"rank",
"(",
"parent_id",
")",
"# undefined ranks can be placed anywhere in a lineage",
"if",
"not",
"_lower",
"(",
"rank",
",",
"parent_rank",
")",
"and",
"rank",
"!=",
"self",
".",
"NO_RANK",
":",
"msg",
"=",
"(",
"'New node \"{}\", rank \"{}\" has same or '",
"'higher rank than parent node \"{}\", rank \"{}\"'",
")",
"msg",
"=",
"msg",
".",
"format",
"(",
"tax_id",
",",
"rank",
",",
"parent_id",
",",
"parent_rank",
")",
"raise",
"TaxonIntegrityError",
"(",
"msg",
")",
"for",
"child",
"in",
"children",
":",
"if",
"not",
"_lower",
"(",
"self",
".",
"rank",
"(",
"child",
")",
",",
"rank",
")",
":",
"msg",
"=",
"'Child node {} has same or lower rank as new node {}'",
"msg",
"=",
"msg",
".",
"format",
"(",
"tax_id",
",",
"child",
")",
"raise",
"TaxonIntegrityError",
"(",
"msg",
")",
"return",
"True"
] | Confirm that for each node the parent ranks and children ranks are
coherent | [
"Confirm",
"that",
"for",
"each",
"node",
"the",
"parent",
"ranks",
"and",
"children",
"ranks",
"are",
"coherent"
] | python | train |
pycontribs/pyrax | pyrax/cloudblockstorage.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudblockstorage.py#L438-L451 | def _configure_manager(self):
"""
Create the manager to handle volumes, and also others to handle
volume types and snapshots.
"""
self._manager = CloudBlockStorageManager(self,
resource_class=CloudBlockStorageVolume, response_key="volume",
uri_base="volumes")
self._types_manager = BaseManager(self,
resource_class=CloudBlockStorageVolumeType,
response_key="volume_type", uri_base="types")
self._snapshot_manager = CloudBlockStorageSnapshotManager(self,
resource_class=CloudBlockStorageSnapshot,
response_key="snapshot", uri_base="snapshots") | [
"def",
"_configure_manager",
"(",
"self",
")",
":",
"self",
".",
"_manager",
"=",
"CloudBlockStorageManager",
"(",
"self",
",",
"resource_class",
"=",
"CloudBlockStorageVolume",
",",
"response_key",
"=",
"\"volume\"",
",",
"uri_base",
"=",
"\"volumes\"",
")",
"self",
".",
"_types_manager",
"=",
"BaseManager",
"(",
"self",
",",
"resource_class",
"=",
"CloudBlockStorageVolumeType",
",",
"response_key",
"=",
"\"volume_type\"",
",",
"uri_base",
"=",
"\"types\"",
")",
"self",
".",
"_snapshot_manager",
"=",
"CloudBlockStorageSnapshotManager",
"(",
"self",
",",
"resource_class",
"=",
"CloudBlockStorageSnapshot",
",",
"response_key",
"=",
"\"snapshot\"",
",",
"uri_base",
"=",
"\"snapshots\"",
")"
] | Create the manager to handle volumes, and also others to handle
volume types and snapshots. | [
"Create",
"the",
"manager",
"to",
"handle",
"the",
"instances",
"and",
"also",
"another",
"to",
"handle",
"flavors",
"."
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/trax/learning_rate.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/learning_rate.py#L42-L92 | def MultifactorSchedule(history=None,
factors="constant * linear_warmup * rsqrt_decay",
constant=0.1,
warmup_steps=100,
decay_factor=0.5,
steps_per_decay=20000):
"""Factor-based learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
Args:
history: the history of training and evaluation (History object).
factors: a string with factors separated by "*" that defines the schedule.
constant: float, the starting constant for the learning rate schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
Returns:
a function learning_rate(step): float -> float, the step-dependent lr.
"""
del history
cache_args = (factors, constant, warmup_steps)
if cache_args in _memoized_multifactor_schedules:
return _memoized_multifactor_schedules[cache_args]
factors = [n.strip() for n in factors.split("*")]
def learning_rate(step): # pylint: disable=invalid-name
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == "constant":
ret *= constant
elif name == "linear_warmup":
ret *= np.minimum(1.0, step / warmup_steps)
elif name == "rsqrt_decay":
ret /= np.sqrt(np.maximum(step, warmup_steps))
elif name == "decay_every":
ret *= (decay_factor ** (step//steps_per_decay))
else:
raise ValueError("Unknown factor %s." % name)
return ret
_memoized_multifactor_schedules[cache_args] = learning_rate
return learning_rate | [
"def",
"MultifactorSchedule",
"(",
"history",
"=",
"None",
",",
"factors",
"=",
"\"constant * linear_warmup * rsqrt_decay\"",
",",
"constant",
"=",
"0.1",
",",
"warmup_steps",
"=",
"100",
",",
"decay_factor",
"=",
"0.5",
",",
"steps_per_decay",
"=",
"20000",
")",
":",
"del",
"history",
"cache_args",
"=",
"(",
"factors",
",",
"constant",
",",
"warmup_steps",
")",
"if",
"cache_args",
"in",
"_memoized_multifactor_schedules",
":",
"return",
"_memoized_multifactor_schedules",
"[",
"cache_args",
"]",
"factors",
"=",
"[",
"n",
".",
"strip",
"(",
")",
"for",
"n",
"in",
"factors",
".",
"split",
"(",
"\"*\"",
")",
"]",
"def",
"learning_rate",
"(",
"step",
")",
":",
"# pylint: disable=invalid-name",
"\"\"\"Step to learning rate function.\"\"\"",
"ret",
"=",
"1.0",
"for",
"name",
"in",
"factors",
":",
"if",
"name",
"==",
"\"constant\"",
":",
"ret",
"*=",
"constant",
"elif",
"name",
"==",
"\"linear_warmup\"",
":",
"ret",
"*=",
"np",
".",
"minimum",
"(",
"1.0",
",",
"step",
"/",
"warmup_steps",
")",
"elif",
"name",
"==",
"\"rsqrt_decay\"",
":",
"ret",
"/=",
"np",
".",
"sqrt",
"(",
"np",
".",
"maximum",
"(",
"step",
",",
"warmup_steps",
")",
")",
"elif",
"name",
"==",
"\"decay_every\"",
":",
"ret",
"*=",
"(",
"decay_factor",
"**",
"(",
"step",
"//",
"steps_per_decay",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown factor %s.\"",
"%",
"name",
")",
"return",
"ret",
"_memoized_multifactor_schedules",
"[",
"cache_args",
"]",
"=",
"learning_rate",
"return",
"learning_rate"
] | Factor-based learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
Args:
history: the history of training and evaluation (History object).
factors: a string with factors separated by "*" that defines the schedule.
constant: float, the starting constant for the learning rate schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
Returns:
a function learning_rate(step): float -> float, the step-dependent lr. | [
"Factor",
"-",
"based",
"learning",
"rate",
"schedule",
"."
] | python | train |
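To make the default factor string concrete, here is a standalone re-implementation of "constant * linear_warmup * rsqrt_decay" for illustration only; it bypasses trax's configuration and memoization machinery.

import numpy as np

def multifactor_lr(constant=0.1, warmup_steps=100):
    def learning_rate(step):
        ret = constant
        ret *= np.minimum(1.0, step / warmup_steps)     # linear_warmup
        ret /= np.sqrt(np.maximum(step, warmup_steps))  # rsqrt_decay
        return ret
    return learning_rate

lr = multifactor_lr()
for step in (1, 50, 100, 400, 10000):
    print(step, float(lr(step)))  # ramps up to 0.01 at step 100, then decays as 1/sqrt(step)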
mitsei/dlkit | dlkit/json_/repository/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L1478-L1502 | def delete_asset(self, asset_id):
"""Deletes an ``Asset``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset`` to
remove
raise: NotFound - ``asset_id`` not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('repository',
collection='Asset',
runtime=self._runtime)
if not isinstance(asset_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
asset_map = collection.find_one(
dict({'_id': ObjectId(asset_id.get_identifier())},
**self._view_filter()))
objects.Asset(osid_object_map=asset_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(asset_id.get_identifier())}) | [
"def",
"delete_asset",
"(",
"self",
",",
"asset_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceAdminSession.delete_resource_template",
"collection",
"=",
"JSONClientValidated",
"(",
"'repository'",
",",
"collection",
"=",
"'Asset'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"if",
"not",
"isinstance",
"(",
"asset_id",
",",
"ABCId",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
"'the argument is not a valid OSID Id'",
")",
"asset_map",
"=",
"collection",
".",
"find_one",
"(",
"dict",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"asset_id",
".",
"get_identifier",
"(",
")",
")",
"}",
",",
"*",
"*",
"self",
".",
"_view_filter",
"(",
")",
")",
")",
"objects",
".",
"Asset",
"(",
"osid_object_map",
"=",
"asset_map",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")",
".",
"_delete",
"(",
")",
"collection",
".",
"delete_one",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"asset_id",
".",
"get_identifier",
"(",
")",
")",
"}",
")"
] | Deletes an ``Asset``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset`` to
remove
raise: NotFound - ``asset_id`` not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Deletes",
"an",
"Asset",
"."
] | python | train |
flo-compbio/genometools | genometools/ontology/ontology.py | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ontology/ontology.py#L159-L184 | def write_pickle(self, path, compress=False):
"""Serialize the current `GOParser` object and store it in a pickle file.
Parameters
----------
path: str
Path of the output file.
compress: bool, optional
Whether to compress the file using gzip.
Returns
-------
None
Notes
-----
Compression with gzip is significantly slower than storing the file
in uncompressed form.
"""
logger.info('Writing pickle to "%s"...', path)
if compress:
with gzip.open(path, 'wb') as ofh:
pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL)
else:
with open(path, 'wb') as ofh:
pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL) | [
"def",
"write_pickle",
"(",
"self",
",",
"path",
",",
"compress",
"=",
"False",
")",
":",
"logger",
".",
"info",
"(",
"'Writing pickle to \"%s\"...'",
",",
"path",
")",
"if",
"compress",
":",
"with",
"gzip",
".",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"ofh",
":",
"pickle",
".",
"dump",
"(",
"self",
",",
"ofh",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"else",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"ofh",
":",
"pickle",
".",
"dump",
"(",
"self",
",",
"ofh",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")"
] | Serialize the current `GOParser` object and store it in a pickle file.
Parameters
----------
path: str
Path of the output file.
compress: bool, optional
Whether to compress the file using gzip.
Returns
-------
None
Notes
-----
Compression with gzip is significantly slower than storing the file
in uncompressed form. | [
"Serialize",
"the",
"current",
"GOParser",
"object",
"and",
"store",
"it",
"in",
"a",
"pickle",
"file",
"."
] | python | train |
LordGaav/python-chaos | chaos/threading/scheduler.py | https://github.com/LordGaav/python-chaos/blob/52cd29a6fd15693ee1e53786b93bcb23fbf84ddd/chaos/threading/scheduler.py#L90-L105 | def setStopAction(self, action, *args, **kwargs):
"""
Set a function to call when run() is stopping, after the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
"""
self.stop_action = action
self.stop_args = args
self.stop_kwargs = kwargs | [
"def",
"setStopAction",
"(",
"self",
",",
"action",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"stop_action",
"=",
"action",
"self",
".",
"stop_args",
"=",
"args",
"self",
".",
"stop_kwargs",
"=",
"kwargs"
] | Set a function to call when run() is stopping, after the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action. | [
"Set",
"a",
"function",
"to",
"call",
"when",
"run",
"()",
"is",
"stopping",
"after",
"the",
"main",
"action",
"is",
"called",
"."
] | python | train |
jsvine/spectra | spectra/grapefruit.py | https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/grapefruit.py#L495-L533 | def HsvToRgb(h, s, v):
'''Convert the color from HSV coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:v:
The Value component [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HslToRgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
'''
if s==0: return (v, v, v) # achromatic (gray)
h /= 60.0
h = h % 6.0
i = int(h)
f = h - i
if not(i&1): f = 1-f # if i is even
m = v * (1.0 - s)
n = v * (1.0 - (s * f))
if i==0: return (v, n, m)
if i==1: return (n, v, m)
if i==2: return (m, v, n)
if i==3: return (m, n, v)
if i==4: return (n, m, v)
return (v, m, n) | [
"def",
"HsvToRgb",
"(",
"h",
",",
"s",
",",
"v",
")",
":",
"if",
"s",
"==",
"0",
":",
"return",
"(",
"v",
",",
"v",
",",
"v",
")",
"# achromatic (gray)",
"h",
"/=",
"60.0",
"h",
"=",
"h",
"%",
"6.0",
"i",
"=",
"int",
"(",
"h",
")",
"f",
"=",
"h",
"-",
"i",
"if",
"not",
"(",
"i",
"&",
"1",
")",
":",
"f",
"=",
"1",
"-",
"f",
"# if i is even",
"m",
"=",
"v",
"*",
"(",
"1.0",
"-",
"s",
")",
"n",
"=",
"v",
"*",
"(",
"1.0",
"-",
"(",
"s",
"*",
"f",
")",
")",
"if",
"i",
"==",
"0",
":",
"return",
"(",
"v",
",",
"n",
",",
"m",
")",
"if",
"i",
"==",
"1",
":",
"return",
"(",
"n",
",",
"v",
",",
"m",
")",
"if",
"i",
"==",
"2",
":",
"return",
"(",
"m",
",",
"v",
",",
"n",
")",
"if",
"i",
"==",
"3",
":",
"return",
"(",
"m",
",",
"n",
",",
"v",
")",
"if",
"i",
"==",
"4",
":",
"return",
"(",
"n",
",",
"m",
",",
"v",
")",
"return",
"(",
"v",
",",
"m",
",",
"n",
")"
] | Convert the color from HSV coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:v:
The Value component [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HslToRgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0) | [
"Convert",
"the",
"color",
"from",
"RGB",
"coordinates",
"to",
"HSV",
"."
] | python | train |
rwl/pylon | pylon/io/matpower.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/matpower.py#L906-L915 | def write_area_data(self, file):
""" Writes area data to file.
"""
file.write("%% area data" + "\n")
file.write("%\tno.\tprice_ref_bus" + "\n")
file.write("areas = [" + "\n")
# TODO: Implement areas
file.write("\t1\t1;" + "\n")
file.write("];" + "\n") | [
"def",
"write_area_data",
"(",
"self",
",",
"file",
")",
":",
"file",
".",
"write",
"(",
"\"%% area data\"",
"+",
"\"\\n\"",
")",
"file",
".",
"write",
"(",
"\"%\\tno.\\tprice_ref_bus\"",
"+",
"\"\\n\"",
")",
"file",
".",
"write",
"(",
"\"areas = [\"",
"+",
"\"\\n\"",
")",
"# TODO: Implement areas",
"file",
".",
"write",
"(",
"\"\\t1\\t1;\"",
"+",
"\"\\n\"",
")",
"file",
".",
"write",
"(",
"\"];\"",
"+",
"\"\\n\"",
")"
] | Writes area data to file. | [
"Writes",
"area",
"data",
"to",
"file",
"."
] | python | train |
Telefonica/toolium | toolium/behave/env_utils.py | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/behave/env_utils.py#L178-L185 | def __print_step_by_console(self, step):
"""
print the step by console if the show variable is enabled
:param step: step text
"""
step_list = step.split(u'\n')
for s in step_list:
self.logger.by_console(u' %s' % repr(s).replace("u'", "").replace("'", "")) | [
"def",
"__print_step_by_console",
"(",
"self",
",",
"step",
")",
":",
"step_list",
"=",
"step",
".",
"split",
"(",
"u'\\n'",
")",
"for",
"s",
"in",
"step_list",
":",
"self",
".",
"logger",
".",
"by_console",
"(",
"u' %s'",
"%",
"repr",
"(",
"s",
")",
".",
"replace",
"(",
"\"u'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
")"
] | print the step by console if the show variable is enabled
:param step: step text | [
"print",
"the",
"step",
"by",
"console",
"if",
"the",
"show",
"variable",
"is",
"enabled",
":",
"param",
"step",
":",
"step",
"text"
] | python | train |
iotile/coretools | iotilecore/iotile/core/hw/transport/adapter/mixin_notifications.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/mixin_notifications.py#L58-L87 | def register_monitor(self, devices, events, callback):
"""Register a callback when events happen.
If this method is called, it is guaranteed to take effect before the
next call to ``_notify_event`` after this method returns. This method
is safe to call from within a callback that is itself called by
``notify_event``.
See :meth:`AbstractDeviceAdapter.register_monitor`.
"""
# Ensure we don't exhaust any iterables
events = list(events)
devices = list(devices)
for event in events:
if event not in self.SUPPORTED_EVENTS:
raise ArgumentError("Unknown event type {} specified".format(event), events=events)
monitor_id = str(uuid.uuid4())
action = (monitor_id, "add", devices, events)
self._callbacks[monitor_id] = callback
if self._currently_notifying:
self._deferred_adjustments.append(action)
else:
self._adjust_monitor_internal(*action)
return monitor_id | [
"def",
"register_monitor",
"(",
"self",
",",
"devices",
",",
"events",
",",
"callback",
")",
":",
"# Ensure we don't exhaust any iterables",
"events",
"=",
"list",
"(",
"events",
")",
"devices",
"=",
"list",
"(",
"devices",
")",
"for",
"event",
"in",
"events",
":",
"if",
"event",
"not",
"in",
"self",
".",
"SUPPORTED_EVENTS",
":",
"raise",
"ArgumentError",
"(",
"\"Unknown event type {} specified\"",
".",
"format",
"(",
"event",
")",
",",
"events",
"=",
"events",
")",
"monitor_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"action",
"=",
"(",
"monitor_id",
",",
"\"add\"",
",",
"devices",
",",
"events",
")",
"self",
".",
"_callbacks",
"[",
"monitor_id",
"]",
"=",
"callback",
"if",
"self",
".",
"_currently_notifying",
":",
"self",
".",
"_deferred_adjustments",
".",
"append",
"(",
"action",
")",
"else",
":",
"self",
".",
"_adjust_monitor_internal",
"(",
"*",
"action",
")",
"return",
"monitor_id"
] | Register a callback when events happen.
If this method is called, it is guaranteed to take effect before the
next call to ``_notify_event`` after this method returns. This method
is safe to call from within a callback that is itself called by
``notify_event``.
See :meth:`AbstractDeviceAdapter.register_monitor`. | [
"Register",
"a",
"callback",
"when",
"events",
"happen",
"."
] | python | train |
fhs/pyhdf | pyhdf/SD.py | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1147-L1176 | def info(self):
"""Retrieve info about the attribute : name, data type and
number of values.
Args::
no argument
Returns::
3-element tuple holding:
- attribute name
- attribute data type (see constants SDC.xxx)
- number of values in the attribute; for a string-valued
attribute (data type SDC.CHAR8), the number of values
corresponds to the string length
C library equivalent : SDattrinfo
"""
if self._index is None:
try:
self._index = self._obj.findattr(self._name)
except HDF4Error:
raise HDF4Error("info: cannot convert name to index")
status, self._name, data_type, n_values = \
_C.SDattrinfo(self._obj._id, self._index)
_checkErr('info', status, 'illegal attribute index')
return self._name, data_type, n_values | [
"def",
"info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_index",
"is",
"None",
":",
"try",
":",
"self",
".",
"_index",
"=",
"self",
".",
"_obj",
".",
"findattr",
"(",
"self",
".",
"_name",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"\"info: cannot convert name to index\"",
")",
"status",
",",
"self",
".",
"_name",
",",
"data_type",
",",
"n_values",
"=",
"_C",
".",
"SDattrinfo",
"(",
"self",
".",
"_obj",
".",
"_id",
",",
"self",
".",
"_index",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"'illegal attribute index'",
")",
"return",
"self",
".",
"_name",
",",
"data_type",
",",
"n_values"
] | Retrieve info about the attribute : name, data type and
number of values.
Args::
no argument
Returns::
3-element tuple holding:
- attribute name
- attribute data type (see constants SDC.xxx)
- number of values in the attribute; for a string-valued
attribute (data type SDC.CHAR8), the number of values
corresponds to the string length
C library equivalent : SDattrinfo | [
"Retrieve",
"info",
"about",
"the",
"attribute",
":",
"name",
"data",
"type",
"and",
"number",
"of",
"values",
"."
] | python | train |
pgjones/quart | quart/wrappers/request.py | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/wrappers/request.py#L158-L171 | async def get_data(self, raw: bool=True) -> AnyStr:
"""The request body data."""
try:
body_future = asyncio.ensure_future(self.body)
raw_data = await asyncio.wait_for(body_future, timeout=self.body_timeout)
except asyncio.TimeoutError:
body_future.cancel()
from ..exceptions import RequestTimeout # noqa Avoiding circular import
raise RequestTimeout()
if raw:
return raw_data
else:
return raw_data.decode(self.charset) | [
"async",
"def",
"get_data",
"(",
"self",
",",
"raw",
":",
"bool",
"=",
"True",
")",
"->",
"AnyStr",
":",
"try",
":",
"body_future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"body",
")",
"raw_data",
"=",
"await",
"asyncio",
".",
"wait_for",
"(",
"body_future",
",",
"timeout",
"=",
"self",
".",
"body_timeout",
")",
"except",
"asyncio",
".",
"TimeoutError",
":",
"body_future",
".",
"cancel",
"(",
")",
"from",
".",
".",
"exceptions",
"import",
"RequestTimeout",
"# noqa Avoiding circular import",
"raise",
"RequestTimeout",
"(",
")",
"if",
"raw",
":",
"return",
"raw_data",
"else",
":",
"return",
"raw_data",
".",
"decode",
"(",
"self",
".",
"charset",
")"
] | The request body data. | [
"The",
"request",
"body",
"data",
"."
] | python | train |
markovmodel/msmtools | msmtools/flux/dense/tpt.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/flux/dense/tpt.py#L186-L210 | def coarsegrain(F, sets):
r"""Coarse-grains the flux to the given sets
$fc_{i,j} = \sum_{i \in I,j \in J} f_{i,j}$
Note that if you coarse-grain a net flux, it does not necessarily have a net
flux property anymore. If you want to make sure you get a netflux,
use to_netflux(coarsegrain(F,sets)).
Parameters
----------
F : (n, n) ndarray
Matrix of flux values between pairs of states.
sets : list of array-like of ints
The sets of states onto which the flux is coarse-grained.
"""
nnew = len(sets)
Fc = np.zeros((nnew, nnew))
for i in range(0, nnew - 1):
for j in range(i + 1, nnew):
I = list(sets[i])
J = list(sets[j])
Fc[i, j] = np.sum(F[I, :][:, J])
Fc[j, i] = np.sum(F[J, :][:, I])
return Fc | [
"def",
"coarsegrain",
"(",
"F",
",",
"sets",
")",
":",
"nnew",
"=",
"len",
"(",
"sets",
")",
"Fc",
"=",
"np",
".",
"zeros",
"(",
"(",
"nnew",
",",
"nnew",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"nnew",
"-",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"nnew",
")",
":",
"I",
"=",
"list",
"(",
"sets",
"[",
"i",
"]",
")",
"J",
"=",
"list",
"(",
"sets",
"[",
"j",
"]",
")",
"Fc",
"[",
"i",
",",
"j",
"]",
"=",
"np",
".",
"sum",
"(",
"F",
"[",
"I",
",",
":",
"]",
"[",
":",
",",
"J",
"]",
")",
"Fc",
"[",
"j",
",",
"i",
"]",
"=",
"np",
".",
"sum",
"(",
"F",
"[",
"J",
",",
":",
"]",
"[",
":",
",",
"I",
"]",
")",
"return",
"Fc"
] | r"""Coarse-grains the flux to the given sets
$fc_{i,j} = \sum_{i \in I,j \in J} f_{i,j}$
Note that if you coarse-grain a net flux, it does not necessarily have a net
flux property anymore. If you want to make sure you get a netflux,
use to_netflux(coarsegrain(F,sets)).
Parameters
----------
F : (n, n) ndarray
Matrix of flux values between pairs of states.
sets : list of array-like of ints
The sets of states onto which the flux is coarse-grained. | [
"r",
"Coarse",
"-",
"grains",
"the",
"flux",
"to",
"the",
"given",
"sets"
] | python | train |
sam-cox/pytides | pytides/tide.py | https://github.com/sam-cox/pytides/blob/63a2507299002f1979ea55a17a82561158d685f7/pytides/tide.py#L260-L268 | def normalize(self):
"""
Adapt self.model so that amplitudes are positive and phases are in [0,360) as per convention
"""
for i, (_, amplitude, phase) in enumerate(self.model):
if amplitude < 0:
self.model['amplitude'][i] = -amplitude
self.model['phase'][i] = phase + 180.0
self.model['phase'][i] = np.mod(self.model['phase'][i], 360.0) | [
"def",
"normalize",
"(",
"self",
")",
":",
"for",
"i",
",",
"(",
"_",
",",
"amplitude",
",",
"phase",
")",
"in",
"enumerate",
"(",
"self",
".",
"model",
")",
":",
"if",
"amplitude",
"<",
"0",
":",
"self",
".",
"model",
"[",
"'amplitude'",
"]",
"[",
"i",
"]",
"=",
"-",
"amplitude",
"self",
".",
"model",
"[",
"'phase'",
"]",
"[",
"i",
"]",
"=",
"phase",
"+",
"180.0",
"self",
".",
"model",
"[",
"'phase'",
"]",
"[",
"i",
"]",
"=",
"np",
".",
"mod",
"(",
"self",
".",
"model",
"[",
"'phase'",
"]",
"[",
"i",
"]",
",",
"360.0",
")"
] | Adapt self.model so that amplitudes are positive and phases are in [0,360) as per convention | [
"Adapt",
"self",
".",
"model",
"so",
"that",
"amplitudes",
"are",
"positive",
"and",
"phases",
"are",
"in",
"[",
"0",
"360",
")",
"as",
"per",
"convention"
] | python | train |
Rafiot/PubSubLogger | pubsublogger/subscriber.py | https://github.com/Rafiot/PubSubLogger/blob/4f28ad673f42ee2ec7792d414d325aef9a56da53/pubsublogger/subscriber.py#L94-L109 | def mail_setup(path):
"""
Set the variables to be able to send emails.
:param path: path to the config file
"""
global dest_mails
global smtp_server
global smtp_port
global src_server
config = configparser.RawConfigParser()
config.readfp(path)
dest_mails = config.get('mail', 'dest_mail').split(',')
smtp_server = config.get('mail', 'smtp_server')
smtp_port = config.get('mail', 'smtp_port')
src_server = config.get('mail', 'src_server') | [
"def",
"mail_setup",
"(",
"path",
")",
":",
"global",
"dest_mails",
"global",
"smtp_server",
"global",
"smtp_port",
"global",
"src_server",
"config",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"config",
".",
"readfp",
"(",
"path",
")",
"dest_mails",
"=",
"config",
".",
"get",
"(",
"'mail'",
",",
"'dest_mail'",
")",
".",
"split",
"(",
"','",
")",
"smtp_server",
"=",
"config",
".",
"get",
"(",
"'mail'",
",",
"'smtp_server'",
")",
"smtp_port",
"=",
"config",
".",
"get",
"(",
"'mail'",
",",
"'smtp_port'",
")",
"src_server",
"=",
"config",
".",
"get",
"(",
"'mail'",
",",
"'src_server'",
")"
] | Set the variables to be able to send emails.
:param path: path to the config file | [
"Set",
"the",
"variables",
"to",
"be",
"able",
"to",
"send",
"emails",
"."
] | python | train |
androguard/androguard | androguard/core/analysis/analysis.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/analysis/analysis.py#L1555-L1613 | def get_call_graph(self, classname=".*", methodname=".*", descriptor=".*",
accessflags=".*", no_isolated=False, entry_points=[]):
"""
Generate a directed graph based on the methods found by the filters applied.
The filters are the same as in
:meth:`~androguard.core.analysis.analysis.Analysis.find_methods`
A networkx.DiGraph is returned, containing all edges only once!
that means, if a method calls some method twice or more often, there will
only be a single connection.
:param classname: regular expression of the classname (default: ".*")
:param methodname: regular expression of the methodname (default: ".*")
:param descriptor: regular expression of the descriptor (default: ".*")
:param accessflags: regular expression of the access flags (default: ".*")
:param no_isolated: remove isolated nodes from the graph, e.g. methods which do not call anything (default: False)
:param entry_points: A list of classes that are marked as entry point
:rtype: DiGraph
"""
def _add_node(G, method):
"""
Wrapper to add methods to a graph
"""
if method not in G.node:
G.add_node(method,
external=isinstance(method, ExternalMethod),
entrypoint=method.get_class_name() in entry_points,
native="native" in method.get_access_flags_string(),
public="public" in method.get_access_flags_string(),
static="static" in method.get_access_flags_string(),
)
CG = nx.DiGraph()
# Note: If you create the CG from many classes at the same time, the drawing
# will be a total mess...
for m in self.find_methods(classname=classname, methodname=methodname,
descriptor=descriptor, accessflags=accessflags):
orig_method = m.get_method()
log.info("Found Method --> {}".format(orig_method))
if no_isolated and len(m.get_xref_to()) == 0:
log.info("Skipped {}, because if has no xrefs".format(orig_method))
continue
_add_node(CG, orig_method)
for other_class, callee, offset in m.get_xref_to():
_add_node(CG, callee)
# As this is a DiGraph and we are not interested in duplicate edges,
# check if the edge is already in the edge set.
# If you need all calls, you probably want to check out MultiDiGraph
if not CG.has_edge(orig_method, callee):
CG.add_edge(orig_method, callee)
return CG | [
"def",
"get_call_graph",
"(",
"self",
",",
"classname",
"=",
"\".*\"",
",",
"methodname",
"=",
"\".*\"",
",",
"descriptor",
"=",
"\".*\"",
",",
"accessflags",
"=",
"\".*\"",
",",
"no_isolated",
"=",
"False",
",",
"entry_points",
"=",
"[",
"]",
")",
":",
"def",
"_add_node",
"(",
"G",
",",
"method",
")",
":",
"\"\"\"\n Wrapper to add methods to a graph\n \"\"\"",
"if",
"method",
"not",
"in",
"G",
".",
"node",
":",
"G",
".",
"add_node",
"(",
"method",
",",
"external",
"=",
"isinstance",
"(",
"method",
",",
"ExternalMethod",
")",
",",
"entrypoint",
"=",
"method",
".",
"get_class_name",
"(",
")",
"in",
"entry_points",
",",
"native",
"=",
"\"native\"",
"in",
"method",
".",
"get_access_flags_string",
"(",
")",
",",
"public",
"=",
"\"public\"",
"in",
"method",
".",
"get_access_flags_string",
"(",
")",
",",
"static",
"=",
"\"static\"",
"in",
"method",
".",
"get_access_flags_string",
"(",
")",
",",
")",
"CG",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"# Note: If you create the CG from many classes at the same time, the drawing",
"# will be a total mess...",
"for",
"m",
"in",
"self",
".",
"find_methods",
"(",
"classname",
"=",
"classname",
",",
"methodname",
"=",
"methodname",
",",
"descriptor",
"=",
"descriptor",
",",
"accessflags",
"=",
"accessflags",
")",
":",
"orig_method",
"=",
"m",
".",
"get_method",
"(",
")",
"log",
".",
"info",
"(",
"\"Found Method --> {}\"",
".",
"format",
"(",
"orig_method",
")",
")",
"if",
"no_isolated",
"and",
"len",
"(",
"m",
".",
"get_xref_to",
"(",
")",
")",
"==",
"0",
":",
"log",
".",
"info",
"(",
"\"Skipped {}, because if has no xrefs\"",
".",
"format",
"(",
"orig_method",
")",
")",
"continue",
"_add_node",
"(",
"CG",
",",
"orig_method",
")",
"for",
"other_class",
",",
"callee",
",",
"offset",
"in",
"m",
".",
"get_xref_to",
"(",
")",
":",
"_add_node",
"(",
"CG",
",",
"callee",
")",
"# As this is a DiGraph and we are not interested in duplicate edges,",
"# check if the edge is already in the edge set.",
"# If you need all calls, you probably want to check out MultiDiGraph",
"if",
"not",
"CG",
".",
"has_edge",
"(",
"orig_method",
",",
"callee",
")",
":",
"CG",
".",
"add_edge",
"(",
"orig_method",
",",
"callee",
")",
"return",
"CG"
] | Generate a directed graph based on the methods found by the filters applied.
The filters are the same as in
:meth:`~androguard.core.analysis.analysis.Analysis.find_methods`
A networkx.DiGraph is returned, containing all edges only once!
that means, if a method calls some method twice or more often, there will
only be a single connection.
:param classname: regular expression of the classname (default: ".*")
:param methodname: regular expression of the methodname (default: ".*")
:param descriptor: regular expression of the descriptor (default: ".*")
:param accessflags: regular expression of the access flags (default: ".*")
:param no_isolated: remove isolated nodes from the graph, e.g. methods which do not call anything (default: False)
:param entry_points: A list of classes that are marked as entry point
:rtype: DiGraph | [
"Generate",
"a",
"directed",
"graph",
"based",
"on",
"the",
"methods",
"found",
"by",
"the",
"filters",
"applied",
".",
"The",
"filters",
"are",
"the",
"same",
"as",
"in",
":",
"meth",
":",
"~androguard",
".",
"core",
".",
"analaysis",
".",
"analaysis",
".",
"Analysis",
".",
"find_methods"
] | python | train |
Azure/blobxfer | blobxfer/models/azure.py | https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/azure.py#L355-L387 | def populate_from_file(
self, sa, file, path, vio=None, store_raw_metadata=False,
snapshot=None):
# type: (StorageEntity, blobxfer.operations.azure.StorageAccount,
# azure.storage.file.models.File, str,
# blobxfer.models.metadata.VectoredStripe, bool, str) -> None
"""Populate properties from File
:param StorageEntity self: this
:param blobxfer.operations.azure.StorageAccount sa: storage account
:param azure.storage.file.models.File file: file to populate from
:param str path: full path to file
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool store_raw_metadata: store raw metadata
:param str snapshot: snapshot
"""
if store_raw_metadata:
self._raw_metadata = file.metadata
else:
self._fileattr = blobxfer.models.metadata.fileattr_from_metadata(
file.metadata)
self._vio = vio
self._can_create_containers = sa.can_create_containers
if path is not None:
self._name = str(pathlib.Path(path) / file.name)
else:
self._name = file.name
self._snapshot = snapshot
self._lmt = file.properties.last_modified
self._size = file.properties.content_length
self._md5 = file.properties.content_settings.content_md5
self._cache_control = file.properties.content_settings.cache_control
self._mode = StorageModes.File
self._client = sa.file_client | [
"def",
"populate_from_file",
"(",
"self",
",",
"sa",
",",
"file",
",",
"path",
",",
"vio",
"=",
"None",
",",
"store_raw_metadata",
"=",
"False",
",",
"snapshot",
"=",
"None",
")",
":",
"# type: (StorageEntity, blobxfer.operations.azure.StorageAccount,",
"# azure.storage.file.models.File, str,",
"# blobxfer.models.metadata.VectoredStripe, bool, str) -> None",
"if",
"store_raw_metadata",
":",
"self",
".",
"_raw_metadata",
"=",
"file",
".",
"metadata",
"else",
":",
"self",
".",
"_fileattr",
"=",
"blobxfer",
".",
"models",
".",
"metadata",
".",
"fileattr_from_metadata",
"(",
"file",
".",
"metadata",
")",
"self",
".",
"_vio",
"=",
"vio",
"self",
".",
"_can_create_containers",
"=",
"sa",
".",
"can_create_containers",
"if",
"path",
"is",
"not",
"None",
":",
"self",
".",
"_name",
"=",
"str",
"(",
"pathlib",
".",
"Path",
"(",
"path",
")",
"/",
"file",
".",
"name",
")",
"else",
":",
"self",
".",
"_name",
"=",
"file",
".",
"name",
"self",
".",
"_snapshot",
"=",
"snapshot",
"self",
".",
"_lmt",
"=",
"file",
".",
"properties",
".",
"last_modified",
"self",
".",
"_size",
"=",
"file",
".",
"properties",
".",
"content_length",
"self",
".",
"_md5",
"=",
"file",
".",
"properties",
".",
"content_settings",
".",
"content_md5",
"self",
".",
"_cache_control",
"=",
"file",
".",
"properties",
".",
"content_settings",
".",
"cache_control",
"self",
".",
"_mode",
"=",
"StorageModes",
".",
"File",
"self",
".",
"_client",
"=",
"sa",
".",
"file_client"
] | Populate properties from File
:param StorageEntity self: this
:param blobxfer.operations.azure.StorageAccount sa: storage account
:param azure.storage.file.models.File file: file to populate from
:param str path: full path to file
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool store_raw_metadata: store raw metadata
:param str snapshot: snapshot | [
"Populate",
"properties",
"from",
"File",
":",
"param",
"StorageEntity",
"self",
":",
"this",
":",
"param",
"blobxfer",
".",
"operations",
".",
"azure",
".",
"StorageAccount",
"sa",
":",
"storage",
"account",
":",
"param",
"azure",
".",
"storage",
".",
"file",
".",
"models",
".",
"File",
"file",
":",
"file",
"to",
"populate",
"from",
":",
"param",
"str",
"path",
":",
"full",
"path",
"to",
"file",
":",
"param",
"blobxfer",
".",
"models",
".",
"metadata",
".",
"VectoredStripe",
"vio",
":",
"Vectored",
"stripe",
":",
"param",
"bool",
"store_raw_metadata",
":",
"store",
"raw",
"metadata",
":",
"param",
"str",
"snapshot",
":",
"snapshot"
] | python | train |
StackStorm/pybind | pybind/nos/v6_0_2f/interface/tengigabitethernet/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/tengigabitethernet/__init__.py#L1082-L1106 | def _set_dot1x(self, v, load=False):
"""
Setter method for dot1x, mapped from YANG variable /interface/tengigabitethernet/dot1x (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_dot1x is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dot1x() directly.
YANG Description: This provides grouping of all the dot1x configuration
elements.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=dot1x.dot1x, is_container='container', presence=False, yang_name="dot1x", rest_name="dot1x", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IEEE 802.1X Port-Based Access Control', u'cli-incomplete-no': None, u'callpoint': u'dot1x_callpoint', u'sort-priority': u'105'}}, namespace='urn:brocade.com:mgmt:brocade-dot1x', defining_module='brocade-dot1x', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dot1x must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=dot1x.dot1x, is_container='container', presence=False, yang_name="dot1x", rest_name="dot1x", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IEEE 802.1X Port-Based Access Control', u'cli-incomplete-no': None, u'callpoint': u'dot1x_callpoint', u'sort-priority': u'105'}}, namespace='urn:brocade.com:mgmt:brocade-dot1x', defining_module='brocade-dot1x', yang_type='container', is_config=True)""",
})
self.__dot1x = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_dot1x",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"dot1x",
".",
"dot1x",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"dot1x\"",
",",
"rest_name",
"=",
"\"dot1x\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'IEEE 802.1X Port-Based Access Control'",
",",
"u'cli-incomplete-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'dot1x_callpoint'",
",",
"u'sort-priority'",
":",
"u'105'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-dot1x'",
",",
"defining_module",
"=",
"'brocade-dot1x'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"dot1x must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=dot1x.dot1x, is_container='container', presence=False, yang_name=\"dot1x\", rest_name=\"dot1x\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IEEE 802.1X Port-Based Access Control', u'cli-incomplete-no': None, u'callpoint': u'dot1x_callpoint', u'sort-priority': u'105'}}, namespace='urn:brocade.com:mgmt:brocade-dot1x', defining_module='brocade-dot1x', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__dot1x",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for dot1x, mapped from YANG variable /interface/tengigabitethernet/dot1x (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_dot1x is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dot1x() directly.
YANG Description: This provides grouping of all the dot1x configuration
elements. | [
"Setter",
"method",
"for",
"dot1x",
"mapped",
"from",
"YANG",
"variable",
"/",
"interface",
"/",
"tengigabitethernet",
"/",
"dot1x",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_dot1x",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_dot1x",
"()",
"directly",
"."
] | python | train |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L2790-L2815 | def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
try:
data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() )
except PEFormatError, excp:
# A warning will be added by the caller if this method returns None
return None
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource | [
"def",
"parse_resource_entry",
"(",
"self",
",",
"rva",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"get_data",
"(",
"rva",
",",
"Structure",
"(",
"self",
".",
"__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__",
")",
".",
"sizeof",
"(",
")",
")",
"except",
"PEFormatError",
",",
"excp",
":",
"# A warning will be added by the caller if this method returns None",
"return",
"None",
"resource",
"=",
"self",
".",
"__unpack_data__",
"(",
"self",
".",
"__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__",
",",
"data",
",",
"file_offset",
"=",
"self",
".",
"get_offset_from_rva",
"(",
"rva",
")",
")",
"if",
"resource",
"is",
"None",
":",
"return",
"None",
"#resource.NameIsString = (resource.Name & 0x80000000L) >> 31",
"resource",
".",
"NameOffset",
"=",
"resource",
".",
"Name",
"&",
"0x7FFFFFFFL",
"resource",
".",
"__pad",
"=",
"resource",
".",
"Name",
"&",
"0xFFFF0000L",
"resource",
".",
"Id",
"=",
"resource",
".",
"Name",
"&",
"0x0000FFFFL",
"resource",
".",
"DataIsDirectory",
"=",
"(",
"resource",
".",
"OffsetToData",
"&",
"0x80000000L",
")",
">>",
"31",
"resource",
".",
"OffsetToDirectory",
"=",
"resource",
".",
"OffsetToData",
"&",
"0x7FFFFFFFL",
"return",
"resource"
] | Parse a directory entry from the resources directory. | [
"Parse",
"a",
"directory",
"entry",
"from",
"the",
"resources",
"directory",
"."
] | python | train |
datastax/python-driver | cassandra/cluster.py | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L1283-L1289 | def connection_factory(self, endpoint, *args, **kwargs):
"""
Called to create a new connection with proper configuration.
Intended for internal use only.
"""
kwargs = self._make_connection_kwargs(endpoint, kwargs)
return self.connection_class.factory(endpoint, self.connect_timeout, *args, **kwargs) | [
"def",
"connection_factory",
"(",
"self",
",",
"endpoint",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"self",
".",
"_make_connection_kwargs",
"(",
"endpoint",
",",
"kwargs",
")",
"return",
"self",
".",
"connection_class",
".",
"factory",
"(",
"endpoint",
",",
"self",
".",
"connect_timeout",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Called to create a new connection with proper configuration.
Intended for internal use only. | [
"Called",
"to",
"create",
"a",
"new",
"connection",
"with",
"proper",
"configuration",
".",
"Intended",
"for",
"internal",
"use",
"only",
"."
] | python | train |
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/bakery/_checker.py | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/bakery/_checker.py#L183-L212 | def allow(self, ctx, ops):
''' Checks that the authorizer's request is authorized to
perform all the given operations. Note that allow does not check
first party caveats - if there is more than one macaroon that may
authorize the request, it will choose the first one that does
regardless.
If all the operations are allowed, an AuthInfo is returned holding
details of the decision and any first party caveats that must be
checked before actually executing any operation.
If operations include LOGIN_OP, the request should contain an
authentication macaroon proving the client's identity. Once an
authentication macaroon is chosen, it will be used for all other
authorization requests.
If an operation was not allowed, an exception will be raised which may
be:
- DischargeRequiredError holding the operations that remain to
be authorized in order to allow authorization to proceed
- PermissionDenied when no operations can be authorized and there's
no third party to discharge macaroons for.
@param ctx AuthContext
@param ops an array of Op
:return: an AuthInfo object.
'''
auth_info, _ = self.allow_any(ctx, ops)
return auth_info | [
"def",
"allow",
"(",
"self",
",",
"ctx",
",",
"ops",
")",
":",
"auth_info",
",",
"_",
"=",
"self",
".",
"allow_any",
"(",
"ctx",
",",
"ops",
")",
"return",
"auth_info"
] | Checks that the authorizer's request is authorized to
perform all the given operations. Note that allow does not check
first party caveats - if there is more than one macaroon that may
authorize the request, it will choose the first one that does
regardless.
If all the operations are allowed, an AuthInfo is returned holding
details of the decision and any first party caveats that must be
checked before actually executing any operation.
If operations include LOGIN_OP, the request should contain an
authentication macaroon proving the client's identity. Once an
authentication macaroon is chosen, it will be used for all other
authorization requests.
If an operation was not allowed, an exception will be raised which may
be:
- DischargeRequiredError holding the operations that remain to
be authorized in order to allow authorization to proceed
- PermissionDenied when no operations can be authorized and there's
no third party to discharge macaroons for.
@param ctx AuthContext
@param ops an array of Op
:return: an AuthInfo object. | [
"Checks",
"that",
"the",
"authorizer",
"s",
"request",
"is",
"authorized",
"to",
"perform",
"all",
"the",
"given",
"operations",
".",
"Note",
"that",
"allow",
"does",
"not",
"check",
"first",
"party",
"caveats",
"-",
"if",
"there",
"is",
"more",
"than",
"one",
"macaroon",
"that",
"may",
"authorize",
"the",
"request",
"it",
"will",
"choose",
"the",
"first",
"one",
"that",
"does",
"regardless",
"."
] | python | train |
bitesofcode/projexui | projexui/configs/xschemeconfig.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/configs/xschemeconfig.py#L113-L121 | def reset( self ):
"""
Resets the colors to the default settings.
"""
dataSet = self.dataSet()
if ( not dataSet ):
dataSet = XScheme()
dataSet.reset() | [
"def",
"reset",
"(",
"self",
")",
":",
"dataSet",
"=",
"self",
".",
"dataSet",
"(",
")",
"if",
"(",
"not",
"dataSet",
")",
":",
"dataSet",
"=",
"XScheme",
"(",
")",
"dataSet",
".",
"reset",
"(",
")"
] | Resets the colors to the default settings. | [
"Resets",
"the",
"colors",
"to",
"the",
"default",
"settings",
"."
] | python | train |
72squared/redpipe | redpipe/keyspaces.py | https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L2043-L2055 | def hincrby(self, name, key, amount=1):
"""
Increment the value of the field.
:param name: str the name of the redis key
:param key: str
:param amount: int
:return: Future()
"""
with self.pipe as pipe:
return pipe.hincrby(self.redis_key(name),
self.memberparse.encode(key),
amount) | [
"def",
"hincrby",
"(",
"self",
",",
"name",
",",
"key",
",",
"amount",
"=",
"1",
")",
":",
"with",
"self",
".",
"pipe",
"as",
"pipe",
":",
"return",
"pipe",
".",
"hincrby",
"(",
"self",
".",
"redis_key",
"(",
"name",
")",
",",
"self",
".",
"memberparse",
".",
"encode",
"(",
"key",
")",
",",
"amount",
")"
] | Increment the value of the field.
:param name: str the name of the redis key
:param key: str
:param amount: int
:return: Future() | [
"Increment",
"the",
"value",
"of",
"the",
"field",
"."
] | python | train |
DLR-RM/RAFCON | source/rafcon/gui/models/container_state.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/container_state.py#L72-L86 | def _load_child_state_models(self, load_meta_data):
"""Adds models for each child state of the state
:param bool load_meta_data: Whether to load the meta data of the child state
"""
self.states = {}
# Create model for each child class
child_states = self.state.states
for child_state in child_states.values():
# Create hierarchy
model_class = get_state_model_class_for_state(child_state)
if model_class is not None:
self._add_model(self.states, child_state, model_class, child_state.state_id, load_meta_data)
else:
logger.error("Unknown state type '{type:s}'. Cannot create model.".format(type=type(child_state))) | [
"def",
"_load_child_state_models",
"(",
"self",
",",
"load_meta_data",
")",
":",
"self",
".",
"states",
"=",
"{",
"}",
"# Create model for each child class",
"child_states",
"=",
"self",
".",
"state",
".",
"states",
"for",
"child_state",
"in",
"child_states",
".",
"values",
"(",
")",
":",
"# Create hierarchy",
"model_class",
"=",
"get_state_model_class_for_state",
"(",
"child_state",
")",
"if",
"model_class",
"is",
"not",
"None",
":",
"self",
".",
"_add_model",
"(",
"self",
".",
"states",
",",
"child_state",
",",
"model_class",
",",
"child_state",
".",
"state_id",
",",
"load_meta_data",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"Unknown state type '{type:s}'. Cannot create model.\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"child_state",
")",
")",
")"
] | Adds models for each child state of the state
:param bool load_meta_data: Whether to load the meta data of the child state | [
"Adds",
"models",
"for",
"each",
"child",
"state",
"of",
"the",
"state"
] | python | train |
Erotemic/utool | utool/util_progress.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L808-L821 | def _get_timethresh_heuristics(self):
"""
reasonably decent heuristics for how much time to wait before
updating progress.
"""
if self.length > 1E5:
time_thresh = 2.5
elif self.length > 1E4:
time_thresh = 2.0
elif self.length > 1E3:
time_thresh = 1.0
else:
time_thresh = 0.5
return time_thresh | [
"def",
"_get_timethresh_heuristics",
"(",
"self",
")",
":",
"if",
"self",
".",
"length",
">",
"1E5",
":",
"time_thresh",
"=",
"2.5",
"elif",
"self",
".",
"length",
">",
"1E4",
":",
"time_thresh",
"=",
"2.0",
"elif",
"self",
".",
"length",
">",
"1E3",
":",
"time_thresh",
"=",
"1.0",
"else",
":",
"time_thresh",
"=",
"0.5",
"return",
"time_thresh"
] | reasonably decent heuristics for how much time to wait before
updating progress. | [
"resonably",
"decent",
"hueristics",
"for",
"how",
"much",
"time",
"to",
"wait",
"before",
"updating",
"progress",
"."
] | python | train |
DAI-Lab/Copulas | copulas/__init__.py | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/__init__.py#L56-L112 | def vectorize(function):
"""Allow a method that only accepts scalars to accept vectors too.
This decorator has two different behaviors depending on the dimensionality of the
array passed as an argument:
**1-d array**
It will work under the assumption that the `function` argument is a callable
with signature::
function(self, X, *args, **kwargs)
where X is a scalar magnitude.
In this case the arguments of the input array will be given one at a time, and
both the input and output of the decorated function will have shape (n,).
**2-d array**
It will work under the assumption that the `function` argument is a callable with signature::
function(self, X0, ..., Xj, *args, **kwargs)
where `Xi` are scalar magnitudes.
It will pass the contents of each row unpacked on each call. The input is expected to have
shape (n, j), the output a shape of (n,)
It will return a function that is guaranteed to return a `numpy.array`.
Args:
function(callable): Function that only accepts and returns scalars.
Returns:
callable: Decorated function that can accept and return :attr:`numpy.array`.
"""
def decorated(self, X, *args, **kwargs):
if not isinstance(X, np.ndarray):
return function(self, X, *args, **kwargs)
if len(X.shape) == 1:
X = X.reshape([-1, 1])
if len(X.shape) == 2:
return np.fromiter(
(function(self, *x, *args, **kwargs) for x in X),
np.dtype('float64')
)
else:
raise ValueError('Arrays of dimensionality higher than 2 are not supported.')
decorated.__doc__ = function.__doc__
return decorated | [
"def",
"vectorize",
"(",
"function",
")",
":",
"def",
"decorated",
"(",
"self",
",",
"X",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"function",
"(",
"self",
",",
"X",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
":",
"X",
"=",
"X",
".",
"reshape",
"(",
"[",
"-",
"1",
",",
"1",
"]",
")",
"if",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
":",
"return",
"np",
".",
"fromiter",
"(",
"(",
"function",
"(",
"self",
",",
"*",
"x",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"x",
"in",
"X",
")",
",",
"np",
".",
"dtype",
"(",
"'float64'",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Arrays of dimensionality higher than 2 are not supported.'",
")",
"decorated",
".",
"__doc__",
"=",
"function",
".",
"__doc__",
"return",
"decorated"
] | Allow a method that only accepts scalars to accept vectors too.
This decorator has two different behaviors depending on the dimensionality of the
array passed as an argument:
**1-d array**
It will work under the assumption that the `function` argument is a callable
with signature::
function(self, X, *args, **kwargs)
where X is a scalar magnitude.
In this case the arguments of the input array will be given one at a time, and
both the input and output of the decorated function will have shape (n,).
**2-d array**
It will work under the assumption that the `function` argument is a callable with signature::
function(self, X0, ..., Xj, *args, **kwargs)
where `Xi` are scalar magnitudes.
It will pass the contents of each row unpacked on each call. The input is expected to have
shape (n, j), the output a shape of (n,)
It will return a function that is guaranteed to return a `numpy.array`.
Args:
function(callable): Function that only accepts and returns scalars.
Returns:
callable: Decorated function that can accept and return :attr:`numpy.array`. | [
"Allow",
"a",
"method",
"that",
"only",
"accepts",
"scalars",
"to",
"accept",
"vectors",
"too",
"."
] | python | train |
ael-code/pyFsdb | fsdb/config.py | https://github.com/ael-code/pyFsdb/blob/de33a0d41373307cb32cdd7ba1991b85ff495ee3/fsdb/config.py#L57-L62 | def to_json_format(conf):
'''Convert fields of a python dictionary to be dumped in json format'''
if 'fmode' in conf:
conf['fmode'] = oct(conf['fmode'])[-3:]
if 'dmode' in conf:
conf['dmode'] = oct(conf['dmode'])[-3:] | [
"def",
"to_json_format",
"(",
"conf",
")",
":",
"if",
"'fmode'",
"in",
"conf",
":",
"conf",
"[",
"'fmode'",
"]",
"=",
"oct",
"(",
"conf",
"[",
"'fmode'",
"]",
")",
"[",
"-",
"3",
":",
"]",
"if",
"'dmode'",
"in",
"conf",
":",
"conf",
"[",
"'dmode'",
"]",
"=",
"oct",
"(",
"conf",
"[",
"'dmode'",
"]",
")",
"[",
"-",
"3",
":",
"]"
] | Convert fields of a python dictionary to be dumped in json format | [
"Convert",
"fields",
"of",
"a",
"python",
"dictionary",
"to",
"be",
"dumped",
"in",
"json",
"format"
] | python | train |
pyamg/pyamg | pyamg/amg_core/bindthem.py | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/amg_core/bindthem.py#L193-L306 | def build_plugin(headerfile, ch, comments, inst, remaps):
"""
Take a header file (headerfile) and a parse tree (ch)
and build the pybind11 plugin
headerfile: somefile.h
ch: parse tree from CppHeaderParser
comments: a dictionary of comments
inst: files to instantiate
remaps: list of remaps
"""
headerfilename = os.path.splitext(headerfile)[0]
indent = ' '
plugin = ''
# plugin += '#define NC py::arg().noconvert()\n'
# plugin += '#define YC py::arg()\n'
plugin += 'PYBIND11_MODULE({}, m) {{\n'.format(headerfilename)
plugin += indent + 'm.doc() = R"pbdoc(\n'
plugin += indent + 'Pybind11 bindings for {}\n\n'.format(headerfile)
plugin += indent + 'Methods\n'
plugin += indent + '-------\n'
for f in ch.functions:
for func in inst:
if f['name'] in func['functions']:
plugin += indent + f['name'] + '\n'
plugin += indent + ')pbdoc";\n\n'
plugin += indent + 'py::options options;\n'
plugin += indent + 'options.disable_function_signatures();\n\n'
unbound = []
bound = []
for f in ch.functions:
# for each function:
# - find the entry in the instantiation list
# - note any array parameters to the function
# - for each type, instantiate
found = False
for func in inst:
if f['name'] in func['functions']:
found = True
types = func['types']
if not found:
# print('Could not find {}'.format(f['name']))
unbound.append(f['name'])
continue
else:
bound.append(f['name'])
# find all parameter names and mark if array
argnames = []
for p in f['parameters']:
array = False
if p['pointer'] or p['array']:
array = True
# skip "_size" parameters
if '_size' in p['name']:
continue
else:
argnames.append((p['name'], array))
ntypes = len(types)
for i, t in enumerate(types):
# add the function call with each template
instname = f['name']
# check the remaps
for remap in remaps:
if f['name'] in remap:
instname = remap[f['name']]
if t is not None:
# templated function
typestr = '<' + ', '.join(t) + '>'
else:
# not a templated function
typestr = ''
plugin += indent + \
'm.def("{}", &_{}{},\n'.format(instname, f['name'], typestr)
# name the arguments
pyargnames = []
for p, array in argnames:
convert = ''
if array:
convert = '.noconvert()'
pyargnames.append('py::arg("{}"){}'.format(p, convert))
argstring = indent + ', '.join(pyargnames)
plugin += indent + argstring
# add the docstring to the last
if i == ntypes - 1:
plugin += ',\nR"pbdoc(\n{})pbdoc");\n'.format(
comments[f['name']])
else:
plugin += ');\n'
plugin += '\n'
plugin += '}\n'
# plugin += '#undef NC\n'
# plugin += '#undef YC\n'
return plugin, bound, unbound | [
"def",
"build_plugin",
"(",
"headerfile",
",",
"ch",
",",
"comments",
",",
"inst",
",",
"remaps",
")",
":",
"headerfilename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"headerfile",
")",
"[",
"0",
"]",
"indent",
"=",
"' '",
"plugin",
"=",
"''",
"# plugin += '#define NC py::arg().noconvert()\\n'",
"# plugin += '#define YC py::arg()\\n'",
"plugin",
"+=",
"'PYBIND11_MODULE({}, m) {{\\n'",
".",
"format",
"(",
"headerfilename",
")",
"plugin",
"+=",
"indent",
"+",
"'m.doc() = R\"pbdoc(\\n'",
"plugin",
"+=",
"indent",
"+",
"'Pybind11 bindings for {}\\n\\n'",
".",
"format",
"(",
"headerfile",
")",
"plugin",
"+=",
"indent",
"+",
"'Methods\\n'",
"plugin",
"+=",
"indent",
"+",
"'-------\\n'",
"for",
"f",
"in",
"ch",
".",
"functions",
":",
"for",
"func",
"in",
"inst",
":",
"if",
"f",
"[",
"'name'",
"]",
"in",
"func",
"[",
"'functions'",
"]",
":",
"plugin",
"+=",
"indent",
"+",
"f",
"[",
"'name'",
"]",
"+",
"'\\n'",
"plugin",
"+=",
"indent",
"+",
"')pbdoc\";\\n\\n'",
"plugin",
"+=",
"indent",
"+",
"'py::options options;\\n'",
"plugin",
"+=",
"indent",
"+",
"'options.disable_function_signatures();\\n\\n'",
"unbound",
"=",
"[",
"]",
"bound",
"=",
"[",
"]",
"for",
"f",
"in",
"ch",
".",
"functions",
":",
"# for each function:",
"# - find the entry in the instantiation list",
"# - note any array parameters to the function",
"# - for each type, instantiate",
"found",
"=",
"False",
"for",
"func",
"in",
"inst",
":",
"if",
"f",
"[",
"'name'",
"]",
"in",
"func",
"[",
"'functions'",
"]",
":",
"found",
"=",
"True",
"types",
"=",
"func",
"[",
"'types'",
"]",
"if",
"not",
"found",
":",
"# print('Could not find {}'.format(f['name']))",
"unbound",
".",
"append",
"(",
"f",
"[",
"'name'",
"]",
")",
"continue",
"else",
":",
"bound",
".",
"append",
"(",
"f",
"[",
"'name'",
"]",
")",
"# find all parameter names and mark if array",
"argnames",
"=",
"[",
"]",
"for",
"p",
"in",
"f",
"[",
"'parameters'",
"]",
":",
"array",
"=",
"False",
"if",
"p",
"[",
"'pointer'",
"]",
"or",
"p",
"[",
"'array'",
"]",
":",
"array",
"=",
"True",
"# skip \"_size\" parameters",
"if",
"'_size'",
"in",
"p",
"[",
"'name'",
"]",
":",
"continue",
"else",
":",
"argnames",
".",
"append",
"(",
"(",
"p",
"[",
"'name'",
"]",
",",
"array",
")",
")",
"ntypes",
"=",
"len",
"(",
"types",
")",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"types",
")",
":",
"# add the function call with each template",
"instname",
"=",
"f",
"[",
"'name'",
"]",
"# check the remaps",
"for",
"remap",
"in",
"remaps",
":",
"if",
"f",
"[",
"'name'",
"]",
"in",
"remap",
":",
"instname",
"=",
"remap",
"[",
"f",
"[",
"'name'",
"]",
"]",
"if",
"t",
"is",
"not",
"None",
":",
"# templated function",
"typestr",
"=",
"'<'",
"+",
"', '",
".",
"join",
"(",
"t",
")",
"+",
"'>'",
"else",
":",
"# not a templated function",
"typestr",
"=",
"''",
"plugin",
"+=",
"indent",
"+",
"'m.def(\"{}\", &_{}{},\\n'",
".",
"format",
"(",
"instname",
",",
"f",
"[",
"'name'",
"]",
",",
"typestr",
")",
"# name the arguments",
"pyargnames",
"=",
"[",
"]",
"for",
"p",
",",
"array",
"in",
"argnames",
":",
"convert",
"=",
"''",
"if",
"array",
":",
"convert",
"=",
"'.noconvert()'",
"pyargnames",
".",
"append",
"(",
"'py::arg(\"{}\"){}'",
".",
"format",
"(",
"p",
",",
"convert",
")",
")",
"argstring",
"=",
"indent",
"+",
"', '",
".",
"join",
"(",
"pyargnames",
")",
"plugin",
"+=",
"indent",
"+",
"argstring",
"# add the docstring to the last",
"if",
"i",
"==",
"ntypes",
"-",
"1",
":",
"plugin",
"+=",
"',\\nR\"pbdoc(\\n{})pbdoc\");\\n'",
".",
"format",
"(",
"comments",
"[",
"f",
"[",
"'name'",
"]",
"]",
")",
"else",
":",
"plugin",
"+=",
"');\\n'",
"plugin",
"+=",
"'\\n'",
"plugin",
"+=",
"'}\\n'",
"# plugin += '#undef NC\\n'",
"# plugin += '#undef YC\\n'",
"return",
"plugin",
",",
"bound",
",",
"unbound"
] | Take a header file (headerfile) and a parse tree (ch)
and build the pybind11 plugin
headerfile: somefile.h
ch: parse tree from CppHeaderParser
comments: a dictionary of comments
inst: files to instantiate
remaps: list of remaps | [
"Take",
"a",
"header",
"file",
"(",
"headerfile",
")",
"and",
"a",
"parse",
"tree",
"(",
"ch",
")",
"and",
"build",
"the",
"pybind11",
"plugin"
] | python | train |
nugget/python-insteonplm | insteonplm/states/dimmable.py | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/dimmable.py#L193-L205 | def set_level(self, val):
"""Set the devive ON LEVEL."""
if val == 0:
self.off()
else:
setlevel = 255
if val < 1:
setlevel = val * 100
elif val <= 0xff:
setlevel = val
set_command = StandardSend(
self._address, COMMAND_LIGHT_ON_0X11_NONE, cmd2=setlevel)
self._send_method(set_command, self._on_message_received) | [
"def",
"set_level",
"(",
"self",
",",
"val",
")",
":",
"if",
"val",
"==",
"0",
":",
"self",
".",
"off",
"(",
")",
"else",
":",
"setlevel",
"=",
"255",
"if",
"val",
"<",
"1",
":",
"setlevel",
"=",
"val",
"*",
"100",
"elif",
"val",
"<=",
"0xff",
":",
"setlevel",
"=",
"val",
"set_command",
"=",
"StandardSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_ON_0X11_NONE",
",",
"cmd2",
"=",
"setlevel",
")",
"self",
".",
"_send_method",
"(",
"set_command",
",",
"self",
".",
"_on_message_received",
")"
] | Set the device ON LEVEL. | [
"Set",
"the",
"devive",
"ON",
"LEVEL",
"."
] | python | train |
OLC-Bioinformatics/sipprverse | method.py | https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/method.py#L228-L261 | def complete(self):
"""
Determine if the analyses of the strains are complete e.g. there are no missing GDCS genes, and the
sample.general.bestassemblyfile != 'NA'
"""
# Boolean to store the completeness of the analyses
allcomplete = True
# Clear the list of samples that still require more sequence data
self.incomplete = list()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
try:
# If the sample has been tagged as incomplete, only add it to the complete metadata list if the
# pipeline is on its final iteration
if sample.general.incomplete:
if self.final:
self.completemetadata.append(sample)
else:
sample.general.complete = False
allcomplete = False
self.incomplete.append(sample.name)
except AttributeError:
sample.general.complete = True
self.completemetadata.append(sample)
else:
if self.final:
self.completemetadata.append(sample)
else:
sample.general.complete = False
allcomplete = False
self.incomplete.append(sample.name)
# If all the samples are complete, set the global variable for run completeness to True
if allcomplete:
self.analysescomplete = True | [
"def",
"complete",
"(",
"self",
")",
":",
"# Boolean to store the completeness of the analyses",
"allcomplete",
"=",
"True",
"# Clear the list of samples that still require more sequence data",
"self",
".",
"incomplete",
"=",
"list",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"try",
":",
"# If the sample has been tagged as incomplete, only add it to the complete metadata list if the",
"# pipeline is on its final iteration",
"if",
"sample",
".",
"general",
".",
"incomplete",
":",
"if",
"self",
".",
"final",
":",
"self",
".",
"completemetadata",
".",
"append",
"(",
"sample",
")",
"else",
":",
"sample",
".",
"general",
".",
"complete",
"=",
"False",
"allcomplete",
"=",
"False",
"self",
".",
"incomplete",
".",
"append",
"(",
"sample",
".",
"name",
")",
"except",
"AttributeError",
":",
"sample",
".",
"general",
".",
"complete",
"=",
"True",
"self",
".",
"completemetadata",
".",
"append",
"(",
"sample",
")",
"else",
":",
"if",
"self",
".",
"final",
":",
"self",
".",
"completemetadata",
".",
"append",
"(",
"sample",
")",
"else",
":",
"sample",
".",
"general",
".",
"complete",
"=",
"False",
"allcomplete",
"=",
"False",
"self",
".",
"incomplete",
".",
"append",
"(",
"sample",
".",
"name",
")",
"# If all the samples are complete, set the global variable for run completeness to True",
"if",
"allcomplete",
":",
"self",
".",
"analysescomplete",
"=",
"True"
] | Determine if the analyses of the strains are complete e.g. there are no missing GDCS genes, and the
sample.general.bestassemblyfile != 'NA' | [
"Determine",
"if",
"the",
"analyses",
"of",
"the",
"strains",
"are",
"complete",
"e",
".",
"g",
".",
"there",
"are",
"no",
"missing",
"GDCS",
"genes",
"and",
"the",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!",
"=",
"NA"
] | python | train |
authomatic/authomatic | authomatic/core.py | https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/core.py#L212-L228 | def id_to_name(config, short_name):
"""
Returns the provider :doc:`config` key based on it's ``id`` value.
:param dict config:
:doc:`config`.
:param id:
Value of the id parameter in the :ref:`config` to search for.
"""
for k, v in list(config.items()):
if v.get('id') == short_name:
return k
raise Exception(
'No provider with id={0} found in the config!'.format(short_name)) | [
"def",
"id_to_name",
"(",
"config",
",",
"short_name",
")",
":",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
":",
"if",
"v",
".",
"get",
"(",
"'id'",
")",
"==",
"short_name",
":",
"return",
"k",
"raise",
"Exception",
"(",
"'No provider with id={0} found in the config!'",
".",
"format",
"(",
"short_name",
")",
")"
] | Returns the provider :doc:`config` key based on its ``id`` value.
:param dict config:
:doc:`config`.
:param id:
Value of the id parameter in the :ref:`config` to search for. | [
"Returns",
"the",
"provider",
":",
"doc",
":",
"config",
"key",
"based",
"on",
"it",
"s",
"id",
"value",
"."
] | python | test |
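The record above documents `id_to_name`, which resolves a provider's numeric `id` back to its key in the authomatic config. A minimal usage sketch follows; the provider names, ids, and credential placeholders in the config dict are illustrative assumptions, not values from the record.

from authomatic.core import id_to_name

# Hypothetical authomatic config: each provider entry carries a short numeric 'id'
# alongside its OAuth credentials (the credentials here are placeholders).
config = {
    'facebook': {'id': 1, 'consumer_key': 'key-1', 'consumer_secret': 'secret-1'},
    'twitter': {'id': 2, 'consumer_key': 'key-2', 'consumer_secret': 'secret-2'},
}

# Resolve the config key for the provider whose id is 2.
print(id_to_name(config, 2))  # -> 'twitter'

# Asking for an id that is not present raises Exception, as the implementation above shows.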
Cito/DBUtils | DBUtils/SteadyDB.py | https://github.com/Cito/DBUtils/blob/90e8825e038f08c82044b8e50831480175fa026a/DBUtils/SteadyDB.py#L111-L139 | def connect(
creator, maxusage=None, setsession=None,
failures=None, ping=1, closeable=True, *args, **kwargs):
"""A tough version of the connection constructor of a DB-API 2 module.
creator: either an arbitrary function returning new DB-API 2 compliant
connection objects or a DB-API 2 compliant database module
maxusage: maximum usage limit for the underlying DB-API 2 connection
(number of database operations, 0 or None means unlimited usage)
callproc(), execute() and executemany() count as one operation.
When the limit is reached, the connection is automatically reset.
setsession: an optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to german", "set time zone mez"]
failures: an optional exception class or a tuple of exception classes
for which the failover mechanism shall be applied, if the default
(OperationalError, InternalError) is not adequate
ping: determines when the connection should be checked with ping()
(0 = None = never, 1 = default = when _ping_check() is called,
2 = whenever a cursor is created, 4 = when a query is executed,
7 = always, and all other bit combinations of these values)
closeable: if this is set to false, then closing the connection will
be silently ignored, but by default the connection can be closed
args, kwargs: the parameters that shall be passed to the creator
function or the connection constructor of the DB-API 2 module
"""
return SteadyDBConnection(
creator, maxusage, setsession,
failures, ping, closeable, *args, **kwargs) | [
"def",
"connect",
"(",
"creator",
",",
"maxusage",
"=",
"None",
",",
"setsession",
"=",
"None",
",",
"failures",
"=",
"None",
",",
"ping",
"=",
"1",
",",
"closeable",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"SteadyDBConnection",
"(",
"creator",
",",
"maxusage",
",",
"setsession",
",",
"failures",
",",
"ping",
",",
"closeable",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | A tough version of the connection constructor of a DB-API 2 module.
creator: either an arbitrary function returning new DB-API 2 compliant
connection objects or a DB-API 2 compliant database module
maxusage: maximum usage limit for the underlying DB-API 2 connection
(number of database operations, 0 or None means unlimited usage)
callproc(), execute() and executemany() count as one operation.
When the limit is reached, the connection is automatically reset.
setsession: an optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to german", "set time zone mez"]
failures: an optional exception class or a tuple of exception classes
for which the failover mechanism shall be applied, if the default
(OperationalError, InternalError) is not adequate
ping: determines when the connection should be checked with ping()
(0 = None = never, 1 = default = when _ping_check() is called,
2 = whenever a cursor is created, 4 = when a query is executed,
7 = always, and all other bit combinations of these values)
closeable: if this is set to false, then closing the connection will
be silently ignored, but by default the connection can be closed
args, kwargs: the parameters that shall be passed to the creator
function or the connection constructor of the DB-API 2 module | [
"A",
"tough",
"version",
"of",
"the",
"connection",
"constructor",
"of",
"a",
"DB",
"-",
"API",
"2",
"module",
"."
] | python | train |
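The `connect()` docstring in the record above reads as a parameter reference, so a short usage sketch may help. It assumes PyMySQL as the DB-API 2 module and placeholder connection credentials; neither comes from the record.

import pymysql  # any DB-API 2 compliant module can serve as the creator
from DBUtils.SteadyDB import connect

# Hardened connection: reset after 1000 operations, replay the session setup
# on every (re)connect, and ping the server whenever a cursor is created (ping=2).
db = connect(
    pymysql,
    maxusage=1000,
    setsession=['SET AUTOCOMMIT = 1'],
    ping=2,
    host='localhost', user='demo', password='demo', database='demo',
)

cur = db.cursor()
cur.execute('SELECT 1')
print(cur.fetchone())
cur.close()
db.close()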
twilio/twilio-python | twilio/rest/preview/marketplace/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/marketplace/__init__.py#L29-L35 | def installed_add_ons(self):
"""
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnList
"""
if self._installed_add_ons is None:
self._installed_add_ons = InstalledAddOnList(self)
return self._installed_add_ons | [
"def",
"installed_add_ons",
"(",
"self",
")",
":",
"if",
"self",
".",
"_installed_add_ons",
"is",
"None",
":",
"self",
".",
"_installed_add_ons",
"=",
"InstalledAddOnList",
"(",
"self",
")",
"return",
"self",
".",
"_installed_add_ons"
] | :rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnList | [
":",
"rtype",
":",
"twilio",
".",
"rest",
".",
"preview",
".",
"marketplace",
".",
"installed_add_on",
".",
"InstalledAddOnList"
] | python | train |
mongodb/mongo-python-driver | pymongo/message.py | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L608-L615 | def update(collection_name, upsert, multi, spec,
doc, safe, last_error_args, check_keys, opts, ctx=None):
"""Get an **update** message."""
if ctx:
return _update_compressed(
collection_name, upsert, multi, spec, doc, check_keys, opts, ctx)
return _update_uncompressed(collection_name, upsert, multi, spec,
doc, safe, last_error_args, check_keys, opts) | [
"def",
"update",
"(",
"collection_name",
",",
"upsert",
",",
"multi",
",",
"spec",
",",
"doc",
",",
"safe",
",",
"last_error_args",
",",
"check_keys",
",",
"opts",
",",
"ctx",
"=",
"None",
")",
":",
"if",
"ctx",
":",
"return",
"_update_compressed",
"(",
"collection_name",
",",
"upsert",
",",
"multi",
",",
"spec",
",",
"doc",
",",
"check_keys",
",",
"opts",
",",
"ctx",
")",
"return",
"_update_uncompressed",
"(",
"collection_name",
",",
"upsert",
",",
"multi",
",",
"spec",
",",
"doc",
",",
"safe",
",",
"last_error_args",
",",
"check_keys",
",",
"opts",
")"
] | Get an **update** message. | [
"Get",
"an",
"**",
"update",
"**",
"message",
"."
] | python | train |
quodlibet/mutagen | mutagen/_util.py | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_util.py#L824-L857 | def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
"""Insert size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
Args:
fobj (fileobj)
size (int): The amount of space to insert
offset (int): The offset at which to insert the space
Raises:
IOError
"""
if size < 0 or offset < 0:
raise ValueError
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset
if movesize < 0:
raise ValueError
resize_file(fobj, size, BUFFER_SIZE)
if mmap is not None:
try:
mmap_move(fobj, offset + size, offset, movesize)
except mmap.error:
fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
else:
fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE) | [
"def",
"insert_bytes",
"(",
"fobj",
",",
"size",
",",
"offset",
",",
"BUFFER_SIZE",
"=",
"2",
"**",
"16",
")",
":",
"if",
"size",
"<",
"0",
"or",
"offset",
"<",
"0",
":",
"raise",
"ValueError",
"fobj",
".",
"seek",
"(",
"0",
",",
"2",
")",
"filesize",
"=",
"fobj",
".",
"tell",
"(",
")",
"movesize",
"=",
"filesize",
"-",
"offset",
"if",
"movesize",
"<",
"0",
":",
"raise",
"ValueError",
"resize_file",
"(",
"fobj",
",",
"size",
",",
"BUFFER_SIZE",
")",
"if",
"mmap",
"is",
"not",
"None",
":",
"try",
":",
"mmap_move",
"(",
"fobj",
",",
"offset",
"+",
"size",
",",
"offset",
",",
"movesize",
")",
"except",
"mmap",
".",
"error",
":",
"fallback_move",
"(",
"fobj",
",",
"offset",
"+",
"size",
",",
"offset",
",",
"movesize",
",",
"BUFFER_SIZE",
")",
"else",
":",
"fallback_move",
"(",
"fobj",
",",
"offset",
"+",
"size",
",",
"offset",
",",
"movesize",
",",
"BUFFER_SIZE",
")"
] | Insert size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
Args:
fobj (fileobj)
size (int): The amount of space to insert
offset (int): The offset at which to insert the space
Raises:
IOError | [
"Insert",
"size",
"bytes",
"of",
"empty",
"space",
"starting",
"at",
"offset",
"."
] | python | train |
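Since the `insert_bytes` docstring above spells out its arguments, a small self-contained sketch follows; the scratch file is fabricated here only so the example runs on its own.

import os
import tempfile
from mutagen._util import insert_bytes

# Create a small scratch file to operate on.
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
    f.write(b'ABCDEF')

# Open 'rb+' as required and insert 4 bytes of space at offset 2:
# 'CDEF' is shifted right by 4 bytes; the contents of the gap are unspecified.
with open(path, 'rb+') as f:
    insert_bytes(f, 4, 2)
    f.seek(0)
    print(len(f.read()))  # 10 == original 6 bytes + 4 inserted

os.remove(path)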
airspeed-velocity/asv | asv/extern/asizeof.py | https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L354-L360 | def _derive_typedef(typ):
'''Return single, existing super type typedef or None.
'''
v = [v for v in _values(_typedefs) if _issubclass(typ, v.type)]
if len(v) == 1:
return v[0]
return None | [
"def",
"_derive_typedef",
"(",
"typ",
")",
":",
"v",
"=",
"[",
"v",
"for",
"v",
"in",
"_values",
"(",
"_typedefs",
")",
"if",
"_issubclass",
"(",
"typ",
",",
"v",
".",
"type",
")",
"]",
"if",
"len",
"(",
"v",
")",
"==",
"1",
":",
"return",
"v",
"[",
"0",
"]",
"return",
"None"
] | Return single, existing super type typedef or None. | [
"Return",
"single",
"existing",
"super",
"type",
"typedef",
"or",
"None",
"."
] | python | train |
PyCQA/pylint | pylint/pyreverse/diadefslib.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/diadefslib.py#L75-L79 | def show_node(self, node):
"""true if builtins and not show_builtins"""
if self.config.show_builtin:
return True
return node.root().name != BUILTINS_NAME | [
"def",
"show_node",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"config",
".",
"show_builtin",
":",
"return",
"True",
"return",
"node",
".",
"root",
"(",
")",
".",
"name",
"!=",
"BUILTINS_NAME"
] | true if builtins and not show_builtins | [
"true",
"if",
"builtins",
"and",
"not",
"show_builtins"
] | python | test |
MillionIntegrals/vel | vel/rl/api/rollout.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/rollout.py#L59-L76 | def shuffled_batches(self, batch_size):
""" Generate randomized batches of data """
if batch_size >= self.size:
yield self
else:
batch_splits = math_util.divide_ceiling(self.size, batch_size)
indices = list(range(self.size))
np.random.shuffle(indices)
for sub_indices in np.array_split(indices, batch_splits):
yield Transitions(
size=len(sub_indices),
environment_information=None,
# Dont use it in batches for a moment, can be uncommented later if needed
# environment_information=[info[sub_indices.tolist()] for info in self.environment_information]
transition_tensors={k: v[sub_indices] for k, v in self.transition_tensors.items()}
# extra_data does not go into batches
) | [
"def",
"shuffled_batches",
"(",
"self",
",",
"batch_size",
")",
":",
"if",
"batch_size",
">=",
"self",
".",
"size",
":",
"yield",
"self",
"else",
":",
"batch_splits",
"=",
"math_util",
".",
"divide_ceiling",
"(",
"self",
".",
"size",
",",
"batch_size",
")",
"indices",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"size",
")",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"indices",
")",
"for",
"sub_indices",
"in",
"np",
".",
"array_split",
"(",
"indices",
",",
"batch_splits",
")",
":",
"yield",
"Transitions",
"(",
"size",
"=",
"len",
"(",
"sub_indices",
")",
",",
"environment_information",
"=",
"None",
",",
"# Dont use it in batches for a moment, can be uncommented later if needed",
"# environment_information=[info[sub_indices.tolist()] for info in self.environment_information]",
"transition_tensors",
"=",
"{",
"k",
":",
"v",
"[",
"sub_indices",
"]",
"for",
"k",
",",
"v",
"in",
"self",
".",
"transition_tensors",
".",
"items",
"(",
")",
"}",
"# extra_data does not go into batches",
")"
] | Generate randomized batches of data | [
"Generate",
"randomized",
"batches",
"of",
"data"
] | python | train |
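The record above shows how `shuffled_batches` splits a rollout into randomized minibatches. The sketch below assumes that `shuffled_batches` is a method of the `Transitions` class importable from the same module, and that plain NumPy arrays are acceptable stand-ins for the framework tensors it normally carries; both are assumptions, not facts from the record.

import numpy as np
from vel.rl.api.rollout import Transitions

# Build a toy rollout of 8 transitions; in practice these would be tensors
# produced by an environment runner.
rollout = Transitions(
    size=8,
    environment_information=None,
    transition_tensors={
        'observations': np.random.randn(8, 4),
        'rewards': np.random.randn(8),
    },
)

# Iterate over shuffled minibatches of at most 3 transitions each.
for batch in rollout.shuffled_batches(3):
    print(batch.size, batch.transition_tensors['observations'].shape)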
kodexlab/reliure | reliure/pipeline.py | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/pipeline.py#L164-L172 | def print_options(self):
""" print description of the component options
"""
summary = []
for opt_name, opt in self.options.items():
if opt.hidden:
continue
summary.append(opt.summary())
print("\n".join(summary)) | [
"def",
"print_options",
"(",
"self",
")",
":",
"summary",
"=",
"[",
"]",
"for",
"opt_name",
",",
"opt",
"in",
"self",
".",
"options",
".",
"items",
"(",
")",
":",
"if",
"opt",
".",
"hidden",
":",
"continue",
"summary",
".",
"append",
"(",
"opt",
".",
"summary",
"(",
")",
")",
"print",
"(",
"\"\\n\"",
".",
"join",
"(",
"summary",
")",
")"
] | print description of the component options | [
"print",
"description",
"of",
"the",
"component",
"options"
] | python | train |
UCBerkeleySETI/blimpy | blimpy/calib_utils/calib_plots.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/calib_plots.py#L167-L215 | def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get phase offsets and convert to degrees
coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
#Get X and Y spectra for the noise diode ON and OFF
#If using circular feeds these correspond to LL and RR
XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
if ax1==None:
plt.subplot(211)
else:
axG = plt.axes(ax1)
plt.setp(axG.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
if feedtype=='l':
plt.title('XY Gain Difference')
if feedtype=='c':
plt.title('LR Gain Difference')
plt.grid(True)
if ax2==None:
plt.subplot(212)
else:
axXY = plt.axes(ax2,sharex=axG)
if feedtype=='l':
plt.plot(freqs,XX_OFF,'b-',label='XX')
plt.plot(freqs,YY_OFF,'r-',label='YY')
if feedtype=='c':
plt.plot(freqs,XX_OFF,'b-',label='LL')
plt.plot(freqs,YY_OFF,'r-',label='RR')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
if legend==True:
plt.legend() | [
"def",
"plot_gain_offsets",
"(",
"dio_cross",
",",
"dio_chan_per_coarse",
"=",
"8",
",",
"feedtype",
"=",
"'l'",
",",
"ax1",
"=",
"None",
",",
"ax2",
"=",
"None",
",",
"legend",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get ON-OFF ND spectra",
"Idiff",
",",
"Qdiff",
",",
"Udiff",
",",
"Vdiff",
",",
"freqs",
"=",
"get_diff",
"(",
"dio_cross",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"obs",
"=",
"Waterfall",
"(",
"dio_cross",
",",
"max_load",
"=",
"150",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"data",
"=",
"obs",
".",
"data",
"obs",
"=",
"None",
"I",
",",
"Q",
",",
"U",
",",
"V",
"=",
"get_stokes",
"(",
"data",
",",
"feedtype",
")",
"#Get phase offsets and convert to degrees",
"coarse_G",
"=",
"gain_offsets",
"(",
"I",
",",
"Q",
",",
"U",
",",
"V",
",",
"tsamp",
",",
"dio_chan_per_coarse",
",",
"feedtype",
",",
"*",
"*",
"kwargs",
")",
"coarse_freqs",
"=",
"convert_to_coarse",
"(",
"freqs",
",",
"dio_chan_per_coarse",
")",
"#Get X and Y spectra for the noise diode ON and OFF",
"#If using circular feeds these correspond to LL and RR",
"XX_OFF",
",",
"XX_ON",
"=",
"foldcal",
"(",
"np",
".",
"expand_dims",
"(",
"data",
"[",
":",
",",
"0",
",",
":",
"]",
",",
"axis",
"=",
"1",
")",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"YY_OFF",
",",
"YY_ON",
"=",
"foldcal",
"(",
"np",
".",
"expand_dims",
"(",
"data",
"[",
":",
",",
"1",
",",
":",
"]",
",",
"axis",
"=",
"1",
")",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"if",
"ax1",
"==",
"None",
":",
"plt",
".",
"subplot",
"(",
"211",
")",
"else",
":",
"axG",
"=",
"plt",
".",
"axes",
"(",
"ax1",
")",
"plt",
".",
"setp",
"(",
"axG",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"plt",
".",
"plot",
"(",
"coarse_freqs",
",",
"coarse_G",
",",
"'ko'",
",",
"markersize",
"=",
"2",
")",
"plt",
".",
"ylabel",
"(",
"r'$\\frac{\\Delta G}{2}$'",
",",
"rotation",
"=",
"90",
")",
"if",
"feedtype",
"==",
"'l'",
":",
"plt",
".",
"title",
"(",
"'XY Gain Difference'",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"plt",
".",
"title",
"(",
"'LR Gain Difference'",
")",
"plt",
".",
"grid",
"(",
"True",
")",
"if",
"ax2",
"==",
"None",
":",
"plt",
".",
"subplot",
"(",
"212",
")",
"else",
":",
"axXY",
"=",
"plt",
".",
"axes",
"(",
"ax2",
",",
"sharex",
"=",
"axG",
")",
"if",
"feedtype",
"==",
"'l'",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"XX_OFF",
",",
"'b-'",
",",
"label",
"=",
"'XX'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"YY_OFF",
",",
"'r-'",
",",
"label",
"=",
"'YY'",
")",
"if",
"feedtype",
"==",
"'c'",
":",
"plt",
".",
"plot",
"(",
"freqs",
",",
"XX_OFF",
",",
"'b-'",
",",
"label",
"=",
"'LL'",
")",
"plt",
".",
"plot",
"(",
"freqs",
",",
"YY_OFF",
",",
"'r-'",
",",
"label",
"=",
"'RR'",
")",
"plt",
".",
"xlabel",
"(",
"'Frequency (MHz)'",
")",
"plt",
".",
"ylabel",
"(",
"'Power (Counts)'",
")",
"if",
"legend",
"==",
"True",
":",
"plt",
".",
"legend",
"(",
")"
] | Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds | [
"Plots",
"the",
"calculated",
"gain",
"offsets",
"of",
"each",
"coarse",
"channel",
"along",
"with",
"the",
"time",
"averaged",
"power",
"spectra",
"of",
"the",
"X",
"and",
"Y",
"feeds"
] | python | test |
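To round off the `plot_gain_offsets` record above, here is a hedged usage sketch; the filterbank filename is a placeholder, and the call assumes a cross-polarization recording taken while the noise diode was switching.

import matplotlib.pyplot as plt
from blimpy.calib_utils.calib_plots import plot_gain_offsets

# 'diode_cross.fil' stands in for a real cross-polarization filterbank file.
plt.figure(figsize=(8, 6))
plot_gain_offsets('diode_cross.fil', dio_chan_per_coarse=8, feedtype='l')
plt.savefig('gain_offsets.png')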