repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---|
MolSSI-BSE/basis_set_exchange | basis_set_exchange/cli/bse_handlers.py | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/cli/bse_handlers.py#L40-L49 | def _bse_cli_list_ref_formats(args):
    '''Handles the list-ref-formats subcommand'''
    all_refformats = api.get_reference_formats()
    if args.no_description:
        liststr = all_refformats.keys()
    else:
        liststr = format_columns(all_refformats.items())
    return '\n'.join(liststr) | [
"def",
"_bse_cli_list_ref_formats",
"(",
"args",
")",
":",
"all_refformats",
"=",
"api",
".",
"get_reference_formats",
"(",
")",
"if",
"args",
".",
"no_description",
":",
"liststr",
"=",
"all_refformats",
".",
"keys",
"(",
")",
"else",
":",
"liststr",
"=",
"format_columns",
"(",
"all_refformats",
".",
"items",
"(",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"liststr",
")"
] | Handles the list-ref-formats subcommand | [
"Handles",
"the",
"list",
"-",
"ref",
"-",
"formats",
"subcommand"
] | python | train |
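Usage sketch for the record above: the handler only formats the mapping returned by `api.get_reference_formats()`. It assumes the basis_set_exchange package is installed; the column-width formatting below is illustrative, not the CLI's own `format_columns` helper.

```python
from basis_set_exchange import api  # assumed installed; the handler above calls api.get_reference_formats()

# Mapping of reference-format name -> human-readable description.
ref_formats = api.get_reference_formats()
for fmt, description in sorted(ref_formats.items()):
    print("{:<12} {}".format(fmt, description))
```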
OSSOS/MOP | src/ossos/core/ossos/astrom.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/astrom.py#L345-L373 | def _write_observation_headers(self, observations):
"""
See src/pipematt/step1matt-c
"""
for observation in observations:
header = observation.header
def get_header_vals(header_list):
header_vals = []
for key in header_list:
val = header[key]
if key == MJD_OBS_CENTER:
header_vals.append(val)
elif key == DETECTOR:
header_vals.append(val.ljust(20))
else:
header_vals.append(float(val))
return tuple(header_vals)
self._write_line("## MOPversion")
self._write_line("# %s" % header[MOPVERSION])
self._write_line("## MJD-OBS-CENTER EXPTIME THRES FWHM MAXCOUNT CRVAL1 CRVAL2 EXPNUM")
self._write_line("# %s%8.2f%6.2f%6.2f%9.1f%11.5f%11.5f%9d" % get_header_vals(
[MJD_OBS_CENTER, EXPTIME, THRES, FWHM, MAXCOUNT, CRVAL1, CRVAL2, EXPNUM]))
self._write_line("## SCALE CHIP CRPIX1 CRPIX2 NAX1 NAX2 DETECTOR PHADU RDNOIS")
self._write_line("# %6.3f%4d%10.2f%10.2f%6d%6d %s%5.2f %5.2f" % get_header_vals(
[SCALE, CHIP, CRPIX1, CRPIX2, NAX1, NAX2, DETECTOR, PHADU, RDNOIS])) | [
"def",
"_write_observation_headers",
"(",
"self",
",",
"observations",
")",
":",
"for",
"observation",
"in",
"observations",
":",
"header",
"=",
"observation",
".",
"header",
"def",
"get_header_vals",
"(",
"header_list",
")",
":",
"header_vals",
"=",
"[",
"]",
"for",
"key",
"in",
"header_list",
":",
"val",
"=",
"header",
"[",
"key",
"]",
"if",
"key",
"==",
"MJD_OBS_CENTER",
":",
"header_vals",
".",
"append",
"(",
"val",
")",
"elif",
"key",
"==",
"DETECTOR",
":",
"header_vals",
".",
"append",
"(",
"val",
".",
"ljust",
"(",
"20",
")",
")",
"else",
":",
"header_vals",
".",
"append",
"(",
"float",
"(",
"val",
")",
")",
"return",
"tuple",
"(",
"header_vals",
")",
"self",
".",
"_write_line",
"(",
"\"## MOPversion\"",
")",
"self",
".",
"_write_line",
"(",
"\"# %s\"",
"%",
"header",
"[",
"MOPVERSION",
"]",
")",
"self",
".",
"_write_line",
"(",
"\"## MJD-OBS-CENTER EXPTIME THRES FWHM MAXCOUNT CRVAL1 CRVAL2 EXPNUM\"",
")",
"self",
".",
"_write_line",
"(",
"\"# %s%8.2f%6.2f%6.2f%9.1f%11.5f%11.5f%9d\"",
"%",
"get_header_vals",
"(",
"[",
"MJD_OBS_CENTER",
",",
"EXPTIME",
",",
"THRES",
",",
"FWHM",
",",
"MAXCOUNT",
",",
"CRVAL1",
",",
"CRVAL2",
",",
"EXPNUM",
"]",
")",
")",
"self",
".",
"_write_line",
"(",
"\"## SCALE CHIP CRPIX1 CRPIX2 NAX1 NAX2 DETECTOR PHADU RDNOIS\"",
")",
"self",
".",
"_write_line",
"(",
"\"# %6.3f%4d%10.2f%10.2f%6d%6d %s%5.2f %5.2f\"",
"%",
"get_header_vals",
"(",
"[",
"SCALE",
",",
"CHIP",
",",
"CRPIX1",
",",
"CRPIX2",
",",
"NAX1",
",",
"NAX2",
",",
"DETECTOR",
",",
"PHADU",
",",
"RDNOIS",
"]",
")",
")"
] | See src/pipematt/step1matt-c | [
"See",
"src",
"/",
"pipematt",
"/",
"step1matt",
"-",
"c"
] | python | train |
mottosso/be | be/vendor/requests/sessions.py | https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/sessions.py#L42-L72 | def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""
Determines appropriate setting for a given request, taking into account the
explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None.
for (k, v) in request_setting.items():
if v is None:
del merged_setting[k]
merged_setting = dict((k, v) for (k, v) in merged_setting.items() if v is not None)
return merged_setting | [
"def",
"merge_setting",
"(",
"request_setting",
",",
"session_setting",
",",
"dict_class",
"=",
"OrderedDict",
")",
":",
"if",
"session_setting",
"is",
"None",
":",
"return",
"request_setting",
"if",
"request_setting",
"is",
"None",
":",
"return",
"session_setting",
"# Bypass if not a dictionary (e.g. verify)",
"if",
"not",
"(",
"isinstance",
"(",
"session_setting",
",",
"Mapping",
")",
"and",
"isinstance",
"(",
"request_setting",
",",
"Mapping",
")",
")",
":",
"return",
"request_setting",
"merged_setting",
"=",
"dict_class",
"(",
"to_key_val_list",
"(",
"session_setting",
")",
")",
"merged_setting",
".",
"update",
"(",
"to_key_val_list",
"(",
"request_setting",
")",
")",
"# Remove keys that are set to None.",
"for",
"(",
"k",
",",
"v",
")",
"in",
"request_setting",
".",
"items",
"(",
")",
":",
"if",
"v",
"is",
"None",
":",
"del",
"merged_setting",
"[",
"k",
"]",
"merged_setting",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"merged_setting",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"merged_setting"
] | Determines appropriate setting for a given request, taking into account the
explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class` | [
"Determines",
"appropriate",
"setting",
"for",
"a",
"given",
"request",
"taking",
"into",
"account",
"the",
"explicit",
"setting",
"on",
"that",
"request",
"and",
"the",
"setting",
"in",
"the",
"session",
".",
"If",
"a",
"setting",
"is",
"a",
"dictionary",
"they",
"will",
"be",
"merged",
"together",
"using",
"dict_class"
] | python | train |
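A brief usage sketch of the `merge_setting` helper in this record. The import path below is the one used by the requests library that the file above vendors; the sample header values are made up.

```python
from collections import OrderedDict
from requests.sessions import merge_setting  # same helper as the vendored copy above

session_headers = {"Accept": "application/json", "X-Trace": "abc"}  # hypothetical session defaults
request_headers = {"Accept": "text/html", "X-Trace": None}          # hypothetical per-request overrides

# Request values win over session values, and keys explicitly set to None are dropped.
merged = merge_setting(request_headers, session_headers, dict_class=OrderedDict)
print(merged)  # expected: {'Accept': 'text/html'}
```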
eventable/vobject | docs/build/lib/vobject/icalendar.py | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L802-L822 | def transformToNative(obj):
"""
Turn obj.value into a list of dates, datetimes, or
(datetime, timedelta) tuples.
"""
if obj.isNative:
return obj
obj.isNative = True
if obj.value == '':
obj.value = []
return obj
tzinfo = getTzid(getattr(obj, 'tzid_param', None))
valueParam = getattr(obj, 'value_param', "DATE-TIME").upper()
valTexts = obj.value.split(",")
if valueParam == "DATE":
obj.value = [stringToDate(x) for x in valTexts]
elif valueParam == "DATE-TIME":
obj.value = [stringToDateTime(x, tzinfo) for x in valTexts]
elif valueParam == "PERIOD":
obj.value = [stringToPeriod(x, tzinfo) for x in valTexts]
return obj | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"True",
"if",
"obj",
".",
"value",
"==",
"''",
":",
"obj",
".",
"value",
"=",
"[",
"]",
"return",
"obj",
"tzinfo",
"=",
"getTzid",
"(",
"getattr",
"(",
"obj",
",",
"'tzid_param'",
",",
"None",
")",
")",
"valueParam",
"=",
"getattr",
"(",
"obj",
",",
"'value_param'",
",",
"\"DATE-TIME\"",
")",
".",
"upper",
"(",
")",
"valTexts",
"=",
"obj",
".",
"value",
".",
"split",
"(",
"\",\"",
")",
"if",
"valueParam",
"==",
"\"DATE\"",
":",
"obj",
".",
"value",
"=",
"[",
"stringToDate",
"(",
"x",
")",
"for",
"x",
"in",
"valTexts",
"]",
"elif",
"valueParam",
"==",
"\"DATE-TIME\"",
":",
"obj",
".",
"value",
"=",
"[",
"stringToDateTime",
"(",
"x",
",",
"tzinfo",
")",
"for",
"x",
"in",
"valTexts",
"]",
"elif",
"valueParam",
"==",
"\"PERIOD\"",
":",
"obj",
".",
"value",
"=",
"[",
"stringToPeriod",
"(",
"x",
",",
"tzinfo",
")",
"for",
"x",
"in",
"valTexts",
"]",
"return",
"obj"
] | Turn obj.value into a list of dates, datetimes, or
(datetime, timedelta) tuples. | [
"Turn",
"obj",
".",
"value",
"into",
"a",
"list",
"of",
"dates",
"datetimes",
"or",
"(",
"datetime",
"timedelta",
")",
"tuples",
"."
] | python | train |
datosgobar/pydatajson | pydatajson/indicators.py | https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/indicators.py#L509-L562 | def _count_fields_recursive(dataset, fields):
"""Cuenta la información de campos optativos/recomendados/requeridos
desde 'fields', y cuenta la ocurrencia de los mismos en 'dataset'.
Args:
dataset (dict): diccionario con claves a ser verificadas.
fields (dict): diccionario con los campos a verificar en dataset
como claves, y 'optativo', 'recomendado', o 'requerido' como
valores. Puede tener objetios anidados pero no arrays.
Returns:
dict: diccionario con las claves 'recomendado', 'optativo',
'requerido', 'recomendado_total', 'optativo_total',
'requerido_total', con la cantidad como valores.
"""
key_count = {
'recomendado': 0,
'optativo': 0,
'requerido': 0,
'total_optativo': 0,
'total_recomendado': 0,
'total_requerido': 0
}
for k, v in fields.items():
# Si la clave es un diccionario se implementa recursivamente el
# mismo algoritmo
if isinstance(v, dict):
# dataset[k] puede ser o un dict o una lista, ej 'dataset' es
# list, 'publisher' no. Si no es lista, lo metemos en una.
# Si no es ninguno de los dos, dataset[k] es inválido
# y se pasa un diccionario vacío para poder comparar
elements = dataset.get(k)
if not isinstance(elements, (list, dict)):
elements = [{}]
if isinstance(elements, dict):
elements = [dataset[k].copy()]
for element in elements:
# Llamada recursiva y suma del resultado al nuestro
result = _count_fields_recursive(element, v)
for key in result:
key_count[key] += result[key]
# Es un elemento normal (no iterable), se verifica si está en
# dataset o no. Se suma 1 siempre al total de su tipo
else:
# total_requerido, total_recomendado, o total_optativo
key_count['total_' + v] += 1
if k in dataset:
key_count[v] += 1
return key_count | [
"def",
"_count_fields_recursive",
"(",
"dataset",
",",
"fields",
")",
":",
"key_count",
"=",
"{",
"'recomendado'",
":",
"0",
",",
"'optativo'",
":",
"0",
",",
"'requerido'",
":",
"0",
",",
"'total_optativo'",
":",
"0",
",",
"'total_recomendado'",
":",
"0",
",",
"'total_requerido'",
":",
"0",
"}",
"for",
"k",
",",
"v",
"in",
"fields",
".",
"items",
"(",
")",
":",
"# Si la clave es un diccionario se implementa recursivamente el",
"# mismo algoritmo",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"# dataset[k] puede ser o un dict o una lista, ej 'dataset' es",
"# list, 'publisher' no. Si no es lista, lo metemos en una.",
"# Si no es ninguno de los dos, dataset[k] es inválido",
"# y se pasa un diccionario vacío para poder comparar",
"elements",
"=",
"dataset",
".",
"get",
"(",
"k",
")",
"if",
"not",
"isinstance",
"(",
"elements",
",",
"(",
"list",
",",
"dict",
")",
")",
":",
"elements",
"=",
"[",
"{",
"}",
"]",
"if",
"isinstance",
"(",
"elements",
",",
"dict",
")",
":",
"elements",
"=",
"[",
"dataset",
"[",
"k",
"]",
".",
"copy",
"(",
")",
"]",
"for",
"element",
"in",
"elements",
":",
"# Llamada recursiva y suma del resultado al nuestro",
"result",
"=",
"_count_fields_recursive",
"(",
"element",
",",
"v",
")",
"for",
"key",
"in",
"result",
":",
"key_count",
"[",
"key",
"]",
"+=",
"result",
"[",
"key",
"]",
"# Es un elemento normal (no iterable), se verifica si está en",
"# dataset o no. Se suma 1 siempre al total de su tipo",
"else",
":",
"# total_requerido, total_recomendado, o total_optativo",
"key_count",
"[",
"'total_'",
"+",
"v",
"]",
"+=",
"1",
"if",
"k",
"in",
"dataset",
":",
"key_count",
"[",
"v",
"]",
"+=",
"1",
"return",
"key_count"
] | Cuenta la información de campos optativos/recomendados/requeridos
desde 'fields', y cuenta la ocurrencia de los mismos en 'dataset'.
Args:
dataset (dict): diccionario con claves a ser verificadas.
fields (dict): diccionario con los campos a verificar en dataset
como claves, y 'optativo', 'recomendado', o 'requerido' como
valores. Puede tener objetios anidados pero no arrays.
Returns:
dict: diccionario con las claves 'recomendado', 'optativo',
'requerido', 'recomendado_total', 'optativo_total',
'requerido_total', con la cantidad como valores. | [
"Cuenta",
"la",
"información",
"de",
"campos",
"optativos",
"/",
"recomendados",
"/",
"requeridos",
"desde",
"fields",
"y",
"cuenta",
"la",
"ocurrencia",
"de",
"los",
"mismos",
"en",
"dataset",
"."
] | python | train |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/output_writers.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L504-L509 | def _get_tmp_account_id(cls, writer_spec):
"""Returns the account id to use with tmp bucket."""
# pick tmp id iff tmp bucket is set explicitly
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
return cls._get_account_id(writer_spec) | [
"def",
"_get_tmp_account_id",
"(",
"cls",
",",
"writer_spec",
")",
":",
"# pick tmp id iff tmp bucket is set explicitly",
"if",
"cls",
".",
"TMP_BUCKET_NAME_PARAM",
"in",
"writer_spec",
":",
"return",
"writer_spec",
".",
"get",
"(",
"cls",
".",
"_TMP_ACCOUNT_ID_PARAM",
",",
"None",
")",
"return",
"cls",
".",
"_get_account_id",
"(",
"writer_spec",
")"
] | Returns the account id to use with tmp bucket. | [
"Returns",
"the",
"account",
"id",
"to",
"use",
"with",
"tmp",
"bucket",
"."
] | python | train |
greenelab/django-genes | genes/api.py | https://github.com/greenelab/django-genes/blob/298939adcb115031acfc11cfcef60ea0b596fae5/genes/api.py#L202-L214 | def post_list(self, request, **kwargs):
"""
(Copied from implementation in
https://github.com/greenelab/adage-server/blob/master/adage/analyze/api.py)
Handle an incoming POST as a GET to work around URI length limitations
"""
# The convert_post_to_VERB() technique is borrowed from
# resources.py in tastypie source. This helps us to convert the POST
# to a GET in the proper way internally.
request.method = 'GET' # override the incoming POST
dispatch_request = convert_post_to_VERB(request, 'GET')
return self.dispatch('list', dispatch_request, **kwargs) | [
"def",
"post_list",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"# The convert_post_to_VERB() technique is borrowed from",
"# resources.py in tastypie source. This helps us to convert the POST",
"# to a GET in the proper way internally.",
"request",
".",
"method",
"=",
"'GET'",
"# override the incoming POST",
"dispatch_request",
"=",
"convert_post_to_VERB",
"(",
"request",
",",
"'GET'",
")",
"return",
"self",
".",
"dispatch",
"(",
"'list'",
",",
"dispatch_request",
",",
"*",
"*",
"kwargs",
")"
] | (Copied from implementation in
https://github.com/greenelab/adage-server/blob/master/adage/analyze/api.py)
Handle an incoming POST as a GET to work around URI length limitations | [
"(",
"Copied",
"from",
"implementation",
"in",
"https",
":",
"//",
"github",
".",
"com",
"/",
"greenelab",
"/",
"adage",
"-",
"server",
"/",
"blob",
"/",
"master",
"/",
"adage",
"/",
"analyze",
"/",
"api",
".",
"py",
")"
] | python | train |
aarongarrett/inspyred | inspyred/swarm/topologies.py | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/swarm/topologies.py#L69-L101 | def ring_topology(random, population, args):
"""Returns the neighbors using a ring topology.
This function sets all particles in a specified sized neighborhood
as neighbors for a given particle. This is known as a ring
topology. The resulting list of lists of neighbors is returned.
.. Arguments:
random -- the random number generator object
population -- the population of particles
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *neighborhood_size* -- the width of the neighborhood around a
particle which determines the size of the neighborhood
(default 3)
"""
neighborhood_size = args.setdefault('neighborhood_size', 3)
half_hood = neighborhood_size // 2
neighbor_index_start = []
for index in range(len(population)):
if index < half_hood:
neighbor_index_start.append(len(population) - half_hood + index)
else:
neighbor_index_start.append(index - half_hood)
neighbors = []
for start in neighbor_index_start:
n = []
for i in range(0, neighborhood_size):
n.append(population[(start + i) % len(population)])
yield n | [
"def",
"ring_topology",
"(",
"random",
",",
"population",
",",
"args",
")",
":",
"neighborhood_size",
"=",
"args",
".",
"setdefault",
"(",
"'neighborhood_size'",
",",
"3",
")",
"half_hood",
"=",
"neighborhood_size",
"//",
"2",
"neighbor_index_start",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"population",
")",
")",
":",
"if",
"index",
"<",
"half_hood",
":",
"neighbor_index_start",
".",
"append",
"(",
"len",
"(",
"population",
")",
"-",
"half_hood",
"+",
"index",
")",
"else",
":",
"neighbor_index_start",
".",
"append",
"(",
"index",
"-",
"half_hood",
")",
"neighbors",
"=",
"[",
"]",
"for",
"start",
"in",
"neighbor_index_start",
":",
"n",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"neighborhood_size",
")",
":",
"n",
".",
"append",
"(",
"population",
"[",
"(",
"start",
"+",
"i",
")",
"%",
"len",
"(",
"population",
")",
"]",
")",
"yield",
"n"
] | Returns the neighbors using a ring topology.
This function sets all particles in a specified sized neighborhood
as neighbors for a given particle. This is known as a ring
topology. The resulting list of lists of neighbors is returned.
.. Arguments:
random -- the random number generator object
population -- the population of particles
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *neighborhood_size* -- the width of the neighborhood around a
particle which determines the size of the neighborhood
(default 3) | [
"Returns",
"the",
"neighbors",
"using",
"a",
"ring",
"topology",
".",
"This",
"function",
"sets",
"all",
"particles",
"in",
"a",
"specified",
"sized",
"neighborhood",
"as",
"neighbors",
"for",
"a",
"given",
"particle",
".",
"This",
"is",
"known",
"as",
"a",
"ring",
"topology",
".",
"The",
"resulting",
"list",
"of",
"lists",
"of",
"neighbors",
"is",
"returned",
".",
"..",
"Arguments",
":",
"random",
"--",
"the",
"random",
"number",
"generator",
"object",
"population",
"--",
"the",
"population",
"of",
"particles",
"args",
"--",
"a",
"dictionary",
"of",
"keyword",
"arguments"
] | python | train |
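A short sketch of driving the `ring_topology` generator in this record. The import path mirrors the module path listed above and is assumed to be installed; the integer "particles" are placeholders for real swarm particles.

```python
import random
from inspyred.swarm.topologies import ring_topology  # path as listed in this record

population = list(range(8))   # placeholder particles
rng = random.Random(42)       # the generator only passes this argument through

# One neighborhood is yielded per particle, wrapping around the ends like a ring.
for neighborhood in ring_topology(rng, population, {"neighborhood_size": 3}):
    print(neighborhood)
```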
mar10/wsgidav | wsgidav/server/server_cli.py | https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/server/server_cli.py#L715-L730 | def _run_wsgiref(app, config, mode):
"""Run WsgiDAV using wsgiref.simple_server, on Python 2.5+."""
# http://www.python.org/doc/2.5.2/lib/module-wsgiref.html
from wsgiref.simple_server import make_server, software_version
version = "WsgiDAV/{} {}".format(__version__, software_version)
_logger.info("Running {}...".format(version))
_logger.warning(
"WARNING: This single threaded server (wsgiref) is not meant for production."
)
httpd = make_server(config["host"], config["port"], app)
try:
httpd.serve_forever()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return | [
"def",
"_run_wsgiref",
"(",
"app",
",",
"config",
",",
"mode",
")",
":",
"# http://www.python.org/doc/2.5.2/lib/module-wsgiref.html",
"from",
"wsgiref",
".",
"simple_server",
"import",
"make_server",
",",
"software_version",
"version",
"=",
"\"WsgiDAV/{} {}\"",
".",
"format",
"(",
"__version__",
",",
"software_version",
")",
"_logger",
".",
"info",
"(",
"\"Running {}...\"",
".",
"format",
"(",
"version",
")",
")",
"_logger",
".",
"warning",
"(",
"\"WARNING: This single threaded server (wsgiref) is not meant for production.\"",
")",
"httpd",
"=",
"make_server",
"(",
"config",
"[",
"\"host\"",
"]",
",",
"config",
"[",
"\"port\"",
"]",
",",
"app",
")",
"try",
":",
"httpd",
".",
"serve_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"_logger",
".",
"warning",
"(",
"\"Caught Ctrl-C, shutting down...\"",
")",
"return"
] | Run WsgiDAV using wsgiref.simple_server, on Python 2.5+. | [
"Run",
"WsgiDAV",
"using",
"wsgiref",
".",
"simple_server",
"on",
"Python",
"2",
".",
"5",
"+",
"."
] | python | valid |
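For reference, a minimal standard-library sketch of the `make_server` pattern the helper above wraps. The trivial WSGI app, host and port are placeholders, not part of WsgiDAV.

```python
from wsgiref.simple_server import make_server

def hello_app(environ, start_response):
    # Placeholder WSGI app standing in for the WsgiDAV application object.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello\n"]

httpd = make_server("127.0.0.1", 8080, hello_app)  # host/port are illustrative
print("Serving on http://127.0.0.1:8080 (Ctrl-C to stop)")
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    pass
```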
hazelcast/hazelcast-python-client | hazelcast/proxy/queue.py | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/queue.py#L199-L208 | def put(self, item):
"""
Adds the specified element into this queue. If there is no space, it waits until necessary space becomes
available.
:param item: (object), the specified item.
"""
check_not_none(item, "Value can't be None")
element_data = self._to_data(item)
return self._encode_invoke(queue_put_codec, value=element_data) | [
"def",
"put",
"(",
"self",
",",
"item",
")",
":",
"check_not_none",
"(",
"item",
",",
"\"Value can't be None\"",
")",
"element_data",
"=",
"self",
".",
"_to_data",
"(",
"item",
")",
"return",
"self",
".",
"_encode_invoke",
"(",
"queue_put_codec",
",",
"value",
"=",
"element_data",
")"
] | Adds the specified element into this queue. If there is no space, it waits until necessary space becomes
available.
:param item: (object), the specified item. | [
"Adds",
"the",
"specified",
"element",
"into",
"this",
"queue",
".",
"If",
"there",
"is",
"no",
"space",
"it",
"waits",
"until",
"necessary",
"space",
"becomes",
"available",
"."
] | python | train |
ddorn/GUI | GUI/vracabulous.py | https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/vracabulous.py#L251-L265 | def update_on_event(self, e):
"""Process a single event."""
if e.type == QUIT:
self.running = False
elif e.type == KEYDOWN:
if e.key == K_ESCAPE:
self.running = False
elif e.key == K_F4 and e.mod & KMOD_ALT: # Alt+F4 --> quits
self.running = False
elif e.type == VIDEORESIZE:
self.SCREEN_SIZE = e.size
self.screen = self.new_screen() | [
"def",
"update_on_event",
"(",
"self",
",",
"e",
")",
":",
"if",
"e",
".",
"type",
"==",
"QUIT",
":",
"self",
".",
"running",
"=",
"False",
"elif",
"e",
".",
"type",
"==",
"KEYDOWN",
":",
"if",
"e",
".",
"key",
"==",
"K_ESCAPE",
":",
"self",
".",
"running",
"=",
"False",
"elif",
"e",
".",
"key",
"==",
"K_F4",
"and",
"e",
".",
"mod",
"&",
"KMOD_ALT",
":",
"# Alt+F4 --> quits",
"self",
".",
"running",
"=",
"False",
"elif",
"e",
".",
"type",
"==",
"VIDEORESIZE",
":",
"self",
".",
"SCREEN_SIZE",
"=",
"e",
".",
"size",
"self",
".",
"screen",
"=",
"self",
".",
"new_screen",
"(",
")"
] | Process a single event. | [
"Process",
"a",
"single",
"event",
"."
] | python | train |
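A self-contained sketch of the same event-dispatch pattern as `update_on_event` above, fed a synthetic pygame event so no window is needed. The MiniApp class is a made-up stand-in for the original class, and the resize handling is omitted.

```python
import pygame
from pygame.locals import QUIT, KEYDOWN, K_ESCAPE, K_F4, KMOD_ALT

class MiniApp:
    # Made-up stand-in for the class that owns update_on_event above.
    def __init__(self):
        self.running = True

    def update_on_event(self, e):
        # Same quit conditions as the method above, minus resize handling.
        if e.type == QUIT:
            self.running = False
        elif e.type == KEYDOWN:
            if e.key == K_ESCAPE or (e.key == K_F4 and e.mod & KMOD_ALT):
                self.running = False

app = MiniApp()
escape = pygame.event.Event(KEYDOWN, key=K_ESCAPE, mod=0)  # synthetic key press
app.update_on_event(escape)
print(app.running)  # False
```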
quodlibet/mutagen | mutagen/apev2.py | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/apev2.py#L427-L485 | def save(self, filething=None):
"""Save changes to a file.
If no filename is given, the one most recently loaded is used.
Tags are always written at the end of the file, and include
a header and a footer.
"""
fileobj = filething.fileobj
data = _APEv2Data(fileobj)
if data.is_at_start:
delete_bytes(fileobj, data.end - data.start, data.start)
elif data.start is not None:
fileobj.seek(data.start)
# Delete an ID3v1 tag if present, too.
fileobj.truncate()
fileobj.seek(0, 2)
tags = []
for key, value in self.items():
# Packed format for an item:
# 4B: Value length
# 4B: Value type
# Key name
# 1B: Null
# Key value
value_data = value._write()
if not isinstance(key, bytes):
key = key.encode("utf-8")
tag_data = bytearray()
tag_data += struct.pack("<2I", len(value_data), value.kind << 1)
tag_data += key + b"\0" + value_data
tags.append(bytes(tag_data))
# "APE tags items should be sorted ascending by size... This is
# not a MUST, but STRONGLY recommended. Actually the items should
# be sorted by importance/byte, but this is not feasible."
tags.sort(key=lambda tag: (len(tag), tag))
num_tags = len(tags)
tags = b"".join(tags)
header = bytearray(b"APETAGEX")
# version, tag size, item count, flags
header += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
HAS_HEADER | IS_HEADER)
header += b"\0" * 8
fileobj.write(header)
fileobj.write(tags)
footer = bytearray(b"APETAGEX")
footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
HAS_HEADER)
footer += b"\0" * 8
fileobj.write(footer) | [
"def",
"save",
"(",
"self",
",",
"filething",
"=",
"None",
")",
":",
"fileobj",
"=",
"filething",
".",
"fileobj",
"data",
"=",
"_APEv2Data",
"(",
"fileobj",
")",
"if",
"data",
".",
"is_at_start",
":",
"delete_bytes",
"(",
"fileobj",
",",
"data",
".",
"end",
"-",
"data",
".",
"start",
",",
"data",
".",
"start",
")",
"elif",
"data",
".",
"start",
"is",
"not",
"None",
":",
"fileobj",
".",
"seek",
"(",
"data",
".",
"start",
")",
"# Delete an ID3v1 tag if present, too.",
"fileobj",
".",
"truncate",
"(",
")",
"fileobj",
".",
"seek",
"(",
"0",
",",
"2",
")",
"tags",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"# Packed format for an item:",
"# 4B: Value length",
"# 4B: Value type",
"# Key name",
"# 1B: Null",
"# Key value",
"value_data",
"=",
"value",
".",
"_write",
"(",
")",
"if",
"not",
"isinstance",
"(",
"key",
",",
"bytes",
")",
":",
"key",
"=",
"key",
".",
"encode",
"(",
"\"utf-8\"",
")",
"tag_data",
"=",
"bytearray",
"(",
")",
"tag_data",
"+=",
"struct",
".",
"pack",
"(",
"\"<2I\"",
",",
"len",
"(",
"value_data",
")",
",",
"value",
".",
"kind",
"<<",
"1",
")",
"tag_data",
"+=",
"key",
"+",
"b\"\\0\"",
"+",
"value_data",
"tags",
".",
"append",
"(",
"bytes",
"(",
"tag_data",
")",
")",
"# \"APE tags items should be sorted ascending by size... This is",
"# not a MUST, but STRONGLY recommended. Actually the items should",
"# be sorted by importance/byte, but this is not feasible.\"",
"tags",
".",
"sort",
"(",
"key",
"=",
"lambda",
"tag",
":",
"(",
"len",
"(",
"tag",
")",
",",
"tag",
")",
")",
"num_tags",
"=",
"len",
"(",
"tags",
")",
"tags",
"=",
"b\"\"",
".",
"join",
"(",
"tags",
")",
"header",
"=",
"bytearray",
"(",
"b\"APETAGEX\"",
")",
"# version, tag size, item count, flags",
"header",
"+=",
"struct",
".",
"pack",
"(",
"\"<4I\"",
",",
"2000",
",",
"len",
"(",
"tags",
")",
"+",
"32",
",",
"num_tags",
",",
"HAS_HEADER",
"|",
"IS_HEADER",
")",
"header",
"+=",
"b\"\\0\"",
"*",
"8",
"fileobj",
".",
"write",
"(",
"header",
")",
"fileobj",
".",
"write",
"(",
"tags",
")",
"footer",
"=",
"bytearray",
"(",
"b\"APETAGEX\"",
")",
"footer",
"+=",
"struct",
".",
"pack",
"(",
"\"<4I\"",
",",
"2000",
",",
"len",
"(",
"tags",
")",
"+",
"32",
",",
"num_tags",
",",
"HAS_HEADER",
")",
"footer",
"+=",
"b\"\\0\"",
"*",
"8",
"fileobj",
".",
"write",
"(",
"footer",
")"
] | Save changes to a file.
If no filename is given, the one most recently loaded is used.
Tags are always written at the end of the file, and include
a header and a footer. | [
"Save",
"changes",
"to",
"a",
"file",
"."
] | python | train |
cs50/check50 | check50/api.py | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L67-L86 | def hash(file):
"""
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
"""
exists(file)
log(_("hashing {}...").format(file))
# https://stackoverflow.com/a/22058673
with open(file, "rb") as f:
sha256 = hashlib.sha256()
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest() | [
"def",
"hash",
"(",
"file",
")",
":",
"exists",
"(",
"file",
")",
"log",
"(",
"_",
"(",
"\"hashing {}...\"",
")",
".",
"format",
"(",
"file",
")",
")",
"# https://stackoverflow.com/a/22058673",
"with",
"open",
"(",
"file",
",",
"\"rb\"",
")",
"as",
"f",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"for",
"block",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"65536",
")",
",",
"b\"\"",
")",
":",
"sha256",
".",
"update",
"(",
"block",
")",
"return",
"sha256",
".",
"hexdigest",
"(",
")"
] | Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist | [
"Hashes",
"file",
"using",
"SHA",
"-",
"256",
"."
] | python | train |
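The function above is a chunked SHA-256 over a file. Below is a standalone sketch of the same pattern, run on a throwaway temporary file so it needs nothing from check50.

```python
import hashlib
import tempfile

# Create a throwaway file so the example is self-contained.
with tempfile.NamedTemporaryFile("wb", delete=False, suffix=".txt") as tmp:
    tmp.write(b"hello check50\n")
    path = tmp.name

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    # Read 64 KiB blocks until EOF, exactly as the helper above does.
    for block in iter(lambda: f.read(65536), b""):
        sha256.update(block)
print(sha256.hexdigest())
```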
fossasia/knittingpattern | knittingpattern/Loader.py | https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Loader.py#L198-L211 | def url(self, url, encoding="UTF-8"):
"""load and process the content behind a url
:return: the processed result of the :paramref:`url's <url>` content
:param str url: the url to retrieve the content from
:param str encoding: the encoding of the retrieved content.
The default encoding is UTF-8.
"""
import urllib.request
with urllib.request.urlopen(url) as file:
webpage_content = file.read()
webpage_content = webpage_content.decode(encoding)
return self.string(webpage_content) | [
"def",
"url",
"(",
"self",
",",
"url",
",",
"encoding",
"=",
"\"UTF-8\"",
")",
":",
"import",
"urllib",
".",
"request",
"with",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"as",
"file",
":",
"webpage_content",
"=",
"file",
".",
"read",
"(",
")",
"webpage_content",
"=",
"webpage_content",
".",
"decode",
"(",
"encoding",
")",
"return",
"self",
".",
"string",
"(",
"webpage_content",
")"
] | load and process the content behind a url
:return: the processed result of the :paramref:`url's <url>` content
:param str url: the url to retrieve the content from
:param str encoding: the encoding of the retrieved content.
The default encoding is UTF-8. | [
"load",
"and",
"process",
"the",
"content",
"behind",
"a",
"url"
] | python | valid |
jonathf/chaospy | chaospy/distributions/sampler/generator.py | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/sampler/generator.py#L76-L139 | def generate_samples(order, domain=1, rule="R", antithetic=None):
"""
Sample generator.
Args:
order (int):
Sample order. Determines the number of samples to create.
domain (Dist, int, numpy.ndarray):
Defines the space where the samples are generated. If integer is
provided, the space ``[0, 1]^domain`` will be used. If array-like
object is provided, a hypercube it defines will be used. If
distribution, the domain it spans will be used.
rule (str):
rule for generating samples. The various rules are listed in
:mod:`chaospy.distributions.sampler.generator`.
antithetic (tuple):
Sequence of boolean values. Represents the axes to mirror using
antithetic variable.
"""
logger = logging.getLogger(__name__)
logger.debug("generating random samples using rule %s", rule)
rule = rule.upper()
if isinstance(domain, int):
dim = domain
trans = lambda x_data: x_data
elif isinstance(domain, (tuple, list, numpy.ndarray)):
domain = numpy.asfarray(domain)
if len(domain.shape) < 2:
dim = 1
else:
dim = len(domain[0])
trans = lambda x_data: ((domain[1]-domain[0])*x_data.T + domain[0]).T
else:
dist = domain
dim = len(dist)
trans = dist.inv
if antithetic is not None:
from .antithetic import create_antithetic_variates
antithetic = numpy.array(antithetic, dtype=bool).flatten()
if antithetic.size == 1 and dim > 1:
antithetic = numpy.repeat(antithetic, dim)
size = numpy.sum(1*numpy.array(antithetic))
order_saved = order
order = int(numpy.log(order - dim))
order = order if order > 1 else 1
while order**dim < order_saved:
order += 1
trans_ = trans
trans = lambda x_data: trans_(
create_antithetic_variates(x_data, antithetic)[:, :order_saved])
assert rule in SAMPLERS, "rule not recognised"
sampler = SAMPLERS[rule]
x_data = trans(sampler(order=order, dim=dim))
logger.debug("order: %d, dim: %d -> shape: %s", order, dim, x_data.shape)
return x_data | [
"def",
"generate_samples",
"(",
"order",
",",
"domain",
"=",
"1",
",",
"rule",
"=",
"\"R\"",
",",
"antithetic",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"generating random samples using rule %s\"",
",",
"rule",
")",
"rule",
"=",
"rule",
".",
"upper",
"(",
")",
"if",
"isinstance",
"(",
"domain",
",",
"int",
")",
":",
"dim",
"=",
"domain",
"trans",
"=",
"lambda",
"x_data",
":",
"x_data",
"elif",
"isinstance",
"(",
"domain",
",",
"(",
"tuple",
",",
"list",
",",
"numpy",
".",
"ndarray",
")",
")",
":",
"domain",
"=",
"numpy",
".",
"asfarray",
"(",
"domain",
")",
"if",
"len",
"(",
"domain",
".",
"shape",
")",
"<",
"2",
":",
"dim",
"=",
"1",
"else",
":",
"dim",
"=",
"len",
"(",
"domain",
"[",
"0",
"]",
")",
"trans",
"=",
"lambda",
"x_data",
":",
"(",
"(",
"domain",
"[",
"1",
"]",
"-",
"domain",
"[",
"0",
"]",
")",
"*",
"x_data",
".",
"T",
"+",
"domain",
"[",
"0",
"]",
")",
".",
"T",
"else",
":",
"dist",
"=",
"domain",
"dim",
"=",
"len",
"(",
"dist",
")",
"trans",
"=",
"dist",
".",
"inv",
"if",
"antithetic",
"is",
"not",
"None",
":",
"from",
".",
"antithetic",
"import",
"create_antithetic_variates",
"antithetic",
"=",
"numpy",
".",
"array",
"(",
"antithetic",
",",
"dtype",
"=",
"bool",
")",
".",
"flatten",
"(",
")",
"if",
"antithetic",
".",
"size",
"==",
"1",
"and",
"dim",
">",
"1",
":",
"antithetic",
"=",
"numpy",
".",
"repeat",
"(",
"antithetic",
",",
"dim",
")",
"size",
"=",
"numpy",
".",
"sum",
"(",
"1",
"*",
"numpy",
".",
"array",
"(",
"antithetic",
")",
")",
"order_saved",
"=",
"order",
"order",
"=",
"int",
"(",
"numpy",
".",
"log",
"(",
"order",
"-",
"dim",
")",
")",
"order",
"=",
"order",
"if",
"order",
">",
"1",
"else",
"1",
"while",
"order",
"**",
"dim",
"<",
"order_saved",
":",
"order",
"+=",
"1",
"trans_",
"=",
"trans",
"trans",
"=",
"lambda",
"x_data",
":",
"trans_",
"(",
"create_antithetic_variates",
"(",
"x_data",
",",
"antithetic",
")",
"[",
":",
",",
":",
"order_saved",
"]",
")",
"assert",
"rule",
"in",
"SAMPLERS",
",",
"\"rule not recognised\"",
"sampler",
"=",
"SAMPLERS",
"[",
"rule",
"]",
"x_data",
"=",
"trans",
"(",
"sampler",
"(",
"order",
"=",
"order",
",",
"dim",
"=",
"dim",
")",
")",
"logger",
".",
"debug",
"(",
"\"order: %d, dim: %d -> shape: %s\"",
",",
"order",
",",
"dim",
",",
"x_data",
".",
"shape",
")",
"return",
"x_data"
] | Sample generator.
Args:
order (int):
Sample order. Determines the number of samples to create.
domain (Dist, int, numpy.ndarray):
Defines the space where the samples are generated. If integer is
provided, the space ``[0, 1]^domain`` will be used. If array-like
object is provided, a hypercube it defines will be used. If
distribution, the domain it spans will be used.
rule (str):
rule for generating samples. The various rules are listed in
:mod:`chaospy.distributions.sampler.generator`.
antithetic (tuple):
Sequence of boolean values. Represents the axes to mirror using
antithetic variable. | [
"Sample",
"generator",
"."
] | python | train |
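A usage sketch of the sampler in this record. The import path follows the module path listed above and may differ between chaospy versions; the orders, bounds and the expected (dim, order) output shape are illustrative assumptions.

```python
import numpy
# Path taken from this record; newer chaospy releases may expose this elsewhere.
from chaospy.distributions.sampler.generator import generate_samples

# 100 plain pseudo-random ("R") samples on the unit square [0, 1]^2.
samples = generate_samples(order=100, domain=2, rule="R")
print(samples.shape)  # expected roughly (2, 100): one row per dimension

# A hypercube can also be given as lower/upper bounds per dimension.
bounds = numpy.array([[0.0, -1.0], [1.0, 1.0]])
scaled = generate_samples(order=50, domain=bounds, rule="R")
print(scaled.min(axis=1), scaled.max(axis=1))
```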
google/tangent | tangent/transformers.py | https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/transformers.py#L81-L96 | def append(self, node):
"""Append a statement to the current statement.
Note that multiple calls to append will result in the last statement to be
appended to end up at the bottom.
Args:
node: The statement to append.
Raises:
ValueError: If the given node is not a statement.
"""
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
self.to_append[-1].append(node) | [
"def",
"append",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"grammar",
".",
"STATEMENTS",
")",
":",
"raise",
"ValueError",
"self",
".",
"to_append",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"node",
")"
] | Append a statement to the current statement.
Note that multiple calls to append will result in the last statement to be
appended to end up at the bottom.
Args:
node: The statement to append.
Raises:
ValueError: If the given node is not a statement. | [
"Append",
"a",
"statement",
"to",
"the",
"current",
"statement",
"."
] | python | train |
MagicStack/asyncpg | asyncpg/prepared_stmt.py | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/prepared_stmt.py#L95-L108 | def cursor(self, *args, prefetch=None,
               timeout=None) -> cursor.CursorFactory:
    """Return a *cursor factory* for the prepared statement.
    :param args: Query arguments.
    :param int prefetch: The number of rows the *cursor iterator*
        will prefetch (defaults to ``50``.)
    :param float timeout: Optional timeout in seconds.
    :return: A :class:`~cursor.CursorFactory` object.
    """
    return cursor.CursorFactory(self._connection, self._query,
                                self._state, args, prefetch,
                                timeout) | [
"def",
"cursor",
"(",
"self",
",",
"*",
"args",
",",
"prefetch",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
"->",
"cursor",
".",
"CursorFactory",
":",
"return",
"cursor",
".",
"CursorFactory",
"(",
"self",
".",
"_connection",
",",
"self",
".",
"_query",
",",
"self",
".",
"_state",
",",
"args",
",",
"prefetch",
",",
"timeout",
")"
] | Return a *cursor factory* for the prepared statement.
:param args: Query arguments.
:param int prefetch: The number of rows the *cursor iterator*
will prefetch (defaults to ``50``.)
:param float timeout: Optional timeout in seconds.
:return: A :class:`~cursor.CursorFactory` object. | [
"Return",
"a",
"*",
"cursor",
"factory",
"*",
"for",
"the",
"prepared",
"statement",
"."
] | python | train |
wonambi-python/wonambi | wonambi/attr/anat.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/anat.py#L295-L318 | def read_label(self, hemi, parc_type='aparc'):
"""Read the labels (annotations) for each hemisphere.
Parameters
----------
hemi : str
'lh' or 'rh'
parc_type : str
'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40'
'aparc.DKTatlas40' is only for recent freesurfer versions
Returns
-------
numpy.ndarray
value at each vertex, indicating the label
numpy.ndarray
RGB + alpha colors for each label
list of str
names of the labels
"""
parc_file = self.dir / 'label' / (hemi + '.' + parc_type + '.annot')
vert_val, region_color, region_name = read_annot(parc_file)
region_name = [x.decode('utf-8') for x in region_name]
return vert_val, region_color, region_name | [
"def",
"read_label",
"(",
"self",
",",
"hemi",
",",
"parc_type",
"=",
"'aparc'",
")",
":",
"parc_file",
"=",
"self",
".",
"dir",
"/",
"'label'",
"/",
"(",
"hemi",
"+",
"'.'",
"+",
"parc_type",
"+",
"'.annot'",
")",
"vert_val",
",",
"region_color",
",",
"region_name",
"=",
"read_annot",
"(",
"parc_file",
")",
"region_name",
"=",
"[",
"x",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"x",
"in",
"region_name",
"]",
"return",
"vert_val",
",",
"region_color",
",",
"region_name"
] | Read the labels (annotations) for each hemisphere.
Parameters
----------
hemi : str
'lh' or 'rh'
parc_type : str
'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40'
'aparc.DKTatlas40' is only for recent freesurfer versions
Returns
-------
numpy.ndarray
value at each vertex, indicating the label
numpy.ndarray
RGB + alpha colors for each label
list of str
names of the labels | [
"Read",
"the",
"labels",
"(",
"annotations",
")",
"for",
"each",
"hemisphere",
"."
] | python | train |
CenturyLinkCloud/clc-python-sdk | src/clc/APIv2/time_utils.py | https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv2/time_utils.py#L16-L24 | def SecondsToZuluTS(secs=None):
"""Returns Zulu TS from unix time seconds.
If secs is not provided will convert the current time.
"""
if not secs: secs = int(time.time())
return(datetime.utcfromtimestamp(secs).strftime("%Y-%m-%dT%H:%M:%SZ")) | [
"def",
"SecondsToZuluTS",
"(",
"secs",
"=",
"None",
")",
":",
"if",
"not",
"secs",
":",
"secs",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"return",
"(",
"datetime",
".",
"utcfromtimestamp",
"(",
"secs",
")",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
")"
] | Returns Zulu TS from unix time seconds.
If secs is not provided will convert the current time. | [
"Returns",
"Zulu",
"TS",
"from",
"unix",
"time",
"seconds",
"."
] | python | train |
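A small standalone sketch of the conversion `SecondsToZuluTS` performs, using a known epoch value so the expected output is easy to check.

```python
import time
from datetime import datetime

def seconds_to_zulu(secs=None):
    # Same idea as SecondsToZuluTS above: UTC ISO-8601 with a trailing 'Z'.
    if not secs:
        secs = int(time.time())
    return datetime.utcfromtimestamp(secs).strftime("%Y-%m-%dT%H:%M:%SZ")

print(seconds_to_zulu(0))            # 1970-01-01T00:00:00Z
print(seconds_to_zulu(1500000000))   # 2017-07-14T02:40:00Z
```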
pecan/pecan | pecan/secure.py | https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/secure.py#L205-L217 | def handle_security(controller, im_self=None):
""" Checks the security of a controller. """
if controller._pecan.get('secured', False):
check_permissions = controller._pecan['check_permissions']
if isinstance(check_permissions, six.string_types):
check_permissions = getattr(
im_self or six.get_method_self(controller),
check_permissions
)
if not check_permissions():
raise exc.HTTPUnauthorized | [
"def",
"handle_security",
"(",
"controller",
",",
"im_self",
"=",
"None",
")",
":",
"if",
"controller",
".",
"_pecan",
".",
"get",
"(",
"'secured'",
",",
"False",
")",
":",
"check_permissions",
"=",
"controller",
".",
"_pecan",
"[",
"'check_permissions'",
"]",
"if",
"isinstance",
"(",
"check_permissions",
",",
"six",
".",
"string_types",
")",
":",
"check_permissions",
"=",
"getattr",
"(",
"im_self",
"or",
"six",
".",
"get_method_self",
"(",
"controller",
")",
",",
"check_permissions",
")",
"if",
"not",
"check_permissions",
"(",
")",
":",
"raise",
"exc",
".",
"HTTPUnauthorized"
] | Checks the security of a controller. | [
"Checks",
"the",
"security",
"of",
"a",
"controller",
"."
] | python | train |
fedora-python/pyp2rpm | pyp2rpm/archive.py | https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/archive.py#L183-L188 | def extract_all(self, directory=".", members=None):
"""Extract all member from the archive to the specified working
directory.
"""
if self.handle:
self.handle.extractall(path=directory, members=members) | [
"def",
"extract_all",
"(",
"self",
",",
"directory",
"=",
"\".\"",
",",
"members",
"=",
"None",
")",
":",
"if",
"self",
".",
"handle",
":",
"self",
".",
"handle",
".",
"extractall",
"(",
"path",
"=",
"directory",
",",
"members",
"=",
"members",
")"
] | Extract all member from the archive to the specified working
directory. | [
"Extract",
"all",
"member",
"from",
"the",
"archive",
"to",
"the",
"specified",
"working",
"directory",
"."
] | python | train |
StackStorm/pybind | pybind/slxos/v17r_2_00/cluster/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/cluster/__init__.py#L317-L338 | def _set_designated_forwarder_hold_time(self, v, load=False):
"""
Setter method for designated_forwarder_hold_time, mapped from YANG variable /cluster/designated_forwarder_hold_time (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_designated_forwarder_hold_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_designated_forwarder_hold_time() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..60']}), is_leaf=True, yang_name="designated-forwarder-hold-time", rest_name="designated-forwarder-hold-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)', u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='uint16', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """designated_forwarder_hold_time must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..60']}), is_leaf=True, yang_name="designated-forwarder-hold-time", rest_name="designated-forwarder-hold-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)', u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='uint16', is_config=True)""",
})
self.__designated_forwarder_hold_time = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_designated_forwarder_hold_time",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"int",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"'0..65535'",
"]",
"}",
",",
"int_size",
"=",
"16",
")",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"u'1..60'",
"]",
"}",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"designated-forwarder-hold-time\"",
",",
"rest_name",
"=",
"\"designated-forwarder-hold-time\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)'",
",",
"u'cli-suppress-show-conf-path'",
":",
"None",
",",
"u'cli-suppress-show-match'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-mct'",
",",
"defining_module",
"=",
"'brocade-mct'",
",",
"yang_type",
"=",
"'uint16'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"designated_forwarder_hold_time must be of a type compatible with uint16\"\"\"",
",",
"'defined-type'",
":",
"\"uint16\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..60']}), is_leaf=True, yang_name=\"designated-forwarder-hold-time\", rest_name=\"designated-forwarder-hold-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)', u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='uint16', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__designated_forwarder_hold_time",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for designated_forwarder_hold_time, mapped from YANG variable /cluster/designated_forwarder_hold_time (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_designated_forwarder_hold_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_designated_forwarder_hold_time() directly. | [
"Setter",
"method",
"for",
"designated_forwarder_hold_time",
"mapped",
"from",
"YANG",
"variable",
"/",
"cluster",
"/",
"designated_forwarder_hold_time",
"(",
"uint16",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_designated_forwarder_hold_time",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_designated_forwarder_hold_time",
"()",
"directly",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xviewwidget/xviewprofile.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofile.py#L442-L490 | def fromXml( xprofile ):
"""
Restores the profile information from XML.
:param xprofile | <xml.etree.ElementTree.Element>
:return <XViewProfile>
"""
# determine the proper version
if xprofile.tag != 'profile':
return XViewProfile()
version = int(xprofile.get('version', '1'))
# load the legacy information - just layout data
if version == 1:
prof = XViewProfile()
prof.setXmlElement(xprofile)
return prof
# load latest information
prof = XViewProfile()
prof.setName(xprofile.get('name', ''))
prof.setVersion(float(xprofile.get('profile_version', '1.0')))
ico = xprofile.get('icon')
if ico is not None:
prof.setIcon(os.path.expandvars(ico))
else:
ico = xprofile.find('icon')
if ico is not None:
prof.setIcon(projexui.generatePixmap(ico.text))
# restore data
xdata = xprofile.find('data')
if ( xdata is not None ):
prof._customData = DataSet.fromXml(xdata)
# load description
xdesc = xprofile.find('desc')
if ( xdesc is not None ):
prof.setDescription(xdesc.text)
# load layout
xlayout = xprofile.find('layout')
if ( xlayout is not None ):
prof.setXmlElement(xlayout)
return prof | [
"def",
"fromXml",
"(",
"xprofile",
")",
":",
"# determine the proper version\r",
"if",
"xprofile",
".",
"tag",
"!=",
"'profile'",
":",
"return",
"XViewProfile",
"(",
")",
"version",
"=",
"int",
"(",
"xprofile",
".",
"get",
"(",
"'version'",
",",
"'1'",
")",
")",
"# load the legacy information - just layout data\r",
"if",
"version",
"==",
"1",
":",
"prof",
"=",
"XViewProfile",
"(",
")",
"prof",
".",
"setXmlElement",
"(",
"xprofile",
")",
"return",
"prof",
"# load latest information\r",
"prof",
"=",
"XViewProfile",
"(",
")",
"prof",
".",
"setName",
"(",
"xprofile",
".",
"get",
"(",
"'name'",
",",
"''",
")",
")",
"prof",
".",
"setVersion",
"(",
"float",
"(",
"xprofile",
".",
"get",
"(",
"'profile_version'",
",",
"'1.0'",
")",
")",
")",
"ico",
"=",
"xprofile",
".",
"get",
"(",
"'icon'",
")",
"if",
"ico",
"is",
"not",
"None",
":",
"prof",
".",
"setIcon",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"ico",
")",
")",
"else",
":",
"ico",
"=",
"xprofile",
".",
"find",
"(",
"'icon'",
")",
"if",
"ico",
"is",
"not",
"None",
":",
"prof",
".",
"setIcon",
"(",
"projexui",
".",
"generatePixmap",
"(",
"ico",
".",
"text",
")",
")",
"# restore data\r",
"xdata",
"=",
"xprofile",
".",
"find",
"(",
"'data'",
")",
"if",
"(",
"xdata",
"is",
"not",
"None",
")",
":",
"prof",
".",
"_customData",
"=",
"DataSet",
".",
"fromXml",
"(",
"xdata",
")",
"# load description\r",
"xdesc",
"=",
"xprofile",
".",
"find",
"(",
"'desc'",
")",
"if",
"(",
"xdesc",
"is",
"not",
"None",
")",
":",
"prof",
".",
"setDescription",
"(",
"xdesc",
".",
"text",
")",
"# load layout\r",
"xlayout",
"=",
"xprofile",
".",
"find",
"(",
"'layout'",
")",
"if",
"(",
"xlayout",
"is",
"not",
"None",
")",
":",
"prof",
".",
"setXmlElement",
"(",
"xlayout",
")",
"return",
"prof"
] | Restores the profile information from XML.
:param xprofile | <xml.etree.ElementTree.Element>
:return <XViewProfile> | [
"Restores",
"the",
"profile",
"information",
"from",
"XML",
".",
":",
"param",
"xprofile",
"|",
"<xml",
".",
"etree",
".",
"ElementTree",
".",
"Element",
">",
":",
"return",
"<XViewProfile",
">"
] | python | train |
bcbio/bcbio-nextgen | bcbio/variation/vcfanno.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L150-L154 | def annotate_gemini(data, retriever=None):
"""Annotate with population calls if have data installed.
"""
r = dd.get_variation_resources(data)
return all([r.get(k) and objectstore.file_exists_or_remote(r[k]) for k in ["exac", "gnomad_exome"]]) | [
"def",
"annotate_gemini",
"(",
"data",
",",
"retriever",
"=",
"None",
")",
":",
"r",
"=",
"dd",
".",
"get_variation_resources",
"(",
"data",
")",
"return",
"all",
"(",
"[",
"r",
".",
"get",
"(",
"k",
")",
"and",
"objectstore",
".",
"file_exists_or_remote",
"(",
"r",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"[",
"\"exac\"",
",",
"\"gnomad_exome\"",
"]",
"]",
")"
] | Annotate with population calls if have data installed. | [
"Annotate",
"with",
"population",
"calls",
"if",
"have",
"data",
"installed",
"."
] | python | train |
jorbas/GADDAG | gaddag/gaddag.py | https://github.com/jorbas/GADDAG/blob/a0ede3def715c586e1f273d96e9fc0d537cd9561/gaddag/gaddag.py#L186-L203 | def add_word(self, word):
"""
Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG.
"""
word = word.lower()
if not (word.isascii() and word.isalpha()):
raise ValueError("Invalid character in word '{}'".format(word))
word = word.encode(encoding="ascii")
result = cgaddag.gdg_add_word(self.gdg, word)
if result == 1:
raise ValueError("Invalid character in word '{}'".format(word))
elif result == 2:
raise MemoryError("Out of memory, GADDAG is in an undefined state") | [
"def",
"add_word",
"(",
"self",
",",
"word",
")",
":",
"word",
"=",
"word",
".",
"lower",
"(",
")",
"if",
"not",
"(",
"word",
".",
"isascii",
"(",
")",
"and",
"word",
".",
"isalpha",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid character in word '{}'\"",
".",
"format",
"(",
"word",
")",
")",
"word",
"=",
"word",
".",
"encode",
"(",
"encoding",
"=",
"\"ascii\"",
")",
"result",
"=",
"cgaddag",
".",
"gdg_add_word",
"(",
"self",
".",
"gdg",
",",
"word",
")",
"if",
"result",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid character in word '{}'\"",
".",
"format",
"(",
"word",
")",
")",
"elif",
"result",
"==",
"2",
":",
"raise",
"MemoryError",
"(",
"\"Out of memory, GADDAG is in an undefined state\"",
")"
] | Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG. | [
"Add",
"a",
"word",
"to",
"the",
"GADDAG",
"."
] | python | train |
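An illustrative sketch of populating a GADDAG with the `add_word` method above. It assumes the package exposes a `GADDAG` class as `gaddag.GADDAG`, and the word list is made up.

```python
from gaddag import GADDAG  # assumed package-level class for the repo above

gdg = GADDAG()
for word in ["python", "typhon", "phony"]:  # hypothetical word list
    gdg.add_word(word)

# Non-ASCII or non-alphabetic input is rejected, as in the method above.
try:
    gdg.add_word("café")
except ValueError as err:
    print(err)
```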
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L611-L622 | def trace_job(self, jobId):
""" Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information
"""
header = self.__check_authentication()
status_url = self.address + "/jobs/" + jobId + "/trace"
status_resp = requests.get(status_url, headers=header)
if status_resp.status_code != 200:
raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error")))
return status_resp.json() | [
"def",
"trace_job",
"(",
"self",
",",
"jobId",
")",
":",
"header",
"=",
"self",
".",
"__check_authentication",
"(",
")",
"status_url",
"=",
"self",
".",
"address",
"+",
"\"/jobs/\"",
"+",
"jobId",
"+",
"\"/trace\"",
"status_resp",
"=",
"requests",
".",
"get",
"(",
"status_url",
",",
"headers",
"=",
"header",
")",
"if",
"status_resp",
".",
"status_code",
"!=",
"200",
":",
"raise",
"ValueError",
"(",
"\"Code {}. {}\"",
".",
"format",
"(",
"status_resp",
".",
"status_code",
",",
"status_resp",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"error\"",
")",
")",
")",
"return",
"status_resp",
".",
"json",
"(",
")"
] | Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information | [
"Get",
"information",
"about",
"the",
"specified",
"remote",
"job"
] | python | train |
martinpitt/python-dbusmock | dbusmock/templates/logind.py | https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/logind.py#L117-L145 | def AddSeat(self, seat):
    '''Convenience method to add a seat.
    Return the object path of the new seat.
    '''
    seat_path = '/org/freedesktop/login1/seat/' + seat
    if seat_path in mockobject.objects:
        raise dbus.exceptions.DBusException('Seat %s already exists' % seat,
                                            name=MOCK_IFACE + '.SeatExists')
    self.AddObject(seat_path,
                   'org.freedesktop.login1.Seat',
                   {
                       'Sessions': dbus.Array([], signature='(so)'),
                       'CanGraphical': False,
                       'CanMultiSession': True,
                       'CanTTY': False,
                       'IdleHint': False,
                       'ActiveSession': ('', dbus.ObjectPath('/')),
                       'Id': seat,
                       'IdleSinceHint': dbus.UInt64(0),
                       'IdleSinceHintMonotonic': dbus.UInt64(0),
                   },
                   [
                       ('ActivateSession', 's', '', ''),
                       ('Terminate', '', '', '')
                   ])
    return seat_path | [
"def",
"AddSeat",
"(",
"self",
",",
"seat",
")",
":",
"seat_path",
"=",
"'/org/freedesktop/login1/seat/'",
"+",
"seat",
"if",
"seat_path",
"in",
"mockobject",
".",
"objects",
":",
"raise",
"dbus",
".",
"exceptions",
".",
"DBusException",
"(",
"'Seat %s already exists'",
"%",
"seat",
",",
"name",
"=",
"MOCK_IFACE",
"+",
"'.SeatExists'",
")",
"self",
".",
"AddObject",
"(",
"seat_path",
",",
"'org.freedesktop.login1.Seat'",
",",
"{",
"'Sessions'",
":",
"dbus",
".",
"Array",
"(",
"[",
"]",
",",
"signature",
"=",
"'(so)'",
")",
",",
"'CanGraphical'",
":",
"False",
",",
"'CanMultiSession'",
":",
"True",
",",
"'CanTTY'",
":",
"False",
",",
"'IdleHint'",
":",
"False",
",",
"'ActiveSession'",
":",
"(",
"''",
",",
"dbus",
".",
"ObjectPath",
"(",
"'/'",
")",
")",
",",
"'Id'",
":",
"seat",
",",
"'IdleSinceHint'",
":",
"dbus",
".",
"UInt64",
"(",
"0",
")",
",",
"'IdleSinceHintMonotonic'",
":",
"dbus",
".",
"UInt64",
"(",
"0",
")",
",",
"}",
",",
"[",
"(",
"'ActivateSession'",
",",
"'s'",
",",
"''",
",",
"''",
")",
",",
"(",
"'Terminate'",
",",
"''",
",",
"''",
",",
"''",
")",
"]",
")",
"return",
"seat_path"
] | Convenience method to add a seat.
Return the object path of the new seat. | [
"Convenience",
"method",
"to",
"add",
"a",
"seat",
"."
] | python | train |
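A hedged sketch of exercising AddSeat through python-dbusmock's usual test-case pattern; the seat name 'seat0' and the surrounding harness are assumptions, not part of the entry above.

    import subprocess
    import dbusmock

    class TestLogind(dbusmock.DBusTestCase):
        @classmethod
        def setUpClass(cls):
            cls.start_system_bus()

        def test_add_seat(self):
            p_mock, obj_logind = self.spawn_server_template(
                'logind', {}, stdout=subprocess.PIPE)
            path = obj_logind.AddSeat('seat0')   # returns the new seat's object path
            self.assertEqual(str(path), '/org/freedesktop/login1/seat/seat0')
            p_mock.terminate()
            p_mock.wait()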
dbarsam/python-vsgen | vsgen/solution.py | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/solution.py#L37-L47 | def _import(self, datadict):
"""
Internal method to import instance variables data from a dictionary
:param dict datadict: The dictionary containing variables values.
"""
self.GUID = datadict.get("GUID", uuid.uuid1())
self.FileName = datadict.get("FileName", "")
self.Name = datadict.get("Name", "")
self.Projects = datadict.get("Projects", [])
self.VSVersion = datadict.get("VSVersion", None) | [
"def",
"_import",
"(",
"self",
",",
"datadict",
")",
":",
"self",
".",
"GUID",
"=",
"datadict",
".",
"get",
"(",
"\"GUID\"",
",",
"uuid",
".",
"uuid1",
"(",
")",
")",
"self",
".",
"FileName",
"=",
"datadict",
".",
"get",
"(",
"\"FileName\"",
",",
"\"\"",
")",
"self",
".",
"Name",
"=",
"datadict",
".",
"get",
"(",
"\"Name\"",
",",
"\"\"",
")",
"self",
".",
"Projects",
"=",
"datadict",
".",
"get",
"(",
"\"Projects\"",
",",
"[",
"]",
")",
"self",
".",
"VSVersion",
"=",
"datadict",
".",
"get",
"(",
"\"VSVersion\"",
",",
"None",
")"
] | Internal method to import instance variables data from a dictionary
:param dict datadict: The dictionary containing variables values. | [
"Internal",
"method",
"to",
"import",
"instance",
"variables",
"data",
"from",
"a",
"dictionary"
] | python | train |
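For reference, a sketch of the dictionary shape _import consumes; the values are made-up examples and `solution` stands for whatever VSGSolution-like instance the method is bound to.

    datadict = {
        "Name": "ExampleSolution",     # display name
        "FileName": "example.sln",     # target solution file
        "Projects": [],                # list of project objects
        "VSVersion": 14.0,             # Visual Studio version, or None
    }
    # solution._import(datadict)      # 'solution' is a hypothetical instance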
opendatateam/udata | udata/commands/images.py | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/images.py#L30-L72 | def render():
'''Force (re)rendering stored images'''
from udata.core.organization.models import Organization
from udata.core.post.models import Post
from udata.core.reuse.models import Reuse
from udata.core.user.models import User
header('Rendering images')
count = Counter()
total = Counter()
organizations = Organization.objects(logo__exists=True)
total['orgs'] = organizations.count()
log.info('Processing {0} organizations logos'.format(total['orgs']))
for org in organizations:
count['orgs'] += render_or_skip(org, 'logo')
users = User.objects(avatar__exists=True)
total['users'] = users.count()
log.info('Processing {0} user avatars'.format(total['users']))
for user in users:
count['users'] += render_or_skip(user, 'avatar')
posts = Post.objects(image__exists=True)
total['posts'] = posts.count()
log.info('Processing {0} post images'.format(total['posts']))
for post in posts:
count['posts'] += render_or_skip(post, 'image')
reuses = Reuse.objects(image__exists=True)
total['reuses'] = reuses.count()
log.info('Processing {0} reuse images'.format(total['reuses']))
for reuse in reuses:
count['reuses'] += render_or_skip(reuse, 'image')
log.info('''Summary:
Organization logos: {count[orgs]}/{total[orgs]}
User avatars: {count[users]}/{total[users]}
Post images: {count[posts]}/{total[posts]}
Reuse images: {count[reuses]}/{total[reuses]}
'''.format(count=count, total=total))
success('Images rendered') | [
"def",
"render",
"(",
")",
":",
"from",
"udata",
".",
"core",
".",
"organization",
".",
"models",
"import",
"Organization",
"from",
"udata",
".",
"core",
".",
"post",
".",
"models",
"import",
"Post",
"from",
"udata",
".",
"core",
".",
"reuse",
".",
"models",
"import",
"Reuse",
"from",
"udata",
".",
"core",
".",
"user",
".",
"models",
"import",
"User",
"header",
"(",
"'Rendering images'",
")",
"count",
"=",
"Counter",
"(",
")",
"total",
"=",
"Counter",
"(",
")",
"organizations",
"=",
"Organization",
".",
"objects",
"(",
"logo__exists",
"=",
"True",
")",
"total",
"[",
"'orgs'",
"]",
"=",
"organizations",
".",
"count",
"(",
")",
"log",
".",
"info",
"(",
"'Processing {0} organizations logos'",
".",
"format",
"(",
"total",
"[",
"'orgs'",
"]",
")",
")",
"for",
"org",
"in",
"organizations",
":",
"count",
"[",
"'orgs'",
"]",
"+=",
"render_or_skip",
"(",
"org",
",",
"'logo'",
")",
"users",
"=",
"User",
".",
"objects",
"(",
"avatar__exists",
"=",
"True",
")",
"total",
"[",
"'users'",
"]",
"=",
"users",
".",
"count",
"(",
")",
"log",
".",
"info",
"(",
"'Processing {0} user avatars'",
".",
"format",
"(",
"total",
"[",
"'users'",
"]",
")",
")",
"for",
"user",
"in",
"users",
":",
"count",
"[",
"'users'",
"]",
"+=",
"render_or_skip",
"(",
"user",
",",
"'avatar'",
")",
"posts",
"=",
"Post",
".",
"objects",
"(",
"image__exists",
"=",
"True",
")",
"total",
"[",
"'posts'",
"]",
"=",
"posts",
".",
"count",
"(",
")",
"log",
".",
"info",
"(",
"'Processing {0} post images'",
".",
"format",
"(",
"total",
"[",
"'posts'",
"]",
")",
")",
"for",
"post",
"in",
"posts",
":",
"count",
"[",
"'posts'",
"]",
"+=",
"render_or_skip",
"(",
"post",
",",
"'image'",
")",
"reuses",
"=",
"Reuse",
".",
"objects",
"(",
"image__exists",
"=",
"True",
")",
"total",
"[",
"'reuses'",
"]",
"=",
"reuses",
".",
"count",
"(",
")",
"log",
".",
"info",
"(",
"'Processing {0} reuse images'",
".",
"format",
"(",
"total",
"[",
"'reuses'",
"]",
")",
")",
"for",
"reuse",
"in",
"reuses",
":",
"count",
"[",
"'reuses'",
"]",
"+=",
"render_or_skip",
"(",
"reuse",
",",
"'image'",
")",
"log",
".",
"info",
"(",
"'''Summary:\n Organization logos: {count[orgs]}/{total[orgs]}\n User avatars: {count[users]}/{total[users]}\n Post images: {count[posts]}/{total[posts]}\n Reuse images: {count[reuses]}/{total[reuses]}\n '''",
".",
"format",
"(",
"count",
"=",
"count",
",",
"total",
"=",
"total",
")",
")",
"success",
"(",
"'Images rendered'",
")"
] | Force (re)rendering stored images | [
"Force",
"(",
"re",
")",
"rendering",
"stored",
"images"
] | python | train |
AirtestProject/Poco | poco/pocofw.py | https://github.com/AirtestProject/Poco/blob/2c559a586adf3fd11ee81cabc446d4d3f6f2d119/poco/pocofw.py#L162-L199 | def freeze(this):
"""
Snapshot current **hierarchy** and cache it into a new poco instance. This new poco instance is a copy from
current poco instance (``self``). The hierarchy of the new poco instance is fixed and immutable. It will be
super fast when calling ``dump`` function from frozen poco. See the example below.
Examples:
::
poco = Poco(...)
frozen_poco = poco.freeze()
hierarchy_dict = frozen_poco.agent.hierarchy.dump() # will return the already cached hierarchy data
Returns:
:py:class:`Poco <poco.pocofw.Poco>`: new poco instance copy from current poco instance (``self``)
"""
class FrozenPoco(Poco):
def __init__(self, **kwargs):
hierarchy_dict = this.agent.hierarchy.dump()
hierarchy = create_immutable_hierarchy(hierarchy_dict)
agent_ = PocoAgent(hierarchy, this.agent.input, this.agent.screen)
kwargs['action_interval'] = 0.01
kwargs['pre_action_wait_for_appearance'] = 0
super(FrozenPoco, self).__init__(agent_, **kwargs)
self.this = this
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __getattr__(self, item):
return getattr(self.this, item)
return FrozenPoco() | [
"def",
"freeze",
"(",
"this",
")",
":",
"class",
"FrozenPoco",
"(",
"Poco",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"hierarchy_dict",
"=",
"this",
".",
"agent",
".",
"hierarchy",
".",
"dump",
"(",
")",
"hierarchy",
"=",
"create_immutable_hierarchy",
"(",
"hierarchy_dict",
")",
"agent_",
"=",
"PocoAgent",
"(",
"hierarchy",
",",
"this",
".",
"agent",
".",
"input",
",",
"this",
".",
"agent",
".",
"screen",
")",
"kwargs",
"[",
"'action_interval'",
"]",
"=",
"0.01",
"kwargs",
"[",
"'pre_action_wait_for_appearance'",
"]",
"=",
"0",
"super",
"(",
"FrozenPoco",
",",
"self",
")",
".",
"__init__",
"(",
"agent_",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"this",
"=",
"this",
"def",
"__enter__",
"(",
"self",
")",
":",
"return",
"self",
"def",
"__exit__",
"(",
"self",
",",
"exc_type",
",",
"exc_val",
",",
"exc_tb",
")",
":",
"pass",
"def",
"__getattr__",
"(",
"self",
",",
"item",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"this",
",",
"item",
")",
"return",
"FrozenPoco",
"(",
")"
] | Snapshot current **hierarchy** and cache it into a new poco instance. This new poco instance is a copy from
current poco instance (``self``). The hierarchy of the new poco instance is fixed and immutable. It will be
super fast when calling ``dump`` function from frozen poco. See the example below.
Examples:
::
poco = Poco(...)
frozen_poco = poco.freeze()
hierarchy_dict = frozen_poco.agent.hierarchy.dump() # will return the already cached hierarchy data
Returns:
:py:class:`Poco <poco.pocofw.Poco>`: new poco instance copy from current poco instance (``self``) | [
"Snapshot",
"current",
"**",
"hierarchy",
"**",
"and",
"cache",
"it",
"into",
"a",
"new",
"poco",
"instance",
".",
"This",
"new",
"poco",
"instance",
"is",
"a",
"copy",
"from",
"current",
"poco",
"instance",
"(",
"self",
")",
".",
"The",
"hierarchy",
"of",
"the",
"new",
"poco",
"instance",
"is",
"fixed",
"and",
"immutable",
".",
"It",
"will",
"be",
"super",
"fast",
"when",
"calling",
"dump",
"function",
"from",
"frozen",
"poco",
".",
"See",
"the",
"example",
"below",
".",
"Examples",
":",
"::"
] | python | train |
pypa/setuptools | setuptools/__init__.py | https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/__init__.py#L203-L212 | def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results) | [
"def",
"_find_all_simple",
"(",
"path",
")",
":",
"results",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"file",
")",
"for",
"base",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
",",
"followlinks",
"=",
"True",
")",
"for",
"file",
"in",
"files",
")",
"return",
"filter",
"(",
"os",
".",
"path",
".",
"isfile",
",",
"results",
")"
] | Find all files under 'path' | [
"Find",
"all",
"files",
"under",
"path"
] | python | train |
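A tiny sketch of the helper's output; the directory argument is an arbitrary example. It lazily yields every regular file under the tree, following symlinks.

    for path in _find_all_simple('.'):   # '.' is a placeholder directory
        print(path)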
chemlab/chemlab | chemlab/graphics/transformations.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/graphics/transformations.py#L1039-L1087 | def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
    The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd) | [
"def",
"superimposition_matrix",
"(",
"v0",
",",
"v1",
",",
"scale",
"=",
"False",
",",
"usesvd",
"=",
"True",
")",
":",
"v0",
"=",
"numpy",
".",
"array",
"(",
"v0",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"copy",
"=",
"False",
")",
"[",
":",
"3",
"]",
"v1",
"=",
"numpy",
".",
"array",
"(",
"v1",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"copy",
"=",
"False",
")",
"[",
":",
"3",
"]",
"return",
"affine_matrix_from_points",
"(",
"v0",
",",
"v1",
",",
"shear",
"=",
"False",
",",
"scale",
"=",
"scale",
",",
"usesvd",
"=",
"usesvd",
")"
] | Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
    The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True | [
"Return",
"matrix",
"to",
"transform",
"given",
"3D",
"point",
"set",
"into",
"second",
"point",
"set",
"."
] | python | train |
common-workflow-language/cwltool | cwltool/executors.py | https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/executors.py#L252-L305 | def run_job(self,
job, # type: Union[JobBase, WorkflowJob, None]
runtime_context # type: RuntimeContext
): # type: (...) -> None
""" Execute a single Job in a seperate thread. """
if job is not None:
with self.pending_jobs_lock:
self.pending_jobs.append(job)
with self.pending_jobs_lock:
n = 0
while (n+1) <= len(self.pending_jobs):
job = self.pending_jobs[n]
if isinstance(job, JobBase):
if ((job.builder.resources["ram"])
> self.max_ram
or (job.builder.resources["cores"])
> self.max_cores):
_logger.error(
'Job "%s" cannot be run, requests more resources (%s) '
'than available on this host (max ram %d, max cores %d',
job.name, job.builder.resources,
self.allocated_ram,
self.allocated_cores,
self.max_ram,
self.max_cores)
self.pending_jobs.remove(job)
return
if ((self.allocated_ram + job.builder.resources["ram"])
> self.max_ram
or (self.allocated_cores + job.builder.resources["cores"])
> self.max_cores):
_logger.debug(
'Job "%s" cannot run yet, resources (%s) are not '
'available (already allocated ram is %d, allocated cores is %d, '
'max ram %d, max cores %d',
job.name, job.builder.resources,
self.allocated_ram,
self.allocated_cores,
self.max_ram,
self.max_cores)
n += 1
continue
thread = threading.Thread(target=self._runner, args=(job, runtime_context))
thread.daemon = True
self.threads.add(thread)
if isinstance(job, JobBase):
self.allocated_ram += job.builder.resources["ram"]
self.allocated_cores += job.builder.resources["cores"]
thread.start()
self.pending_jobs.remove(job) | [
"def",
"run_job",
"(",
"self",
",",
"job",
",",
"# type: Union[JobBase, WorkflowJob, None]",
"runtime_context",
"# type: RuntimeContext",
")",
":",
"# type: (...) -> None",
"if",
"job",
"is",
"not",
"None",
":",
"with",
"self",
".",
"pending_jobs_lock",
":",
"self",
".",
"pending_jobs",
".",
"append",
"(",
"job",
")",
"with",
"self",
".",
"pending_jobs_lock",
":",
"n",
"=",
"0",
"while",
"(",
"n",
"+",
"1",
")",
"<=",
"len",
"(",
"self",
".",
"pending_jobs",
")",
":",
"job",
"=",
"self",
".",
"pending_jobs",
"[",
"n",
"]",
"if",
"isinstance",
"(",
"job",
",",
"JobBase",
")",
":",
"if",
"(",
"(",
"job",
".",
"builder",
".",
"resources",
"[",
"\"ram\"",
"]",
")",
">",
"self",
".",
"max_ram",
"or",
"(",
"job",
".",
"builder",
".",
"resources",
"[",
"\"cores\"",
"]",
")",
">",
"self",
".",
"max_cores",
")",
":",
"_logger",
".",
"error",
"(",
"'Job \"%s\" cannot be run, requests more resources (%s) '",
"'than available on this host (max ram %d, max cores %d'",
",",
"job",
".",
"name",
",",
"job",
".",
"builder",
".",
"resources",
",",
"self",
".",
"allocated_ram",
",",
"self",
".",
"allocated_cores",
",",
"self",
".",
"max_ram",
",",
"self",
".",
"max_cores",
")",
"self",
".",
"pending_jobs",
".",
"remove",
"(",
"job",
")",
"return",
"if",
"(",
"(",
"self",
".",
"allocated_ram",
"+",
"job",
".",
"builder",
".",
"resources",
"[",
"\"ram\"",
"]",
")",
">",
"self",
".",
"max_ram",
"or",
"(",
"self",
".",
"allocated_cores",
"+",
"job",
".",
"builder",
".",
"resources",
"[",
"\"cores\"",
"]",
")",
">",
"self",
".",
"max_cores",
")",
":",
"_logger",
".",
"debug",
"(",
"'Job \"%s\" cannot run yet, resources (%s) are not '",
"'available (already allocated ram is %d, allocated cores is %d, '",
"'max ram %d, max cores %d'",
",",
"job",
".",
"name",
",",
"job",
".",
"builder",
".",
"resources",
",",
"self",
".",
"allocated_ram",
",",
"self",
".",
"allocated_cores",
",",
"self",
".",
"max_ram",
",",
"self",
".",
"max_cores",
")",
"n",
"+=",
"1",
"continue",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_runner",
",",
"args",
"=",
"(",
"job",
",",
"runtime_context",
")",
")",
"thread",
".",
"daemon",
"=",
"True",
"self",
".",
"threads",
".",
"add",
"(",
"thread",
")",
"if",
"isinstance",
"(",
"job",
",",
"JobBase",
")",
":",
"self",
".",
"allocated_ram",
"+=",
"job",
".",
"builder",
".",
"resources",
"[",
"\"ram\"",
"]",
"self",
".",
"allocated_cores",
"+=",
"job",
".",
"builder",
".",
"resources",
"[",
"\"cores\"",
"]",
"thread",
".",
"start",
"(",
")",
"self",
".",
"pending_jobs",
".",
"remove",
"(",
"job",
")"
] |  Execute a single Job in a separate thread. | [
"Execute",
"a",
"single",
"Job",
"in",
"a",
"seperate",
"thread",
"."
] | python | train |
TeamHG-Memex/eli5 | eli5/transform.py | https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/transform.py#L7-L34 | def transform_feature_names(transformer, in_names=None):
"""Get feature names for transformer output as a function of input names.
Used by :func:`explain_weights` when applied to a scikit-learn Pipeline,
this ``singledispatch`` should be registered with custom name
transformations for each class of transformer.
If there is no ``singledispatch`` handler registered for a transformer
class, ``transformer.get_feature_names()`` method is called; if there is
no such method then feature names are not supported and
this function raises an exception.
Parameters
----------
transformer : scikit-learn-compatible transformer
in_names : list of str, optional
Names for features input to transformer.transform().
If not provided, the implementation may generate default feature names
if the number of input features is known.
Returns
-------
feature_names : list of str
"""
if hasattr(transformer, 'get_feature_names'):
return transformer.get_feature_names()
raise NotImplementedError('transform_feature_names not available for '
'{}'.format(transformer)) | [
"def",
"transform_feature_names",
"(",
"transformer",
",",
"in_names",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"transformer",
",",
"'get_feature_names'",
")",
":",
"return",
"transformer",
".",
"get_feature_names",
"(",
")",
"raise",
"NotImplementedError",
"(",
"'transform_feature_names not available for '",
"'{}'",
".",
"format",
"(",
"transformer",
")",
")"
] | Get feature names for transformer output as a function of input names.
Used by :func:`explain_weights` when applied to a scikit-learn Pipeline,
this ``singledispatch`` should be registered with custom name
transformations for each class of transformer.
If there is no ``singledispatch`` handler registered for a transformer
class, ``transformer.get_feature_names()`` method is called; if there is
no such method then feature names are not supported and
this function raises an exception.
Parameters
----------
transformer : scikit-learn-compatible transformer
in_names : list of str, optional
Names for features input to transformer.transform().
If not provided, the implementation may generate default feature names
if the number of input features is known.
Returns
-------
feature_names : list of str | [
"Get",
"feature",
"names",
"for",
"transformer",
"output",
"as",
"a",
"function",
"of",
"input",
"names",
"."
] | python | train |
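A small sketch of the fallback branch: with no singledispatch handler registered, the function defers to the transformer's get_feature_names(). The CountVectorizer and toy corpus are assumptions (and rely on a scikit-learn release that still provides get_feature_names).

    from sklearn.feature_extraction.text import CountVectorizer
    from eli5.transform import transform_feature_names

    vec = CountVectorizer()
    vec.fit(["a quick brown fox"])
    print(transform_feature_names(vec))   # -> ['brown', 'fox', 'quick']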
ejeschke/ginga | ginga/rv/Control.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/Control.py#L939-L945 | def zoom_1_to_1(self):
"""Zoom the view to a 1 to 1 pixel ratio (100 %%).
"""
viewer = self.getfocus_viewer()
if hasattr(viewer, 'scale_to'):
viewer.scale_to(1.0, 1.0)
return True | [
"def",
"zoom_1_to_1",
"(",
"self",
")",
":",
"viewer",
"=",
"self",
".",
"getfocus_viewer",
"(",
")",
"if",
"hasattr",
"(",
"viewer",
",",
"'scale_to'",
")",
":",
"viewer",
".",
"scale_to",
"(",
"1.0",
",",
"1.0",
")",
"return",
"True"
] | Zoom the view to a 1 to 1 pixel ratio (100 %%). | [
"Zoom",
"the",
"view",
"to",
"a",
"1",
"to",
"1",
"pixel",
"ratio",
"(",
"100",
"%%",
")",
"."
] | python | train |
zengbin93/zb | zb/crawlers/xinshipu.py | https://github.com/zengbin93/zb/blob/ccdb384a0b5801b459933220efcb71972c2b89a7/zb/crawlers/xinshipu.py#L86-L100 | def get_all_classify():
"""获取全部菜谱分类"""
url = "https://www.xinshipu.com/%E8%8F%9C%E8%B0%B1%E5%A4%A7%E5%85%A8.html"
response = requests.get(url, headers=get_header())
html = BeautifulSoup(response.text, "lxml")
all_a = html.find("div", {'class': "detail-cate-list clearfix mt20"}).find_all('a')
classify = dict()
for a in all_a:
if a.has_attr('rel') and not a.has_attr('class'):
class_url = urljoin(HOME_URL, a['href'])
classify[a.text] = class_url
return classify | [
"def",
"get_all_classify",
"(",
")",
":",
"url",
"=",
"\"https://www.xinshipu.com/%E8%8F%9C%E8%B0%B1%E5%A4%A7%E5%85%A8.html\"",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"get_header",
"(",
")",
")",
"html",
"=",
"BeautifulSoup",
"(",
"response",
".",
"text",
",",
"\"lxml\"",
")",
"all_a",
"=",
"html",
".",
"find",
"(",
"\"div\"",
",",
"{",
"'class'",
":",
"\"detail-cate-list clearfix mt20\"",
"}",
")",
".",
"find_all",
"(",
"'a'",
")",
"classify",
"=",
"dict",
"(",
")",
"for",
"a",
"in",
"all_a",
":",
"if",
"a",
".",
"has_attr",
"(",
"'rel'",
")",
"and",
"not",
"a",
".",
"has_attr",
"(",
"'class'",
")",
":",
"class_url",
"=",
"urljoin",
"(",
"HOME_URL",
",",
"a",
"[",
"'href'",
"]",
")",
"classify",
"[",
"a",
".",
"text",
"]",
"=",
"class_url",
"return",
"classify"
] | Get all recipe categories | [
"获取全部菜谱分类"
] | python | train |
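A usage sketch; the function scrapes a live xinshipu.com page, so it needs network access and the exact categories returned will vary over time.

    classify = get_all_classify()
    for name, url in list(classify.items())[:5]:   # first few categories only
        print(name, url)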
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/shellapp.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/shellapp.py#L231-L244 | def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
self._run_cmd_line_code()
self._run_module()
        # flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
# Hide variables defined here from %who etc.
self.shell.user_ns_hidden.update(self.shell.user_ns) | [
"def",
"init_code",
"(",
"self",
")",
":",
"self",
".",
"_run_startup_files",
"(",
")",
"self",
".",
"_run_exec_lines",
"(",
")",
"self",
".",
"_run_exec_files",
"(",
")",
"self",
".",
"_run_cmd_line_code",
"(",
")",
"self",
".",
"_run_module",
"(",
")",
"# flush output, so itwon't be attached to the first cell",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"# Hide variables defined here from %who etc.",
"self",
".",
"shell",
".",
"user_ns_hidden",
".",
"update",
"(",
"self",
".",
"shell",
".",
"user_ns",
")"
] | run the pre-flight code, specified via exec_lines | [
"run",
"the",
"pre",
"-",
"flight",
"code",
"specified",
"via",
"exec_lines"
] | python | test |
SHTOOLS/SHTOOLS | pyshtools/shclasses/shwindow.py | https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shwindow.py#L714-L864 | def plot_windows(self, nwin, lmax=None, maxcolumns=3,
tick_interval=[60, 45], minor_tick_interval=None,
xlabel='Longitude', ylabel='Latitude',
axes_labelsize=None, tick_labelsize=None,
title_labelsize=None, grid=False, show=True, title=True,
ax=None, fname=None):
"""
Plot the best-concentrated localization windows.
Usage
-----
x.plot_windows(nwin, [lmax, maxcolumns, tick_interval,
minor_tick_interval, xlabel, ylabel, grid, show,
title, axes_labelsize, tick_labelsize,
title_labelsize, ax, fname])
Parameters
----------
nwin : int
The number of localization windows to plot.
lmax : int, optional, default = self.lwin
The maximum degree to use when plotting the windows, which controls
the number of samples in latitude and longitude.
maxcolumns : int, optional, default = 3
The maximum number of columns to use when plotting multiple
localization windows.
tick_interval : list or tuple, optional, default = [60, 45]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = None
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
grid : bool, optional, default = False
If True, plot grid lines.
show : bool, optional, default = True
If True, plot the image to the screen.
title : bool, optional, default = True
If True, plot a title on top of each subplot providing the taper
number and 1 minus the concentration factor.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
title_labelsize : int, optional, default = None
The font size for the subplot titles.
ax : matplotlib axes object, optional, default = None
An array of matplotlib axes objects where the plots will appear.
fname : str, optional, default = None
If present, save the image to the specified file.
"""
if self.kind == 'cap':
if self.nwinrot is not None and self.nwinrot <= nwin:
nwin = self.nwinrot
ncolumns = min(maxcolumns, nwin)
nrows = _np.ceil(nwin / ncolumns).astype(int)
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0]
* 0.55 * nrows / ncolumns + 0.41)
if ax is None:
fig, axes = _plt.subplots(nrows, ncolumns, figsize=figsize,
sharex='all', sharey='all')
else:
if hasattr(ax, 'flatten') and ax.size < nwin:
raise ValueError('ax.size must be greater or equal to nwin. ' +
'nwin = {:s}'.format(repr(nwin)) +
' and ax.size = {:s}.'.format(repr(ax.size)))
axes = ax
if tick_interval is None:
xticks = []
yticks = []
else:
xticks = _np.linspace(0, 360, num=360//tick_interval[0]+1,
endpoint=True)
yticks = _np.linspace(-90, 90, num=180//tick_interval[1]+1,
endpoint=True)
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams['axes.labelsize']
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams['xtick.labelsize']
if title_labelsize is None:
title_labelsize = _mpl.rcParams['axes.titlesize']
if minor_tick_interval is None:
minor_xticks = []
minor_yticks = []
else:
minor_xticks = _np.linspace(
0, 360, num=360//minor_tick_interval[0]+1, endpoint=True)
minor_yticks = _np.linspace(
-90, 90, num=180//minor_tick_interval[1]+1, endpoint=True)
deg = '$^{\circ}$'
xticklabels = [str(int(y)) + deg for y in xticks]
yticklabels = [str(int(y)) + deg for y in yticks]
if ax is None:
if nrows > 1:
for axtemp in axes[:-1, :].flatten():
for xlabel_i in axtemp.get_xticklabels():
xlabel_i.set_visible(False)
axtemp.set_xlabel('', visible=False)
for axtemp in axes[:, 1:].flatten():
for ylabel_i in axtemp.get_yticklabels():
ylabel_i.set_visible(False)
axtemp.set_ylabel('', visible=False)
elif nwin > 1:
for axtemp in axes[1:].flatten():
for ylabel_i in axtemp.get_yticklabels():
ylabel_i.set_visible(False)
axtemp.set_ylabel('', visible=False)
for itaper in range(min(self.nwin, nwin)):
evalue = self.eigenvalues[itaper]
if min(self.nwin, nwin) == 1 and ax is None:
axtemp = axes
elif hasattr(axes, 'flatten'):
axtemp = axes.flatten()[itaper]
else:
axtemp = axes[itaper]
gridout = _shtools.MakeGridDH(self.to_array(itaper), sampling=2,
lmax=lmax, norm=1, csphase=1)
axtemp.imshow(gridout, origin='upper',
extent=(0., 360., -90., 90.))
axtemp.set(xticks=xticks, yticks=yticks)
axtemp.set_xlabel(xlabel, fontsize=axes_labelsize)
axtemp.set_ylabel(ylabel, fontsize=axes_labelsize)
axtemp.set_xticklabels(xticklabels, fontsize=tick_labelsize)
axtemp.set_yticklabels(yticklabels, fontsize=tick_labelsize)
axtemp.set_xticks(minor_xticks, minor=True)
axtemp.set_yticks(minor_yticks, minor=True)
axtemp.grid(grid, which='major')
if title is True:
axtemp.set_title('#{:d} [loss={:2.2g}]'
.format(itaper, 1-evalue),
fontsize=title_labelsize)
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes | [
"def",
"plot_windows",
"(",
"self",
",",
"nwin",
",",
"lmax",
"=",
"None",
",",
"maxcolumns",
"=",
"3",
",",
"tick_interval",
"=",
"[",
"60",
",",
"45",
"]",
",",
"minor_tick_interval",
"=",
"None",
",",
"xlabel",
"=",
"'Longitude'",
",",
"ylabel",
"=",
"'Latitude'",
",",
"axes_labelsize",
"=",
"None",
",",
"tick_labelsize",
"=",
"None",
",",
"title_labelsize",
"=",
"None",
",",
"grid",
"=",
"False",
",",
"show",
"=",
"True",
",",
"title",
"=",
"True",
",",
"ax",
"=",
"None",
",",
"fname",
"=",
"None",
")",
":",
"if",
"self",
".",
"kind",
"==",
"'cap'",
":",
"if",
"self",
".",
"nwinrot",
"is",
"not",
"None",
"and",
"self",
".",
"nwinrot",
"<=",
"nwin",
":",
"nwin",
"=",
"self",
".",
"nwinrot",
"ncolumns",
"=",
"min",
"(",
"maxcolumns",
",",
"nwin",
")",
"nrows",
"=",
"_np",
".",
"ceil",
"(",
"nwin",
"/",
"ncolumns",
")",
".",
"astype",
"(",
"int",
")",
"figsize",
"=",
"(",
"_mpl",
".",
"rcParams",
"[",
"'figure.figsize'",
"]",
"[",
"0",
"]",
",",
"_mpl",
".",
"rcParams",
"[",
"'figure.figsize'",
"]",
"[",
"0",
"]",
"*",
"0.55",
"*",
"nrows",
"/",
"ncolumns",
"+",
"0.41",
")",
"if",
"ax",
"is",
"None",
":",
"fig",
",",
"axes",
"=",
"_plt",
".",
"subplots",
"(",
"nrows",
",",
"ncolumns",
",",
"figsize",
"=",
"figsize",
",",
"sharex",
"=",
"'all'",
",",
"sharey",
"=",
"'all'",
")",
"else",
":",
"if",
"hasattr",
"(",
"ax",
",",
"'flatten'",
")",
"and",
"ax",
".",
"size",
"<",
"nwin",
":",
"raise",
"ValueError",
"(",
"'ax.size must be greater or equal to nwin. '",
"+",
"'nwin = {:s}'",
".",
"format",
"(",
"repr",
"(",
"nwin",
")",
")",
"+",
"' and ax.size = {:s}.'",
".",
"format",
"(",
"repr",
"(",
"ax",
".",
"size",
")",
")",
")",
"axes",
"=",
"ax",
"if",
"tick_interval",
"is",
"None",
":",
"xticks",
"=",
"[",
"]",
"yticks",
"=",
"[",
"]",
"else",
":",
"xticks",
"=",
"_np",
".",
"linspace",
"(",
"0",
",",
"360",
",",
"num",
"=",
"360",
"//",
"tick_interval",
"[",
"0",
"]",
"+",
"1",
",",
"endpoint",
"=",
"True",
")",
"yticks",
"=",
"_np",
".",
"linspace",
"(",
"-",
"90",
",",
"90",
",",
"num",
"=",
"180",
"//",
"tick_interval",
"[",
"1",
"]",
"+",
"1",
",",
"endpoint",
"=",
"True",
")",
"if",
"axes_labelsize",
"is",
"None",
":",
"axes_labelsize",
"=",
"_mpl",
".",
"rcParams",
"[",
"'axes.labelsize'",
"]",
"if",
"tick_labelsize",
"is",
"None",
":",
"tick_labelsize",
"=",
"_mpl",
".",
"rcParams",
"[",
"'xtick.labelsize'",
"]",
"if",
"title_labelsize",
"is",
"None",
":",
"title_labelsize",
"=",
"_mpl",
".",
"rcParams",
"[",
"'axes.titlesize'",
"]",
"if",
"minor_tick_interval",
"is",
"None",
":",
"minor_xticks",
"=",
"[",
"]",
"minor_yticks",
"=",
"[",
"]",
"else",
":",
"minor_xticks",
"=",
"_np",
".",
"linspace",
"(",
"0",
",",
"360",
",",
"num",
"=",
"360",
"//",
"minor_tick_interval",
"[",
"0",
"]",
"+",
"1",
",",
"endpoint",
"=",
"True",
")",
"minor_yticks",
"=",
"_np",
".",
"linspace",
"(",
"-",
"90",
",",
"90",
",",
"num",
"=",
"180",
"//",
"minor_tick_interval",
"[",
"1",
"]",
"+",
"1",
",",
"endpoint",
"=",
"True",
")",
"deg",
"=",
"'$^{\\circ}$'",
"xticklabels",
"=",
"[",
"str",
"(",
"int",
"(",
"y",
")",
")",
"+",
"deg",
"for",
"y",
"in",
"xticks",
"]",
"yticklabels",
"=",
"[",
"str",
"(",
"int",
"(",
"y",
")",
")",
"+",
"deg",
"for",
"y",
"in",
"yticks",
"]",
"if",
"ax",
"is",
"None",
":",
"if",
"nrows",
">",
"1",
":",
"for",
"axtemp",
"in",
"axes",
"[",
":",
"-",
"1",
",",
":",
"]",
".",
"flatten",
"(",
")",
":",
"for",
"xlabel_i",
"in",
"axtemp",
".",
"get_xticklabels",
"(",
")",
":",
"xlabel_i",
".",
"set_visible",
"(",
"False",
")",
"axtemp",
".",
"set_xlabel",
"(",
"''",
",",
"visible",
"=",
"False",
")",
"for",
"axtemp",
"in",
"axes",
"[",
":",
",",
"1",
":",
"]",
".",
"flatten",
"(",
")",
":",
"for",
"ylabel_i",
"in",
"axtemp",
".",
"get_yticklabels",
"(",
")",
":",
"ylabel_i",
".",
"set_visible",
"(",
"False",
")",
"axtemp",
".",
"set_ylabel",
"(",
"''",
",",
"visible",
"=",
"False",
")",
"elif",
"nwin",
">",
"1",
":",
"for",
"axtemp",
"in",
"axes",
"[",
"1",
":",
"]",
".",
"flatten",
"(",
")",
":",
"for",
"ylabel_i",
"in",
"axtemp",
".",
"get_yticklabels",
"(",
")",
":",
"ylabel_i",
".",
"set_visible",
"(",
"False",
")",
"axtemp",
".",
"set_ylabel",
"(",
"''",
",",
"visible",
"=",
"False",
")",
"for",
"itaper",
"in",
"range",
"(",
"min",
"(",
"self",
".",
"nwin",
",",
"nwin",
")",
")",
":",
"evalue",
"=",
"self",
".",
"eigenvalues",
"[",
"itaper",
"]",
"if",
"min",
"(",
"self",
".",
"nwin",
",",
"nwin",
")",
"==",
"1",
"and",
"ax",
"is",
"None",
":",
"axtemp",
"=",
"axes",
"elif",
"hasattr",
"(",
"axes",
",",
"'flatten'",
")",
":",
"axtemp",
"=",
"axes",
".",
"flatten",
"(",
")",
"[",
"itaper",
"]",
"else",
":",
"axtemp",
"=",
"axes",
"[",
"itaper",
"]",
"gridout",
"=",
"_shtools",
".",
"MakeGridDH",
"(",
"self",
".",
"to_array",
"(",
"itaper",
")",
",",
"sampling",
"=",
"2",
",",
"lmax",
"=",
"lmax",
",",
"norm",
"=",
"1",
",",
"csphase",
"=",
"1",
")",
"axtemp",
".",
"imshow",
"(",
"gridout",
",",
"origin",
"=",
"'upper'",
",",
"extent",
"=",
"(",
"0.",
",",
"360.",
",",
"-",
"90.",
",",
"90.",
")",
")",
"axtemp",
".",
"set",
"(",
"xticks",
"=",
"xticks",
",",
"yticks",
"=",
"yticks",
")",
"axtemp",
".",
"set_xlabel",
"(",
"xlabel",
",",
"fontsize",
"=",
"axes_labelsize",
")",
"axtemp",
".",
"set_ylabel",
"(",
"ylabel",
",",
"fontsize",
"=",
"axes_labelsize",
")",
"axtemp",
".",
"set_xticklabels",
"(",
"xticklabels",
",",
"fontsize",
"=",
"tick_labelsize",
")",
"axtemp",
".",
"set_yticklabels",
"(",
"yticklabels",
",",
"fontsize",
"=",
"tick_labelsize",
")",
"axtemp",
".",
"set_xticks",
"(",
"minor_xticks",
",",
"minor",
"=",
"True",
")",
"axtemp",
".",
"set_yticks",
"(",
"minor_yticks",
",",
"minor",
"=",
"True",
")",
"axtemp",
".",
"grid",
"(",
"grid",
",",
"which",
"=",
"'major'",
")",
"if",
"title",
"is",
"True",
":",
"axtemp",
".",
"set_title",
"(",
"'#{:d} [loss={:2.2g}]'",
".",
"format",
"(",
"itaper",
",",
"1",
"-",
"evalue",
")",
",",
"fontsize",
"=",
"title_labelsize",
")",
"if",
"ax",
"is",
"None",
":",
"fig",
".",
"tight_layout",
"(",
"pad",
"=",
"0.5",
")",
"if",
"show",
":",
"fig",
".",
"show",
"(",
")",
"if",
"fname",
"is",
"not",
"None",
":",
"fig",
".",
"savefig",
"(",
"fname",
")",
"return",
"fig",
",",
"axes"
] | Plot the best-concentrated localization windows.
Usage
-----
x.plot_windows(nwin, [lmax, maxcolumns, tick_interval,
minor_tick_interval, xlabel, ylabel, grid, show,
title, axes_labelsize, tick_labelsize,
title_labelsize, ax, fname])
Parameters
----------
nwin : int
The number of localization windows to plot.
lmax : int, optional, default = self.lwin
The maximum degree to use when plotting the windows, which controls
the number of samples in latitude and longitude.
maxcolumns : int, optional, default = 3
The maximum number of columns to use when plotting multiple
localization windows.
tick_interval : list or tuple, optional, default = [60, 45]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = None
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
grid : bool, optional, default = False
If True, plot grid lines.
show : bool, optional, default = True
If True, plot the image to the screen.
title : bool, optional, default = True
If True, plot a title on top of each subplot providing the taper
number and 1 minus the concentration factor.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
title_labelsize : int, optional, default = None
The font size for the subplot titles.
ax : matplotlib axes object, optional, default = None
An array of matplotlib axes objects where the plots will appear.
fname : str, optional, default = None
If present, save the image to the specified file. | [
"Plot",
"the",
"best",
"-",
"concentrated",
"localization",
"windows",
"."
] | python | train |
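A hedged sketch of plotting spherical-cap windows with the method above; the from_cap arguments (a 30-degree cap, bandwidth 20) and the output file name are arbitrary example values.

    import pyshtools

    win = pyshtools.SHWindow.from_cap(theta=30., lwin=20)
    fig, axes = win.plot_windows(nwin=6, show=False, fname='windows.png')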
apache/incubator-mxnet | python/mxnet/image/image.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/image.py#L1290-L1300 | def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None | [
"def",
"hard_reset",
"(",
"self",
")",
":",
"if",
"self",
".",
"seq",
"is",
"not",
"None",
"and",
"self",
".",
"shuffle",
":",
"random",
".",
"shuffle",
"(",
"self",
".",
"seq",
")",
"if",
"self",
".",
"imgrec",
"is",
"not",
"None",
":",
"self",
".",
"imgrec",
".",
"reset",
"(",
")",
"self",
".",
"cur",
"=",
"0",
"self",
".",
"_allow_read",
"=",
"True",
"self",
".",
"_cache_data",
"=",
"None",
"self",
".",
"_cache_label",
"=",
"None",
"self",
".",
"_cache_idx",
"=",
"None"
] | Resets the iterator and ignores roll over data | [
"Resets",
"the",
"iterator",
"and",
"ignore",
"roll",
"over",
"data"
] | python | train |
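A sketch of where hard_reset fits when using MXNet's ImageIter; the record file path and shapes are placeholders. Unlike reset(), hard_reset also drops any cached batch and roll-over data.

    import mxnet as mx

    it = mx.image.ImageIter(batch_size=32, data_shape=(3, 224, 224),
                            path_imgrec='data/example.rec')   # placeholder record file
    batch = it.next()
    it.hard_reset()   # start over, discarding cached and roll-over data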
dokterbob/django-multilingual-model | multilingual_model/forms.py | https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L19-L58 | def clean(self):
"""
        Make sure at least one translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations.
"""
# First make sure the super's clean method is called upon.
super(TranslationFormSet, self).clean()
if settings.HIDE_LANGUAGE:
return
if len(self.forms) > 0:
# If a default language has been provided, make sure a translation
# is available
if settings.DEFAULT_LANGUAGE and not any(self.errors):
# Don't bother validating the formset unless each form is
# valid on its own. Reference:
# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation
for form in self.forms:
language_code = form.cleaned_data.get(
'language_code', None
)
if language_code == settings.DEFAULT_LANGUAGE:
# All is good, don't bother checking any further
return
raise forms.ValidationError(_(
'No translation provided for default language \'%s\'.'
) % settings.DEFAULT_LANGUAGE)
else:
raise forms.ValidationError(
_('At least one translation should be provided.')
) | [
"def",
"clean",
"(",
"self",
")",
":",
"# First make sure the super's clean method is called upon.",
"super",
"(",
"TranslationFormSet",
",",
"self",
")",
".",
"clean",
"(",
")",
"if",
"settings",
".",
"HIDE_LANGUAGE",
":",
"return",
"if",
"len",
"(",
"self",
".",
"forms",
")",
">",
"0",
":",
"# If a default language has been provided, make sure a translation",
"# is available",
"if",
"settings",
".",
"DEFAULT_LANGUAGE",
"and",
"not",
"any",
"(",
"self",
".",
"errors",
")",
":",
"# Don't bother validating the formset unless each form is",
"# valid on its own. Reference:",
"# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation",
"for",
"form",
"in",
"self",
".",
"forms",
":",
"language_code",
"=",
"form",
".",
"cleaned_data",
".",
"get",
"(",
"'language_code'",
",",
"None",
")",
"if",
"language_code",
"==",
"settings",
".",
"DEFAULT_LANGUAGE",
":",
"# All is good, don't bother checking any further",
"return",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'No translation provided for default language \\'%s\\'.'",
")",
"%",
"settings",
".",
"DEFAULT_LANGUAGE",
")",
"else",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'At least one translation should be provided.'",
")",
")"
] | Make sure at least one translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations. | [
"Make",
"sure",
"there",
"is",
"at",
"least",
"a",
"translation",
"has",
"been",
"filled",
"in",
".",
"If",
"a",
"default",
"language",
"has",
"been",
"specified",
"make",
"sure",
"that",
"it",
"exists",
"amongst",
"translations",
"."
] | python | train |
PmagPy/PmagPy | pmagpy/ipmag.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6155-L6301 | def azdip_magic(orient_file='orient.txt', samp_file="samples.txt", samp_con="1", Z=1, method_codes='FS-FD', location_name='unknown', append=False, output_dir='.', input_dir='.', data_model=3):
"""
takes space delimited AzDip file and converts to MagIC formatted tables
Parameters
__________
orient_file : name of azdip formatted input file
samp_file : name of samples.txt formatted output file
samp_con : integer of sample orientation convention
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
method_codes : colon delimited string with the following as desired
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
location_name : location of samples
append : boolean. if True, append to the output file
output_dir : path to output file directory
input_dir : path to input file directory
data_model : MagIC data model.
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file
"""
#
# initialize variables
#
data_model = int(data_model)
if (data_model != 3) and (samp_file == "samples.txt"):
samp_file = "er_samples.txt"
if (data_model == 2) and (samp_file == "er_samples.txt"):
samp_file = "samples.txt"
DEBUG = 0
version_num = pmag.get_version()
or_con, corr = "3", "1"
# date of sampling, latitude (pos North), longitude (pos East)
date, lat, lon = "", "", ""
bed_dip, bed_dip_dir = "", ""
participantlist = ""
sites = [] # list of site names
Lats, Lons = [], [] # list of latitudes and longitudes
# lists of Sample records and Site records
SampRecs, SiteRecs, ImageRecs, imagelist = [], [], [], []
average_bedding = "1", 1, "0"
newbaseline, newbeddir, newbeddip = "", "", ""
delta_u = "0"
sclass, lithology, type = "", "", ""
newclass, newlith, newtype = '', '', ''
user = ""
corr == "3"
DecCorr = 0.
samp_file = pmag.resolve_file_name(samp_file, output_dir)
orient_file = pmag.resolve_file_name(orient_file, input_dir)
input_dir = os.path.split(orient_file)[0]
output_dir = os.path.split(samp_file)[0]
#
#
if append:
try:
SampRecs, file_type = pmag.magic_read(samp_file)
print("sample data to be appended to: ", samp_file)
except:
print('problem with existing samp file: ',
samp_file, ' will create new')
#
# read in file to convert
#
azfile = open(orient_file, 'r')
AzDipDat = azfile.readlines()
azfile.close()
if not AzDipDat:
return False, 'No data in orientation file, please try again'
azfile.close()
SampOut, samplist = [], []
for line in AzDipDat:
orec = line.split()
if len(orec) > 2:
labaz, labdip = pmag.orient(float(orec[1]), float(orec[2]), or_con)
bed_dip = float(orec[4])
if bed_dip != 0:
bed_dip_dir = float(orec[3]) - \
90. # assume dip to right of strike
else:
bed_dip_dir = float(orec[3]) # assume dip to right of strike
MagRec = {}
MagRec["er_location_name"] = location_name
MagRec["er_citation_names"] = "This study"
#
# parse information common to all orientation methods
#
MagRec["er_sample_name"] = orec[0]
MagRec["sample_bed_dip"] = '%7.1f' % (bed_dip)
MagRec["sample_bed_dip_direction"] = '%7.1f' % (bed_dip_dir)
MagRec["sample_dip"] = '%7.1f' % (labdip)
MagRec["sample_azimuth"] = '%7.1f' % (labaz)
methods = method_codes.replace(" ", "").split(":")
OR = 0
for method in methods:
method_type = method.split("-")
if "SO" in method_type:
OR = 1
if OR == 0:
method_codes = method_codes + ":SO-NO"
MagRec["magic_method_codes"] = method_codes
# parse out the site name
site = pmag.parse_site(orec[0], samp_con, Z)
MagRec["er_site_name"] = site
MagRec['magic_software_packages'] = version_num
SampOut.append(MagRec)
if MagRec['er_sample_name'] not in samplist:
samplist.append(MagRec['er_sample_name'])
for samp in SampRecs:
if samp not in samplist:
SampOut.append(samp)
Samps, keys = pmag.fillkeys(SampOut)
if data_model == 2:
# write to file
pmag.magic_write(samp_file, Samps, "er_samples")
else:
# translate sample records to MagIC 3
Samps3 = []
for samp in Samps:
Samps3.append(map_magic.mapping(
samp, map_magic.samp_magic2_2_magic3_map))
# write to file
pmag.magic_write(samp_file, Samps3, "samples")
print("Data saved in ", samp_file)
return True, None | [
"def",
"azdip_magic",
"(",
"orient_file",
"=",
"'orient.txt'",
",",
"samp_file",
"=",
"\"samples.txt\"",
",",
"samp_con",
"=",
"\"1\"",
",",
"Z",
"=",
"1",
",",
"method_codes",
"=",
"'FS-FD'",
",",
"location_name",
"=",
"'unknown'",
",",
"append",
"=",
"False",
",",
"output_dir",
"=",
"'.'",
",",
"input_dir",
"=",
"'.'",
",",
"data_model",
"=",
"3",
")",
":",
"#",
"# initialize variables",
"#",
"data_model",
"=",
"int",
"(",
"data_model",
")",
"if",
"(",
"data_model",
"!=",
"3",
")",
"and",
"(",
"samp_file",
"==",
"\"samples.txt\"",
")",
":",
"samp_file",
"=",
"\"er_samples.txt\"",
"if",
"(",
"data_model",
"==",
"2",
")",
"and",
"(",
"samp_file",
"==",
"\"er_samples.txt\"",
")",
":",
"samp_file",
"=",
"\"samples.txt\"",
"DEBUG",
"=",
"0",
"version_num",
"=",
"pmag",
".",
"get_version",
"(",
")",
"or_con",
",",
"corr",
"=",
"\"3\"",
",",
"\"1\"",
"# date of sampling, latitude (pos North), longitude (pos East)",
"date",
",",
"lat",
",",
"lon",
"=",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"bed_dip",
",",
"bed_dip_dir",
"=",
"\"\"",
",",
"\"\"",
"participantlist",
"=",
"\"\"",
"sites",
"=",
"[",
"]",
"# list of site names",
"Lats",
",",
"Lons",
"=",
"[",
"]",
",",
"[",
"]",
"# list of latitudes and longitudes",
"# lists of Sample records and Site records",
"SampRecs",
",",
"SiteRecs",
",",
"ImageRecs",
",",
"imagelist",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"average_bedding",
"=",
"\"1\"",
",",
"1",
",",
"\"0\"",
"newbaseline",
",",
"newbeddir",
",",
"newbeddip",
"=",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"delta_u",
"=",
"\"0\"",
"sclass",
",",
"lithology",
",",
"type",
"=",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"newclass",
",",
"newlith",
",",
"newtype",
"=",
"''",
",",
"''",
",",
"''",
"user",
"=",
"\"\"",
"corr",
"==",
"\"3\"",
"DecCorr",
"=",
"0.",
"samp_file",
"=",
"pmag",
".",
"resolve_file_name",
"(",
"samp_file",
",",
"output_dir",
")",
"orient_file",
"=",
"pmag",
".",
"resolve_file_name",
"(",
"orient_file",
",",
"input_dir",
")",
"input_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"orient_file",
")",
"[",
"0",
"]",
"output_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"samp_file",
")",
"[",
"0",
"]",
"#",
"#",
"if",
"append",
":",
"try",
":",
"SampRecs",
",",
"file_type",
"=",
"pmag",
".",
"magic_read",
"(",
"samp_file",
")",
"print",
"(",
"\"sample data to be appended to: \"",
",",
"samp_file",
")",
"except",
":",
"print",
"(",
"'problem with existing samp file: '",
",",
"samp_file",
",",
"' will create new'",
")",
"#",
"# read in file to convert",
"#",
"azfile",
"=",
"open",
"(",
"orient_file",
",",
"'r'",
")",
"AzDipDat",
"=",
"azfile",
".",
"readlines",
"(",
")",
"azfile",
".",
"close",
"(",
")",
"if",
"not",
"AzDipDat",
":",
"return",
"False",
",",
"'No data in orientation file, please try again'",
"azfile",
".",
"close",
"(",
")",
"SampOut",
",",
"samplist",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"line",
"in",
"AzDipDat",
":",
"orec",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"orec",
")",
">",
"2",
":",
"labaz",
",",
"labdip",
"=",
"pmag",
".",
"orient",
"(",
"float",
"(",
"orec",
"[",
"1",
"]",
")",
",",
"float",
"(",
"orec",
"[",
"2",
"]",
")",
",",
"or_con",
")",
"bed_dip",
"=",
"float",
"(",
"orec",
"[",
"4",
"]",
")",
"if",
"bed_dip",
"!=",
"0",
":",
"bed_dip_dir",
"=",
"float",
"(",
"orec",
"[",
"3",
"]",
")",
"-",
"90.",
"# assume dip to right of strike",
"else",
":",
"bed_dip_dir",
"=",
"float",
"(",
"orec",
"[",
"3",
"]",
")",
"# assume dip to right of strike",
"MagRec",
"=",
"{",
"}",
"MagRec",
"[",
"\"er_location_name\"",
"]",
"=",
"location_name",
"MagRec",
"[",
"\"er_citation_names\"",
"]",
"=",
"\"This study\"",
"#",
"# parse information common to all orientation methods",
"#",
"MagRec",
"[",
"\"er_sample_name\"",
"]",
"=",
"orec",
"[",
"0",
"]",
"MagRec",
"[",
"\"sample_bed_dip\"",
"]",
"=",
"'%7.1f'",
"%",
"(",
"bed_dip",
")",
"MagRec",
"[",
"\"sample_bed_dip_direction\"",
"]",
"=",
"'%7.1f'",
"%",
"(",
"bed_dip_dir",
")",
"MagRec",
"[",
"\"sample_dip\"",
"]",
"=",
"'%7.1f'",
"%",
"(",
"labdip",
")",
"MagRec",
"[",
"\"sample_azimuth\"",
"]",
"=",
"'%7.1f'",
"%",
"(",
"labaz",
")",
"methods",
"=",
"method_codes",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
".",
"split",
"(",
"\":\"",
")",
"OR",
"=",
"0",
"for",
"method",
"in",
"methods",
":",
"method_type",
"=",
"method",
".",
"split",
"(",
"\"-\"",
")",
"if",
"\"SO\"",
"in",
"method_type",
":",
"OR",
"=",
"1",
"if",
"OR",
"==",
"0",
":",
"method_codes",
"=",
"method_codes",
"+",
"\":SO-NO\"",
"MagRec",
"[",
"\"magic_method_codes\"",
"]",
"=",
"method_codes",
"# parse out the site name",
"site",
"=",
"pmag",
".",
"parse_site",
"(",
"orec",
"[",
"0",
"]",
",",
"samp_con",
",",
"Z",
")",
"MagRec",
"[",
"\"er_site_name\"",
"]",
"=",
"site",
"MagRec",
"[",
"'magic_software_packages'",
"]",
"=",
"version_num",
"SampOut",
".",
"append",
"(",
"MagRec",
")",
"if",
"MagRec",
"[",
"'er_sample_name'",
"]",
"not",
"in",
"samplist",
":",
"samplist",
".",
"append",
"(",
"MagRec",
"[",
"'er_sample_name'",
"]",
")",
"for",
"samp",
"in",
"SampRecs",
":",
"if",
"samp",
"not",
"in",
"samplist",
":",
"SampOut",
".",
"append",
"(",
"samp",
")",
"Samps",
",",
"keys",
"=",
"pmag",
".",
"fillkeys",
"(",
"SampOut",
")",
"if",
"data_model",
"==",
"2",
":",
"# write to file",
"pmag",
".",
"magic_write",
"(",
"samp_file",
",",
"Samps",
",",
"\"er_samples\"",
")",
"else",
":",
"# translate sample records to MagIC 3",
"Samps3",
"=",
"[",
"]",
"for",
"samp",
"in",
"Samps",
":",
"Samps3",
".",
"append",
"(",
"map_magic",
".",
"mapping",
"(",
"samp",
",",
"map_magic",
".",
"samp_magic2_2_magic3_map",
")",
")",
"# write to file",
"pmag",
".",
"magic_write",
"(",
"samp_file",
",",
"Samps3",
",",
"\"samples\"",
")",
"print",
"(",
"\"Data saved in \"",
",",
"samp_file",
")",
"return",
"True",
",",
"None"
] | takes space delimited AzDip file and converts to MagIC formatted tables
Parameters
__________
orient_file : name of azdip formatted input file
samp_file : name of samples.txt formatted output file
samp_con : integer of sample orientation convention
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
method_codes : colon delimited string with the following as desired
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
location_name : location of samples
append : boolean. if True, append to the output file
output_dir : path to output file directory
input_dir : path to input file directory
data_model : MagIC data model.
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file | [
"takes",
"space",
"delimited",
"AzDip",
"file",
"and",
"converts",
"to",
"MagIC",
"formatted",
"tables"
] | python | train |
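A minimal invocation sketch; the file names and location are placeholders, and orient.txt must follow the space-delimited 'Samp Az Dip Strike Dip' layout described above.

    from pmagpy import ipmag

    ok, error = ipmag.azdip_magic(orient_file='orient.txt', samp_file='samples.txt',
                                  samp_con='1', location_name='Example Location')
    if not ok:
        print(error)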
neovim/pynvim | pynvim/api/buffer.py | https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/pynvim/api/buffer.py#L86-L90 | def append(self, lines, index=-1):
"""Append a string or list of lines to the buffer."""
if isinstance(lines, (basestring, bytes)):
lines = [lines]
return self.request('nvim_buf_set_lines', index, index, True, lines) | [
"def",
"append",
"(",
"self",
",",
"lines",
",",
"index",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"lines",
",",
"(",
"basestring",
",",
"bytes",
")",
")",
":",
"lines",
"=",
"[",
"lines",
"]",
"return",
"self",
".",
"request",
"(",
"'nvim_buf_set_lines'",
",",
"index",
",",
"index",
",",
"True",
",",
"lines",
")"
] | Append a string or list of lines to the buffer. | [
"Append",
"a",
"string",
"or",
"list",
"of",
"lines",
"to",
"the",
"buffer",
"."
] | python | train |
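A short sketch of appending lines to the current buffer over a socket attachment; the socket path is a placeholder and assumes a running nvim instance is listening there.

    from pynvim import attach

    nvim = attach('socket', path='/tmp/nvim')            # placeholder socket path
    nvim.current.buffer.append('one appended line')      # str or list of lines
    nvim.current.buffer.append(['line two', 'line three'], index=0)   # insert at top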
blockstack/blockstack-core | blockstack/lib/client.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/client.py#L240-L254 | def json_is_exception(resp):
"""
Is the given response object
an exception traceback?
Return True if so
Return False if not
"""
if not json_is_error(resp):
return False
if 'traceback' not in resp.keys() or 'error' not in resp.keys():
return False
return True | [
"def",
"json_is_exception",
"(",
"resp",
")",
":",
"if",
"not",
"json_is_error",
"(",
"resp",
")",
":",
"return",
"False",
"if",
"'traceback'",
"not",
"in",
"resp",
".",
"keys",
"(",
")",
"or",
"'error'",
"not",
"in",
"resp",
".",
"keys",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | Is the given response object
an exception traceback?
Return True if so
Return False if not | [
"Is",
"the",
"given",
"response",
"object",
"an",
"exception",
"traceback?"
] | python | train |
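A self-contained sketch of the same check as the json_is_exception entry above; json_is_error is assumed here to mean "a dict carrying an 'error' key", which is an assumption about a helper not shown in this entry:
def json_is_error(resp):
    # assumed behaviour of the helper used above
    return isinstance(resp, dict) and 'error' in resp

def json_is_exception(resp):
    if not json_is_error(resp):
        return False
    if 'traceback' not in resp.keys() or 'error' not in resp.keys():
        return False
    return True

print(json_is_exception({'error': 'boom', 'traceback': 'Traceback (most recent call last): ...'}))  # True
print(json_is_exception({'error': 'boom'}))   # False, no traceback attached
print(json_is_exception({'status': 'ok'}))    # False, not an error response at all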
jupyterhub/nbgitpuller | nbgitpuller/pull.py | https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L133-L146 | def find_upstream_changed(self, kind):
"""
Return list of files that have been changed upstream belonging to a particular kind of change
"""
output = subprocess.check_output([
'git', 'log', '{}..origin/{}'.format(self.branch_name, self.branch_name),
'--oneline', '--name-status'
], cwd=self.repo_dir).decode()
files = []
for line in output.split('\n'):
if line.startswith(kind):
files.append(os.path.join(self.repo_dir, line.split('\t', 1)[1]))
return files | [
"def",
"find_upstream_changed",
"(",
"self",
",",
"kind",
")",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'log'",
",",
"'{}..origin/{}'",
".",
"format",
"(",
"self",
".",
"branch_name",
",",
"self",
".",
"branch_name",
")",
",",
"'--oneline'",
",",
"'--name-status'",
"]",
",",
"cwd",
"=",
"self",
".",
"repo_dir",
")",
".",
"decode",
"(",
")",
"files",
"=",
"[",
"]",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"kind",
")",
":",
"files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"repo_dir",
",",
"line",
".",
"split",
"(",
"'\\t'",
",",
"1",
")",
"[",
"1",
"]",
")",
")",
"return",
"files"
] | Return list of files that have been changed upstream belonging to a particular kind of change | [
"Return",
"list",
"of",
"files",
"that",
"have",
"been",
"changed",
"upstream",
"belonging",
"to",
"a",
"particular",
"kind",
"of",
"change"
] | python | train |
wonambi-python/wonambi | wonambi/ioeeg/ktlx.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/ktlx.py#L770-L835 | def _read_hdr_file(ktlx_file):
"""Reads header of one KTLX file.
Parameters
----------
ktlx_file : Path
name of one of the ktlx files inside the directory (absolute path)
Returns
-------
dict
dict with information about the file
Notes
-----
p.3: says long, but python-long requires 8 bytes, so we use f.read(4)
GUID is correct, BUT little/big endian problems somewhere
"""
with ktlx_file.open('rb') as f:
hdr = {}
assert f.tell() == 0
hdr['file_guid'] = hexlify(f.read(16))
hdr['file_schema'], = unpack('<H', f.read(2))
if not hdr['file_schema'] in (1, 3, 7, 8, 9):
raise NotImplementedError('Reading header not implemented for ' +
'file_schema ' + str(hdr['file_schema']))
hdr['base_schema'], = unpack('<H', f.read(2))
if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think
raise NotImplementedError('Reading header not implemented for ' +
'base_schema ' + str(hdr['base_schema']))
hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',
f.read(4))[0])
hdr['patient_id'], = unpack('<i', f.read(4))
hdr['study_id'], = unpack('<i', f.read(4))
hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))
hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))
hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))
hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))
assert f.tell() == 352
if hdr['file_schema'] >= 7:
hdr['sample_freq'], = unpack('<d', f.read(8))
n_chan, = unpack('<i', f.read(4))
hdr['num_channels'] = n_chan
hdr['deltabits'], = unpack('<i', f.read(4))
hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],
f.read(hdr['num_channels'] * 4))
f.seek(4464)
hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))
hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))
hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))
hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))
hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))
hdr['discardbits'], = unpack('<i', f.read(4))
if hdr['file_schema'] >= 8:
hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]
hdr['frequency_factor'] = unpack('<' + 'h' * 1024,
f.read(2048))[:n_chan]
return hdr | [
"def",
"_read_hdr_file",
"(",
"ktlx_file",
")",
":",
"with",
"ktlx_file",
".",
"open",
"(",
"'rb'",
")",
"as",
"f",
":",
"hdr",
"=",
"{",
"}",
"assert",
"f",
".",
"tell",
"(",
")",
"==",
"0",
"hdr",
"[",
"'file_guid'",
"]",
"=",
"hexlify",
"(",
"f",
".",
"read",
"(",
"16",
")",
")",
"hdr",
"[",
"'file_schema'",
"]",
",",
"=",
"unpack",
"(",
"'<H'",
",",
"f",
".",
"read",
"(",
"2",
")",
")",
"if",
"not",
"hdr",
"[",
"'file_schema'",
"]",
"in",
"(",
"1",
",",
"3",
",",
"7",
",",
"8",
",",
"9",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Reading header not implemented for '",
"+",
"'file_schema '",
"+",
"str",
"(",
"hdr",
"[",
"'file_schema'",
"]",
")",
")",
"hdr",
"[",
"'base_schema'",
"]",
",",
"=",
"unpack",
"(",
"'<H'",
",",
"f",
".",
"read",
"(",
"2",
")",
")",
"if",
"not",
"hdr",
"[",
"'base_schema'",
"]",
"==",
"1",
":",
"# p.3: base_schema 0 is rare, I think",
"raise",
"NotImplementedError",
"(",
"'Reading header not implemented for '",
"+",
"'base_schema '",
"+",
"str",
"(",
"hdr",
"[",
"'base_schema'",
"]",
")",
")",
"hdr",
"[",
"'creation_time'",
"]",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"unpack",
"(",
"'<i'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"[",
"0",
"]",
")",
"hdr",
"[",
"'patient_id'",
"]",
",",
"=",
"unpack",
"(",
"'<i'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"hdr",
"[",
"'study_id'",
"]",
",",
"=",
"unpack",
"(",
"'<i'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"hdr",
"[",
"'pat_last_name'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"80",
",",
"f",
".",
"read",
"(",
"80",
")",
")",
")",
"hdr",
"[",
"'pat_first_name'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"80",
",",
"f",
".",
"read",
"(",
"80",
")",
")",
")",
"hdr",
"[",
"'pat_middle_name'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"80",
",",
"f",
".",
"read",
"(",
"80",
")",
")",
")",
"hdr",
"[",
"'patient_id'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"80",
",",
"f",
".",
"read",
"(",
"80",
")",
")",
")",
"assert",
"f",
".",
"tell",
"(",
")",
"==",
"352",
"if",
"hdr",
"[",
"'file_schema'",
"]",
">=",
"7",
":",
"hdr",
"[",
"'sample_freq'",
"]",
",",
"=",
"unpack",
"(",
"'<d'",
",",
"f",
".",
"read",
"(",
"8",
")",
")",
"n_chan",
",",
"=",
"unpack",
"(",
"'<i'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"hdr",
"[",
"'num_channels'",
"]",
"=",
"n_chan",
"hdr",
"[",
"'deltabits'",
"]",
",",
"=",
"unpack",
"(",
"'<i'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"hdr",
"[",
"'phys_chan'",
"]",
"=",
"unpack",
"(",
"'<'",
"+",
"'i'",
"*",
"hdr",
"[",
"'num_channels'",
"]",
",",
"f",
".",
"read",
"(",
"hdr",
"[",
"'num_channels'",
"]",
"*",
"4",
")",
")",
"f",
".",
"seek",
"(",
"4464",
")",
"hdr",
"[",
"'headbox_type'",
"]",
"=",
"unpack",
"(",
"'<'",
"+",
"'i'",
"*",
"4",
",",
"f",
".",
"read",
"(",
"16",
")",
")",
"hdr",
"[",
"'headbox_sn'",
"]",
"=",
"unpack",
"(",
"'<'",
"+",
"'i'",
"*",
"4",
",",
"f",
".",
"read",
"(",
"16",
")",
")",
"hdr",
"[",
"'headbox_sw_version'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"40",
",",
"f",
".",
"read",
"(",
"40",
")",
")",
")",
"hdr",
"[",
"'dsp_hw_version'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"10",
",",
"f",
".",
"read",
"(",
"10",
")",
")",
")",
"hdr",
"[",
"'dsp_sw_version'",
"]",
"=",
"_make_str",
"(",
"unpack",
"(",
"'c'",
"*",
"10",
",",
"f",
".",
"read",
"(",
"10",
")",
")",
")",
"hdr",
"[",
"'discardbits'",
"]",
",",
"=",
"unpack",
"(",
"'<i'",
",",
"f",
".",
"read",
"(",
"4",
")",
")",
"if",
"hdr",
"[",
"'file_schema'",
"]",
">=",
"8",
":",
"hdr",
"[",
"'shorted'",
"]",
"=",
"unpack",
"(",
"'<'",
"+",
"'h'",
"*",
"1024",
",",
"f",
".",
"read",
"(",
"2048",
")",
")",
"[",
":",
"n_chan",
"]",
"hdr",
"[",
"'frequency_factor'",
"]",
"=",
"unpack",
"(",
"'<'",
"+",
"'h'",
"*",
"1024",
",",
"f",
".",
"read",
"(",
"2048",
")",
")",
"[",
":",
"n_chan",
"]",
"return",
"hdr"
] | Reads header of one KTLX file.
Parameters
----------
ktlx_file : Path
name of one of the ktlx files inside the directory (absolute path)
Returns
-------
dict
dict with information about the file
Notes
-----
p.3: says long, but python-long requires 8 bytes, so we use f.read(4)
GUID is correct, BUT little/big endian problems somewhere | [
"Reads",
"header",
"of",
"one",
"KTLX",
"file",
"."
] | python | train |
Clinical-Genomics/scout | scout/parse/omim.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/omim.py#L43-L166 | def parse_genemap2(lines):
"""Parse the omim source file called genemap2.txt
Explanation of Phenotype field:
Brackets, "[ ]", indicate "nondiseases," mainly genetic variations that
lead to apparently abnormal laboratory test values.
Braces, "{ }", indicate mutations that contribute to susceptibility to
multifactorial disorders (e.g., diabetes, asthma) or to susceptibility
to infection (e.g., malaria).
A question mark, "?", before the phenotype name indicates that the
relationship between the phenotype and gene is provisional.
More details about this relationship are provided in the comment
field of the map and in the gene and phenotype OMIM entries.
The number in parentheses after the name of each disorder indicates
the following:
(1) the disorder was positioned by mapping of the wildtype gene;
(2) the disease phenotype itself was mapped;
(3) the molecular basis of the disorder is known;
(4) the disorder is a chromosome deletion or duplication syndrome.
Args:
lines(iterable(str))
Yields:
parsed_entry(dict)
"""
LOG.info("Parsing the omim genemap2")
header = []
for i,line in enumerate(lines):
line = line.rstrip()
if line.startswith('#'):
if i < 10:
if line.startswith('# Chromosome'):
header = line[2:].split('\t')
continue
if len(line) < 5:
continue
parsed_entry = parse_omim_line(line, header)
parsed_entry['mim_number'] = int(parsed_entry['Mim Number'])
parsed_entry['raw'] = line
# This is the approved symbol for the entry
hgnc_symbol = parsed_entry.get("Approved Symbol")
# If no approved symbol could be found choose the first of
# the gene symbols
gene_symbols = []
if parsed_entry.get('Gene Symbols'):
gene_symbols = [symbol.strip() for symbol in parsed_entry['Gene Symbols'].split(',')]
parsed_entry['hgnc_symbols'] = gene_symbols
if not hgnc_symbol and gene_symbols:
hgnc_symbol = gene_symbols[0]
parsed_entry['hgnc_symbol'] = hgnc_symbol
# Gene inheritance is a construct. It is the union of all inheritance
# patterns found in the associated phenotypes
gene_inheritance = set()
parsed_phenotypes = []
# Information about the related phenotypes
# Each related phenotype is separated by ';'
for phenotype_info in parsed_entry.get('Phenotypes', '').split(';'):
if not phenotype_info:
continue
phenotype_info = phenotype_info.lstrip()
# First symbol in description indicates phenotype status
# If no special symbol is used the phenotype is 'established'
phenotype_status = OMIM_STATUS_MAP.get(phenotype_info[0], 'established')
# Skip phenotype entries that not associated to disease
if phenotype_status == 'nondisease':
continue
phenotype_description = ""
# We will try to save the description
splitted_info = phenotype_info.split(',')
for i, text in enumerate(splitted_info):
# Everything before ([1,2,3])
# We check if we are in the part where the mim number exists
match = entry_pattern.search(text)
if not match:
phenotype_description += text
else:
# If we find the end of the entry
mimnr_match = mimnr_pattern.search(phenotype_info)
# Then if the entry have a mim number we choose that
if mimnr_match:
phenotype_mim = int(mimnr_match.group())
else:
phenotype_mim = parsed_entry['mim_number']
phenotype_description += text[:-4]
break
# Find the inheritance
inheritance = set()
inheritance_text = ','.join(splitted_info[i:])
for term in mim_inheritance_terms:
if term in inheritance_text:
inheritance.add(TERMS_MAPPER[term])
gene_inheritance.add(TERMS_MAPPER[term])
parsed_phenotypes.append(
{
'mim_number':phenotype_mim,
'inheritance': inheritance,
'description': phenotype_description.strip('?\{\}'),
'status': phenotype_status,
}
)
parsed_entry['phenotypes'] = parsed_phenotypes
parsed_entry['inheritance'] = gene_inheritance
yield parsed_entry | [
"def",
"parse_genemap2",
"(",
"lines",
")",
":",
"LOG",
".",
"info",
"(",
"\"Parsing the omim genemap2\"",
")",
"header",
"=",
"[",
"]",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"i",
"<",
"10",
":",
"if",
"line",
".",
"startswith",
"(",
"'# Chromosome'",
")",
":",
"header",
"=",
"line",
"[",
"2",
":",
"]",
".",
"split",
"(",
"'\\t'",
")",
"continue",
"if",
"len",
"(",
"line",
")",
"<",
"5",
":",
"continue",
"parsed_entry",
"=",
"parse_omim_line",
"(",
"line",
",",
"header",
")",
"parsed_entry",
"[",
"'mim_number'",
"]",
"=",
"int",
"(",
"parsed_entry",
"[",
"'Mim Number'",
"]",
")",
"parsed_entry",
"[",
"'raw'",
"]",
"=",
"line",
"# This is the approved symbol for the entry",
"hgnc_symbol",
"=",
"parsed_entry",
".",
"get",
"(",
"\"Approved Symbol\"",
")",
"# If no approved symbol could be found choose the first of",
"# the gene symbols",
"gene_symbols",
"=",
"[",
"]",
"if",
"parsed_entry",
".",
"get",
"(",
"'Gene Symbols'",
")",
":",
"gene_symbols",
"=",
"[",
"symbol",
".",
"strip",
"(",
")",
"for",
"symbol",
"in",
"parsed_entry",
"[",
"'Gene Symbols'",
"]",
".",
"split",
"(",
"','",
")",
"]",
"parsed_entry",
"[",
"'hgnc_symbols'",
"]",
"=",
"gene_symbols",
"if",
"not",
"hgnc_symbol",
"and",
"gene_symbols",
":",
"hgnc_symbol",
"=",
"gene_symbols",
"[",
"0",
"]",
"parsed_entry",
"[",
"'hgnc_symbol'",
"]",
"=",
"hgnc_symbol",
"# Gene inheritance is a construct. It is the union of all inheritance",
"# patterns found in the associated phenotypes",
"gene_inheritance",
"=",
"set",
"(",
")",
"parsed_phenotypes",
"=",
"[",
"]",
"# Information about the related phenotypes",
"# Each related phenotype is separated by ';'",
"for",
"phenotype_info",
"in",
"parsed_entry",
".",
"get",
"(",
"'Phenotypes'",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
":",
"if",
"not",
"phenotype_info",
":",
"continue",
"phenotype_info",
"=",
"phenotype_info",
".",
"lstrip",
"(",
")",
"# First symbol in description indicates phenotype status",
"# If no special symbol is used the phenotype is 'established'",
"phenotype_status",
"=",
"OMIM_STATUS_MAP",
".",
"get",
"(",
"phenotype_info",
"[",
"0",
"]",
",",
"'established'",
")",
"# Skip phenotype entries that not associated to disease",
"if",
"phenotype_status",
"==",
"'nondisease'",
":",
"continue",
"phenotype_description",
"=",
"\"\"",
"# We will try to save the description ",
"splitted_info",
"=",
"phenotype_info",
".",
"split",
"(",
"','",
")",
"for",
"i",
",",
"text",
"in",
"enumerate",
"(",
"splitted_info",
")",
":",
"# Everything before ([1,2,3])",
"# We check if we are in the part where the mim number exists",
"match",
"=",
"entry_pattern",
".",
"search",
"(",
"text",
")",
"if",
"not",
"match",
":",
"phenotype_description",
"+=",
"text",
"else",
":",
"# If we find the end of the entry",
"mimnr_match",
"=",
"mimnr_pattern",
".",
"search",
"(",
"phenotype_info",
")",
"# Then if the entry have a mim number we choose that",
"if",
"mimnr_match",
":",
"phenotype_mim",
"=",
"int",
"(",
"mimnr_match",
".",
"group",
"(",
")",
")",
"else",
":",
"phenotype_mim",
"=",
"parsed_entry",
"[",
"'mim_number'",
"]",
"phenotype_description",
"+=",
"text",
"[",
":",
"-",
"4",
"]",
"break",
"# Find the inheritance",
"inheritance",
"=",
"set",
"(",
")",
"inheritance_text",
"=",
"','",
".",
"join",
"(",
"splitted_info",
"[",
"i",
":",
"]",
")",
"for",
"term",
"in",
"mim_inheritance_terms",
":",
"if",
"term",
"in",
"inheritance_text",
":",
"inheritance",
".",
"add",
"(",
"TERMS_MAPPER",
"[",
"term",
"]",
")",
"gene_inheritance",
".",
"add",
"(",
"TERMS_MAPPER",
"[",
"term",
"]",
")",
"parsed_phenotypes",
".",
"append",
"(",
"{",
"'mim_number'",
":",
"phenotype_mim",
",",
"'inheritance'",
":",
"inheritance",
",",
"'description'",
":",
"phenotype_description",
".",
"strip",
"(",
"'?\\{\\}'",
")",
",",
"'status'",
":",
"phenotype_status",
",",
"}",
")",
"parsed_entry",
"[",
"'phenotypes'",
"]",
"=",
"parsed_phenotypes",
"parsed_entry",
"[",
"'inheritance'",
"]",
"=",
"gene_inheritance",
"yield",
"parsed_entry"
] | Parse the omim source file called genemap2.txt
Explanation of Phenotype field:
Brackets, "[ ]", indicate "nondiseases," mainly genetic variations that
lead to apparently abnormal laboratory test values.
Braces, "{ }", indicate mutations that contribute to susceptibility to
multifactorial disorders (e.g., diabetes, asthma) or to susceptibility
to infection (e.g., malaria).
A question mark, "?", before the phenotype name indicates that the
relationship between the phenotype and gene is provisional.
More details about this relationship are provided in the comment
field of the map and in the gene and phenotype OMIM entries.
The number in parentheses after the name of each disorder indicates
the following:
(1) the disorder was positioned by mapping of the wildtype gene;
(2) the disease phenotype itself was mapped;
(3) the molecular basis of the disorder is known;
(4) the disorder is a chromosome deletion or duplication syndrome.
Args:
lines(iterable(str))
Yields:
parsed_entry(dict) | [
"Parse",
"the",
"omim",
"source",
"file",
"called",
"genemap2",
".",
"txt",
"Explanation",
"of",
"Phenotype",
"field",
":",
"Brackets",
"[",
"]",
"indicate",
"nondiseases",
"mainly",
"genetic",
"variations",
"that",
"lead",
"to",
"apparently",
"abnormal",
"laboratory",
"test",
"values",
"."
] | python | test |
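A hedged usage sketch for the parse_genemap2 entry above; the file name is a placeholder for a locally downloaded OMIM genemap2.txt, and the function is consumed lazily because it is a generator:
from scout.parse.omim import parse_genemap2

with open('genemap2.txt') as handle:   # placeholder path
    for entry in parse_genemap2(handle):
        print(entry['mim_number'], entry['hgnc_symbol'], sorted(entry['inheritance']))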
pvlib/pvlib-python | pvlib/clearsky.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/clearsky.py#L491-L499 | def _calc_b(w, aod700):
"""Calculate the b coefficient."""
b1 = 0.00925*aod700**2 + 0.0148*aod700 - 0.0172
b0 = -0.7565*aod700**2 + 0.5057*aod700 + 0.4557
b = b1 * np.log(w) + b0
return b | [
"def",
"_calc_b",
"(",
"w",
",",
"aod700",
")",
":",
"b1",
"=",
"0.00925",
"*",
"aod700",
"**",
"2",
"+",
"0.0148",
"*",
"aod700",
"-",
"0.0172",
"b0",
"=",
"-",
"0.7565",
"*",
"aod700",
"**",
"2",
"+",
"0.5057",
"*",
"aod700",
"+",
"0.4557",
"b",
"=",
"b1",
"*",
"np",
".",
"log",
"(",
"w",
")",
"+",
"b0",
"return",
"b"
] | Calculate the b coefficient. | [
"Calculate",
"the",
"b",
"coefficient",
"."
] | python | train |
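A standalone numeric sketch of the same fit as the _calc_b entry above, useful for sanity-checking a single point (input values chosen only for illustration):
import numpy as np

def calc_b(w, aod700):
    # same polynomial-in-aod700 coefficients and log dependence on w as above
    b1 = 0.00925 * aod700**2 + 0.0148 * aod700 - 0.0172
    b0 = -0.7565 * aod700**2 + 0.5057 * aod700 + 0.4557
    return b1 * np.log(w) + b0

print(calc_b(w=1.0, aod700=0.1))  # log(1) = 0, so this returns b0 for aod700 = 0.1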
mar10/wsgidav | wsgidav/dc/simple_dc.py | https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/dc/simple_dc.py#L109-L117 | def require_authentication(self, realm, environ):
"""Return True if this realm requires authentication (grant anonymous access otherwise)."""
realm_entry = self._get_realm_entry(realm)
if realm_entry is None:
_logger.error(
'Missing configuration simple_dc.user_mapping["{}"] (or "*"): '
"realm is not accessible!".format(realm)
)
return realm_entry is not True | [
"def",
"require_authentication",
"(",
"self",
",",
"realm",
",",
"environ",
")",
":",
"realm_entry",
"=",
"self",
".",
"_get_realm_entry",
"(",
"realm",
")",
"if",
"realm_entry",
"is",
"None",
":",
"_logger",
".",
"error",
"(",
"'Missing configuration simple_dc.user_mapping[\"{}\"] (or \"*\"): '",
"\"realm is not accessible!\"",
".",
"format",
"(",
"realm",
")",
")",
"return",
"realm_entry",
"is",
"not",
"True"
] | Return True if this realm requires authentication (grant anonymous access otherwise). | [
"Return",
"True",
"if",
"this",
"realm",
"requires",
"authentication",
"(",
"grant",
"anonymous",
"access",
"otherwise",
")",
"."
] | python | valid |
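An illustrative user_mapping fragment for the realm lookup above; realms and credentials are invented. An entry of True grants anonymous access, so require_authentication returns False for it:
# hypothetical simple_dc configuration
config = {
    "simple_dc": {
        "user_mapping": {
            "/pub": True,                                   # anonymous share
            "/private": {"alice": {"password": "secret"}},  # needs credentials
        }
    }
}
# require_authentication("/pub", environ)     -> False (entry is True)
# require_authentication("/private", environ) -> True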
CS207-Final-Project-Group-10/cs207-FinalProject | fluxions/fluxion_node.py | https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L419-L431 | def _forward_mode(self, *args):
"""Forward mode differentiation for a constant"""
# Evaluate inner function self.f
X: np.ndarray
dX: np.ndarray
X, dX = self.f._forward_mode(*args)
# Alias the power to p for legibility
p: float = self.p
# The function value
val = X ** p
# The derivative
diff = p * X ** (p-1) * dX
return (val, diff) | [
"def",
"_forward_mode",
"(",
"self",
",",
"*",
"args",
")",
":",
"# Evaluate inner function self.f",
"X",
":",
"np",
".",
"ndarray",
"dX",
":",
"np",
".",
"ndarray",
"X",
",",
"dX",
"=",
"self",
".",
"f",
".",
"_forward_mode",
"(",
"*",
"args",
")",
"# Alias the power to p for legibility",
"p",
":",
"float",
"=",
"self",
".",
"p",
"# The function value",
"val",
"=",
"X",
"**",
"p",
"# The derivative",
"diff",
"=",
"p",
"*",
"X",
"**",
"(",
"p",
"-",
"1",
")",
"*",
"dX",
"return",
"(",
"val",
",",
"diff",
")"
] | Forward mode differentiation for a constant | [
"Forward",
"mode",
"differentiation",
"for",
"a",
"constant"
] | python | train |
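A plain-NumPy sketch of the same power-rule propagation as the entry above, independent of the fluxions node classes:
import numpy as np

def forward_power(X, dX, p):
    # value and derivative of f(x) = x**p pushed forward through the chain rule
    val = X ** p
    diff = p * X ** (p - 1) * dX
    return val, diff

val, diff = forward_power(np.array([1.0, 2.0, 3.0]), np.ones(3), p=3)
print(val)   # [ 1.  8. 27.]
print(diff)  # [ 3. 12. 27.]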
zeroSteiner/AdvancedHTTPServer | advancedhttpserver.py | https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1521-L1536 | def close(self):
"""
Close the web socket connection and stop processing results. If the
connection is still open, a WebSocket close message will be sent to the
peer.
"""
if not self.connected:
return
self.connected = False
if self.handler.wfile.closed:
return
if select.select([], [self.handler.wfile], [], 0)[1]:
with self.lock:
self.handler.wfile.write(b'\x88\x00')
self.handler.wfile.flush()
self.on_closed() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"connected",
":",
"return",
"self",
".",
"connected",
"=",
"False",
"if",
"self",
".",
"handler",
".",
"wfile",
".",
"closed",
":",
"return",
"if",
"select",
".",
"select",
"(",
"[",
"]",
",",
"[",
"self",
".",
"handler",
".",
"wfile",
"]",
",",
"[",
"]",
",",
"0",
")",
"[",
"1",
"]",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"handler",
".",
"wfile",
".",
"write",
"(",
"b'\\x88\\x00'",
")",
"self",
".",
"handler",
".",
"wfile",
".",
"flush",
"(",
")",
"self",
".",
"on_closed",
"(",
")"
] | Close the web socket connection and stop processing results. If the
connection is still open, a WebSocket close message will be sent to the
peer. | [
"Close",
"the",
"web",
"socket",
"connection",
"and",
"stop",
"processing",
"results",
".",
"If",
"the",
"connection",
"is",
"still",
"open",
"a",
"WebSocket",
"close",
"message",
"will",
"be",
"sent",
"to",
"the",
"peer",
"."
] | python | train |
azraq27/neural | neural/general.py | https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/general.py#L24-L30 | def voxel_loop(self):
'''iterator that loops through each voxel and yields the coords and time series as a tuple'''
# Prob not the most efficient, but the best I can do for now:
for x in xrange(len(self.data)):
for y in xrange(len(self.data[x])):
for z in xrange(len(self.data[x][y])):
yield ((x,y,z),self.data[x][y][z]) | [
"def",
"voxel_loop",
"(",
"self",
")",
":",
"# Prob not the most efficient, but the best I can do for now:",
"for",
"x",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"data",
")",
")",
":",
"for",
"y",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"data",
"[",
"x",
"]",
")",
")",
":",
"for",
"z",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"data",
"[",
"x",
"]",
"[",
"y",
"]",
")",
")",
":",
"yield",
"(",
"(",
"x",
",",
"y",
",",
"z",
")",
",",
"self",
".",
"data",
"[",
"x",
"]",
"[",
"y",
"]",
"[",
"z",
"]",
")"
] | iterator that loops through each voxel and yields the coords and time series as a tuple | [
"iterator",
"that",
"loops",
"through",
"each",
"voxel",
"and",
"yields",
"the",
"coords",
"and",
"time",
"series",
"as",
"a",
"tuple"
] | python | train |
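An equivalent standalone iterator for the voxel_loop entry above, written with NumPy; this is a sketch, not the library's own API:
import numpy as np

def voxel_loop(data):
    # yields ((x, y, z), time_series) for a 4-D array shaped (x, y, z, t)
    for x, y, z in np.ndindex(data.shape[:3]):
        yield (x, y, z), data[x, y, z]

volume = np.zeros((2, 3, 4, 10))
for coords, series in voxel_loop(volume):
    pass  # process one voxel's time series here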
crs4/hl7apy | hl7apy/__init__.py | https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/__init__.py#L239-L281 | def load_reference(name, element_type, version):
"""
Look for an element of the given type, name and version and return its reference structure
:type element_type: ``str``
:param element_type: the element type to look for (e.g. 'Segment')
:type name: ``str``
:param name: the element name to look for (e.g. 'MSH')
:type version: ``str``
:param version: the version of the library where to search the element (e.g. '2.6')
:rtype: ``dict``
:return: a dictionary describing the element structure
:raise: ``KeyError`` if the element has not been found
The returned dictionary will contain the following keys:
+--------------+--------------------------------------------+
|Key |Value |
+==============+============================================+
|cls |an :class:`hl7apy.core.Element` subclass |
+--------------+--------------------------------------------+
|name |the Element name (e.g. PID) |
+--------------+--------------------------------------------+
|ref |a tuple of one of the following format: |
| | |
| |('leaf', <datatype>, <longName>, <table>) |
| |('sequence', (<child>, (<min>, <max>), ...))|
+--------------+--------------------------------------------+
>>> load_reference('UNKNOWN', 'Segment', '2.5') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ChildNotFound: No child named UNKNOWN
>>> r = load_reference('ADT_A01', 'Message', '2.5')
>>> print(r[0])
sequence
>>> r = load_reference('MSH_3', 'Field', '2.5')
>>> print(r[0])
sequence
"""
lib = load_library(version)
ref = lib.get(name, element_type)
return ref | [
"def",
"load_reference",
"(",
"name",
",",
"element_type",
",",
"version",
")",
":",
"lib",
"=",
"load_library",
"(",
"version",
")",
"ref",
"=",
"lib",
".",
"get",
"(",
"name",
",",
"element_type",
")",
"return",
"ref"
] | Look for an element of the given type, name and version and return its reference structure
:type element_type: ``str``
:param element_type: the element type to look for (e.g. 'Segment')
:type name: ``str``
:param name: the element name to look for (e.g. 'MSH')
:type version: ``str``
:param version: the version of the library where to search the element (e.g. '2.6')
:rtype: ``dict``
:return: a dictionary describing the element structure
:raise: ``KeyError`` if the element has not been found
The returned dictionary will contain the following keys:
+--------------+--------------------------------------------+
|Key |Value |
+==============+============================================+
|cls |an :class:`hl7apy.core.Element` subclass |
+--------------+--------------------------------------------+
|name |the Element name (e.g. PID) |
+--------------+--------------------------------------------+
|ref |a tuple of one of the following format: |
| | |
| |('leaf', <datatype>, <longName>, <table>) |
| |('sequence', (<child>, (<min>, <max>), ...))|
+--------------+--------------------------------------------+
>>> load_reference('UNKNOWN', 'Segment', '2.5') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ChildNotFound: No child named UNKNOWN
>>> r = load_reference('ADT_A01', 'Message', '2.5')
>>> print(r[0])
sequence
>>> r = load_reference('MSH_3', 'Field', '2.5')
>>> print(r[0])
sequence | [
"Look",
"for",
"an",
"element",
"of",
"the",
"given",
"type",
"name",
"and",
"version",
"and",
"return",
"its",
"reference",
"structure"
] | python | train |
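A short usage sketch mirroring the doctests in the load_reference entry above (assumes hl7apy is installed):
from hl7apy import load_reference

ref = load_reference('ADT_A01', 'Message', '2.5')
print(ref[0])  # sequence

ref = load_reference('MSH_3', 'Field', '2.5')
print(ref[0])  # sequence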
ourway/auth | auth/CAS/authorization.py | https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L62-L66 | def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
return json.loads(members.to_json()) | [
"def",
"get_role_members",
"(",
"self",
",",
"role",
")",
":",
"targetRoleDb",
"=",
"AuthGroup",
".",
"objects",
"(",
"creator",
"=",
"self",
".",
"client",
",",
"role",
"=",
"role",
")",
"members",
"=",
"AuthMembership",
".",
"objects",
"(",
"groups__in",
"=",
"targetRoleDb",
")",
".",
"only",
"(",
"'user'",
")",
"return",
"json",
".",
"loads",
"(",
"members",
".",
"to_json",
"(",
")",
")"
] | get permissions of a user | [
"get",
"permissions",
"of",
"a",
"user"
] | python | train |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py#L35-L49 | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_set_scores",
"(",
"self",
")",
":",
"anom_scores_ema",
"=",
"self",
".",
"exp_avg_detector",
".",
"run",
"(",
")",
"anom_scores_deri",
"=",
"self",
".",
"derivative_detector",
".",
"run",
"(",
")",
"anom_scores",
"=",
"{",
"}",
"for",
"timestamp",
"in",
"anom_scores_ema",
".",
"timestamps",
":",
"# Compute a weighted anomaly score.",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"max",
"(",
"anom_scores_ema",
"[",
"timestamp",
"]",
",",
"anom_scores_ema",
"[",
"timestamp",
"]",
"*",
"DEFAULT_DETECTOR_EMA_WEIGHT",
"+",
"anom_scores_deri",
"[",
"timestamp",
"]",
"*",
"(",
"1",
"-",
"DEFAULT_DETECTOR_EMA_WEIGHT",
")",
")",
"# If ema score is significant enough, take the bigger one of the weighted score and deri score.",
"if",
"anom_scores_ema",
"[",
"timestamp",
"]",
">",
"DEFAULT_DETECTOR_EMA_SIGNIFICANT",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"max",
"(",
"anom_scores",
"[",
"timestamp",
"]",
",",
"anom_scores_deri",
"[",
"timestamp",
"]",
")",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Set anomaly scores using a weighted sum. | [
"Set",
"anomaly",
"scores",
"using",
"a",
"weighted",
"sum",
"."
] | python | train |
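A small numeric sketch of the weighting rule in the _set_scores entry above; 0.65 and 0.9 stand in for DEFAULT_DETECTOR_EMA_WEIGHT and DEFAULT_DETECTOR_EMA_SIGNIFICANT, whose actual values are not shown in this entry:
EMA_WEIGHT = 0.65       # placeholder value
EMA_SIGNIFICANT = 0.9   # placeholder value

def combine(ema_score, deri_score):
    score = max(ema_score, ema_score * EMA_WEIGHT + deri_score * (1 - EMA_WEIGHT))
    if ema_score > EMA_SIGNIFICANT:
        score = max(score, deri_score)
    return score

print(combine(0.2, 0.8))   # ~0.41: the weighted sum wins
print(combine(0.95, 0.3))  # 0.95: a significant EMA score keeps the larger of the two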
juju/charm-helpers | charmhelpers/contrib/openstack/audits/openstack_security_guide.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/audits/openstack_security_guide.py#L220-L237 | def validate_file_permissions(config):
"""Verify that permissions on configuration files are secure enough."""
files = config.get('files', {})
for file_name, options in files.items():
for key in options.keys():
if key not in ["owner", "group", "mode"]:
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
mode = options.get('mode', config.get('permissions', '600'))
optional = options.get('optional', config.get('optional', 'False'))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_mode(mode, file, optional)
else:
if os.path.isfile(file_name):
_validate_file_mode(mode, file_name, optional) | [
"def",
"validate_file_permissions",
"(",
"config",
")",
":",
"files",
"=",
"config",
".",
"get",
"(",
"'files'",
",",
"{",
"}",
")",
"for",
"file_name",
",",
"options",
"in",
"files",
".",
"items",
"(",
")",
":",
"for",
"key",
"in",
"options",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"[",
"\"owner\"",
",",
"\"group\"",
",",
"\"mode\"",
"]",
":",
"raise",
"RuntimeError",
"(",
"\"Invalid ownership configuration: {}\"",
".",
"format",
"(",
"key",
")",
")",
"mode",
"=",
"options",
".",
"get",
"(",
"'mode'",
",",
"config",
".",
"get",
"(",
"'permissions'",
",",
"'600'",
")",
")",
"optional",
"=",
"options",
".",
"get",
"(",
"'optional'",
",",
"config",
".",
"get",
"(",
"'optional'",
",",
"'False'",
")",
")",
"if",
"'*'",
"in",
"file_name",
":",
"for",
"file",
"in",
"glob",
".",
"glob",
"(",
"file_name",
")",
":",
"if",
"file",
"not",
"in",
"files",
".",
"keys",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file",
")",
":",
"_validate_file_mode",
"(",
"mode",
",",
"file",
",",
"optional",
")",
"else",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"_validate_file_mode",
"(",
"mode",
",",
"file_name",
",",
"optional",
")"
] | Verify that permissions on configuration files are secure enough. | [
"Verify",
"that",
"permissions",
"on",
"configuration",
"files",
"are",
"secure",
"enough",
"."
] | python | train |
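An illustrative audit configuration for the validate_file_permissions entry above; paths, owners and modes are invented, per-file keys other than owner/group/mode are rejected by the key check, and running it needs charm-helpers installed and the referenced files present on disk:
from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
    validate_file_permissions,
)

config = {
    'files': {
        '/etc/keystone/keystone.conf': {'mode': '640', 'owner': 'keystone'},
        '/etc/keystone/ssl/*.pem': {'mode': '600'},   # glob entries are expanded
    },
    'permissions': '600',   # fallback mode for entries without an explicit 'mode'
}

validate_file_permissions(config)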
pyviz/holoviews | holoviews/core/dimension.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L1206-L1284 | def options(self, *args, **kwargs):
"""Applies simplified option definition returning a new object.
Applies options on an object or nested group of objects in a
flat format returning a new object with the options
applied. If the options are to be set directly on the object a
simple format may be used, e.g.:
obj.options(cmap='viridis', show_title=False)
If the object is nested the options must be qualified using
a type[.group][.label] specification, e.g.:
obj.options('Image', cmap='viridis', show_title=False)
or using:
obj.options({'Image': dict(cmap='viridis', show_title=False)})
Identical to the .opts method but returns a clone of the object
by default.
Args:
*args: Sets of options to apply to object
Supports a number of formats including lists of Options
objects, a type[.group][.label] followed by a set of
keyword options to apply and a dictionary indexed by
type[.group][.label] specs.
backend (optional): Backend to apply options to
Defaults to current selected backend
clone (bool, optional): Whether to clone object
Options can be applied inplace with clone=False
**kwargs: Keywords of options
Set of options to apply to the object
Returns:
Returns the cloned object with the options applied
"""
backend = kwargs.get('backend', None)
clone = kwargs.pop('clone', True)
if len(args) == 0 and len(kwargs)==0:
options = None
elif args and isinstance(args[0], basestring):
options = {args[0]: kwargs}
elif args and isinstance(args[0], list):
if kwargs:
raise ValueError('Please specify a list of option objects, or kwargs, but not both')
options = args[0]
elif args and [k for k in kwargs.keys() if k != 'backend']:
raise ValueError("Options must be defined in one of two formats. "
"Either supply keywords defining the options for "
"the current object, e.g. obj.options(cmap='viridis'), "
"or explicitly define the type, e.g. "
"obj.options({'Image': {'cmap': 'viridis'}}). "
"Supplying both formats is not supported.")
elif args and all(isinstance(el, dict) for el in args):
if len(args) > 1:
self.warning('Only a single dictionary can be passed '
'as a positional argument. Only processing '
'the first dictionary')
options = [Options(spec, **kws) for spec,kws in args[0].items()]
elif args:
options = list(args)
elif kwargs:
options = {type(self).__name__: kwargs}
from ..util import opts
if options is None:
expanded_backends = [(backend, {})]
elif isinstance(options, list): # assuming a flat list of Options objects
expanded_backends = opts._expand_by_backend(options, backend)
else:
expanded_backends = [(backend, opts._expand_options(options, backend))]
obj = self
for backend, expanded in expanded_backends:
obj = obj.opts._dispatch_opts(expanded, backend=backend, clone=clone)
return obj | [
"def",
"options",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"backend",
"=",
"kwargs",
".",
"get",
"(",
"'backend'",
",",
"None",
")",
"clone",
"=",
"kwargs",
".",
"pop",
"(",
"'clone'",
",",
"True",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"and",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"options",
"=",
"None",
"elif",
"args",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"basestring",
")",
":",
"options",
"=",
"{",
"args",
"[",
"0",
"]",
":",
"kwargs",
"}",
"elif",
"args",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"list",
")",
":",
"if",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Please specify a list of option objects, or kwargs, but not both'",
")",
"options",
"=",
"args",
"[",
"0",
"]",
"elif",
"args",
"and",
"[",
"k",
"for",
"k",
"in",
"kwargs",
".",
"keys",
"(",
")",
"if",
"k",
"!=",
"'backend'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Options must be defined in one of two formats. \"",
"\"Either supply keywords defining the options for \"",
"\"the current object, e.g. obj.options(cmap='viridis'), \"",
"\"or explicitly define the type, e.g. \"",
"\"obj.options({'Image': {'cmap': 'viridis'}}). \"",
"\"Supplying both formats is not supported.\"",
")",
"elif",
"args",
"and",
"all",
"(",
"isinstance",
"(",
"el",
",",
"dict",
")",
"for",
"el",
"in",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"1",
":",
"self",
".",
"warning",
"(",
"'Only a single dictionary can be passed '",
"'as a positional argument. Only processing '",
"'the first dictionary'",
")",
"options",
"=",
"[",
"Options",
"(",
"spec",
",",
"*",
"*",
"kws",
")",
"for",
"spec",
",",
"kws",
"in",
"args",
"[",
"0",
"]",
".",
"items",
"(",
")",
"]",
"elif",
"args",
":",
"options",
"=",
"list",
"(",
"args",
")",
"elif",
"kwargs",
":",
"options",
"=",
"{",
"type",
"(",
"self",
")",
".",
"__name__",
":",
"kwargs",
"}",
"from",
".",
".",
"util",
"import",
"opts",
"if",
"options",
"is",
"None",
":",
"expanded_backends",
"=",
"[",
"(",
"backend",
",",
"{",
"}",
")",
"]",
"elif",
"isinstance",
"(",
"options",
",",
"list",
")",
":",
"# assuming a flat list of Options objects",
"expanded_backends",
"=",
"opts",
".",
"_expand_by_backend",
"(",
"options",
",",
"backend",
")",
"else",
":",
"expanded_backends",
"=",
"[",
"(",
"backend",
",",
"opts",
".",
"_expand_options",
"(",
"options",
",",
"backend",
")",
")",
"]",
"obj",
"=",
"self",
"for",
"backend",
",",
"expanded",
"in",
"expanded_backends",
":",
"obj",
"=",
"obj",
".",
"opts",
".",
"_dispatch_opts",
"(",
"expanded",
",",
"backend",
"=",
"backend",
",",
"clone",
"=",
"clone",
")",
"return",
"obj"
] | Applies simplified option definition returning a new object.
Applies options on an object or nested group of objects in a
flat format returning a new object with the options
applied. If the options are to be set directly on the object a
simple format may be used, e.g.:
obj.options(cmap='viridis', show_title=False)
If the object is nested the options must be qualified using
a type[.group][.label] specification, e.g.:
obj.options('Image', cmap='viridis', show_title=False)
or using:
obj.options({'Image': dict(cmap='viridis', show_title=False)})
Identical to the .opts method but returns a clone of the object
by default.
Args:
*args: Sets of options to apply to object
Supports a number of formats including lists of Options
objects, a type[.group][.label] followed by a set of
keyword options to apply and a dictionary indexed by
type[.group][.label] specs.
backend (optional): Backend to apply options to
Defaults to current selected backend
clone (bool, optional): Whether to clone object
Options can be applied inplace with clone=False
**kwargs: Keywords of options
Set of options to apply to the object
Returns:
Returns the cloned object with the options applied | [
"Applies",
"simplified",
"option",
"definition",
"returning",
"a",
"new",
"object",
"."
] | python | train |
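A short usage sketch matching the two call styles described in the options docstring above; the bokeh backend and the sample data are assumptions:
import holoviews as hv
hv.extension('bokeh')   # assumes the bokeh plotting backend is installed

curve = hv.Curve([(0, 0), (1, 1), (2, 4)])

styled = curve.options(color='red')                              # simple keyword form
layout = (curve + curve).options({'Curve': dict(color='blue')})  # qualified form for nested objects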
spyder-ide/spyder | spyder/plugins/editor/panels/debugger.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/debugger.py#L51-L66 | def _draw_breakpoint_icon(self, top, painter, icon_name):
"""Draw the given breakpoint pixmap.
Args:
top (int): top of the line to draw the breakpoint icon.
painter (QPainter)
icon_name (str): key of icon to draw (see: self.icons)
"""
rect = QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
try:
icon = self.icons[icon_name]
except KeyError as e:
debug_print("Breakpoint icon doen't exist, {}".format(e))
else:
icon.paint(painter, rect) | [
"def",
"_draw_breakpoint_icon",
"(",
"self",
",",
"top",
",",
"painter",
",",
"icon_name",
")",
":",
"rect",
"=",
"QRect",
"(",
"0",
",",
"top",
",",
"self",
".",
"sizeHint",
"(",
")",
".",
"width",
"(",
")",
",",
"self",
".",
"sizeHint",
"(",
")",
".",
"height",
"(",
")",
")",
"try",
":",
"icon",
"=",
"self",
".",
"icons",
"[",
"icon_name",
"]",
"except",
"KeyError",
"as",
"e",
":",
"debug_print",
"(",
"\"Breakpoint icon doen't exist, {}\"",
".",
"format",
"(",
"e",
")",
")",
"else",
":",
"icon",
".",
"paint",
"(",
"painter",
",",
"rect",
")"
] | Draw the given breakpoint pixmap.
Args:
top (int): top of the line to draw the breakpoint icon.
painter (QPainter)
icon_name (srt): key of icon to draw (see: self.icons) | [
"Draw",
"the",
"given",
"breakpoint",
"pixmap",
"."
] | python | train |
jtmoulia/switchboard-python | examples/lamsonworker.py | https://github.com/jtmoulia/switchboard-python/blob/c9b0cb74cb12a64385465091be633e78d39f08b1/examples/lamsonworker.py#L37-L44 | def received_new(self, msg):
"""
As new messages arrive, deliver them to the lamson relay.
"""
logger.info("Receiving msg, delivering to Lamson...")
logger.debug("Relaying msg to lamson: From: %s, To: %s",
msg['From'], msg['To'])
self._relay.deliver(msg) | [
"def",
"received_new",
"(",
"self",
",",
"msg",
")",
":",
"logger",
".",
"info",
"(",
"\"Receiving msg, delivering to Lamson...\"",
")",
"logger",
".",
"debug",
"(",
"\"Relaying msg to lamson: From: %s, To: %s\"",
",",
"msg",
"[",
"'From'",
"]",
",",
"msg",
"[",
"'To'",
"]",
")",
"self",
".",
"_relay",
".",
"deliver",
"(",
"msg",
")"
] | As new messages arrive, deliver them to the lamson relay. | [
"As",
"new",
"messages",
"arrive",
"deliver",
"them",
"to",
"the",
"lamson",
"relay",
"."
] | python | train |
slickqa/python-client | slickqa/connection.py | https://github.com/slickqa/python-client/blob/1d36b4977cd4140d7d24917cab2b3f82b60739c2/slickqa/connection.py#L321-L348 | def upload_local_file(self, local_file_path, file_obj=None):
"""Create a Stored File and upload it's data. This is a one part do it all type method. Here is what
it does:
1. "Discover" information about the file (mime-type, size)
2. Create the stored file object in slick
3. Upload (chunked) all the data in the local file
4. re-fetch the stored file object from slick, and return it
"""
if file_obj is None and not os.path.exists(local_file_path):
return
storedfile = StoredFile()
storedfile.mimetype = mimetypes.guess_type(local_file_path)[0]
storedfile.filename = os.path.basename(local_file_path)
if file_obj is None:
storedfile.length = os.stat(local_file_path).st_size
else:
file_obj.seek(0,os.SEEK_END)
storedfile.length = file_obj.tell()
file_obj.seek(0)
storedfile = self(storedfile).create()
md5 = hashlib.md5()
url = self(storedfile).getUrl() + "/addchunk"
if file_obj is None:
with open(local_file_path, 'rb') as filecontents:
upload_chunks(url, storedfile, filecontents)
else:
upload_chunks(url, storedfile, file_obj)
return self(storedfile).update() | [
"def",
"upload_local_file",
"(",
"self",
",",
"local_file_path",
",",
"file_obj",
"=",
"None",
")",
":",
"if",
"file_obj",
"is",
"None",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"local_file_path",
")",
":",
"return",
"storedfile",
"=",
"StoredFile",
"(",
")",
"storedfile",
".",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"local_file_path",
")",
"[",
"0",
"]",
"storedfile",
".",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"local_file_path",
")",
"if",
"file_obj",
"is",
"None",
":",
"storedfile",
".",
"length",
"=",
"os",
".",
"stat",
"(",
"local_file_path",
")",
".",
"st_size",
"else",
":",
"file_obj",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_END",
")",
"storedfile",
".",
"length",
"=",
"file_obj",
".",
"tell",
"(",
")",
"file_obj",
".",
"seek",
"(",
"0",
")",
"storedfile",
"=",
"self",
"(",
"storedfile",
")",
".",
"create",
"(",
")",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"url",
"=",
"self",
"(",
"storedfile",
")",
".",
"getUrl",
"(",
")",
"+",
"\"/addchunk\"",
"if",
"file_obj",
"is",
"None",
":",
"with",
"open",
"(",
"local_file_path",
",",
"'rb'",
")",
"as",
"filecontents",
":",
"upload_chunks",
"(",
"url",
",",
"storedfile",
",",
"filecontents",
")",
"else",
":",
"upload_chunks",
"(",
"url",
",",
"storedfile",
",",
"file_obj",
")",
"return",
"self",
"(",
"storedfile",
")",
".",
"update",
"(",
")"
] | Create a Stored File and upload its data. This is a one part do it all type method. Here is what
it does:
1. "Discover" information about the file (mime-type, size)
2. Create the stored file object in slick
3. Upload (chunked) all the data in the local file
4. re-fetch the stored file object from slick, and return it | [
"Create",
"a",
"Stored",
"File",
"and",
"upload",
"it",
"s",
"data",
".",
"This",
"is",
"a",
"one",
"part",
"do",
"it",
"all",
"type",
"method",
".",
"Here",
"is",
"what",
"it",
"does",
":",
"1",
".",
"Discover",
"information",
"about",
"the",
"file",
"(",
"mime",
"-",
"type",
"size",
")",
"2",
".",
"Create",
"the",
"stored",
"file",
"object",
"in",
"slick",
"3",
".",
"Upload",
"(",
"chunked",
")",
"all",
"the",
"data",
"in",
"the",
"local",
"file",
"4",
".",
"re",
"-",
"fetch",
"the",
"stored",
"file",
"object",
"from",
"slick",
"and",
"return",
"it"
] | python | train |
ehansis/ozelot | ozelot/client.py | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/client.py#L102-L115 | def store_password(params, password):
"""Store the password for a database connection using :mod:`keyring`
Use the ``user`` field as the user name and ``<host>:<driver>`` as service name.
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
password (str): password to store
"""
user_name = params['user']
service_name = params['host'] + ':' + params['driver']
keyring.set_password(service_name=service_name,
username=user_name,
password=password) | [
"def",
"store_password",
"(",
"params",
",",
"password",
")",
":",
"user_name",
"=",
"params",
"[",
"'user'",
"]",
"service_name",
"=",
"params",
"[",
"'host'",
"]",
"+",
"':'",
"+",
"params",
"[",
"'driver'",
"]",
"keyring",
".",
"set_password",
"(",
"service_name",
"=",
"service_name",
",",
"username",
"=",
"user_name",
",",
"password",
"=",
"password",
")"
] | Store the password for a database connection using :mod:`keyring`
Use the ``user`` field as the user name and ``<host>:<driver>`` as service name.
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
password (str): password to store | [
"Store",
"the",
"password",
"for",
"a",
"database",
"connection",
"using",
":",
"mod",
":",
"keyring"
] | python | train |
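A minimal sketch of the same keyring call as the store_password entry above; the connection parameters and password are invented:
import keyring

params = {'user': 'etl_user', 'host': 'localhost', 'driver': 'postgresql'}

# service name is "<host>:<driver>", user name is the 'user' field
keyring.set_password(service_name=params['host'] + ':' + params['driver'],
                     username=params['user'],
                     password='s3cret')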
yeraydiazdiaz/lunr.py | lunr/pipeline.py | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L43-L56 | def load(cls, serialised):
"""Loads a previously serialised pipeline."""
pipeline = cls()
for fn_name in serialised:
try:
fn = cls.registered_functions[fn_name]
except KeyError:
raise BaseLunrException(
"Cannot load unregistered function ".format(fn_name)
)
else:
pipeline.add(fn)
return pipeline | [
"def",
"load",
"(",
"cls",
",",
"serialised",
")",
":",
"pipeline",
"=",
"cls",
"(",
")",
"for",
"fn_name",
"in",
"serialised",
":",
"try",
":",
"fn",
"=",
"cls",
".",
"registered_functions",
"[",
"fn_name",
"]",
"except",
"KeyError",
":",
"raise",
"BaseLunrException",
"(",
"\"Cannot load unregistered function \"",
".",
"format",
"(",
"fn_name",
")",
")",
"else",
":",
"pipeline",
".",
"add",
"(",
"fn",
")",
"return",
"pipeline"
] | Loads a previously serialised pipeline. | [
"Loads",
"a",
"previously",
"serialised",
"pipeline",
"."
] | python | train |
iamteem/redisco | redisco/containers.py | https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/containers.py#L457-L464 | def lt(self, v, limit=None, offset=None):
"""Returns the list of the members of the set that have scores
less than v.
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore(self._min_score, "(%f" % v,
start=offset, num=limit) | [
"def",
"lt",
"(",
"self",
",",
"v",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
"and",
"offset",
"is",
"None",
":",
"offset",
"=",
"0",
"return",
"self",
".",
"zrangebyscore",
"(",
"self",
".",
"_min_score",
",",
"\"(%f\"",
"%",
"v",
",",
"start",
"=",
"offset",
",",
"num",
"=",
"limit",
")"
] | Returns the list of the members of the set that have scores
less than v. | [
"Returns",
"the",
"list",
"of",
"the",
"members",
"of",
"the",
"set",
"that",
"have",
"scores",
"less",
"than",
"v",
"."
] | python | train |
dantezhu/haven | haven/gevent_impl.py | https://github.com/dantezhu/haven/blob/7bf7edab07fa2ade7644a548d6ab9d89cf3d259d/haven/gevent_impl.py#L88-L112 | def set(self, interval, callback, repeat=False, force=True):
"""
Add a timer
"""
if self.timer:
if force:
# If a timer already exists, clear the current one first
self.clear()
else:
# If one already exists, just return
return
def callback_wrapper():
# Make sure this call was triggered by this exact timer
if self.timer == timer:
# This line is required; otherwise a clear() inside the callback would raise GreenletExit
self.timer = None
# Do not write timer = None here, or the self.timer == timer check would fail because timer is not defined
result = safe_call(callback)
if repeat and not self.timer:
# timer is checked again because the callback may have set a new one
self.set(interval, callback, repeat, True)
return result
self.timer = timer = gevent.spawn_later(interval, callback_wrapper) | [
"def",
"set",
"(",
"self",
",",
"interval",
",",
"callback",
",",
"repeat",
"=",
"False",
",",
"force",
"=",
"True",
")",
":",
"if",
"self",
".",
"timer",
":",
"if",
"force",
":",
"# 如果已经存在,那么先要把现在的清空",
"self",
".",
"clear",
"(",
")",
"else",
":",
"# 已经存在的话,就返回了",
"return",
"def",
"callback_wrapper",
"(",
")",
":",
"# 必须要确定,这次调用就是这个timer引起的",
"if",
"self",
".",
"timer",
"==",
"timer",
":",
"# 必须加这句,否则如果在callback中有clear操作,会出现GreenletExit",
"self",
".",
"timer",
"=",
"None",
"# 不可以加 timer = None,否则会导致判断self.timer == timer 报错找不到timer",
"result",
"=",
"safe_call",
"(",
"callback",
")",
"if",
"repeat",
"and",
"not",
"self",
".",
"timer",
":",
"# 之所以还要判断timer,是因为callback中可能设置了新的回调",
"self",
".",
"set",
"(",
"interval",
",",
"callback",
",",
"repeat",
",",
"True",
")",
"return",
"result",
"self",
".",
"timer",
"=",
"timer",
"=",
"gevent",
".",
"spawn_later",
"(",
"interval",
",",
"callback_wrapper",
")"
] | Add a timer | [
"Add",
"a",
"timer"
] | python | train |
reanahub/reana-commons | reana_commons/utils.py | https://github.com/reanahub/reana-commons/blob/abf31d9f495e0d93171c43fc4a414cd292091b11/reana_commons/utils.py#L210-L216 | def render_cvmfs_sc(cvmfs_volume):
"""Render REANA_CVMFS_SC_TEMPLATE."""
name = CVMFS_REPOSITORIES[cvmfs_volume]
rendered_template = dict(REANA_CVMFS_SC_TEMPLATE)
rendered_template['metadata']['name'] = "csi-cvmfs-{}".format(name)
rendered_template['parameters']['repository'] = cvmfs_volume
return rendered_template | [
"def",
"render_cvmfs_sc",
"(",
"cvmfs_volume",
")",
":",
"name",
"=",
"CVMFS_REPOSITORIES",
"[",
"cvmfs_volume",
"]",
"rendered_template",
"=",
"dict",
"(",
"REANA_CVMFS_SC_TEMPLATE",
")",
"rendered_template",
"[",
"'metadata'",
"]",
"[",
"'name'",
"]",
"=",
"\"csi-cvmfs-{}\"",
".",
"format",
"(",
"name",
")",
"rendered_template",
"[",
"'parameters'",
"]",
"[",
"'repository'",
"]",
"=",
"cvmfs_volume",
"return",
"rendered_template"
] | Render REANA_CVMFS_SC_TEMPLATE. | [
"Render",
"REANA_CVMFS_SC_TEMPLATE",
"."
] | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/gooey/gui/lang/i18n.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/lang/i18n.py#L23-L29 | def get_path(language):
''' Returns the full path to the language file '''
filename = language.lower() + '.json'
lang_file_path = os.path.join(_DEFAULT_DIR, filename)
if not os.path.exists(lang_file_path):
raise IOError('Could not find {} language file'.format(language))
return lang_file_path | [
"def",
"get_path",
"(",
"language",
")",
":",
"filename",
"=",
"language",
".",
"lower",
"(",
")",
"+",
"'.json'",
"lang_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_DEFAULT_DIR",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"lang_file_path",
")",
":",
"raise",
"IOError",
"(",
"'Could not find {} language file'",
".",
"format",
"(",
"language",
")",
")",
"return",
"lang_file_path"
] | Returns the full path to the language file | [
"Returns",
"the",
"full",
"path",
"to",
"the",
"language",
"file"
] | python | train |
aodag/WebDispatch | webdispatch/uritemplate.py | https://github.com/aodag/WebDispatch/blob/55f8658a2b4100498e098a80303a346c3940f1bc/webdispatch/uritemplate.py#L93-L97 | def new_named_args(self, cur_named_args: Dict[str, Any]) -> Dict[str, Any]:
""" create new named args updating current name args"""
named_args = cur_named_args.copy()
named_args.update(self.matchdict)
return named_args | [
"def",
"new_named_args",
"(",
"self",
",",
"cur_named_args",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"named_args",
"=",
"cur_named_args",
".",
"copy",
"(",
")",
"named_args",
".",
"update",
"(",
"self",
".",
"matchdict",
")",
"return",
"named_args"
] | create new named args updating current name args | [
"create",
"new",
"named",
"args",
"updating",
"current",
"name",
"args"
] | python | train |
CI-WATER/gsshapy | gsshapy/orm/gag.py | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gag.py#L150-L186 | def _createGsshaPyObjects(self, eventChunk):
"""
Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method
"""
## TODO: Add Support for RADAR file format type values
# Create GSSHAPY PrecipEvent
event = PrecipEvent(description=eventChunk['description'],
nrGag=eventChunk['nrgag'],
nrPds=eventChunk['nrpds'])
# Associate PrecipEvent with PrecipFile
event.precipFile = self
gages = []
for coord in eventChunk['coords']:
# Create GSSHAPY PrecipGage object
gage = PrecipGage(description=coord['description'],
x=coord['x'],
y=coord['y'])
# Associate PrecipGage with PrecipEvent
gage.event = event
# Append to gages list for association with PrecipValues
gages.append(gage)
for valLine in eventChunk['valLines']:
for index, value in enumerate(valLine['values']):
# Create GSSHAPY PrecipValue object
val = PrecipValue(valueType=valLine['type'],
dateTime=valLine['dateTime'],
value=value)
# Associate PrecipValue with PrecipEvent and PrecipGage
val.event = event
val.gage = gages[index] | [
"def",
"_createGsshaPyObjects",
"(",
"self",
",",
"eventChunk",
")",
":",
"## TODO: Add Support for RADAR file format type values",
"# Create GSSHAPY PrecipEvent",
"event",
"=",
"PrecipEvent",
"(",
"description",
"=",
"eventChunk",
"[",
"'description'",
"]",
",",
"nrGag",
"=",
"eventChunk",
"[",
"'nrgag'",
"]",
",",
"nrPds",
"=",
"eventChunk",
"[",
"'nrpds'",
"]",
")",
"# Associate PrecipEvent with PrecipFile",
"event",
".",
"precipFile",
"=",
"self",
"gages",
"=",
"[",
"]",
"for",
"coord",
"in",
"eventChunk",
"[",
"'coords'",
"]",
":",
"# Create GSSHAPY PrecipGage object",
"gage",
"=",
"PrecipGage",
"(",
"description",
"=",
"coord",
"[",
"'description'",
"]",
",",
"x",
"=",
"coord",
"[",
"'x'",
"]",
",",
"y",
"=",
"coord",
"[",
"'y'",
"]",
")",
"# Associate PrecipGage with PrecipEvent",
"gage",
".",
"event",
"=",
"event",
"# Append to gages list for association with PrecipValues",
"gages",
".",
"append",
"(",
"gage",
")",
"for",
"valLine",
"in",
"eventChunk",
"[",
"'valLines'",
"]",
":",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"valLine",
"[",
"'values'",
"]",
")",
":",
"# Create GSSHAPY PrecipValue object",
"val",
"=",
"PrecipValue",
"(",
"valueType",
"=",
"valLine",
"[",
"'type'",
"]",
",",
"dateTime",
"=",
"valLine",
"[",
"'dateTime'",
"]",
",",
"value",
"=",
"value",
")",
"# Associate PrecipValue with PrecipEvent and PrecipGage",
"val",
".",
"event",
"=",
"event",
"val",
".",
"gage",
"=",
"gages",
"[",
"index",
"]"
] | Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method | [
"Create",
"GSSHAPY",
"PrecipEvent",
"PrecipValue",
"and",
"PrecipGage",
"Objects",
"Method"
] | python | train |
uw-it-aca/uw-restclients-sws | uw_sws/department.py | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/department.py#L14-L22 | def get_departments_by_college(college):
"""
Returns a list of restclients.Department models, for the passed
College model.
"""
url = "{}?{}".format(
dept_search_url_prefix,
urlencode({"college_abbreviation": college.label}))
return _json_to_departments(get_resource(url), college) | [
"def",
"get_departments_by_college",
"(",
"college",
")",
":",
"url",
"=",
"\"{}?{}\"",
".",
"format",
"(",
"dept_search_url_prefix",
",",
"urlencode",
"(",
"{",
"\"college_abbreviation\"",
":",
"college",
".",
"label",
"}",
")",
")",
"return",
"_json_to_departments",
"(",
"get_resource",
"(",
"url",
")",
",",
"college",
")"
] | Returns a list of restclients.Department models, for the passed
College model. | [
"Returns",
"a",
"list",
"of",
"restclients",
".",
"Department",
"models",
"for",
"the",
"passed",
"College",
"model",
"."
] | python | train |
Workiva/furious | example/complex_workflow.py | https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/complex_workflow.py#L76-L91 | def state_machine_success():
"""A positive result! Iterate!"""
from furious.async import Async
from furious.context import get_current_async
result = get_current_async().result
if result == 'ALPHA':
logging.info('Inserting continuation for state %s.', result)
return Async(target=complex_state_generator_alpha, args=[result])
elif result == 'BRAVO':
logging.info('Inserting continuation for state %s.', result)
return Async(target=complex_state_generator_bravo, args=[result])
logging.info('Done working, stop now.') | [
"def",
"state_machine_success",
"(",
")",
":",
"from",
"furious",
".",
"async",
"import",
"Async",
"from",
"furious",
".",
"context",
"import",
"get_current_async",
"result",
"=",
"get_current_async",
"(",
")",
".",
"result",
"if",
"result",
"==",
"'ALPHA'",
":",
"logging",
".",
"info",
"(",
"'Inserting continuation for state %s.'",
",",
"result",
")",
"return",
"Async",
"(",
"target",
"=",
"complex_state_generator_alpha",
",",
"args",
"=",
"[",
"result",
"]",
")",
"elif",
"result",
"==",
"'BRAVO'",
":",
"logging",
".",
"info",
"(",
"'Inserting continuation for state %s.'",
",",
"result",
")",
"return",
"Async",
"(",
"target",
"=",
"complex_state_generator_bravo",
",",
"args",
"=",
"[",
"result",
"]",
")",
"logging",
".",
"info",
"(",
"'Done working, stop now.'",
")"
] | A positive result! Iterate! | [
"A",
"positive",
"result!",
"Iterate!"
] | python | train |
JoeVirtual/KonFoo | konfoo/core.py | https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1046-L1055 | def insert(self, index, item):
""" Inserts the *item* before the *index* into the `Sequence`.
:param int index: `Sequence` index.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
"""
if not is_any(item):
raise MemberTypeError(self, item, member=len(self))
self._data.insert(index, item) | [
"def",
"insert",
"(",
"self",
",",
"index",
",",
"item",
")",
":",
"if",
"not",
"is_any",
"(",
"item",
")",
":",
"raise",
"MemberTypeError",
"(",
"self",
",",
"item",
",",
"member",
"=",
"len",
"(",
"self",
")",
")",
"self",
".",
"_data",
".",
"insert",
"(",
"index",
",",
"item",
")"
] | Inserts the *item* before the *index* into the `Sequence`.
:param int index: `Sequence` index.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance. | [
"Inserts",
"the",
"*",
"item",
"*",
"before",
"the",
"*",
"index",
"*",
"into",
"the",
"Sequence",
"."
] | python | train |
base4sistemas/pyescpos | escpos/conn/serial.py | https://github.com/base4sistemas/pyescpos/blob/621bd00f1499aff700f37d8d36d04e0d761708f1/escpos/conn/serial.py#L349-L382 | def as_from(value):
"""
Constructs an instance of :class:`SerialSettings` from a string
representation, that looks like ``/dev/ttyS0:9600,8,1,N,RTSCTS``,
describing, in order, the serial port name, baud rate, byte size,
stop bits, parity and flow control protocol.
Valid string representations are (in cases where protocol is not
specified, RTS/CTS is assumed)::
COM1:115000,8,1,E
COM1:115000:8:1:E
COM4:9600:8:2:O:DSRDTR
/dev/ttyS0:9600,8,1,N,RTSCTS
/dev/ttyS0,9600,8,1,N
"""
keys = ['port','baudrate','databits','stopbits','parity','protocol']
values = value.replace(',', ':').split(':')
if len(values) == 5:
values.append(RTSCTS)
if len(keys) != len(values):
raise ValueError('Unknown serial port string format: %s '
'(expecting something like "COM1:9600,8,1,N,RTSCTS")' % (
value,))
kwargs = dict(zip(keys, values))
kwargs['baudrate'] = int(kwargs['baudrate'])
kwargs['databits'] = int(kwargs['databits'])
kwargs['stopbits'] = int(kwargs['stopbits'])
return SerialSettings(**kwargs) | [
"def",
"as_from",
"(",
"value",
")",
":",
"keys",
"=",
"[",
"'port'",
",",
"'baudrate'",
",",
"'databits'",
",",
"'stopbits'",
",",
"'parity'",
",",
"'protocol'",
"]",
"values",
"=",
"value",
".",
"replace",
"(",
"','",
",",
"':'",
")",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"values",
")",
"==",
"5",
":",
"values",
".",
"append",
"(",
"RTSCTS",
")",
"if",
"len",
"(",
"keys",
")",
"!=",
"len",
"(",
"values",
")",
":",
"raise",
"ValueError",
"(",
"'Unknown serial port string format: %s '",
"'(expecting something like \"COM1:9600,8,1,N,RTSCTS\")'",
"%",
"(",
"value",
",",
")",
")",
"kwargs",
"=",
"dict",
"(",
"zip",
"(",
"keys",
",",
"values",
")",
")",
"kwargs",
"[",
"'baudrate'",
"]",
"=",
"int",
"(",
"kwargs",
"[",
"'baudrate'",
"]",
")",
"kwargs",
"[",
"'databits'",
"]",
"=",
"int",
"(",
"kwargs",
"[",
"'databits'",
"]",
")",
"kwargs",
"[",
"'stopbits'",
"]",
"=",
"int",
"(",
"kwargs",
"[",
"'stopbits'",
"]",
")",
"return",
"SerialSettings",
"(",
"*",
"*",
"kwargs",
")"
] | Constructs an instance of :class:`SerialSettings` from a string
representation, that looks like ``/dev/ttyS0:9600,8,1,N,RTSCTS``,
describing, in order, the serial port name, baud rate, byte size,
stop bits, parity and flow control protocol.
Valid string representations are (in cases where protocol is not
specified, RTS/CTS is assumed)::
COM1:115000,8,1,E
COM1:115000:8:1:E
COM4:9600:8:2:O:DSRDTR
/dev/ttyS0:9600,8,1,N,RTSCTS
/dev/ttyS0,9600,8,1,N | [
"Constructs",
"an",
"instance",
"of",
":",
"class",
":",
"SerialSettings",
"from",
"a",
"string",
"representation",
"that",
"looks",
"like",
"/",
"dev",
"/",
"ttyS0",
":",
"9600",
"8",
"1",
"N",
"RTSCTS",
"describing",
"in",
"order",
"the",
"serial",
"port",
"name",
"baud",
"rate",
"byte",
"size",
"stop",
"bits",
"parity",
"and",
"flow",
"control",
"protocol",
"."
] | python | train |
DataDog/integrations-core | openstack_controller/datadog_checks/openstack_controller/openstack_controller.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py#L601-L669 | def init_api(self, instance_config, custom_tags):
"""
Guarantees a valid auth scope for this instance, and returns it
Communicates with the identity server and initializes a new scope when one is absent, or has been forcibly
removed due to token expiry
"""
custom_tags = custom_tags or []
keystone_server_url = instance_config.get("keystone_server_url")
proxy_config = self.get_instance_proxy(instance_config, keystone_server_url)
if self._api is None:
# We are missing the entire instance scope either because it is the first time we initialize it or because
# authentication previously failed and got removed from the cache
# Let's populate it now
try:
self.log.debug("Fetch scope for instance {}".format(self.instance_name))
# Set keystone api with proper token
self._api = ApiFactory.create(self.log, proxy_config, instance_config)
self.service_check(
self.IDENTITY_API_SC,
AgentCheck.OK,
tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags,
)
except KeystoneUnreachable as e:
self.warning(
"The agent could not contact the specified identity server at {} . "
"Are you sure it is up at that address?".format(keystone_server_url)
)
self.log.debug("Problem grabbing auth token: %s", e)
self.service_check(
self.IDENTITY_API_SC,
AgentCheck.CRITICAL,
tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags,
)
# If Keystone is down/unreachable, we default the
# Nova and Neutron APIs to UNKNOWN since we cannot access the service catalog
self.service_check(
self.NETWORK_API_SC,
AgentCheck.UNKNOWN,
tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags,
)
self.service_check(
self.COMPUTE_API_SC,
AgentCheck.UNKNOWN,
tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags,
)
except MissingNovaEndpoint as e:
self.warning("The agent could not find a compatible Nova endpoint in your service catalog!")
self.log.debug("Failed to get nova endpoint for response catalog: %s", e)
self.service_check(
self.COMPUTE_API_SC,
AgentCheck.CRITICAL,
tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags,
)
except MissingNeutronEndpoint:
self.warning("The agent could not find a compatible Neutron endpoint in your service catalog!")
self.service_check(
self.NETWORK_API_SC,
AgentCheck.CRITICAL,
tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags,
)
if self._api is None:
# Fast fail in the absence of an api
raise IncompleteConfig() | [
"def",
"init_api",
"(",
"self",
",",
"instance_config",
",",
"custom_tags",
")",
":",
"custom_tags",
"=",
"custom_tags",
"or",
"[",
"]",
"keystone_server_url",
"=",
"instance_config",
".",
"get",
"(",
"\"keystone_server_url\"",
")",
"proxy_config",
"=",
"self",
".",
"get_instance_proxy",
"(",
"instance_config",
",",
"keystone_server_url",
")",
"if",
"self",
".",
"_api",
"is",
"None",
":",
"# We are missing the entire instance scope either because it is the first time we initialize it or because",
"# authentication previously failed and got removed from the cache",
"# Let's populate it now",
"try",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Fetch scope for instance {}\"",
".",
"format",
"(",
"self",
".",
"instance_name",
")",
")",
"# Set keystone api with proper token",
"self",
".",
"_api",
"=",
"ApiFactory",
".",
"create",
"(",
"self",
".",
"log",
",",
"proxy_config",
",",
"instance_config",
")",
"self",
".",
"service_check",
"(",
"self",
".",
"IDENTITY_API_SC",
",",
"AgentCheck",
".",
"OK",
",",
"tags",
"=",
"[",
"\"keystone_server: {}\"",
".",
"format",
"(",
"keystone_server_url",
")",
"]",
"+",
"custom_tags",
",",
")",
"except",
"KeystoneUnreachable",
"as",
"e",
":",
"self",
".",
"warning",
"(",
"\"The agent could not contact the specified identity server at {} . \"",
"\"Are you sure it is up at that address?\"",
".",
"format",
"(",
"keystone_server_url",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Problem grabbing auth token: %s\"",
",",
"e",
")",
"self",
".",
"service_check",
"(",
"self",
".",
"IDENTITY_API_SC",
",",
"AgentCheck",
".",
"CRITICAL",
",",
"tags",
"=",
"[",
"\"keystone_server: {}\"",
".",
"format",
"(",
"keystone_server_url",
")",
"]",
"+",
"custom_tags",
",",
")",
"# If Keystone is down/unreachable, we default the",
"# Nova and Neutron APIs to UNKNOWN since we cannot access the service catalog",
"self",
".",
"service_check",
"(",
"self",
".",
"NETWORK_API_SC",
",",
"AgentCheck",
".",
"UNKNOWN",
",",
"tags",
"=",
"[",
"\"keystone_server: {}\"",
".",
"format",
"(",
"keystone_server_url",
")",
"]",
"+",
"custom_tags",
",",
")",
"self",
".",
"service_check",
"(",
"self",
".",
"COMPUTE_API_SC",
",",
"AgentCheck",
".",
"UNKNOWN",
",",
"tags",
"=",
"[",
"\"keystone_server: {}\"",
".",
"format",
"(",
"keystone_server_url",
")",
"]",
"+",
"custom_tags",
",",
")",
"except",
"MissingNovaEndpoint",
"as",
"e",
":",
"self",
".",
"warning",
"(",
"\"The agent could not find a compatible Nova endpoint in your service catalog!\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Failed to get nova endpoint for response catalog: %s\"",
",",
"e",
")",
"self",
".",
"service_check",
"(",
"self",
".",
"COMPUTE_API_SC",
",",
"AgentCheck",
".",
"CRITICAL",
",",
"tags",
"=",
"[",
"\"keystone_server: {}\"",
".",
"format",
"(",
"keystone_server_url",
")",
"]",
"+",
"custom_tags",
",",
")",
"except",
"MissingNeutronEndpoint",
":",
"self",
".",
"warning",
"(",
"\"The agent could not find a compatible Neutron endpoint in your service catalog!\"",
")",
"self",
".",
"service_check",
"(",
"self",
".",
"NETWORK_API_SC",
",",
"AgentCheck",
".",
"CRITICAL",
",",
"tags",
"=",
"[",
"\"keystone_server: {}\"",
".",
"format",
"(",
"keystone_server_url",
")",
"]",
"+",
"custom_tags",
",",
")",
"if",
"self",
".",
"_api",
"is",
"None",
":",
"# Fast fail in the absence of an api",
"raise",
"IncompleteConfig",
"(",
")"
] | Guarantees a valid auth scope for this instance, and returns it
Communicates with the identity server and initializes a new scope when one is absent, or has been forcibly
removed due to token expiry | [
"Guarantees",
"a",
"valid",
"auth",
"scope",
"for",
"this",
"instance",
"and",
"returns",
"it"
] | python | train |
PyGithub/PyGithub | github/AuthenticatedUser.py | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/AuthenticatedUser.py#L383-L393 | def add_to_following(self, following):
"""
:calls: `PUT /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/following/" + following._identity
) | [
"def",
"add_to_following",
"(",
"self",
",",
"following",
")",
":",
"assert",
"isinstance",
"(",
"following",
",",
"github",
".",
"NamedUser",
".",
"NamedUser",
")",
",",
"following",
"headers",
",",
"data",
"=",
"self",
".",
"_requester",
".",
"requestJsonAndCheck",
"(",
"\"PUT\"",
",",
"\"/user/following/\"",
"+",
"following",
".",
"_identity",
")"
] | :calls: `PUT /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None | [
":",
"calls",
":",
"PUT",
"/",
"user",
"/",
"following",
"/",
":",
"user",
"<http",
":",
"//",
"developer",
".",
"github",
".",
"com",
"/",
"v3",
"/",
"users",
"/",
"followers",
">",
"_",
":",
"param",
"following",
":",
":",
"class",
":",
"github",
".",
"NamedUser",
".",
"NamedUser",
":",
"rtype",
":",
"None"
] | python | train |
getpelican/pelican-plugins | render_math/math.py | https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/render_math/math.py#L319-L330 | def rst_add_mathjax(content):
"""Adds mathjax script for reStructuredText"""
# .rst is the only valid extension for reStructuredText files
_, ext = os.path.splitext(os.path.basename(content.source_path))
if ext != '.rst':
return
# If math class is present in text, add the javascript
# note that RST hardwires mathjax to be class "math"
if 'class="math"' in content._content:
content._content += "<script type='text/javascript'>%s</script>" % rst_add_mathjax.mathjax_script | [
"def",
"rst_add_mathjax",
"(",
"content",
")",
":",
"# .rst is the only valid extension for reStructuredText files",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"content",
".",
"source_path",
")",
")",
"if",
"ext",
"!=",
"'.rst'",
":",
"return",
"# If math class is present in text, add the javascript",
"# note that RST hardwires mathjax to be class \"math\"",
"if",
"'class=\"math\"'",
"in",
"content",
".",
"_content",
":",
"content",
".",
"_content",
"+=",
"\"<script type='text/javascript'>%s</script>\"",
"%",
"rst_add_mathjax",
".",
"mathjax_script"
] | Adds mathjax script for reStructuredText | [
"Adds",
"mathjax",
"script",
"for",
"reStructuredText"
] | python | train |
ace0/pyrelic | pyrelic/ecqv.py | https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/ecqv.py#L48-L81 | def validate(idText, alpha, r, cert, caPubkey):
"""
A server can validate an implicit certificate response using identity
string @idText, private value @alpha (used to generate cert request),
and the certificate response @r (private key component) and implicit
@cert.
@raises Exception if the certificate response is invalid.
@returns (privkey, pubkey)
"""
# Verify parameter types
assertScalarType(alpha)
assertScalarType(r)
assertType(cert, ec1Element)
assertType(caPubkey, ec1Element)
G = generatorEc()
# Compute the private key @s
e = _exp(cert, idText)
s = (e*alpha + r) % orderEc()
# Compute the public key two ways: using the privkey and using the cert
# (the way a client will compute it)
# The easy way
S1 = s*G
# Using the cert
S2 = e*cert + caPubkey
# The two techniques should produce the same pubkey value -- raise an
# exception if they don't match
if S1 != S2:
raise Exception("Implicit certification response failed validation")
return s, S1 | [
"def",
"validate",
"(",
"idText",
",",
"alpha",
",",
"r",
",",
"cert",
",",
"caPubkey",
")",
":",
"# Verify parameter types",
"assertScalarType",
"(",
"alpha",
")",
"assertScalarType",
"(",
"r",
")",
"assertType",
"(",
"cert",
",",
"ec1Element",
")",
"assertType",
"(",
"caPubkey",
",",
"ec1Element",
")",
"G",
"=",
"generatorEc",
"(",
")",
"# Compute the private key @s",
"e",
"=",
"_exp",
"(",
"cert",
",",
"idText",
")",
"s",
"=",
"(",
"e",
"*",
"alpha",
"+",
"r",
")",
"%",
"orderEc",
"(",
")",
"# Compute the public key two ways: using the privkey and using the cert",
"# (the way a client will compute it)",
"# The easy way",
"S1",
"=",
"s",
"*",
"G",
"# Using the cert",
"S2",
"=",
"e",
"*",
"cert",
"+",
"caPubkey",
"# The two techniques should produce the same pubkey value -- raise an",
"# exception if they don't match",
"if",
"S1",
"!=",
"S2",
":",
"raise",
"Exception",
"(",
"\"Implicit certification response failed validation\"",
")",
"return",
"s",
",",
"S1"
] | A server can validate an implicit certificate response using identity
string @idText, private value @alpha (used to generate cert request),
and the certificate response @r (private key component) and implicit
@cert.
@raises Exception if the certificate response is invalid.
@returns (privkey, pubkey) | [
"A",
"server",
"can",
"validate",
"an",
"implicit",
"certificate",
"response",
"using",
"identity",
"string"
] | python | train |
Iotic-Labs/py-IoticAgent | src/IoticAgent/Core/Client.py | https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Client.py#L1225-L1235 | def __fire_callback(self, type_, *args, **kwargs):
"""Returns True if at least one callback was called"""
called = False
plain_submit = self.__threadpool.submit
with self.__callbacks:
submit = self.__crud_threadpool.submit if type_ in _CB_CRUD_TYPES else plain_submit
for func, serialised_if_crud in self.__callbacks[type_]:
called = True
# allow CRUD callbacks to not be serialised if requested
(submit if serialised_if_crud else plain_submit)(func, *args, **kwargs)
return called | [
"def",
"__fire_callback",
"(",
"self",
",",
"type_",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"called",
"=",
"False",
"plain_submit",
"=",
"self",
".",
"__threadpool",
".",
"submit",
"with",
"self",
".",
"__callbacks",
":",
"submit",
"=",
"self",
".",
"__crud_threadpool",
".",
"submit",
"if",
"type_",
"in",
"_CB_CRUD_TYPES",
"else",
"plain_submit",
"for",
"func",
",",
"serialised_if_crud",
"in",
"self",
".",
"__callbacks",
"[",
"type_",
"]",
":",
"called",
"=",
"True",
"# allow CRUD callbacks to not be serialised if requested",
"(",
"submit",
"if",
"serialised_if_crud",
"else",
"plain_submit",
")",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"called"
] | Returns True if at least one callback was called | [
"Returns",
"True",
"if",
"at",
"least",
"one",
"callback",
"was",
"called"
] | python | train |
DataONEorg/d1_python | lib_client/src/d1_client/cnclient.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/cnclient.py#L622-L640 | def listSubjectsResponse(
self, query, status=None, start=None, count=None, vendorSpecific=None
):
"""CNIdentity.listSubjects(session, query, status, start, count) → SubjectList
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.listSubjects.
Args:
query:
status:
start:
count:
vendorSpecific:
Returns:
"""
url_query = {'status': status, 'start': start, 'count': count, 'query': query}
return self.GET('accounts', query=url_query, headers=vendorSpecific) | [
"def",
"listSubjectsResponse",
"(",
"self",
",",
"query",
",",
"status",
"=",
"None",
",",
"start",
"=",
"None",
",",
"count",
"=",
"None",
",",
"vendorSpecific",
"=",
"None",
")",
":",
"url_query",
"=",
"{",
"'status'",
":",
"status",
",",
"'start'",
":",
"start",
",",
"'count'",
":",
"count",
",",
"'query'",
":",
"query",
"}",
"return",
"self",
".",
"GET",
"(",
"'accounts'",
",",
"query",
"=",
"url_query",
",",
"headers",
"=",
"vendorSpecific",
")"
] | CNIdentity.listSubjects(session, query, status, start, count) → SubjectList
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.listSubjects.
Args:
query:
status:
start:
count:
vendorSpecific:
Returns: | [
"CNIdentity",
".",
"listSubjects",
"(",
"session",
"query",
"status",
"start",
"count",
")",
"→",
"SubjectList",
"https",
":",
"//",
"releases",
".",
"dataone",
".",
"org",
"/",
"online",
"/",
"api",
"-",
"documentation",
"-",
"v2",
".",
"0",
".",
"1",
"/",
"apis",
"/",
"CN_APIs",
".",
"html#CNIdentity",
".",
"listSubjects",
"."
] | python | train |
d0c-s4vage/pfp | pfp/interp.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L340-L351 | def get_var(self, name, recurse=True):
"""Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: TODO
"""
self._dlog("getting var '{}'".format(name))
return self._search("vars", name, recurse) | [
"def",
"get_var",
"(",
"self",
",",
"name",
",",
"recurse",
"=",
"True",
")",
":",
"self",
".",
"_dlog",
"(",
"\"getting var '{}'\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"self",
".",
"_search",
"(",
"\"vars\"",
",",
"name",
",",
"recurse",
")"
] | Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: TODO | [
"Return",
"the",
"first",
"var",
"of",
"name",
"name",
"in",
"the",
"current",
"scope",
"stack",
"(",
"remember",
"vars",
"are",
"the",
"ones",
"that",
"parse",
"the",
"input",
"stream",
")"
] | python | train |
awslabs/aws-greengrass-group-setup | gg_group_setup/cmd.py | https://github.com/awslabs/aws-greengrass-group-setup/blob/06189ceccb794fedf80e0e7649938c18792e16c9/gg_group_setup/cmd.py#L773-L837 | def create_devices(self, thing_names, config_file, region=None,
cert_dir=None, append=False, account_id=None,
policy_name='ggd-discovery-policy', profile_name=None):
"""
Using the `thing_names` values, creates Things in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as Greengrass Devices.
:param thing_names: the thing name or list of thing names to create and
use as Greengrass Devices
:param config_file: config file used to track the Greengrass Devices in
the group
:param region: the region in which to create the new devices.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param append: append the created devices to the list of devices in the
config file. [default: False]
:param account_id: the account ID in which to create devices. If 'None'
the config_file will be checked for an `account_id` value in the
`misc` section.
:param policy_name: the name of the policy to associate with the device.
[default: 'ggd-discovery-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None]
"""
logging.info("create_devices thing_names:{0}".format(thing_names))
config = GroupConfigFile(config_file=config_file)
if append is False and config.is_device_fresh() is False:
raise ValueError(
"Config file tracking previously created devices. Append "
"devices instead"
)
if region is None:
region = self._region
if account_id is None:
account_id = self._account_id
devices = dict()
if append:
devices = config['devices']
if type(thing_names) is str:
thing_names = [thing_names]
iot_client = _get_iot_session(region=region, profile_name=profile_name)
for thing_name in thing_names:
keys_cert, thing = self.create_thing(thing_name, region, cert_dir)
cert_arn = keys_cert['certificateArn']
devices[thing_name] = {
'thing_arn': thing['thingArn'],
'cert_arn': cert_arn,
'cert_id': keys_cert['certificateId'],
'thing_name': thing_name
}
logging.info("Thing:'{0}' associated with cert:'{1}'".format(
thing_name, cert_arn))
device_policy = self.get_device_policy(
device_name=thing_name, account_id=account_id, region=region
)
self._create_attach_thing_policy(cert_arn, device_policy,
iot_client, policy_name)
config['devices'] = devices
logging.info("create_devices cfg:{0}".format(config)) | [
"def",
"create_devices",
"(",
"self",
",",
"thing_names",
",",
"config_file",
",",
"region",
"=",
"None",
",",
"cert_dir",
"=",
"None",
",",
"append",
"=",
"False",
",",
"account_id",
"=",
"None",
",",
"policy_name",
"=",
"'ggd-discovery-policy'",
",",
"profile_name",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"\"create_devices thing_names:{0}\"",
".",
"format",
"(",
"thing_names",
")",
")",
"config",
"=",
"GroupConfigFile",
"(",
"config_file",
"=",
"config_file",
")",
"if",
"append",
"is",
"False",
"and",
"config",
".",
"is_device_fresh",
"(",
")",
"is",
"False",
":",
"raise",
"ValueError",
"(",
"\"Config file tracking previously created devices. Append \"",
"\"devices instead\"",
")",
"if",
"region",
"is",
"None",
":",
"region",
"=",
"self",
".",
"_region",
"if",
"account_id",
"is",
"None",
":",
"account_id",
"=",
"self",
".",
"_account_id",
"devices",
"=",
"dict",
"(",
")",
"if",
"append",
":",
"devices",
"=",
"config",
"[",
"'devices'",
"]",
"if",
"type",
"(",
"thing_names",
")",
"is",
"str",
":",
"thing_names",
"=",
"[",
"thing_names",
"]",
"iot_client",
"=",
"_get_iot_session",
"(",
"region",
"=",
"region",
",",
"profile_name",
"=",
"profile_name",
")",
"for",
"thing_name",
"in",
"thing_names",
":",
"keys_cert",
",",
"thing",
"=",
"self",
".",
"create_thing",
"(",
"thing_name",
",",
"region",
",",
"cert_dir",
")",
"cert_arn",
"=",
"keys_cert",
"[",
"'certificateArn'",
"]",
"devices",
"[",
"thing_name",
"]",
"=",
"{",
"'thing_arn'",
":",
"thing",
"[",
"'thingArn'",
"]",
",",
"'cert_arn'",
":",
"cert_arn",
",",
"'cert_id'",
":",
"keys_cert",
"[",
"'certificateId'",
"]",
",",
"'thing_name'",
":",
"thing_name",
"}",
"logging",
".",
"info",
"(",
"\"Thing:'{0}' associated with cert:'{1}'\"",
".",
"format",
"(",
"thing_name",
",",
"cert_arn",
")",
")",
"device_policy",
"=",
"self",
".",
"get_device_policy",
"(",
"device_name",
"=",
"thing_name",
",",
"account_id",
"=",
"account_id",
",",
"region",
"=",
"region",
")",
"self",
".",
"_create_attach_thing_policy",
"(",
"cert_arn",
",",
"device_policy",
",",
"iot_client",
",",
"policy_name",
")",
"config",
"[",
"'devices'",
"]",
"=",
"devices",
"logging",
".",
"info",
"(",
"\"create_devices cfg:{0}\"",
".",
"format",
"(",
"config",
")",
")"
] | Using the `thing_names` values, creates Things in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as Greengrass Devices.
:param thing_names: the thing name or list of thing names to create and
use as Greengrass Devices
:param config_file: config file used to track the Greengrass Devices in
the group
:param region: the region in which to create the new devices.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param append: append the created devices to the list of devices in the
config file. [default: False]
:param account_id: the account ID in which to create devices. If 'None'
the config_file will be checked for an `account_id` value in the
`misc` section.
:param policy_name: the name of the policy to associate with the device.
[default: 'ggd-discovery-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None] | [
"Using",
"the",
"thing_names",
"values",
"creates",
"Things",
"in",
"AWS",
"IoT",
"attaches",
"and",
"downloads",
"new",
"keys",
"&",
"certs",
"to",
"the",
"certificate",
"directory",
"then",
"records",
"the",
"created",
"information",
"in",
"the",
"local",
"config",
"file",
"for",
"inclusion",
"in",
"the",
"Greengrass",
"Group",
"as",
"Greengrass",
"Devices",
"."
] | python | train |
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/extensions_v1beta1_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L1243-L1272 | def delete_collection_namespaced_network_policy(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_network_policy # noqa: E501
delete collection of NetworkPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_network_policy(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_network_policy_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_collection_namespaced_network_policy_with_http_info(namespace, **kwargs) # noqa: E501
return data | [
"def",
"delete_collection_namespaced_network_policy",
"(",
"self",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"delete_collection_namespaced_network_policy_with_http_info",
"(",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"delete_collection_namespaced_network_policy_with_http_info",
"(",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | delete_collection_namespaced_network_policy # noqa: E501
delete collection of NetworkPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_network_policy(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | [
"delete_collection_namespaced_network_policy",
"#",
"noqa",
":",
"E501"
] | python | train |
datacamp/antlr-ast | antlr_ast/ast.py | https://github.com/datacamp/antlr-ast/blob/d08d5eb2e663bd40501d0eeddc8a731ac7e96b11/antlr_ast/ast.py#L629-L632 | def visitTerminal(self, ctx):
"""Converts case insensitive keywords and identifiers to lowercase"""
text = ctx.getText()
return Terminal.from_text(text, ctx) | [
"def",
"visitTerminal",
"(",
"self",
",",
"ctx",
")",
":",
"text",
"=",
"ctx",
".",
"getText",
"(",
")",
"return",
"Terminal",
".",
"from_text",
"(",
"text",
",",
"ctx",
")"
] | Converts case insensitive keywords and identifiers to lowercase | [
"Converts",
"case",
"insensitive",
"keywords",
"and",
"identifiers",
"to",
"lowercase"
] | python | train |
gmr/tinman | tinman/application.py | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/application.py#L216-L219 | def _prepare_transforms(self):
"""Prepare the list of transforming objects"""
for offset, value in enumerate(self._config.get(config.TRANSFORMS, [])):
self._config[config.TRANSFORMS][offset] = self._import_class(value) | [
"def",
"_prepare_transforms",
"(",
"self",
")",
":",
"for",
"offset",
",",
"value",
"in",
"enumerate",
"(",
"self",
".",
"_config",
".",
"get",
"(",
"config",
".",
"TRANSFORMS",
",",
"[",
"]",
")",
")",
":",
"self",
".",
"_config",
"[",
"config",
".",
"TRANSFORMS",
"]",
"[",
"offset",
"]",
"=",
"self",
".",
"_import_class",
"(",
"value",
")"
] | Prepare the list of transforming objects | [
"Prepare",
"the",
"list",
"of",
"transforming",
"objects"
] | python | train |
matthew-brett/delocate | delocate/delocating.py | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L508-L552 | def bads_report(bads, path_prefix=None):
""" Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing
"""
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | [
"def",
"bads_report",
"(",
"bads",
",",
"path_prefix",
"=",
"None",
")",
":",
"path_processor",
"=",
"(",
"(",
"lambda",
"x",
":",
"x",
")",
"if",
"path_prefix",
"is",
"None",
"else",
"get_rp_stripper",
"(",
"path_prefix",
")",
")",
"reports",
"=",
"[",
"]",
"for",
"result",
"in",
"bads",
":",
"if",
"len",
"(",
"result",
")",
"==",
"3",
":",
"depended_lib",
",",
"depending_lib",
",",
"missing_archs",
"=",
"result",
"reports",
".",
"append",
"(",
"\"{0} needs {1} {2} missing from {3}\"",
".",
"format",
"(",
"path_processor",
"(",
"depending_lib",
")",
",",
"'archs'",
"if",
"len",
"(",
"missing_archs",
")",
">",
"1",
"else",
"'arch'",
",",
"', '",
".",
"join",
"(",
"sorted",
"(",
"missing_archs",
")",
")",
",",
"path_processor",
"(",
"depended_lib",
")",
")",
")",
"elif",
"len",
"(",
"result",
")",
"==",
"2",
":",
"depending_lib",
",",
"missing_archs",
"=",
"result",
"reports",
".",
"append",
"(",
"\"Required {0} {1} missing from {2}\"",
".",
"format",
"(",
"'archs'",
"if",
"len",
"(",
"missing_archs",
")",
">",
"1",
"else",
"'arch'",
",",
"', '",
".",
"join",
"(",
"sorted",
"(",
"missing_archs",
")",
")",
",",
"path_processor",
"(",
"depending_lib",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Report tuple should be length 2 or 3'",
")",
"return",
"'\\n'",
".",
"join",
"(",
"sorted",
"(",
"reports",
")",
")"
] | Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing | [
"Return",
"a",
"nice",
"report",
"of",
"bad",
"architectures",
"in",
"bads"
] | python | train |
GreatFruitOmsk/tailhead | tailhead/__init__.py | https://github.com/GreatFruitOmsk/tailhead/blob/a3b1324a39935f8ffcfda59328a9a458672889d9/tailhead/__init__.py#L143-L196 | def seek_previous_line(self):
"""
Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found.
"""
where = self.file.tell()
offset = 0
while True:
if offset == where:
break
read_size = self.read_size if self.read_size <= where else where
self.file.seek(where - offset - read_size, SEEK_SET)
data_len, data = self.read(read_size)
# Consider the following example: Foo\r | \nBar where " | " denotes current position,
# '\nBar' is the read part and 'Foo\r' is the remaining part.
# We should completely consume terminator "\r\n" by reading one extra byte.
if b'\r\n' in self.LINE_TERMINATORS and data[0] == b'\n'[0]:
terminator_where = self.file.tell()
if terminator_where > data_len + 1:
self.file.seek(where - offset - data_len - 1, SEEK_SET)
terminator_len, terminator_data = self.read(1)
if terminator_data[0] == b'\r'[0]:
data_len += 1
data = b'\r' + data
self.file.seek(terminator_where)
data_where = data_len
while data_where > 0:
terminator = self.suffix_line_terminator(data[:data_where])
if terminator and offset == 0 and data_where == data_len:
# The last character is a line terminator that finishes current line. Ignore it.
data_where -= len(terminator)
elif terminator:
self.file.seek(where - offset - (data_len - data_where))
return self.file.tell()
else:
data_where -= 1
offset += data_len
if where == 0:
# Nothing more to read.
return -1
else:
# Very first line.
self.file.seek(0)
return 0 | [
"def",
"seek_previous_line",
"(",
"self",
")",
":",
"where",
"=",
"self",
".",
"file",
".",
"tell",
"(",
")",
"offset",
"=",
"0",
"while",
"True",
":",
"if",
"offset",
"==",
"where",
":",
"break",
"read_size",
"=",
"self",
".",
"read_size",
"if",
"self",
".",
"read_size",
"<=",
"where",
"else",
"where",
"self",
".",
"file",
".",
"seek",
"(",
"where",
"-",
"offset",
"-",
"read_size",
",",
"SEEK_SET",
")",
"data_len",
",",
"data",
"=",
"self",
".",
"read",
"(",
"read_size",
")",
"# Consider the following example: Foo\\r | \\nBar where \" | \" denotes current position,",
"# '\\nBar' is the read part and 'Foo\\r' is the remaining part.",
"# We should completely consume terminator \"\\r\\n\" by reading one extra byte.",
"if",
"b'\\r\\n'",
"in",
"self",
".",
"LINE_TERMINATORS",
"and",
"data",
"[",
"0",
"]",
"==",
"b'\\n'",
"[",
"0",
"]",
":",
"terminator_where",
"=",
"self",
".",
"file",
".",
"tell",
"(",
")",
"if",
"terminator_where",
">",
"data_len",
"+",
"1",
":",
"self",
".",
"file",
".",
"seek",
"(",
"where",
"-",
"offset",
"-",
"data_len",
"-",
"1",
",",
"SEEK_SET",
")",
"terminator_len",
",",
"terminator_data",
"=",
"self",
".",
"read",
"(",
"1",
")",
"if",
"terminator_data",
"[",
"0",
"]",
"==",
"b'\\r'",
"[",
"0",
"]",
":",
"data_len",
"+=",
"1",
"data",
"=",
"b'\\r'",
"+",
"data",
"self",
".",
"file",
".",
"seek",
"(",
"terminator_where",
")",
"data_where",
"=",
"data_len",
"while",
"data_where",
">",
"0",
":",
"terminator",
"=",
"self",
".",
"suffix_line_terminator",
"(",
"data",
"[",
":",
"data_where",
"]",
")",
"if",
"terminator",
"and",
"offset",
"==",
"0",
"and",
"data_where",
"==",
"data_len",
":",
"# The last character is a line terminator that finishes current line. Ignore it.",
"data_where",
"-=",
"len",
"(",
"terminator",
")",
"elif",
"terminator",
":",
"self",
".",
"file",
".",
"seek",
"(",
"where",
"-",
"offset",
"-",
"(",
"data_len",
"-",
"data_where",
")",
")",
"return",
"self",
".",
"file",
".",
"tell",
"(",
")",
"else",
":",
"data_where",
"-=",
"1",
"offset",
"+=",
"data_len",
"if",
"where",
"==",
"0",
":",
"# Nothing more to read.",
"return",
"-",
"1",
"else",
":",
"# Very first line.",
"self",
".",
"file",
".",
"seek",
"(",
"0",
")",
"return",
"0"
] | Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found. | [
"Seek",
"previous",
"line",
"relative",
"to",
"the",
"current",
"file",
"position",
"."
] | python | test |
fprimex/zdesk | zdesk/zdesk_api.py | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L822-L825 | def community_post_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/posts#create-post"
api_path = "/api/v2/community/posts.json"
return self.call(api_path, method="POST", data=data, **kwargs) | [
"def",
"community_post_create",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/community/posts.json\"",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"POST\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] | https://developer.zendesk.com/rest_api/docs/help_center/posts#create-post | [
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"help_center",
"/",
"posts#create",
"-",
"post"
] | python | train |
molmod/molmod | molmod/unit_cells.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/unit_cells.py#L203-L218 | def alignment_a(self):
"""Computes the rotation matrix that aligns the unit cell with the
Cartesian axes, starting with cell vector a.
* a parallel to x
* b in xy-plane with b_y positive
* c with c_z positive
"""
from molmod.transformations import Rotation
new_x = self.matrix[:, 0].copy()
new_x /= np.linalg.norm(new_x)
new_z = np.cross(new_x, self.matrix[:, 1])
new_z /= np.linalg.norm(new_z)
new_y = np.cross(new_z, new_x)
new_y /= np.linalg.norm(new_y)
return Rotation(np.array([new_x, new_y, new_z])) | [
"def",
"alignment_a",
"(",
"self",
")",
":",
"from",
"molmod",
".",
"transformations",
"import",
"Rotation",
"new_x",
"=",
"self",
".",
"matrix",
"[",
":",
",",
"0",
"]",
".",
"copy",
"(",
")",
"new_x",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"new_x",
")",
"new_z",
"=",
"np",
".",
"cross",
"(",
"new_x",
",",
"self",
".",
"matrix",
"[",
":",
",",
"1",
"]",
")",
"new_z",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"new_z",
")",
"new_y",
"=",
"np",
".",
"cross",
"(",
"new_z",
",",
"new_x",
")",
"new_y",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"new_y",
")",
"return",
"Rotation",
"(",
"np",
".",
"array",
"(",
"[",
"new_x",
",",
"new_y",
",",
"new_z",
"]",
")",
")"
] | Computes the rotation matrix that aligns the unit cell with the
Cartesian axes, starting with cell vector a.
* a parallel to x
* b in xy-plane with b_y positive
* c with c_z positive | [
"Computes",
"the",
"rotation",
"matrix",
"that",
"aligns",
"the",
"unit",
"cell",
"with",
"the",
"Cartesian",
"axes",
"starting",
"with",
"cell",
"vector",
"a",
"."
] | python | train |
hatemile/hatemile-for-python | hatemile/implementation/assoc.py | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/assoc.py#L136-L158 | def _validate_header(self, hed):
"""
Validate the list that represents the table header.
:param hed: The list that represents the table header.
:type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:return: True if the table header is valid or False if the table header
is not valid.
:rtype: bool
"""
# pylint: disable=no-self-use
if not bool(hed):
return False
length = -1
for row in hed:
if not bool(row):
return False
elif length == -1:
length = len(row)
elif len(row) != length:
return False
return True | [
"def",
"_validate_header",
"(",
"self",
",",
"hed",
")",
":",
"# pylint: disable=no-self-use",
"if",
"not",
"bool",
"(",
"hed",
")",
":",
"return",
"False",
"length",
"=",
"-",
"1",
"for",
"row",
"in",
"hed",
":",
"if",
"not",
"bool",
"(",
"row",
")",
":",
"return",
"False",
"elif",
"length",
"==",
"-",
"1",
":",
"length",
"=",
"len",
"(",
"row",
")",
"elif",
"len",
"(",
"row",
")",
"!=",
"length",
":",
"return",
"False",
"return",
"True"
] | Validate the list that represents the table header.
:param hed: The list that represents the table header.
:type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:return: True if the table header is valid or False if the table header
is not valid.
:rtype: bool | [
"Validate",
"the",
"list",
"that",
"represents",
"the",
"table",
"header",
"."
] | python | train |
mattupstate/cubric | cubric/tasks.py | https://github.com/mattupstate/cubric/blob/a648ce00e4467cd14d71e754240ef6c1f87a34b5/cubric/tasks.py#L11-L17 | def create_server(initialize=True):
"""Create a server"""
with provider() as p:
host_string = p.create_server()
if initialize:
env.host_string = host_string
initialize_server() | [
"def",
"create_server",
"(",
"initialize",
"=",
"True",
")",
":",
"with",
"provider",
"(",
")",
"as",
"p",
":",
"host_string",
"=",
"p",
".",
"create_server",
"(",
")",
"if",
"initialize",
":",
"env",
".",
"host_string",
"=",
"host_string",
"initialize_server",
"(",
")"
] | Create a server | [
"Create",
"a",
"server"
] | python | train |
dswah/pyGAM | pygam/pygam.py | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2686-L2718 | def predict(self, X, exposure=None):
"""
predict expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
if exposure is not None:
exposure = np.array(exposure).astype('f')
else:
exposure = np.ones(X.shape[0]).astype('f')
check_lengths(X, exposure)
return self.predict_mu(X) * exposure | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"exposure",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"X",
"=",
"check_X",
"(",
"X",
",",
"n_feats",
"=",
"self",
".",
"statistics_",
"[",
"'m_features'",
"]",
",",
"edge_knots",
"=",
"self",
".",
"edge_knots_",
",",
"dtypes",
"=",
"self",
".",
"dtype",
",",
"features",
"=",
"self",
".",
"feature",
",",
"verbose",
"=",
"self",
".",
"verbose",
")",
"if",
"exposure",
"is",
"not",
"None",
":",
"exposure",
"=",
"np",
".",
"array",
"(",
"exposure",
")",
".",
"astype",
"(",
"'f'",
")",
"else",
":",
"exposure",
"=",
"np",
".",
"ones",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
".",
"astype",
"(",
"'f'",
")",
"check_lengths",
"(",
"X",
",",
"exposure",
")",
"return",
"self",
".",
"predict_mu",
"(",
"X",
")",
"*",
"exposure"
predict expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model | [
"preduct",
"expected",
"value",
"of",
"target",
"given",
"model",
"and",
"input",
"X",
"often",
"this",
"is",
"done",
"via",
"expected",
"value",
"of",
"GAM",
"given",
"input",
"X"
] | python | train |
saltstack/salt | salt/states/pip_state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L964-L1019 | def removed(name,
requirements=None,
bin_env=None,
log=None,
proxy=None,
timeout=None,
user=None,
cwd=None,
use_vt=False):
'''
Make sure that a package is not installed.
name
The name of the package to uninstall
user
The user under which to run pip
bin_env : None
the pip executable or virtualenenv to use
use_vt
Use VT terminal emulation (see output while installing)
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
except (CommandExecutionError, CommandNotFoundError) as err:
ret['result'] = False
ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
return ret
if name not in pip_list:
ret['result'] = True
ret['comment'] = 'Package is not installed.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Package {0} is set to be removed'.format(name)
return ret
if __salt__['pip.uninstall'](pkgs=name,
requirements=requirements,
bin_env=bin_env,
log=log,
proxy=proxy,
timeout=timeout,
user=user,
cwd=cwd,
use_vt=use_vt):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Package was successfully removed.'
else:
ret['result'] = False
ret['comment'] = 'Could not remove package.'
return ret | [
"def",
"removed",
"(",
"name",
",",
"requirements",
"=",
"None",
",",
"bin_env",
"=",
"None",
",",
"log",
"=",
"None",
",",
"proxy",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"user",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"use_vt",
"=",
"False",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"try",
":",
"pip_list",
"=",
"__salt__",
"[",
"'pip.list'",
"]",
"(",
"bin_env",
"=",
"bin_env",
",",
"user",
"=",
"user",
",",
"cwd",
"=",
"cwd",
")",
"except",
"(",
"CommandExecutionError",
",",
"CommandNotFoundError",
")",
"as",
"err",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Error uninstalling \\'{0}\\': {1}'",
".",
"format",
"(",
"name",
",",
"err",
")",
"return",
"ret",
"if",
"name",
"not",
"in",
"pip_list",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Package is not installed.'",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Package {0} is set to be removed'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__salt__",
"[",
"'pip.uninstall'",
"]",
"(",
"pkgs",
"=",
"name",
",",
"requirements",
"=",
"requirements",
",",
"bin_env",
"=",
"bin_env",
",",
"log",
"=",
"log",
",",
"proxy",
"=",
"proxy",
",",
"timeout",
"=",
"timeout",
",",
"user",
"=",
"user",
",",
"cwd",
"=",
"cwd",
",",
"use_vt",
"=",
"use_vt",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'Removed'",
"ret",
"[",
"'comment'",
"]",
"=",
"'Package was successfully removed.'",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Could not remove package.'",
"return",
"ret"
] | Make sure that a package is not installed.
name
The name of the package to uninstall
user
The user under which to run pip
bin_env : None
the pip executable or virtualenv to use
use_vt
Use VT terminal emulation (see output while installing) | [
"Make",
"sure",
"that",
"a",
"package",
"is",
"not",
"installed",
"."
] | python | train |
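A hedged sketch of exercising the removed state directly, with the salt dunder globals stubbed out the way a unit test might do it; real usage goes through an SLS file or something like `salt-call state.single pip.removed <name>`:

    from salt.states import pip_state

    # inject the globals the salt loader would normally provide (test-only stubs)
    pip_state.__opts__ = {'test': False}
    pip_state.__salt__ = {
        'pip.list': lambda **kw: {'requests': '2.21.0'},   # pretend the package is installed
        'pip.uninstall': lambda **kw: True,                # pretend the uninstall succeeds
    }

    ret = pip_state.removed('requests')
    assert ret['result'] is True
    assert ret['changes'] == {'requests': 'Removed'}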
dgraph-io/pydgraph | pydgraph/client_stub.py | https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L58-L62 | def commit_or_abort(self, ctx, timeout=None, metadata=None,
credentials=None):
"""Runs commit or abort operation."""
return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
credentials=credentials) | [
"def",
"commit_or_abort",
"(",
"self",
",",
"ctx",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"return",
"self",
".",
"stub",
".",
"CommitOrAbort",
"(",
"ctx",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
"credentials",
"=",
"credentials",
")"
] | Runs commit or abort operation. | [
"Runs",
"commit",
"or",
"abort",
"operation",
"."
] | python | train |
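commit_or_abort is a thin wrapper over the generated gRPC stub; application code normally reaches it through a transaction object. A rough sketch, with the alpha address as a placeholder:

    import pydgraph

    stub = pydgraph.DgraphClientStub('localhost:9080')   # gRPC channel to a dgraph alpha
    client = pydgraph.DgraphClient(stub)

    txn = client.txn()
    try:
        txn.mutate(set_obj={'name': 'Alice'})   # stage a mutation in the transaction
        txn.commit()                            # ends up calling CommitOrAbort on the stub
    finally:
        txn.discard()                           # aborts if commit never ran
    stub.close()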
thoth-station/solver | thoth/solver/compile.py | https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/compile.py#L30-L52 | def pip_compile(*packages: str):
"""Run pip-compile to pin down packages, also resolve their transitive dependencies."""
result = None
packages = "\n".join(packages)
with tempfile.TemporaryDirectory() as tmp_dirname, cwd(tmp_dirname):
with open("requirements.in", "w") as requirements_file:
requirements_file.write(packages)
runner = CliRunner()
try:
result = runner.invoke(cli, ["requirements.in"], catch_exceptions=False)
except Exception as exc:
raise ThothPipCompileError(str(exc)) from exc
if result.exit_code != 0:
error_msg = (
f"pip-compile returned non-zero ({result.exit_code:d}) " f"output: {result.output_bytes.decode():s}"
)
raise ThothPipCompileError(error_msg)
return result.output_bytes.decode() | [
"def",
"pip_compile",
"(",
"*",
"packages",
":",
"str",
")",
":",
"result",
"=",
"None",
"packages",
"=",
"\"\\n\"",
".",
"join",
"(",
"packages",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"tmp_dirname",
",",
"cwd",
"(",
"tmp_dirname",
")",
":",
"with",
"open",
"(",
"\"requirements.in\"",
",",
"\"w\"",
")",
"as",
"requirements_file",
":",
"requirements_file",
".",
"write",
"(",
"packages",
")",
"runner",
"=",
"CliRunner",
"(",
")",
"try",
":",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"requirements.in\"",
"]",
",",
"catch_exceptions",
"=",
"False",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"ThothPipCompileError",
"(",
"str",
"(",
"exc",
")",
")",
"from",
"exc",
"if",
"result",
".",
"exit_code",
"!=",
"0",
":",
"error_msg",
"=",
"(",
"f\"pip-compile returned non-zero ({result.exit_code:d}) \"",
"f\"output: {result.output_bytes.decode():s}\"",
")",
"raise",
"ThothPipCompileError",
"(",
"error_msg",
")",
"return",
"result",
".",
"output_bytes",
".",
"decode",
"(",
")"
] | Run pip-compile to pin down packages, also resolve their transitive dependencies. | [
"Run",
"pip",
"-",
"compile",
"to",
"pin",
"down",
"packages",
"also",
"resolve",
"their",
"transitive",
"dependencies",
"."
] | python | train |
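A small usage sketch for the pip_compile helper above; the requirement specifiers are arbitrary examples:

    from thoth.solver.compile import pip_compile

    try:
        pinned = pip_compile('requests', 'click>=7.0')   # returns the generated requirements text
        print(pinned)
    except Exception as exc:                             # ThothPipCompileError in practice
        print('pip-compile failed: %s' % exc)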
pytroll/satpy | satpy/readers/hrpt.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/hrpt.py#L66-L82 | def time_seconds(tc_array, year):
"""Return the time object from the timecodes
"""
tc_array = np.array(tc_array, copy=True)
word = tc_array[:, 0]
day = word >> 1
word = tc_array[:, 1].astype(np.uint64)
msecs = ((127) & word) * 1024
word = tc_array[:, 2]
msecs += word & 1023
msecs *= 1024
word = tc_array[:, 3]
msecs += word & 1023
return (np.datetime64(
str(year) + '-01-01T00:00:00Z', 's') +
msecs[:].astype('timedelta64[ms]') +
(day - 1)[:].astype('timedelta64[D]')) | [
"def",
"time_seconds",
"(",
"tc_array",
",",
"year",
")",
":",
"tc_array",
"=",
"np",
".",
"array",
"(",
"tc_array",
",",
"copy",
"=",
"True",
")",
"word",
"=",
"tc_array",
"[",
":",
",",
"0",
"]",
"day",
"=",
"word",
">>",
"1",
"word",
"=",
"tc_array",
"[",
":",
",",
"1",
"]",
".",
"astype",
"(",
"np",
".",
"uint64",
")",
"msecs",
"=",
"(",
"(",
"127",
")",
"&",
"word",
")",
"*",
"1024",
"word",
"=",
"tc_array",
"[",
":",
",",
"2",
"]",
"msecs",
"+=",
"word",
"&",
"1023",
"msecs",
"*=",
"1024",
"word",
"=",
"tc_array",
"[",
":",
",",
"3",
"]",
"msecs",
"+=",
"word",
"&",
"1023",
"return",
"(",
"np",
".",
"datetime64",
"(",
"str",
"(",
"year",
")",
"+",
"'-01-01T00:00:00Z'",
",",
"'s'",
")",
"+",
"msecs",
"[",
":",
"]",
".",
"astype",
"(",
"'timedelta64[ms]'",
")",
"+",
"(",
"day",
"-",
"1",
")",
"[",
":",
"]",
".",
"astype",
"(",
"'timedelta64[D]'",
")",
")"
] | Return the time object from the timecodes | [
"Return",
"the",
"time",
"object",
"from",
"the",
"timecodes"
] | python | train |
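An illustrative call of the decoder above with a hand-built timecode array of shape (n_records, 4); the word values are arbitrary but follow the bit layout the function expects:

    import numpy as np
    from satpy.readers.hrpt import time_seconds

    # per record: [day << 1, high msec word, mid word, low word]
    tc = np.array([[2 << 1, 0, 0, 500],
                   [2 << 1, 0, 0, 750]], dtype=np.uint16)
    print(time_seconds(tc, 2009))
    # -> two datetime64[ms] values 500 ms and 750 ms into 2009-01-02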
saltstack/salt | salt/modules/boto3_sns.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_sns.py#L350-L383 | def unsubscribe(SubscriptionArn, region=None, key=None, keyid=None, profile=None):
'''
Unsubscribe a specific SubscriptionArn of a topic.
CLI Example:
.. code-block:: bash
salt myminion boto3_sns.unsubscribe my_subscription_arn region=us-east-1
'''
if not SubscriptionArn.startswith('arn:aws:sns:'):
# Grrr, AWS sent us an ARN that's NOT and ARN....
# This can happen if, for instance, a subscription is left in PendingAcceptance or similar
# Note that anything left in PendingConfirmation will be auto-deleted by AWS after 30 days
# anyway, so this isn't as ugly a hack as it might seem at first...
log.info('Invalid subscription ARN `%s` passed - likely a PendingConfirmation or such. '
'Skipping unsubscribe attempt as it would almost certainly fail...',
SubscriptionArn)
return True
subs = list_subscriptions(region=region, key=key, keyid=keyid, profile=profile)
sub = [s for s in subs if s.get('SubscriptionArn') == SubscriptionArn]
if not sub:
log.error('Subscription ARN %s not found', SubscriptionArn)
return False
TopicArn = sub[0]['TopicArn']
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.unsubscribe(SubscriptionArn=SubscriptionArn)
log.info('Deleted subscription %s from SNS topic %s',
SubscriptionArn, TopicArn)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to delete subscription %s: %s', SubscriptionArn, e)
return False | [
"def",
"unsubscribe",
"(",
"SubscriptionArn",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"if",
"not",
"SubscriptionArn",
".",
"startswith",
"(",
"'arn:aws:sns:'",
")",
":",
"# Grrr, AWS sent us an ARN that's NOT and ARN....",
"# This can happen if, for instance, a subscription is left in PendingAcceptance or similar",
"# Note that anything left in PendingConfirmation will be auto-deleted by AWS after 30 days",
"# anyway, so this isn't as ugly a hack as it might seem at first...",
"log",
".",
"info",
"(",
"'Invalid subscription ARN `%s` passed - likely a PendingConfirmaton or such. '",
"'Skipping unsubscribe attempt as it would almost certainly fail...'",
",",
"SubscriptionArn",
")",
"return",
"True",
"subs",
"=",
"list_subscriptions",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"sub",
"=",
"[",
"s",
"for",
"s",
"in",
"subs",
"if",
"s",
".",
"get",
"(",
"'SubscriptionArn'",
")",
"==",
"SubscriptionArn",
"]",
"if",
"not",
"sub",
":",
"log",
".",
"error",
"(",
"'Subscription ARN %s not found'",
",",
"SubscriptionArn",
")",
"return",
"False",
"TopicArn",
"=",
"sub",
"[",
"0",
"]",
"[",
"'TopicArn'",
"]",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"conn",
".",
"unsubscribe",
"(",
"SubscriptionArn",
"=",
"SubscriptionArn",
")",
"log",
".",
"info",
"(",
"'Deleted subscription %s from SNS topic %s'",
",",
"SubscriptionArn",
",",
"TopicArn",
")",
"return",
"True",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"'Failed to delete subscription %s: %s'",
",",
"SubscriptionArn",
",",
"e",
")",
"return",
"False"
] | Unsubscribe a specific SubscriptionArn of a topic.
CLI Example:
.. code-block:: bash
salt myminion boto3_sns.unsubscribe my_subscription_arn region=us-east-1 | [
"Unsubscribe",
"a",
"specific",
"SubscriptionArn",
"of",
"a",
"topic",
"."
] | python | train |
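For reference, roughly the raw boto3 calls the wrapper above builds on — first confirm the ARN is a live subscription, then unsubscribe; the region, account and topic below are placeholders:

    import boto3

    sns = boto3.client('sns', region_name='us-east-1')
    sub_arn = 'arn:aws:sns:us-east-1:123456789012:mytopic:1a2b3c4d-0000-0000-0000-000000000000'

    subs = sns.list_subscriptions()['Subscriptions']     # paginated in real code
    if any(s['SubscriptionArn'] == sub_arn for s in subs):
        sns.unsubscribe(SubscriptionArn=sub_arn)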
SiLab-Bonn/pyBAR | pybar/fei4/register_utils.py | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L770-L797 | def make_box_pixel_mask_from_col_row(column, row, default=0, value=1):
'''Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list.
Parameters
----------
column : iterable, int
List of colums values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
numpy.ndarray
'''
# FE columns and rows start from 1
col_array = np.array(column) - 1
row_array = np.array(row) - 1
if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0):
raise ValueError('Column and/or row out of range')
shape = (80, 336)
mask = np.full(shape, default, dtype=np.uint8)
if column and row:
mask[col_array.min():col_array.max() + 1, row_array.min():row_array.max() + 1] = value # advanced indexing
return mask | [
"def",
"make_box_pixel_mask_from_col_row",
"(",
"column",
",",
"row",
",",
"default",
"=",
"0",
",",
"value",
"=",
"1",
")",
":",
"# FE columns and rows start from 1\r",
"col_array",
"=",
"np",
".",
"array",
"(",
"column",
")",
"-",
"1",
"row_array",
"=",
"np",
".",
"array",
"(",
"row",
")",
"-",
"1",
"if",
"np",
".",
"any",
"(",
"col_array",
">=",
"80",
")",
"or",
"np",
".",
"any",
"(",
"col_array",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"row_array",
">=",
"336",
")",
"or",
"np",
".",
"any",
"(",
"row_array",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Column and/or row out of range'",
")",
"shape",
"=",
"(",
"80",
",",
"336",
")",
"mask",
"=",
"np",
".",
"full",
"(",
"shape",
",",
"default",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"if",
"column",
"and",
"row",
":",
"mask",
"[",
"col_array",
".",
"min",
"(",
")",
":",
"col_array",
".",
"max",
"(",
")",
"+",
"1",
",",
"row_array",
".",
"min",
"(",
")",
":",
"row_array",
".",
"max",
"(",
")",
"+",
"1",
"]",
"=",
"value",
"# advanced indexing\r",
"return",
"mask"
] | Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list.
Parameters
----------
column : iterable, int
List of colums values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
numpy.ndarray | [
"Generate",
"box",
"shaped",
"mask",
"from",
"column",
"and",
"row",
"lists",
".",
"Takes",
"the",
"minimum",
"and",
"maximum",
"value",
"from",
"each",
"list",
".",
"Parameters",
"----------",
"column",
":",
"iterable",
"int",
"List",
"of",
"colums",
"values",
".",
"row",
":",
"iterable",
"int",
"List",
"of",
"row",
"values",
".",
"default",
":",
"int",
"Value",
"of",
"pixels",
"that",
"are",
"not",
"selected",
"by",
"the",
"mask",
".",
"value",
":",
"int",
"Value",
"of",
"pixels",
"that",
"are",
"selected",
"by",
"the",
"mask",
".",
"Returns",
"-------",
"numpy",
".",
"ndarray"
] | python | train |
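A quick illustration of the mask helper above, using column/row values inside the FE-I4 range (1-80, 1-336):

    import numpy as np
    from pybar.fei4.register_utils import make_box_pixel_mask_from_col_row

    mask = make_box_pixel_mask_from_col_row(column=[10, 20], row=[50, 100])
    assert mask.shape == (80, 336) and mask.dtype == np.uint8
    # the box spanned by columns 10..20 and rows 50..100 is set to 1
    assert mask.sum() == 11 * 51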
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_heliplane.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_heliplane.py#L102-L130 | def update_channels(self):
'''update which channels provide input'''
self.interlock_channel = -1
self.override_channel = -1
self.zero_I_channel = -1
self.no_vtol_channel = -1
# output channels
self.rsc_out_channel = 9
self.fwd_thr_channel = 10
for ch in range(1,16):
option = self.get_mav_param("RC%u_OPTION" % ch, 0)
if option == 32:
self.interlock_channel = ch;
elif option == 63:
self.override_channel = ch;
elif option == 64:
self.zero_I_channel = ch;
elif option == 65:
self.override_channel = ch;
elif option == 66:
self.no_vtol_channel = ch;
function = self.get_mav_param("SERVO%u_FUNCTION" % ch, 0)
if function == 32:
self.rsc_out_channel = ch
if function == 70:
self.fwd_thr_channel = ch | [
"def",
"update_channels",
"(",
"self",
")",
":",
"self",
".",
"interlock_channel",
"=",
"-",
"1",
"self",
".",
"override_channel",
"=",
"-",
"1",
"self",
".",
"zero_I_channel",
"=",
"-",
"1",
"self",
".",
"no_vtol_channel",
"=",
"-",
"1",
"# output channels",
"self",
".",
"rsc_out_channel",
"=",
"9",
"self",
".",
"fwd_thr_channel",
"=",
"10",
"for",
"ch",
"in",
"range",
"(",
"1",
",",
"16",
")",
":",
"option",
"=",
"self",
".",
"get_mav_param",
"(",
"\"RC%u_OPTION\"",
"%",
"ch",
",",
"0",
")",
"if",
"option",
"==",
"32",
":",
"self",
".",
"interlock_channel",
"=",
"ch",
"elif",
"option",
"==",
"63",
":",
"self",
".",
"override_channel",
"=",
"ch",
"elif",
"option",
"==",
"64",
":",
"self",
".",
"zero_I_channel",
"=",
"ch",
"elif",
"option",
"==",
"65",
":",
"self",
".",
"override_channel",
"=",
"ch",
"elif",
"option",
"==",
"66",
":",
"self",
".",
"no_vtol_channel",
"=",
"ch",
"function",
"=",
"self",
".",
"get_mav_param",
"(",
"\"SERVO%u_FUNCTION\"",
"%",
"ch",
",",
"0",
")",
"if",
"function",
"==",
"32",
":",
"self",
".",
"rsc_out_channel",
"=",
"ch",
"if",
"function",
"==",
"70",
":",
"self",
".",
"fwd_thr_channel",
"=",
"ch"
] | update which channels provide input | [
"update",
"which",
"channels",
"provide",
"input"
] | python | train |
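The branch logic above amounts to two lookup tables; restated here as plain data (a summary of the code in this record, not an ArduPilot reference):

    RC_OPTION_ROLES = {
        32: 'interlock_channel',
        63: 'override_channel',
        64: 'zero_I_channel',
        65: 'override_channel',
        66: 'no_vtol_channel',
    }
    SERVO_FUNCTION_ROLES = {
        32: 'rsc_out_channel',    # rotor speed control output (default channel 9)
        70: 'fwd_thr_channel',    # forward throttle output (default channel 10)
    }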
OpenTreeOfLife/peyotl | peyotl/amendments/amendments_shard.py | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/amendments/amendments_shard.py#L161-L177 | def _mint_new_ott_ids(self, how_many=1):
""" ASSUMES the caller holds the _doc_counter_lock !
Checks the current int value of the next ottid, reserves a block of
{how_many} ids, advances the counter to the next available value,
stores the counter in a file in case the server is restarted.
Checks out master branch as a side effect."""
first_minted_id = self._next_ott_id
self._next_ott_id = first_minted_id + how_many
content = u'{"next_ott_id": %d}\n' % self._next_ott_id
# The content is JSON, but we hand-rolled the string above
# so that we can use it as a commit_msg
self._write_master_branch_resource(content,
self._id_minting_file,
commit_msg=content,
is_json=False)
last_minted_id = self._next_ott_id - 1
return first_minted_id, last_minted_id | [
"def",
"_mint_new_ott_ids",
"(",
"self",
",",
"how_many",
"=",
"1",
")",
":",
"first_minted_id",
"=",
"self",
".",
"_next_ott_id",
"self",
".",
"_next_ott_id",
"=",
"first_minted_id",
"+",
"how_many",
"content",
"=",
"u'{\"next_ott_id\": %d}\\n'",
"%",
"self",
".",
"_next_ott_id",
"# The content is JSON, but we hand-rolled the string above",
"# so that we can use it as a commit_msg",
"self",
".",
"_write_master_branch_resource",
"(",
"content",
",",
"self",
".",
"_id_minting_file",
",",
"commit_msg",
"=",
"content",
",",
"is_json",
"=",
"False",
")",
"last_minted_id",
"=",
"self",
".",
"_next_ott_id",
"-",
"1",
"return",
"first_minted_id",
",",
"last_minted_id"
] | ASSUMES the caller holds the _doc_counter_lock !
Checks the current int value of the next ottid, reserves a block of
{how_many} ids, advances the counter to the next available value,
stores the counter in a file in case the server is restarted.
Checks out master branch as a side effect. | [
"ASSUMES",
"the",
"caller",
"holds",
"the",
"_doc_counter_lock",
"!",
"Checks",
"the",
"current",
"int",
"value",
"of",
"the",
"next",
"ottid",
"reserves",
"a",
"block",
"of",
"{",
"how_many",
"}",
"ids",
"advances",
"the",
"counter",
"to",
"the",
"next",
"available",
"value",
"stores",
"the",
"counter",
"in",
"a",
"file",
"in",
"case",
"the",
"server",
"is",
"restarted",
".",
"Checks",
"out",
"master",
"branch",
"as",
"a",
"side",
"effect",
"."
] | python | train |
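A sketch of the calling pattern the docstring requires — the caller grabs the shard's _doc_counter_lock before reserving a block of ids; `shard` is a hypothetical, already-initialized amendments shard:

    with shard._doc_counter_lock:                        # caller must hold the lock
        first_id, last_id = shard._mint_new_ott_ids(how_many=5)
    # first_id..last_id inclusive are now reserved; the counter file committed to
    # the master branch keeps the next free id across server restarts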
square/pylink | pylink/__main__.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/__main__.py#L172-L184 | def run(self, args):
"""Erases the device connected to the J-Link.
Args:
self (EraseCommand): the ``EraseCommand`` instance
args (Namespace): the arguments passed on the command-line
Returns:
``None``
"""
jlink = self.create_jlink(args)
erased = jlink.erase()
print('Bytes Erased: %d' % erased) | [
"def",
"run",
"(",
"self",
",",
"args",
")",
":",
"jlink",
"=",
"self",
".",
"create_jlink",
"(",
"args",
")",
"erased",
"=",
"jlink",
".",
"erase",
"(",
")",
"print",
"(",
"'Bytes Erased: %d'",
"%",
"erased",
")"
] | Erases the device connected to the J-Link.
Args:
self (EraseCommand): the ``EraseCommand`` instance
args (Namespace): the arguments passed on the command-line
Returns:
``None`` | [
"Erases",
"the",
"device",
"connected",
"to",
"the",
"J",
"-",
"Link",
"."
] | python | train |
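The subcommand above wraps the pylink API; roughly the same erase can be done directly, assuming a connected probe (the device name is a placeholder):

    import pylink

    jlink = pylink.JLink()
    jlink.open()                    # attach to the first J-Link probe found
    jlink.connect('STM32F407VE')    # placeholder device name
    print('Bytes Erased: %d' % jlink.erase())
    jlink.close()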
honzajavorek/redis-collections | redis_collections/lists.py | https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/lists.py#L739-L744 | def append(self, value):
"""Add *value* to the right side of the collection."""
def append_trans(pipe):
self._append_helper(value, pipe)
self._transaction(append_trans) | [
"def",
"append",
"(",
"self",
",",
"value",
")",
":",
"def",
"append_trans",
"(",
"pipe",
")",
":",
"self",
".",
"_append_helper",
"(",
"value",
",",
"pipe",
")",
"self",
".",
"_transaction",
"(",
"append_trans",
")"
] | Add *value* to the right side of the collection. | [
"Add",
"*",
"value",
"*",
"to",
"the",
"right",
"side",
"of",
"the",
"collection",
"."
] | python | train |
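Minimal usage of the append shown above (the class in lists.py could be List or Deque; both expose the same call), assuming a Redis server on the default localhost port:

    import redis
    from redis_collections import List

    items = List(redis=redis.StrictRedis(), key='example-list')
    items.append('spam')        # pushed onto the right side in a transaction
    print(list(items))          # -> ['spam']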
Qiskit/qiskit-api-py | IBMQuantumExperience/IBMQuantumExperience.py | https://github.com/Qiskit/qiskit-api-py/blob/2ab240110fb7e653254e44c4833f3643e8ae7f0f/IBMQuantumExperience/IBMQuantumExperience.py#L22-L34 | def get_job_url(config, hub, group, project):
"""
Util method to get job url
"""
if ((config is not None) and ('hub' in config) and (hub is None)):
hub = config["hub"]
if ((config is not None) and ('group' in config) and (group is None)):
group = config["group"]
if ((config is not None) and ('project' in config) and (project is None)):
project = config["project"]
if ((hub is not None) and (group is not None) and (project is not None)):
return '/Network/{}/Groups/{}/Projects/{}/jobs'.format(hub, group, project)
return '/Jobs' | [
"def",
"get_job_url",
"(",
"config",
",",
"hub",
",",
"group",
",",
"project",
")",
":",
"if",
"(",
"(",
"config",
"is",
"not",
"None",
")",
"and",
"(",
"'hub'",
"in",
"config",
")",
"and",
"(",
"hub",
"is",
"None",
")",
")",
":",
"hub",
"=",
"config",
"[",
"\"hub\"",
"]",
"if",
"(",
"(",
"config",
"is",
"not",
"None",
")",
"and",
"(",
"'group'",
"in",
"config",
")",
"and",
"(",
"group",
"is",
"None",
")",
")",
":",
"group",
"=",
"config",
"[",
"\"group\"",
"]",
"if",
"(",
"(",
"config",
"is",
"not",
"None",
")",
"and",
"(",
"'project'",
"in",
"config",
")",
"and",
"(",
"project",
"is",
"None",
")",
")",
":",
"project",
"=",
"config",
"[",
"\"project\"",
"]",
"if",
"(",
"(",
"hub",
"is",
"not",
"None",
")",
"and",
"(",
"group",
"is",
"not",
"None",
")",
"and",
"(",
"project",
"is",
"not",
"None",
")",
")",
":",
"return",
"'/Network/{}/Groups/{}/Projects/{}/jobs'",
".",
"format",
"(",
"hub",
",",
"group",
",",
"project",
")",
"return",
"'/Jobs'"
] | Util method to get job url | [
"Util",
"method",
"to",
"get",
"job",
"url"
] | python | train |
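A tiny illustration of the URL helper above; the hub/group/project names are invented:

    from IBMQuantumExperience.IBMQuantumExperience import get_job_url

    config = {'hub': 'ibm-q', 'group': 'open', 'project': 'main'}
    print(get_job_url(config, None, None, None))
    # -> '/Network/ibm-q/Groups/open/Projects/main/jobs'
    print(get_job_url({}, None, None, None))
    # -> '/Jobs'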