repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---|
pytroll/posttroll | posttroll/message_broadcaster.py | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message_broadcaster.py#L127-L150 | def _run(self):
"""Broadcasts forever.
"""
self._is_running = True
network_fail = False
try:
while self._do_run:
try:
if network_fail is True:
LOGGER.info("Network connection re-established!")
network_fail = False
self._sender(self._message)
except IOError as err:
if err.errno == errno.ENETUNREACH:
LOGGER.error("Network unreachable. "
"Trying again in %d s.",
self._interval)
network_fail = True
else:
raise
time.sleep(self._interval)
finally:
self._is_running = False
self._sender.close() | [
"def",
"_run",
"(",
"self",
")",
":",
"self",
".",
"_is_running",
"=",
"True",
"network_fail",
"=",
"False",
"try",
":",
"while",
"self",
".",
"_do_run",
":",
"try",
":",
"if",
"network_fail",
"is",
"True",
":",
"LOGGER",
".",
"info",
"(",
"\"Network connection re-established!\"",
")",
"network_fail",
"=",
"False",
"self",
".",
"_sender",
"(",
"self",
".",
"_message",
")",
"except",
"IOError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errno",
".",
"ENETUNREACH",
":",
"LOGGER",
".",
"error",
"(",
"\"Network unreachable. \"",
"\"Trying again in %d s.\"",
",",
"self",
".",
"_interval",
")",
"network_fail",
"=",
"True",
"else",
":",
"raise",
"time",
".",
"sleep",
"(",
"self",
".",
"_interval",
")",
"finally",
":",
"self",
".",
"_is_running",
"=",
"False",
"self",
".",
"_sender",
".",
"close",
"(",
")"
] | Broadcasts forever. | [
"Broadcasts",
"forever",
"."
] | python | train |
PMEAL/OpenPNM | openpnm/core/ModelsMixin.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/ModelsMixin.py#L307-L335 | def remove_model(self, propname=None, mode=['model', 'data']):
r"""
Removes model and data from object.
Parameters
----------
propname : string or list of strings
The property or list of properties to remove
mode : list of strings
Controls what is removed. Options are:
*'model'* : Removes the model but not any numerical data that may
already exist.
*'data'* : Removes the data but leaves the model.
The default is both.
"""
if type(propname) is str:
propname = [propname]
for item in propname:
if 'model' in mode:
if item in self.models.keys():
del self.models[item]
if 'data' in mode:
if item in self.keys():
del self[item] | [
"def",
"remove_model",
"(",
"self",
",",
"propname",
"=",
"None",
",",
"mode",
"=",
"[",
"'model'",
",",
"'data'",
"]",
")",
":",
"if",
"type",
"(",
"propname",
")",
"is",
"str",
":",
"propname",
"=",
"[",
"propname",
"]",
"for",
"item",
"in",
"propname",
":",
"if",
"'model'",
"in",
"mode",
":",
"if",
"item",
"in",
"self",
".",
"models",
".",
"keys",
"(",
")",
":",
"del",
"self",
".",
"models",
"[",
"item",
"]",
"if",
"'data'",
"in",
"mode",
":",
"if",
"item",
"in",
"self",
".",
"keys",
"(",
")",
":",
"del",
"self",
"[",
"item",
"]"
] | r"""
Removes model and data from object.
Parameters
----------
propname : string or list of strings
The property or list of properties to remove
mode : list of strings
Controls what is removed. Options are:
*'model'* : Removes the model but not any numerical data that may
already exist.
*'data'* : Removes the data but leaves the model.
The default is both. | [
"r",
"Removes",
"model",
"and",
"data",
"from",
"object",
"."
] | python | train |
tanghaibao/goatools | goatools/grouper/read_goids.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/read_goids.py#L94-L107 | def _read_finish(self, goids_fin, prt):
"""Get one of: {'goids':...} or {'sections':...} from reading a file."""
# Report unused sections, if any
if len(self.section2goids) != len(self.sections_seen):
self._rpt_unused_sections(prt)
# If there are no sections, then goids_fin holds all GO IDs in file
if not self.sections_seen:
self.goids_fin = goids_fin
if goids_fin:
return self.internal_get_goids_or_sections() # {'goids':...} or {'sections':...}
else:
sys.stdout.write(
"\n**WARNING: GO IDs MUST BE THE FIRST 10 CHARACTERS OF EACH LINE\n\n") | [
"def",
"_read_finish",
"(",
"self",
",",
"goids_fin",
",",
"prt",
")",
":",
"# Report unused sections, if any",
"if",
"len",
"(",
"self",
".",
"section2goids",
")",
"!=",
"len",
"(",
"self",
".",
"sections_seen",
")",
":",
"self",
".",
"_rpt_unused_sections",
"(",
"prt",
")",
"# If there are no sections, then goids_fin holds all GO IDs in file",
"if",
"not",
"self",
".",
"sections_seen",
":",
"self",
".",
"goids_fin",
"=",
"goids_fin",
"if",
"goids_fin",
":",
"return",
"self",
".",
"internal_get_goids_or_sections",
"(",
")",
"# {'goids':...} or {'sections':...}",
"else",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n**WARNING: GO IDs MUST BE THE FIRST 10 CHARACTERS OF EACH LINE\\n\\n\"",
")"
] | Get one of: {'goids':...} or {'sections':...} from reading a file. | [
"Get",
"one",
"of",
":",
"{",
"goids",
":",
"...",
"}",
"or",
"{",
"sections",
":",
"...",
"}",
"from",
"reading",
"a",
"file",
"."
] | python | train |
StanfordVL/robosuite | robosuite/devices/spacemouse.py | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/devices/spacemouse.py#L138-L153 | def get_controller_state(self):
"""Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset."""
dpos = self.control[:3] * 0.005
roll, pitch, yaw = self.control[3:] * 0.005
self.grasp = self.control_gripper
# convert RPY to an absolute orientation
drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]
self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3)))
return dict(
dpos=dpos, rotation=self.rotation, grasp=self.grasp, reset=self._reset_state
) | [
"def",
"get_controller_state",
"(",
"self",
")",
":",
"dpos",
"=",
"self",
".",
"control",
"[",
":",
"3",
"]",
"*",
"0.005",
"roll",
",",
"pitch",
",",
"yaw",
"=",
"self",
".",
"control",
"[",
"3",
":",
"]",
"*",
"0.005",
"self",
".",
"grasp",
"=",
"self",
".",
"control_gripper",
"# convert RPY to an absolute orientation",
"drot1",
"=",
"rotation_matrix",
"(",
"angle",
"=",
"-",
"pitch",
",",
"direction",
"=",
"[",
"1.",
",",
"0",
",",
"0",
"]",
",",
"point",
"=",
"None",
")",
"[",
":",
"3",
",",
":",
"3",
"]",
"drot2",
"=",
"rotation_matrix",
"(",
"angle",
"=",
"roll",
",",
"direction",
"=",
"[",
"0",
",",
"1.",
",",
"0",
"]",
",",
"point",
"=",
"None",
")",
"[",
":",
"3",
",",
":",
"3",
"]",
"drot3",
"=",
"rotation_matrix",
"(",
"angle",
"=",
"yaw",
",",
"direction",
"=",
"[",
"0",
",",
"0",
",",
"1.",
"]",
",",
"point",
"=",
"None",
")",
"[",
":",
"3",
",",
":",
"3",
"]",
"self",
".",
"rotation",
"=",
"self",
".",
"rotation",
".",
"dot",
"(",
"drot1",
".",
"dot",
"(",
"drot2",
".",
"dot",
"(",
"drot3",
")",
")",
")",
"return",
"dict",
"(",
"dpos",
"=",
"dpos",
",",
"rotation",
"=",
"self",
".",
"rotation",
",",
"grasp",
"=",
"self",
".",
"grasp",
",",
"reset",
"=",
"self",
".",
"_reset_state",
")"
] | Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset. | [
"Returns",
"the",
"current",
"state",
"of",
"the",
"3d",
"mouse",
"a",
"dictionary",
"of",
"pos",
"orn",
"grasp",
"and",
"reset",
"."
] | python | train |
fermiPy/fermipy | fermipy/roi_model.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/roi_model.py#L2087-L2099 | def _build_src_index(self):
"""Build an indices for fast lookup of a source given its name
or coordinates."""
self._srcs = sorted(self._srcs, key=lambda t: t['offset'])
nsrc = len(self._srcs)
radec = np.zeros((2, nsrc))
for i, src in enumerate(self._srcs):
radec[:, i] = src.radec
self._src_skydir = SkyCoord(ra=radec[0], dec=radec[1], unit=u.deg)
self._src_radius = self._src_skydir.separation(self.skydir) | [
"def",
"_build_src_index",
"(",
"self",
")",
":",
"self",
".",
"_srcs",
"=",
"sorted",
"(",
"self",
".",
"_srcs",
",",
"key",
"=",
"lambda",
"t",
":",
"t",
"[",
"'offset'",
"]",
")",
"nsrc",
"=",
"len",
"(",
"self",
".",
"_srcs",
")",
"radec",
"=",
"np",
".",
"zeros",
"(",
"(",
"2",
",",
"nsrc",
")",
")",
"for",
"i",
",",
"src",
"in",
"enumerate",
"(",
"self",
".",
"_srcs",
")",
":",
"radec",
"[",
":",
",",
"i",
"]",
"=",
"src",
".",
"radec",
"self",
".",
"_src_skydir",
"=",
"SkyCoord",
"(",
"ra",
"=",
"radec",
"[",
"0",
"]",
",",
"dec",
"=",
"radec",
"[",
"1",
"]",
",",
"unit",
"=",
"u",
".",
"deg",
")",
"self",
".",
"_src_radius",
"=",
"self",
".",
"_src_skydir",
".",
"separation",
"(",
"self",
".",
"skydir",
")"
] | Build an indices for fast lookup of a source given its name
or coordinates. | [
"Build",
"an",
"indices",
"for",
"fast",
"lookup",
"of",
"a",
"source",
"given",
"its",
"name",
"or",
"coordinates",
"."
] | python | train |
pavlov99/jsonapi | jsonapi/django_utils.py | https://github.com/pavlov99/jsonapi/blob/c27943f22f1f1d30d651fe267a99d2b38f69d604/jsonapi/django_utils.py#L38-L52 | def get_model_name(model):
""" Get model name for the field.
Django 1.5 uses module_name, does not support model_name
Django 1.6 uses module_name and model_name
DJango 1.7 uses model_name, module_name raises RemovedInDjango18Warning
"""
opts = model._meta
if django.VERSION[:2] < (1, 7):
model_name = opts.module_name
else:
model_name = opts.model_name
return model_name | [
"def",
"get_model_name",
"(",
"model",
")",
":",
"opts",
"=",
"model",
".",
"_meta",
"if",
"django",
".",
"VERSION",
"[",
":",
"2",
"]",
"<",
"(",
"1",
",",
"7",
")",
":",
"model_name",
"=",
"opts",
".",
"module_name",
"else",
":",
"model_name",
"=",
"opts",
".",
"model_name",
"return",
"model_name"
] | Get model name for the field.
Django 1.5 uses module_name, does not support model_name
Django 1.6 uses module_name and model_name
DJango 1.7 uses model_name, module_name raises RemovedInDjango18Warning | [
"Get",
"model",
"name",
"for",
"the",
"field",
"."
] | python | train |
mosesschwartz/scrypture | scrypture/scrypture.py | https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/scrypture.py#L145-L183 | def run_script_api(module_name):
    '''API handler. Take script input (from script_input above), run the run()
    function, and return the results'''
    filename = ''
    file_stream = ''
    #form = {k : try_json(v) for k,v in request.values.items()}
    form = request.values.to_dict(flat=False)
    if request.json:
        form.update(request.json)
    for x in form:
        if type(form[x]) == list and len(form[x]) == 1:
            form[x] = form[x][0]
    for x in form:
        form[x] = try_json(form[x])
    if len(request.files) > 0:
        # Get the name of the uploaded file
        f = request.files['file_upload']
        # Make the filename safe, remove unsupported chars
        filename = secure_filename(f.filename)
        file_stream = f.stream
    form['HTTP_AUTHORIZATION'] = get_authorization()
    form['filename'] = filename
    form['file_stream'] = file_stream
    try:
        result = registered_modules[module_name].WebAPI().run(form)
    except:
        raise
        #return jsonify({'error' : str(traceback.format_exc())})
    if result['output_type'] == 'file':
        return Response(result['output'],
                        mimetype='application/octet-stream',
                        headers={'Content-Disposition':
                                 'attachment;filename='+result['filename']})
    else:
        return jsonify(result) | [
"def",
"run_script_api",
"(",
"module_name",
")",
":",
"filename",
"=",
"''",
"file_stream",
"=",
"''",
"#form = {k : try_json(v) for k,v in request.values.items()}",
"form",
"=",
"request",
".",
"values",
".",
"to_dict",
"(",
"flat",
"=",
"False",
")",
"if",
"request",
".",
"json",
":",
"form",
".",
"update",
"(",
"request",
".",
"json",
")",
"for",
"x",
"in",
"form",
":",
"if",
"type",
"(",
"form",
"[",
"x",
"]",
")",
"==",
"list",
"and",
"len",
"(",
"form",
"[",
"x",
"]",
")",
"==",
"1",
":",
"form",
"[",
"x",
"]",
"=",
"form",
"[",
"x",
"]",
"[",
"0",
"]",
"for",
"x",
"in",
"form",
":",
"form",
"[",
"x",
"]",
"=",
"try_json",
"(",
"form",
"[",
"x",
"]",
")",
"if",
"len",
"(",
"request",
".",
"files",
")",
">",
"0",
":",
"# Get the name of the uploaded file",
"f",
"=",
"request",
".",
"files",
"[",
"'file_upload'",
"]",
"# Make the filename safe, remove unsupported chars",
"filename",
"=",
"secure_filename",
"(",
"f",
".",
"filename",
")",
"file_stream",
"=",
"f",
".",
"stream",
"form",
"[",
"'HTTP_AUTHORIZATION'",
"]",
"=",
"get_authorization",
"(",
")",
"form",
"[",
"'filename'",
"]",
"=",
"filename",
"form",
"[",
"'file_stream'",
"]",
"=",
"file_stream",
"try",
":",
"result",
"=",
"registered_modules",
"[",
"module_name",
"]",
".",
"WebAPI",
"(",
")",
".",
"run",
"(",
"form",
")",
"except",
":",
"raise",
"#return jsonify({'error' : str(traceback.format_exc())})",
"if",
"result",
"[",
"'output_type'",
"]",
"==",
"'file'",
":",
"return",
"Response",
"(",
"result",
"[",
"'output'",
"]",
",",
"mimetype",
"=",
"'application/octet-stream'",
",",
"headers",
"=",
"{",
"'Content-Disposition'",
":",
"'attachment;filename='",
"+",
"result",
"[",
"'filename'",
"]",
"}",
")",
"else",
":",
"return",
"jsonify",
"(",
"result",
")"
] | API handler. Take script input (from script_input above), run the run()
function, and return the results | [
"API",
"handler",
".",
"Take",
"script",
"input",
"(",
"from",
"script_input",
"above",
")",
"run",
"the",
"run",
"()",
"function",
"and",
"return",
"the",
"results"
] | python | train |
ungarj/tilematrix | tilematrix/_tilepyramid.py | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L82-L90 | def matrix_height(self, zoom):
"""
Tile matrix height (number of rows) at zoom level.
- zoom: zoom level
"""
validate_zoom(zoom)
height = int(math.ceil(self.grid.shape.height * 2**(zoom) / self.metatiling))
return 1 if height < 1 else height | [
"def",
"matrix_height",
"(",
"self",
",",
"zoom",
")",
":",
"validate_zoom",
"(",
"zoom",
")",
"height",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"grid",
".",
"shape",
".",
"height",
"*",
"2",
"**",
"(",
"zoom",
")",
"/",
"self",
".",
"metatiling",
")",
")",
"return",
"1",
"if",
"height",
"<",
"1",
"else",
"height"
] | Tile matrix height (number of rows) at zoom level.
- zoom: zoom level | [
"Tile",
"matrix",
"height",
"(",
"number",
"of",
"rows",
")",
"at",
"zoom",
"level",
"."
] | python | train |
rdussurget/py-altimetry | altimetry/tools/spectrum.py | https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/tools/spectrum.py#L1075-L1094 | def optimal_AR_spectrum(dx, Y, ndegrees=None,return_min=True):
    '''
    Get the optimal order AR spectrum by minimizing the BIC.
    '''
    if ndegrees is None : ndegrees=len(Y)-ndegrees
    aicc=np.arange(ndegrees)
    aic=aicc.copy()
    bic=aicc.copy()
    tmpStr=[]
    for i in np.arange(1, ndegrees):
        dum=yule_walker_regression(dx,Y, i)
        tmpStr.append(dum)
        aicc[i-1]=(tmpStr[i-1])['esd'].model['aicc']
        aic[i-1]=(tmpStr[i-1])['esd'].model['aic']
        bic[i-1]=(tmpStr[i-1])['esd'].model['bic']
    if return_min : return np.argmin(bic)+1
    else : return {'aicc':aicc,'aic':aic,'bic':bic} | [
"def",
"optimal_AR_spectrum",
"(",
"dx",
",",
"Y",
",",
"ndegrees",
"=",
"None",
",",
"return_min",
"=",
"True",
")",
":",
"if",
"ndegrees",
"is",
"None",
":",
"ndegrees",
"=",
"len",
"(",
"Y",
")",
"-",
"ndegrees",
"aicc",
"=",
"np",
".",
"arange",
"(",
"ndegrees",
")",
"aic",
"=",
"aicc",
".",
"copy",
"(",
")",
"bic",
"=",
"aicc",
".",
"copy",
"(",
")",
"tmpStr",
"=",
"[",
"]",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"1",
",",
"ndegrees",
")",
":",
"dum",
"=",
"yule_walker_regression",
"(",
"dx",
",",
"Y",
",",
"i",
")",
"tmpStr",
".",
"append",
"(",
"dum",
")",
"aicc",
"[",
"i",
"-",
"1",
"]",
"=",
"(",
"tmpStr",
"[",
"i",
"-",
"1",
"]",
")",
"[",
"'esd'",
"]",
".",
"model",
"[",
"'aicc'",
"]",
"aic",
"[",
"i",
"-",
"1",
"]",
"=",
"(",
"tmpStr",
"[",
"i",
"-",
"1",
"]",
")",
"[",
"'esd'",
"]",
".",
"model",
"[",
"'aic'",
"]",
"bic",
"[",
"i",
"-",
"1",
"]",
"=",
"(",
"tmpStr",
"[",
"i",
"-",
"1",
"]",
")",
"[",
"'esd'",
"]",
".",
"model",
"[",
"'bic'",
"]",
"if",
"return_min",
":",
"return",
"np",
".",
"argmin",
"(",
"bic",
")",
"+",
"1",
"else",
":",
"return",
"{",
"'aicc'",
":",
"aicc",
",",
"'aic'",
":",
"aic",
",",
"'bic'",
":",
"bic",
"}"
] | Get the optimal order AR spectrum by minimizing the BIC. | [
"Get",
"the",
"optimal",
"order",
"AR",
"spectrum",
"by",
"minimizing",
"the",
"BIC",
"."
] | python | train |
bachya/regenmaschine | regenmaschine/zone.py | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/zone.py#L17-L25 | async def all(
        self, *, details: bool = False,
        include_inactive: bool = False) -> list:
    """Return all zones (with optional advanced properties)."""
    endpoint = 'zone'
    if details:
        endpoint += '/properties'
    data = await self._request('get', endpoint)
    return [z for z in data['zones'] if include_inactive or z['active']] | [
"async",
"def",
"all",
"(",
"self",
",",
"*",
",",
"details",
":",
"bool",
"=",
"False",
",",
"include_inactive",
":",
"bool",
"=",
"False",
")",
"->",
"list",
":",
"endpoint",
"=",
"'zone'",
"if",
"details",
":",
"endpoint",
"+=",
"'/properties'",
"data",
"=",
"await",
"self",
".",
"_request",
"(",
"'get'",
",",
"endpoint",
")",
"return",
"[",
"z",
"for",
"z",
"in",
"data",
"[",
"'zones'",
"]",
"if",
"include_inactive",
"or",
"z",
"[",
"'active'",
"]",
"]"
] | Return all zones (with optional advanced properties). | [
"Return",
"all",
"zones",
"(",
"with",
"optional",
"advanced",
"properties",
")",
"."
] | python | train |
square/pylink | pylink/util.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/util.py#L159-L182 | def calculate_parity(n):
"""Calculates and returns the parity of a number.
The parity of a number is ``1`` if the number has an odd number of ones
in its binary representation, otherwise ``0``.
Args:
n (int): the number whose parity to calculate
Returns:
``1`` if the number has an odd number of ones, otherwise ``0``.
Raises:
ValueError: if ``n`` is less than ``0``.
"""
if not is_natural(n):
raise ValueError('Expected n to be a positive integer.')
y = 0
n = abs(n)
while n:
y += n & 1
n = n >> 1
return y & 1 | [
"def",
"calculate_parity",
"(",
"n",
")",
":",
"if",
"not",
"is_natural",
"(",
"n",
")",
":",
"raise",
"ValueError",
"(",
"'Expected n to be a positive integer.'",
")",
"y",
"=",
"0",
"n",
"=",
"abs",
"(",
"n",
")",
"while",
"n",
":",
"y",
"+=",
"n",
"&",
"1",
"n",
"=",
"n",
">>",
"1",
"return",
"y",
"&",
"1"
] | Calculates and returns the parity of a number.
The parity of a number is ``1`` if the number has an odd number of ones
in its binary representation, otherwise ``0``.
Args:
n (int): the number whose parity to calculate
Returns:
``1`` if the number has an odd number of ones, otherwise ``0``.
Raises:
ValueError: if ``n`` is less than ``0``. | [
"Calculates",
"and",
"returns",
"the",
"parity",
"of",
"a",
"number",
"."
] | python | train |
DLR-RM/RAFCON | source/rafcon/gui/mygaphas/items/ports.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/items/ports.py#L570-L586 | def _draw_rectangle(context, width, height):
"""Draw a rectangle
Assertion: The current point is the center point of the rectangle
:param context: Cairo context
:param width: Width of the rectangle
:param height: Height of the rectangle
"""
c = context
# First move to upper left corner
c.rel_move_to(-width / 2., -height / 2.)
# Draw closed rectangle
c.rel_line_to(width, 0)
c.rel_line_to(0, height)
c.rel_line_to(-width, 0)
c.close_path() | [
"def",
"_draw_rectangle",
"(",
"context",
",",
"width",
",",
"height",
")",
":",
"c",
"=",
"context",
"# First move to upper left corner",
"c",
".",
"rel_move_to",
"(",
"-",
"width",
"/",
"2.",
",",
"-",
"height",
"/",
"2.",
")",
"# Draw closed rectangle",
"c",
".",
"rel_line_to",
"(",
"width",
",",
"0",
")",
"c",
".",
"rel_line_to",
"(",
"0",
",",
"height",
")",
"c",
".",
"rel_line_to",
"(",
"-",
"width",
",",
"0",
")",
"c",
".",
"close_path",
"(",
")"
] | Draw a rectangle
Assertion: The current point is the center point of the rectangle
:param context: Cairo context
:param width: Width of the rectangle
:param height: Height of the rectangle | [
"Draw",
"a",
"rectangle"
] | python | train |
draios/python-sdc-client | sdcclient/_secure.py | https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_secure.py#L932-L948 | def get_command_audit(self, id, metrics=[]):
    '''**Description**
    Get a command audit.
    **Arguments**
    - id: the id of the command audit to get.
    **Success Return Value**
    A JSON representation of the command audit.
    '''
    url = "{url}/api/commands/{id}?from=0&to={to}{metrics}".format(
        url=self.url,
        id=id,
        to=int(time.time() * 10**6),
        metrics="&metrics=" + json.dumps(metrics) if metrics else "")
    res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify)
    return self._request_result(res) | [
"def",
"get_command_audit",
"(",
"self",
",",
"id",
",",
"metrics",
"=",
"[",
"]",
")",
":",
"url",
"=",
"\"{url}/api/commands/{id}?from=0&to={to}{metrics}\"",
".",
"format",
"(",
"url",
"=",
"self",
".",
"url",
",",
"id",
"=",
"id",
",",
"to",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"10",
"**",
"6",
")",
",",
"metrics",
"=",
"\"&metrics=\"",
"+",
"json",
".",
"dumps",
"(",
"metrics",
")",
"if",
"metrics",
"else",
"\"\"",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"hdrs",
",",
"verify",
"=",
"self",
".",
"ssl_verify",
")",
"return",
"self",
".",
"_request_result",
"(",
"res",
")"
] | **Description**
Get a command audit.
**Arguments**
- id: the id of the command audit to get.
**Success Return Value**
A JSON representation of the command audit. | [
"**",
"Description",
"**",
"Get",
"a",
"command",
"audit",
"."
] | python | test |
Fantomas42/django-blog-zinnia | zinnia/search.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/search.py#L90-L113 | def union_q(token):
"""
Appends all the Q() objects.
"""
query = Q()
operation = 'and'
negation = False
for t in token:
if type(t) is ParseResults: # See tokens recursively
query &= union_q(t)
else:
if t in ('or', 'and'): # Set the new op and go to next token
operation = t
elif t == '-': # Next tokens needs to be negated
negation = True
else: # Append to query the token
if negation:
t = ~t
if operation == 'or':
query |= t
else:
query &= t
return query | [
"def",
"union_q",
"(",
"token",
")",
":",
"query",
"=",
"Q",
"(",
")",
"operation",
"=",
"'and'",
"negation",
"=",
"False",
"for",
"t",
"in",
"token",
":",
"if",
"type",
"(",
"t",
")",
"is",
"ParseResults",
":",
"# See tokens recursively",
"query",
"&=",
"union_q",
"(",
"t",
")",
"else",
":",
"if",
"t",
"in",
"(",
"'or'",
",",
"'and'",
")",
":",
"# Set the new op and go to next token",
"operation",
"=",
"t",
"elif",
"t",
"==",
"'-'",
":",
"# Next tokens needs to be negated",
"negation",
"=",
"True",
"else",
":",
"# Append to query the token",
"if",
"negation",
":",
"t",
"=",
"~",
"t",
"if",
"operation",
"==",
"'or'",
":",
"query",
"|=",
"t",
"else",
":",
"query",
"&=",
"t",
"return",
"query"
] | Appends all the Q() objects. | [
"Appends",
"all",
"the",
"Q",
"()",
"objects",
"."
] | python | train |
nesaro/pydsl | pydsl/extract.py | https://github.com/nesaro/pydsl/blob/00b4fffd72036b80335e1a44a888fac57917ab41/pydsl/extract.py#L76-L111 | def extract(grammar, inputdata, fixed_start = False, return_first=False):
"""
Receives a sequence and a grammar,
returns a list of PositionTokens with all of the parts of the sequence that
are recognized by the grammar
"""
if not inputdata:
return []
checker = checker_factory(grammar)
totallen = len(inputdata)
from pydsl.grammar.PEG import Choice
try:
maxl = grammar.maxsize or totallen
except NotImplementedError:
maxl = totallen
try:
#minl = grammar.minsize #FIXME: It won't work with incompatible alphabets
minl = 1
except NotImplementedError:
minl = 1
if fixed_start:
max_start = 1
else:
max_start = totallen
result = []
for i in range(max_start):
for j in range(i+minl, min(i+maxl, totallen) + 1):
slice = inputdata[i:j]
check = checker.check(slice)
if check:
this_pt = PositionToken(slice, grammar, i, j)
if return_first:
return this_pt
result.append(this_pt)
return result | [
"def",
"extract",
"(",
"grammar",
",",
"inputdata",
",",
"fixed_start",
"=",
"False",
",",
"return_first",
"=",
"False",
")",
":",
"if",
"not",
"inputdata",
":",
"return",
"[",
"]",
"checker",
"=",
"checker_factory",
"(",
"grammar",
")",
"totallen",
"=",
"len",
"(",
"inputdata",
")",
"from",
"pydsl",
".",
"grammar",
".",
"PEG",
"import",
"Choice",
"try",
":",
"maxl",
"=",
"grammar",
".",
"maxsize",
"or",
"totallen",
"except",
"NotImplementedError",
":",
"maxl",
"=",
"totallen",
"try",
":",
"#minl = grammar.minsize #FIXME: It won't work with incompatible alphabets",
"minl",
"=",
"1",
"except",
"NotImplementedError",
":",
"minl",
"=",
"1",
"if",
"fixed_start",
":",
"max_start",
"=",
"1",
"else",
":",
"max_start",
"=",
"totallen",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"max_start",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"minl",
",",
"min",
"(",
"i",
"+",
"maxl",
",",
"totallen",
")",
"+",
"1",
")",
":",
"slice",
"=",
"inputdata",
"[",
"i",
":",
"j",
"]",
"check",
"=",
"checker",
".",
"check",
"(",
"slice",
")",
"if",
"check",
":",
"this_pt",
"=",
"PositionToken",
"(",
"slice",
",",
"grammar",
",",
"i",
",",
"j",
")",
"if",
"return_first",
":",
"return",
"this_pt",
"result",
".",
"append",
"(",
"this_pt",
")",
"return",
"result"
] | Receives a sequence and a grammar,
returns a list of PositionTokens with all of the parts of the sequence that
are recognized by the grammar | [
"Receives",
"a",
"sequence",
"and",
"a",
"grammar",
"returns",
"a",
"list",
"of",
"PositionTokens",
"with",
"all",
"of",
"the",
"parts",
"of",
"the",
"sequence",
"that",
"are",
"recognized",
"by",
"the",
"grammar"
] | python | train |
ansible/tower-cli | tower_cli/conf.py | https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/conf.py#L337-L347 | def config_from_environment():
"""Read tower-cli config values from the environment if present, being
careful not to override config values that were explicitly passed in.
"""
kwargs = {}
for k in CONFIG_OPTIONS:
env = 'TOWER_' + k.upper()
v = os.getenv(env, None)
if v is not None:
kwargs[k] = v
return kwargs | [
"def",
"config_from_environment",
"(",
")",
":",
"kwargs",
"=",
"{",
"}",
"for",
"k",
"in",
"CONFIG_OPTIONS",
":",
"env",
"=",
"'TOWER_'",
"+",
"k",
".",
"upper",
"(",
")",
"v",
"=",
"os",
".",
"getenv",
"(",
"env",
",",
"None",
")",
"if",
"v",
"is",
"not",
"None",
":",
"kwargs",
"[",
"k",
"]",
"=",
"v",
"return",
"kwargs"
] | Read tower-cli config values from the environment if present, being
careful not to override config values that were explicitly passed in. | [
"Read",
"tower",
"-",
"cli",
"config",
"values",
"from",
"the",
"environment",
"if",
"present",
"being",
"careful",
"not",
"to",
"override",
"config",
"values",
"that",
"were",
"explicitly",
"passed",
"in",
"."
] | python | valid |
autokey/autokey | lib/autokey/qtui/configwindow.py | https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/qtui/configwindow.py#L146-L163 | def _set_platform_specific_keyboard_shortcuts(self):
"""
QtDesigner does not support QKeySequence::StandardKey enum based default keyboard shortcuts.
This means that all default key combinations ("Save", "Quit", etc) have to be defined in code.
"""
self.action_new_phrase.setShortcuts(QKeySequence.New)
self.action_save.setShortcuts(QKeySequence.Save)
self.action_close_window.setShortcuts(QKeySequence.Close)
self.action_quit.setShortcuts(QKeySequence.Quit)
self.action_undo.setShortcuts(QKeySequence.Undo)
self.action_redo.setShortcuts(QKeySequence.Redo)
self.action_cut_item.setShortcuts(QKeySequence.Cut)
self.action_copy_item.setShortcuts(QKeySequence.Copy)
self.action_paste_item.setShortcuts(QKeySequence.Paste)
self.action_delete_item.setShortcuts(QKeySequence.Delete)
self.action_configure_autokey.setShortcuts(QKeySequence.Preferences) | [
"def",
"_set_platform_specific_keyboard_shortcuts",
"(",
"self",
")",
":",
"self",
".",
"action_new_phrase",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"New",
")",
"self",
".",
"action_save",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Save",
")",
"self",
".",
"action_close_window",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Close",
")",
"self",
".",
"action_quit",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Quit",
")",
"self",
".",
"action_undo",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Undo",
")",
"self",
".",
"action_redo",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Redo",
")",
"self",
".",
"action_cut_item",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Cut",
")",
"self",
".",
"action_copy_item",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Copy",
")",
"self",
".",
"action_paste_item",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Paste",
")",
"self",
".",
"action_delete_item",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Delete",
")",
"self",
".",
"action_configure_autokey",
".",
"setShortcuts",
"(",
"QKeySequence",
".",
"Preferences",
")"
] | QtDesigner does not support QKeySequence::StandardKey enum based default keyboard shortcuts.
This means that all default key combinations ("Save", "Quit", etc) have to be defined in code. | [
"QtDesigner",
"does",
"not",
"support",
"QKeySequence",
"::",
"StandardKey",
"enum",
"based",
"default",
"keyboard",
"shortcuts",
".",
"This",
"means",
"that",
"all",
"default",
"key",
"combinations",
"(",
"Save",
"Quit",
"etc",
")",
"have",
"to",
"be",
"defined",
"in",
"code",
"."
] | python | train |
estnltk/estnltk | estnltk/mw_verbs/utils.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L166-L200 | def matchingAnalyses(self, tokenJson):
    '''Determines whether given token (tokenJson) satisfies all the rules listed
    in the WordTemplate and returns a list of analyses (elements of
    tokenJson[ANALYSIS]) that are matching all the rules. An empty list is
    returned if none of the analyses match (all the rules), or (!) if none of
    the rules are describing the ANALYSIS part of the token;
    Parameters
    ----------
    tokenJson: pyvabamorf's analysis of a single word token;
    '''
    matchingResults = []
    if self.otherRules != None:
        otherMatches = []
        for field in self.otherRules:
            match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None)
            otherMatches.append( match )
        if not otherMatches or not all(otherMatches):
            return matchingResults
    if self.analysisRules != None:
        assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
        for analysis in tokenJson[ANALYSIS]:
            # Check whether this analysis satisfies all the rules
            # (if not, discard the analysis)
            matches = []
            for field in self.analysisRules:
                value = analysis[field] if field in analysis else ""
                match = (self.analysisRules[field]).match(value) != None
                matches.append( match )
            if matches and all(matches):
                matchingResults.append( analysis )
        # Return True iff there was at least one analysis that
        # satisfied all the rules;
        return matchingResults
    return matchingResults | [
"def",
"matchingAnalyses",
"(",
"self",
",",
"tokenJson",
")",
":",
"matchingResults",
"=",
"[",
"]",
"if",
"self",
".",
"otherRules",
"!=",
"None",
":",
"otherMatches",
"=",
"[",
"]",
"for",
"field",
"in",
"self",
".",
"otherRules",
":",
"match",
"=",
"field",
"in",
"tokenJson",
"and",
"(",
"(",
"self",
".",
"otherRules",
"[",
"field",
"]",
")",
".",
"match",
"(",
"tokenJson",
"[",
"field",
"]",
")",
"!=",
"None",
")",
"otherMatches",
".",
"append",
"(",
"match",
")",
"if",
"not",
"otherMatches",
"or",
"not",
"all",
"(",
"otherMatches",
")",
":",
"return",
"matchingResults",
"if",
"self",
".",
"analysisRules",
"!=",
"None",
":",
"assert",
"ANALYSIS",
"in",
"tokenJson",
",",
"\"No ANALYSIS found within token: \"",
"+",
"str",
"(",
"tokenJson",
")",
"for",
"analysis",
"in",
"tokenJson",
"[",
"ANALYSIS",
"]",
":",
"# Check whether this analysis satisfies all the rules \r",
"# (if not, discard the analysis)\r",
"matches",
"=",
"[",
"]",
"for",
"field",
"in",
"self",
".",
"analysisRules",
":",
"value",
"=",
"analysis",
"[",
"field",
"]",
"if",
"field",
"in",
"analysis",
"else",
"\"\"",
"match",
"=",
"(",
"self",
".",
"analysisRules",
"[",
"field",
"]",
")",
".",
"match",
"(",
"value",
")",
"!=",
"None",
"matches",
".",
"append",
"(",
"match",
")",
"if",
"matches",
"and",
"all",
"(",
"matches",
")",
":",
"matchingResults",
".",
"append",
"(",
"analysis",
")",
"# Return True iff there was at least one analysis that \r",
"# satisfied all the rules;\r",
"return",
"matchingResults",
"return",
"matchingResults"
] | Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyses (elements of
tokenJson[ANALYSIS]) that are matching all the rules. An empty list is
returned if none of the analyses match (all the rules), or (!) if none of
the rules are describing the ANALYSIS part of the token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token; | [
"Determines",
"whether",
"given",
"token",
"(",
"tokenJson",
")",
"satisfies",
"all",
"the",
"rules",
"listed",
"in",
"the",
"WordTemplate",
"and",
"returns",
"a",
"list",
"of",
"analyses",
"(",
"elements",
"of",
"tokenJson",
"[",
"ANALYSIS",
"]",
")",
"that",
"are",
"matching",
"all",
"the",
"rules",
".",
"An",
"empty",
"list",
"is",
"returned",
"if",
"none",
"of",
"the",
"analyses",
"match",
"(",
"all",
"the",
"rules",
")",
"or",
"(",
"!",
")",
"if",
"none",
"of",
"the",
"rules",
"are",
"describing",
"the",
"ANALYSIS",
"part",
"of",
"the",
"token",
";",
"Parameters",
"----------",
"tokenJson",
":",
"pyvabamorf",
"s",
"analysis",
"of",
"a",
"single",
"word",
"token",
";"
] | python | train |
Gandi/gandi.cli | gandi/cli/core/conf.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/conf.py#L191-L202 | def configure(cls, global_, key, val):
""" Update and save configuration value to file. """
# first retrieve current configuration
scope = 'global' if global_ else 'local'
if scope not in cls._conffiles:
cls._conffiles[scope] = {}
config = cls._conffiles.get(scope, {})
# apply modification to fields
cls._set(scope, key, val)
conf_file = cls.home_config if global_ else cls.local_config
# save configuration to file
cls.save(os.path.expanduser(conf_file), config) | [
"def",
"configure",
"(",
"cls",
",",
"global_",
",",
"key",
",",
"val",
")",
":",
"# first retrieve current configuration",
"scope",
"=",
"'global'",
"if",
"global_",
"else",
"'local'",
"if",
"scope",
"not",
"in",
"cls",
".",
"_conffiles",
":",
"cls",
".",
"_conffiles",
"[",
"scope",
"]",
"=",
"{",
"}",
"config",
"=",
"cls",
".",
"_conffiles",
".",
"get",
"(",
"scope",
",",
"{",
"}",
")",
"# apply modification to fields",
"cls",
".",
"_set",
"(",
"scope",
",",
"key",
",",
"val",
")",
"conf_file",
"=",
"cls",
".",
"home_config",
"if",
"global_",
"else",
"cls",
".",
"local_config",
"# save configuration to file",
"cls",
".",
"save",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"conf_file",
")",
",",
"config",
")"
] | Update and save configuration value to file. | [
"Update",
"and",
"save",
"configuration",
"value",
"to",
"file",
"."
] | python | train |
pywbem/pywbem | pywbem_mock/_wbemconnection_mock.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L416-L451 | def add_namespace(self, namespace):
"""
Add a CIM namespace to the mock repository.
The namespace must not yet exist in the mock repository.
Note that the default connection namespace is automatically added to
the mock repository upon creation of this object.
Parameters:
namespace (:term:`string`):
The name of the CIM namespace in the mock repository. Must not be
`None`. Any leading and trailing slash characters are split off
from the provided string.
Raises:
ValueError: Namespace argument must not be None
CIMError: CIM_ERR_ALREADY_EXISTS if the namespace already exists in
the mock repository.
"""
if namespace is None:
raise ValueError("Namespace argument must not be None")
# Normalize the namespace name
namespace = namespace.strip('/')
if namespace in self.namespaces:
raise CIMError(
CIM_ERR_ALREADY_EXISTS,
_format("Namespace {0!A} already exists in the mock "
"repository", namespace))
self.namespaces[namespace] = True | [
"def",
"add_namespace",
"(",
"self",
",",
"namespace",
")",
":",
"if",
"namespace",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Namespace argument must not be None\"",
")",
"# Normalize the namespace name",
"namespace",
"=",
"namespace",
".",
"strip",
"(",
"'/'",
")",
"if",
"namespace",
"in",
"self",
".",
"namespaces",
":",
"raise",
"CIMError",
"(",
"CIM_ERR_ALREADY_EXISTS",
",",
"_format",
"(",
"\"Namespace {0!A} already exists in the mock \"",
"\"repository\"",
",",
"namespace",
")",
")",
"self",
".",
"namespaces",
"[",
"namespace",
"]",
"=",
"True"
] | Add a CIM namespace to the mock repository.
The namespace must not yet exist in the mock repository.
Note that the default connection namespace is automatically added to
the mock repository upon creation of this object.
Parameters:
namespace (:term:`string`):
The name of the CIM namespace in the mock repository. Must not be
`None`. Any leading and trailing slash characters are split off
from the provided string.
Raises:
ValueError: Namespace argument must not be None
CIMError: CIM_ERR_ALREADY_EXISTS if the namespace already exists in
the mock repository. | [
"Add",
"a",
"CIM",
"namespace",
"to",
"the",
"mock",
"repository",
"."
] | python | train |
jspricke/python-icstask | icstask.py | https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L316-L326 | def replace_vobject(self, uuid, vtodo, project=None):
"""Update the task with the UID from the vObject
uuid -- the UID of the task
vtodo -- the iCalendar to add
project -- the project to add (see get_filesnames() as well)
"""
self._update()
uuid = uuid.split('@')[0]
if project:
project = basename(project)
return self.to_task(vtodo.vtodo, project, uuid) | [
"def",
"replace_vobject",
"(",
"self",
",",
"uuid",
",",
"vtodo",
",",
"project",
"=",
"None",
")",
":",
"self",
".",
"_update",
"(",
")",
"uuid",
"=",
"uuid",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"if",
"project",
":",
"project",
"=",
"basename",
"(",
"project",
")",
"return",
"self",
".",
"to_task",
"(",
"vtodo",
".",
"vtodo",
",",
"project",
",",
"uuid",
")"
] | Update the task with the UID from the vObject
uuid -- the UID of the task
vtodo -- the iCalendar to add
project -- the project to add (see get_filesnames() as well) | [
"Update",
"the",
"task",
"with",
"the",
"UID",
"from",
"the",
"vObject",
"uuid",
"--",
"the",
"UID",
"of",
"the",
"task",
"vtodo",
"--",
"the",
"iCalendar",
"to",
"add",
"project",
"--",
"the",
"project",
"to",
"add",
"(",
"see",
"get_filesnames",
"()",
"as",
"well",
")"
] | python | train |
Nekroze/partpy | partpy/sourcestring.py | https://github.com/Nekroze/partpy/blob/dbb7d2fb285464fc43d85bc31f5af46192d301f6/partpy/sourcestring.py#L61-L69 | def eol_distance_next(self, offset=0):
"""Return the amount of characters until the next newline."""
distance = 0
for char in self.string[self.pos + offset:]:
if char == '\n':
break
else:
distance += 1
return distance | [
"def",
"eol_distance_next",
"(",
"self",
",",
"offset",
"=",
"0",
")",
":",
"distance",
"=",
"0",
"for",
"char",
"in",
"self",
".",
"string",
"[",
"self",
".",
"pos",
"+",
"offset",
":",
"]",
":",
"if",
"char",
"==",
"'\\n'",
":",
"break",
"else",
":",
"distance",
"+=",
"1",
"return",
"distance"
] | Return the amount of characters until the next newline. | [
"Return",
"the",
"amount",
"of",
"characters",
"until",
"the",
"next",
"newline",
"."
] | python | train |
Qiskit/qiskit-terra | qiskit/quantum_info/operators/predicates.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/predicates.py#L110-L123 | def is_positive_semidefinite_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT):
"""Test if a matrix is positive semidefinite"""
if atol is None:
atol = ATOL_DEFAULT
if rtol is None:
rtol = RTOL_DEFAULT
if not is_hermitian_matrix(mat, rtol=rtol, atol=atol):
return False
# Check eigenvalues are all positive
vals = np.linalg.eigvalsh(mat)
for v in vals:
if v < -atol:
return False
return True | [
"def",
"is_positive_semidefinite_matrix",
"(",
"mat",
",",
"rtol",
"=",
"RTOL_DEFAULT",
",",
"atol",
"=",
"ATOL_DEFAULT",
")",
":",
"if",
"atol",
"is",
"None",
":",
"atol",
"=",
"ATOL_DEFAULT",
"if",
"rtol",
"is",
"None",
":",
"rtol",
"=",
"RTOL_DEFAULT",
"if",
"not",
"is_hermitian_matrix",
"(",
"mat",
",",
"rtol",
"=",
"rtol",
",",
"atol",
"=",
"atol",
")",
":",
"return",
"False",
"# Check eigenvalues are all positive",
"vals",
"=",
"np",
".",
"linalg",
".",
"eigvalsh",
"(",
"mat",
")",
"for",
"v",
"in",
"vals",
":",
"if",
"v",
"<",
"-",
"atol",
":",
"return",
"False",
"return",
"True"
] | Test if a matrix is positive semidefinite | [
"Test",
"if",
"a",
"matrix",
"is",
"positive",
"semidefinite"
] | python | test |
crypto101/arthur | arthur/ui.py | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L306-L311 | def _makeTextWidgets(self):
"""Makes an editable prompt widget.
"""
self.prompt = urwid.Edit(self.promptText, multiline=False)
return [self.prompt] | [
"def",
"_makeTextWidgets",
"(",
"self",
")",
":",
"self",
".",
"prompt",
"=",
"urwid",
".",
"Edit",
"(",
"self",
".",
"promptText",
",",
"multiline",
"=",
"False",
")",
"return",
"[",
"self",
".",
"prompt",
"]"
] | Makes an editable prompt widget. | [
"Makes",
"an",
"editable",
"prompt",
"widget",
"."
] | python | train |
EmbodiedCognition/py-c3d | c3d.py | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L239-L260 | def write(self, group_id, handle):
    '''Write binary data for this parameter to a file handle.
    Parameters
    ----------
    group_id : int
        The numerical ID of the group that holds this parameter.
    handle : file handle
        An open, writable, binary file handle.
    '''
    name = self.name.encode('utf-8')
    handle.write(struct.pack('bb', len(name), group_id))
    handle.write(name)
    handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
    handle.write(struct.pack('b', self.bytes_per_element))
    handle.write(struct.pack('B', len(self.dimensions)))
    handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
    if self.bytes:
        handle.write(self.bytes)
    desc = self.desc.encode('utf-8')
    handle.write(struct.pack('B', len(desc)))
    handle.write(desc) | [
"def",
"write",
"(",
"self",
",",
"group_id",
",",
"handle",
")",
":",
"name",
"=",
"self",
".",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'bb'",
",",
"len",
"(",
"name",
")",
",",
"group_id",
")",
")",
"handle",
".",
"write",
"(",
"name",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'<h'",
",",
"self",
".",
"binary_size",
"(",
")",
"-",
"2",
"-",
"len",
"(",
"name",
")",
")",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'b'",
",",
"self",
".",
"bytes_per_element",
")",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'B'",
",",
"len",
"(",
"self",
".",
"dimensions",
")",
")",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'B'",
"*",
"len",
"(",
"self",
".",
"dimensions",
")",
",",
"*",
"self",
".",
"dimensions",
")",
")",
"if",
"self",
".",
"bytes",
":",
"handle",
".",
"write",
"(",
"self",
".",
"bytes",
")",
"desc",
"=",
"self",
".",
"desc",
".",
"encode",
"(",
"'utf-8'",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'B'",
",",
"len",
"(",
"desc",
")",
")",
")",
"handle",
".",
"write",
"(",
"desc",
")"
] | Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle. | [
"Write",
"binary",
"data",
"for",
"this",
"parameter",
"to",
"a",
"file",
"handle",
"."
] | python | train |
mabuchilab/QNET | src/qnet/printing/_unicode_mappings.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/_unicode_mappings.py#L44-L120 | def render_unicode_sub_super(
        name, subs=None, supers=None, sub_first=True, translate_symbols=True,
        unicode_sub_super=True, sep=',', subscript_max_len=1):
    """Assemble a string from the primary name and the given sub- and
    superscripts::
        >>> render_unicode_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2])
        'α_μ,ν^2'
        >>> render_unicode_sub_super(
        ...     name='alpha', subs=['1', '2'], supers=['(1)'], sep='')
        'α₁₂⁽¹⁾'
        >>> render_unicode_sub_super(
        ...     name='alpha', subs=['1', '2'], supers=['(1)'], sep='',
        ...     unicode_sub_super=False)
        'α_12^(1)'
    Args:
        name (str): the string without the subscript/superscript
        subs (list or None): list of subscripts
        supers (list or None): list of superscripts
        translate_symbols (bool): If True, try to translate (Greek) symbols in
            `name, `subs`, and `supers` to unicode
        unicode_sub_super (bool): It True, try to use unicode
            subscript/superscript symbols
        sep (str): Separator to use if there are multiple
            subscripts/superscripts
        subscript_max_len (int): Maximum character length of subscript that is
            eligible to be rendered in unicode. This defaults to 1, because
            spelling out enire words as a unicode subscript looks terrible in
            monospace (letter spacing too large)
    """
    if subs is None:
        subs = []
    if supers is None:
        supers = []
    if translate_symbols:
        supers = [_translate_symbols(sup) for sup in supers]
        subs = [_translate_symbols(sub) for sub in subs]
        name = _translate_symbols(name)
    res = name
    try:
        if unicode_sub_super:
            supers_modified = [
                _unicode_sub_super(s, _SUPERSCRIPT_MAPPING)
                for s in supers]
            subs_modified = [
                _unicode_sub_super(
                    s, _SUBSCRIPT_MAPPING, max_len=subscript_max_len)
                for s in subs]
            if sub_first:
                if len(subs_modified) > 0:
                    res += sep.join(subs_modified)
                if len(supers_modified) > 0:
                    res += sep.join(supers_modified)
            else:
                if len(supers_modified) > 0:
                    res += sep.join(supers_modified)
                if len(subs_modified) > 0:
                    res += sep.join(subs_modified)
    except KeyError:
        unicode_sub_super = False
    if not unicode_sub_super:
        sub = sep.join(subs)
        sup = sep.join(supers)
        if sub_first:
            if len(sub) > 0:
                res += "_%s" % sub
            if len(sup) > 0:
                res += "^%s" % sup
        else:
            if len(sup) > 0:
                res += "^%s" % sup
            if len(sub) > 0:
                res += "_%s" % sub
    return res | [
"def",
"render_unicode_sub_super",
"(",
"name",
",",
"subs",
"=",
"None",
",",
"supers",
"=",
"None",
",",
"sub_first",
"=",
"True",
",",
"translate_symbols",
"=",
"True",
",",
"unicode_sub_super",
"=",
"True",
",",
"sep",
"=",
"','",
",",
"subscript_max_len",
"=",
"1",
")",
":",
"if",
"subs",
"is",
"None",
":",
"subs",
"=",
"[",
"]",
"if",
"supers",
"is",
"None",
":",
"supers",
"=",
"[",
"]",
"if",
"translate_symbols",
":",
"supers",
"=",
"[",
"_translate_symbols",
"(",
"sup",
")",
"for",
"sup",
"in",
"supers",
"]",
"subs",
"=",
"[",
"_translate_symbols",
"(",
"sub",
")",
"for",
"sub",
"in",
"subs",
"]",
"name",
"=",
"_translate_symbols",
"(",
"name",
")",
"res",
"=",
"name",
"try",
":",
"if",
"unicode_sub_super",
":",
"supers_modified",
"=",
"[",
"_unicode_sub_super",
"(",
"s",
",",
"_SUPERSCRIPT_MAPPING",
")",
"for",
"s",
"in",
"supers",
"]",
"subs_modified",
"=",
"[",
"_unicode_sub_super",
"(",
"s",
",",
"_SUBSCRIPT_MAPPING",
",",
"max_len",
"=",
"subscript_max_len",
")",
"for",
"s",
"in",
"subs",
"]",
"if",
"sub_first",
":",
"if",
"len",
"(",
"subs_modified",
")",
">",
"0",
":",
"res",
"+=",
"sep",
".",
"join",
"(",
"subs_modified",
")",
"if",
"len",
"(",
"supers_modified",
")",
">",
"0",
":",
"res",
"+=",
"sep",
".",
"join",
"(",
"supers_modified",
")",
"else",
":",
"if",
"len",
"(",
"supers_modified",
")",
">",
"0",
":",
"res",
"+=",
"sep",
".",
"join",
"(",
"supers_modified",
")",
"if",
"len",
"(",
"subs_modified",
")",
">",
"0",
":",
"res",
"+=",
"sep",
".",
"join",
"(",
"subs_modified",
")",
"except",
"KeyError",
":",
"unicode_sub_super",
"=",
"False",
"if",
"not",
"unicode_sub_super",
":",
"sub",
"=",
"sep",
".",
"join",
"(",
"subs",
")",
"sup",
"=",
"sep",
".",
"join",
"(",
"supers",
")",
"if",
"sub_first",
":",
"if",
"len",
"(",
"sub",
")",
">",
"0",
":",
"res",
"+=",
"\"_%s\"",
"%",
"sub",
"if",
"len",
"(",
"sup",
")",
">",
"0",
":",
"res",
"+=",
"\"^%s\"",
"%",
"sup",
"else",
":",
"if",
"len",
"(",
"sup",
")",
">",
"0",
":",
"res",
"+=",
"\"^%s\"",
"%",
"sup",
"if",
"len",
"(",
"sub",
")",
">",
"0",
":",
"res",
"+=",
"\"_%s\"",
"%",
"sub",
"return",
"res"
] | Assemble a string from the primary name and the given sub- and
superscripts::
>>> render_unicode_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2])
'α_μ,ν^2'
>>> render_unicode_sub_super(
... name='alpha', subs=['1', '2'], supers=['(1)'], sep='')
'α₁₂⁽¹⁾'
>>> render_unicode_sub_super(
... name='alpha', subs=['1', '2'], supers=['(1)'], sep='',
... unicode_sub_super=False)
'α_12^(1)'
Args:
name (str): the string without the subscript/superscript
subs (list or None): list of subscripts
supers (list or None): list of superscripts
translate_symbols (bool): If True, try to translate (Greek) symbols in
`name, `subs`, and `supers` to unicode
unicode_sub_super (bool): It True, try to use unicode
subscript/superscript symbols
sep (str): Separator to use if there are multiple
subscripts/superscripts
subscript_max_len (int): Maximum character length of subscript that is
eligible to be rendered in unicode. This defaults to 1, because
spelling out enire words as a unicode subscript looks terrible in
monospace (letter spacing too large) | [
"Assemble",
"a",
"string",
"from",
"the",
"primary",
"name",
"and",
"the",
"given",
"sub",
"-",
"and",
"superscripts",
"::"
] | python | train |
nion-software/nionswift | nion/swift/model/Profile.py | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/Profile.py#L99-L117 | def _migrate_library(workspace_dir: pathlib.Path, do_logging: bool=True) -> pathlib.Path:
""" Migrate library to latest version. """
library_path_11 = workspace_dir / "Nion Swift Workspace.nslib"
library_path_12 = workspace_dir / "Nion Swift Library 12.nslib"
library_path_13 = workspace_dir / "Nion Swift Library 13.nslib"
library_paths = (library_path_11, library_path_12)
library_path_latest = library_path_13
if not os.path.exists(library_path_latest):
for library_path in reversed(library_paths):
if os.path.exists(library_path):
if do_logging:
logging.info("Migrating library: %s -> %s", library_path, library_path_latest)
shutil.copyfile(library_path, library_path_latest)
break
return library_path_latest | [
"def",
"_migrate_library",
"(",
"workspace_dir",
":",
"pathlib",
".",
"Path",
",",
"do_logging",
":",
"bool",
"=",
"True",
")",
"->",
"pathlib",
".",
"Path",
":",
"library_path_11",
"=",
"workspace_dir",
"/",
"\"Nion Swift Workspace.nslib\"",
"library_path_12",
"=",
"workspace_dir",
"/",
"\"Nion Swift Library 12.nslib\"",
"library_path_13",
"=",
"workspace_dir",
"/",
"\"Nion Swift Library 13.nslib\"",
"library_paths",
"=",
"(",
"library_path_11",
",",
"library_path_12",
")",
"library_path_latest",
"=",
"library_path_13",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"library_path_latest",
")",
":",
"for",
"library_path",
"in",
"reversed",
"(",
"library_paths",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"library_path",
")",
":",
"if",
"do_logging",
":",
"logging",
".",
"info",
"(",
"\"Migrating library: %s -> %s\"",
",",
"library_path",
",",
"library_path_latest",
")",
"shutil",
".",
"copyfile",
"(",
"library_path",
",",
"library_path_latest",
")",
"break",
"return",
"library_path_latest"
] | Migrate library to latest version. | [
"Migrate",
"library",
"to",
"latest",
"version",
"."
] | python | train |
dourvaris/nano-python | src/nano/rpc.py | https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2667-L2695 | def wallet_work_get(self, wallet):
"""
Returns a list of pairs of account and work from **wallet**
.. enable_control required
.. version 8.0 required
:param wallet: Wallet to return work for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_work_get(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_1111111111111111111111111111111111111111111111111111hifc8npp":
"432e5cf728c90f4f"
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_work_get', payload)
return resp.get('works') or {} | [
"def",
"wallet_work_get",
"(",
"self",
",",
"wallet",
")",
":",
"wallet",
"=",
"self",
".",
"_process_value",
"(",
"wallet",
",",
"'wallet'",
")",
"payload",
"=",
"{",
"\"wallet\"",
":",
"wallet",
"}",
"resp",
"=",
"self",
".",
"call",
"(",
"'wallet_work_get'",
",",
"payload",
")",
"return",
"resp",
".",
"get",
"(",
"'works'",
")",
"or",
"{",
"}"
] | Returns a list of pairs of account and work from **wallet**
.. enable_control required
.. version 8.0 required
:param wallet: Wallet to return work for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_work_get(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_1111111111111111111111111111111111111111111111111111hifc8npp":
"432e5cf728c90f4f"
} | [
"Returns",
"a",
"list",
"of",
"pairs",
"of",
"account",
"and",
"work",
"from",
"**",
"wallet",
"**"
] | python | train |
ralphhaygood/sklearn-gbmi | sklearn_gbmi/sklearn_gbmi.py | https://github.com/ralphhaygood/sklearn-gbmi/blob/23a1e7fd50e53d6261379f22a337d8fa4ee6aabe/sklearn_gbmi/sklearn_gbmi.py#L16-L95 | def h(gbm, array_or_frame, indices_or_columns = 'all'):
"""
PURPOSE
Compute Friedman and Popescu's H statistic, in order to look for an interaction in the passed gradient-boosting
model among the variables represented by the elements of the passed array or frame and specified by the passed
indices or columns.
See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat.
2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.
ARGUMENTS
gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or
sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here).
array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas
.DataFrame).
indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of
array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a
pandas data frame. If it is 'all', then all columns of array_or_frame are used.
RETURNS
The H statistic of the variables or NaN if the computation is spoiled by weak main effects and rounding errors.
H varies from 0 to 1. The larger H, the stronger the evidence for an interaction among the variables.
EXAMPLES
Friedman and Popescu's (2008) formulas (44) and (46) correspond to
h(F, x, [j, k])
and
h(F, x, [j, k, l])
respectively.
NOTES
1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths
of main effects are available as gbm.feature_importances_ once gbm has been fitted.
2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in
the target function. To forestall such spurious interactions, check for strong correlations among variables before
fitting gbm.
"""
if indices_or_columns == 'all':
if gbm.max_depth < array_or_frame.shape[1]:
raise \
Exception(
"gbm.max_depth == {} < array_or_frame.shape[1] == {}, so indices_or_columns must not be 'all'."
.format(gbm.max_depth, array_or_frame.shape[1])
)
else:
if gbm.max_depth < len(indices_or_columns):
raise \
Exception(
"gbm.max_depth == {}, so indices_or_columns must contain at most {} {}."
.format(gbm.max_depth, gbm.max_depth, "element" if gbm.max_depth == 1 else "elements")
)
check_args_contd(array_or_frame, indices_or_columns)
arr, model_inds = get_arr_and_model_inds(array_or_frame, indices_or_columns)
width = arr.shape[1]
f_vals = {}
for n in range(width, 0, -1):
for inds in itertools.combinations(range(width), n):
f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds)
return compute_h_val(f_vals, arr, tuple(range(width))) | [
"def",
"h",
"(",
"gbm",
",",
"array_or_frame",
",",
"indices_or_columns",
"=",
"'all'",
")",
":",
"if",
"indices_or_columns",
"==",
"'all'",
":",
"if",
"gbm",
".",
"max_depth",
"<",
"array_or_frame",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"gbm.max_depth == {} < array_or_frame.shape[1] == {}, so indices_or_columns must not be 'all'.\"",
".",
"format",
"(",
"gbm",
".",
"max_depth",
",",
"array_or_frame",
".",
"shape",
"[",
"1",
"]",
")",
")",
"else",
":",
"if",
"gbm",
".",
"max_depth",
"<",
"len",
"(",
"indices_or_columns",
")",
":",
"raise",
"Exception",
"(",
"\"gbm.max_depth == {}, so indices_or_columns must contain at most {} {}.\"",
".",
"format",
"(",
"gbm",
".",
"max_depth",
",",
"gbm",
".",
"max_depth",
",",
"\"element\"",
"if",
"gbm",
".",
"max_depth",
"==",
"1",
"else",
"\"elements\"",
")",
")",
"check_args_contd",
"(",
"array_or_frame",
",",
"indices_or_columns",
")",
"arr",
",",
"model_inds",
"=",
"get_arr_and_model_inds",
"(",
"array_or_frame",
",",
"indices_or_columns",
")",
"width",
"=",
"arr",
".",
"shape",
"[",
"1",
"]",
"f_vals",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"width",
",",
"0",
",",
"-",
"1",
")",
":",
"for",
"inds",
"in",
"itertools",
".",
"combinations",
"(",
"range",
"(",
"width",
")",
",",
"n",
")",
":",
"f_vals",
"[",
"inds",
"]",
"=",
"compute_f_vals",
"(",
"gbm",
",",
"model_inds",
",",
"arr",
",",
"inds",
")",
"return",
"compute_h_val",
"(",
"f_vals",
",",
"arr",
",",
"tuple",
"(",
"range",
"(",
"width",
")",
")",
")"
] | PURPOSE
Compute Friedman and Popescu's H statistic, in order to look for an interaction in the passed gradient-boosting
model among the variables represented by the elements of the passed array or frame and specified by the passed
indices or columns.
See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat.
2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.
ARGUMENTS
gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or
sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here).
array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas
.DataFrame).
indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of
array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a
pandas data frame. If it is 'all', then all columns of array_or_frame are used.
RETURNS
The H statistic of the variables or NaN if the computation is spoiled by weak main effects and rounding errors.
H varies from 0 to 1. The larger H, the stronger the evidence for an interaction among the variables.
EXAMPLES
Friedman and Popescu's (2008) formulas (44) and (46) correspond to
h(F, x, [j, k])
and
h(F, x, [j, k, l])
respectively.
NOTES
1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths
of main effects are available as gbm.feature_importances_ once gbm has been fitted.
2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in
the target function. To forestall such spurious interactions, check for strong correlations among variables before
fitting gbm. | [
"PURPOSE"
] | python | train |
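Illustrative only: a minimal sketch of calling the h() helper above, assuming the package exposes it as sklearn_gbmi.h and that a scikit-learn GradientBoostingRegressor has been fitted with max_depth at least as large as the number of variables inspected.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn_gbmi import h  # assumed import path for the helper above

# Toy data in which y depends on an interaction between columns 0 and 1.
rng = np.random.RandomState(0)
X = rng.uniform(size=(500, 3))
y = X[:, 0] * X[:, 1] + 0.1 * rng.normal(size=500)

# max_depth must be >= the number of variables passed to h().
gbm = GradientBoostingRegressor(max_depth=2, random_state=0).fit(X, y)

# An H value close to 1 suggests a strong interaction between columns 0 and 1.
print(h(gbm, X, [0, 1]))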
mikedh/trimesh | trimesh/util.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L1067-L1106 | def array_to_encoded(array, dtype=None, encoding='base64'):
"""
Export a numpy array to a compact serializable dictionary.
Parameters
------------
array : array
Any numpy array
dtype : str or None
Optional dtype to encode array
encoding : str
'base64' or 'binary'
Returns
---------
encoded : dict
Has keys:
'dtype': str, of dtype
'shape': tuple of shape
'base64': str, base64 encoded string
"""
array = np.asanyarray(array)
shape = array.shape
# ravel also forces contiguous
flat = np.ravel(array)
if dtype is None:
dtype = array.dtype
encoded = {'dtype': np.dtype(dtype).str,
'shape': shape}
if encoding in ['base64', 'dict64']:
packed = base64.b64encode(flat.astype(dtype).tostring())
if hasattr(packed, 'decode'):
packed = packed.decode('utf-8')
encoded['base64'] = packed
elif encoding == 'binary':
encoded['binary'] = array.tostring(order='C')
else:
raise ValueError('encoding {} is not available!'.format(encoding))
return encoded | [
"def",
"array_to_encoded",
"(",
"array",
",",
"dtype",
"=",
"None",
",",
"encoding",
"=",
"'base64'",
")",
":",
"array",
"=",
"np",
".",
"asanyarray",
"(",
"array",
")",
"shape",
"=",
"array",
".",
"shape",
"# ravel also forces contiguous",
"flat",
"=",
"np",
".",
"ravel",
"(",
"array",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"array",
".",
"dtype",
"encoded",
"=",
"{",
"'dtype'",
":",
"np",
".",
"dtype",
"(",
"dtype",
")",
".",
"str",
",",
"'shape'",
":",
"shape",
"}",
"if",
"encoding",
"in",
"[",
"'base64'",
",",
"'dict64'",
"]",
":",
"packed",
"=",
"base64",
".",
"b64encode",
"(",
"flat",
".",
"astype",
"(",
"dtype",
")",
".",
"tostring",
"(",
")",
")",
"if",
"hasattr",
"(",
"packed",
",",
"'decode'",
")",
":",
"packed",
"=",
"packed",
".",
"decode",
"(",
"'utf-8'",
")",
"encoded",
"[",
"'base64'",
"]",
"=",
"packed",
"elif",
"encoding",
"==",
"'binary'",
":",
"encoded",
"[",
"'binary'",
"]",
"=",
"array",
".",
"tostring",
"(",
"order",
"=",
"'C'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'encoding {} is not available!'",
".",
"format",
"(",
"encoding",
")",
")",
"return",
"encoded"
] | Export a numpy array to a compact serializable dictionary.
Parameters
------------
array : array
Any numpy array
dtype : str or None
Optional dtype to encode array
encoding : str
'base64' or 'binary'
Returns
---------
encoded : dict
Has keys:
'dtype': str, of dtype
'shape': tuple of shape
'base64': str, base64 encoded string | [
"Export",
"a",
"numpy",
"array",
"to",
"a",
"compact",
"serializable",
"dictionary",
"."
] | python | train |
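A small usage sketch for the helper above; the import path trimesh.util matches the file shown, and the array values are placeholders.

import numpy as np
from trimesh.util import array_to_encoded

vertices = np.arange(12, dtype=np.float64).reshape((4, 3))

encoded = array_to_encoded(vertices, encoding='base64')
# The returned dict carries 'dtype', 'shape' and 'base64' keys.
print(encoded['dtype'], encoded['shape'])
print(encoded['base64'][:16], '...')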
rvswift/EB | EB/builder/postanalysis/postanalysis.py | https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/postanalysis/postanalysis.py#L106-L133 | def evaluate_list(molecules, ensemble_lookup, options):
"""
Evaluate a list of ensembles and return statistics and ROC plots if appropriate
"""
# create stats dictionaries to store results from each ensemble
stats = {} # {file name : metric_List}
# print progress messages
if options.write_roc:
print(" Determining virtual screening performance and writing ROC data ... ")
print('')
else:
print(" Determining virtual screening performance ...")
print('')
for filename in sorted(ensemble_lookup.keys()):
metric_List = calculate_metrics(molecules, ensemble_lookup, filename, options)
stats[filename] = metric_List
# write results summary
output.write_summary(stats, options, fw_type = None)
# plot
if options.plot:
print(" Making plots ... ")
print
plotter(molecules, ensemble_lookup, options) | [
"def",
"evaluate_list",
"(",
"molecules",
",",
"ensemble_lookup",
",",
"options",
")",
":",
"# create stats dictionaries to store results from each ensemble",
"stats",
"=",
"{",
"}",
"# {file name : metric_List}",
"# print progress messages",
"if",
"options",
".",
"write_roc",
":",
"print",
"(",
"\" Determining virtual screening performance and writing ROC data ... \"",
")",
"print",
"(",
"''",
")",
"else",
":",
"print",
"(",
"\" Determining virtual screening performance ...\"",
")",
"print",
"(",
"''",
")",
"for",
"filename",
"in",
"sorted",
"(",
"ensemble_lookup",
".",
"keys",
"(",
")",
")",
":",
"metric_List",
"=",
"calculate_metrics",
"(",
"molecules",
",",
"ensemble_lookup",
",",
"filename",
",",
"options",
")",
"stats",
"[",
"filename",
"]",
"=",
"metric_List",
"# write results summary",
"output",
".",
"write_summary",
"(",
"stats",
",",
"options",
",",
"fw_type",
"=",
"None",
")",
"# plot",
"if",
"options",
".",
"plot",
":",
"print",
"(",
"\" Making plots ... \"",
")",
"print",
"plotter",
"(",
"molecules",
",",
"ensemble_lookup",
",",
"options",
")"
] | Evaluate a list of ensembles and return statistics and ROC plots if appropriate | [
"Evaluate",
"a",
"list",
"of",
"ensembles",
"and",
"return",
"statistics",
"and",
"ROC",
"plots",
"if",
"appropriate"
] | python | train |
evolbioinfo/pastml | pastml/ml.py | https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/ml.py#L476-L493 | def convert_likelihoods_to_probabilities(tree, feature, states):
"""
Normalizes each node marginal likelihoods to convert them to marginal probabilities.
:param states: numpy array of states in the order corresponding to the marginal likelihood arrays
:param tree: ete3.Tree, the tree of interest
:param feature: str, character for which the probabilities are calculated
:return: pandas DataFrame, that maps node names to their marginal likelihoods.
"""
lh_feature = get_personalized_feature_name(feature, LH)
name2probs = {}
for node in tree.traverse():
lh = getattr(node, lh_feature)
name2probs[node.name] = lh / lh.sum()
return pd.DataFrame.from_dict(name2probs, orient='index', columns=states) | [
"def",
"convert_likelihoods_to_probabilities",
"(",
"tree",
",",
"feature",
",",
"states",
")",
":",
"lh_feature",
"=",
"get_personalized_feature_name",
"(",
"feature",
",",
"LH",
")",
"name2probs",
"=",
"{",
"}",
"for",
"node",
"in",
"tree",
".",
"traverse",
"(",
")",
":",
"lh",
"=",
"getattr",
"(",
"node",
",",
"lh_feature",
")",
"name2probs",
"[",
"node",
".",
"name",
"]",
"=",
"lh",
"/",
"lh",
".",
"sum",
"(",
")",
"return",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"name2probs",
",",
"orient",
"=",
"'index'",
",",
"columns",
"=",
"states",
")"
] | Normalizes each node marginal likelihoods to convert them to marginal probabilities.
:param states: numpy array of states in the order corresponding to the marginal likelihood arrays
:param tree: ete3.Tree, the tree of interest
:param feature: str, character for which the probabilities are calculated
:return: pandas DataFrame, that maps node names to their marginal likelihoods. | [
"Normalizes",
"each",
"node",
"marginal",
"likelihoods",
"to",
"convert",
"them",
"to",
"marginal",
"probabilities",
"."
] | python | train |
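The per-node normalisation above is an element-wise division by the row sum; a standalone sketch with NumPy and pandas (illustrative values, independent of the ete3 tree the real function expects).

import numpy as np
import pandas as pd

states = np.array(['A', 'C', 'G', 'T'])
# Hypothetical marginal likelihoods for two nodes.
name2lh = {'node_1': np.array([0.2, 0.1, 0.6, 0.1]),
           'node_2': np.array([1e-3, 2e-3, 5e-4, 1.5e-3])}

# Same normalisation as convert_likelihoods_to_probabilities: lh / lh.sum().
name2probs = {name: lh / lh.sum() for name, lh in name2lh.items()}
print(pd.DataFrame.from_dict(name2probs, orient='index', columns=states))  # rows sum to 1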
MisterY/gnucash-portfolio | gnucash_portfolio/model/price_model.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/model/price_model.py#L27-L32 | def parse_value(self, value_string: str):
"""
Parses the amount string.
"""
self.value = Decimal(value_string)
return self.value | [
"def",
"parse_value",
"(",
"self",
",",
"value_string",
":",
"str",
")",
":",
"self",
".",
"value",
"=",
"Decimal",
"(",
"value_string",
")",
"return",
"self",
".",
"value"
] | Parses the amount string. | [
"Parses",
"the",
"amount",
"string",
"."
] | python | train |
annoviko/pyclustering | pyclustering/cluster/birch.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/birch.py#L175-L187 | def __decode_data(self):
"""!
@brief Decodes data from CF-tree features.
"""
self.__clusters = [ [] for _ in range(self.__number_clusters) ];
self.__noise = [];
for index_point in range(0, len(self.__pointer_data)):
(_, cluster_index) = self.__get_nearest_feature(self.__pointer_data[index_point], self.__features);
self.__clusters[cluster_index].append(index_point); | [
"def",
"__decode_data",
"(",
"self",
")",
":",
"self",
".",
"__clusters",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"__number_clusters",
")",
"]",
"self",
".",
"__noise",
"=",
"[",
"]",
"for",
"index_point",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"__pointer_data",
")",
")",
":",
"(",
"_",
",",
"cluster_index",
")",
"=",
"self",
".",
"__get_nearest_feature",
"(",
"self",
".",
"__pointer_data",
"[",
"index_point",
"]",
",",
"self",
".",
"__features",
")",
"self",
".",
"__clusters",
"[",
"cluster_index",
"]",
".",
"append",
"(",
"index_point",
")"
] | !
@brief Decodes data from CF-tree features. | [
"!"
] | python | valid |
amelchio/pysonos | pysonos/core.py | https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L705-L716 | def mute(self):
"""bool: The speaker's mute state.
True if muted, False otherwise.
"""
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return bool(int(mute_state)) | [
"def",
"mute",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"renderingControl",
".",
"GetMute",
"(",
"[",
"(",
"'InstanceID'",
",",
"0",
")",
",",
"(",
"'Channel'",
",",
"'Master'",
")",
"]",
")",
"mute_state",
"=",
"response",
"[",
"'CurrentMute'",
"]",
"return",
"bool",
"(",
"int",
"(",
"mute_state",
")",
")"
] | bool: The speaker's mute state.
True if muted, False otherwise. | [
"bool",
":",
"The",
"speaker",
"s",
"mute",
"state",
"."
] | python | train |
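A usage sketch for the property above, assuming pysonos keeps the SoCo-style constructor that takes a speaker IP address (the address here is a placeholder).

from pysonos import SoCo  # assumed constructor, mirroring the SoCo API this fork follows

zone = SoCo('192.168.1.42')  # hypothetical speaker IP
if zone.mute:
    print('speaker is muted')
else:
    print('speaker is audible')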
saltstack/salt | salt/modules/bcache.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bcache.py#L267-L357 | def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=None, attach=True):
'''
Create BCache cache on a block device.
If blkdiscard is available the entire device will be properly cleared in advance.
CLI example:
.. code-block:: bash
salt '*' bcache.cache_make sdb reserved=10% block_size=4096
:param reserved: if dev is a full device, create a partition table with this size empty.
.. note::
this increases the amount of reserved space available to SSD garbage collectors,
potentially (vastly) increasing performance
:param block_size: Block size of the cache; defaults to devices' logical block size
:param force: Overwrite existing BCache sets
:param attach: Attach all existing backend devices immediately
'''
# TODO: multiple devs == md jbod
# pylint: disable=too-many-return-statements
# ---------------- Preflight checks ----------------
cache = uuid()
if cache:
if not force:
log.error('BCache cache %s is already on the system', cache)
return False
cache = _bdev()
dev = _devbase(dev)
udev = __salt__['udev.env'](dev)
if ('ID_FS_TYPE' in udev or (udev.get('DEVTYPE', None) != 'partition' and 'ID_PART_TABLE_TYPE' in udev)) \
and not force:
log.error('%s already contains data, wipe first or force', dev)
return False
elif reserved is not None and udev.get('DEVTYPE', None) != 'disk':
log.error('Need a partitionable blockdev for reserved to work')
return False
_, block, bucket = _sizes(dev)
if bucket_size is None:
bucket_size = bucket
# TODO: bucket from _sizes() makes no sense
bucket_size = False
if block_size is None:
block_size = block
# ---------------- Still here, start doing destructive stuff ----------------
if cache:
if not stop():
return False
# Wipe the current cache device as well,
# forever ruining any chance of it accidentally popping up again
elif not _wipe(cache):
return False
# Can't do enough wiping
if not _wipe(dev):
return False
if reserved:
cmd = 'parted -m -s -a optimal -- ' \
'/dev/{0} mklabel gpt mkpart bcache-reserved 1M {1} mkpart bcache {1} 100%'.format(dev, reserved)
# if wipe was incomplete & part layout remains the same,
# this is one condition set where udev would make it accidentally popup again
if not _run_all(cmd, 'error', 'Error creating bcache partitions on {0}: %s'.format(dev)):
return False
dev = '{0}2'.format(dev)
# ---------------- Finally, create a cache ----------------
cmd = 'make-bcache --cache /dev/{0} --block {1} --wipe-bcache'.format(dev, block_size)
# Actually bucket_size should always have a value, but for testing 0 is possible as well
if bucket_size:
cmd += ' --bucket {0}'.format(bucket_size)
if not _run_all(cmd, 'error', 'Error creating cache {0}: %s'.format(dev)):
return False
elif not _wait(lambda: uuid() is not False,
'error', 'Cache {0} seemingly created OK, but FS did not activate'.format(dev)):
return False
if attach:
return _alltrue(attach_())
else:
return True | [
"def",
"cache_make",
"(",
"dev",
",",
"reserved",
"=",
"None",
",",
"force",
"=",
"False",
",",
"block_size",
"=",
"None",
",",
"bucket_size",
"=",
"None",
",",
"attach",
"=",
"True",
")",
":",
"# TODO: multiple devs == md jbod",
"# pylint: disable=too-many-return-statements",
"# ---------------- Preflight checks ----------------",
"cache",
"=",
"uuid",
"(",
")",
"if",
"cache",
":",
"if",
"not",
"force",
":",
"log",
".",
"error",
"(",
"'BCache cache %s is already on the system'",
",",
"cache",
")",
"return",
"False",
"cache",
"=",
"_bdev",
"(",
")",
"dev",
"=",
"_devbase",
"(",
"dev",
")",
"udev",
"=",
"__salt__",
"[",
"'udev.env'",
"]",
"(",
"dev",
")",
"if",
"(",
"'ID_FS_TYPE'",
"in",
"udev",
"or",
"(",
"udev",
".",
"get",
"(",
"'DEVTYPE'",
",",
"None",
")",
"!=",
"'partition'",
"and",
"'ID_PART_TABLE_TYPE'",
"in",
"udev",
")",
")",
"and",
"not",
"force",
":",
"log",
".",
"error",
"(",
"'%s already contains data, wipe first or force'",
",",
"dev",
")",
"return",
"False",
"elif",
"reserved",
"is",
"not",
"None",
"and",
"udev",
".",
"get",
"(",
"'DEVTYPE'",
",",
"None",
")",
"!=",
"'disk'",
":",
"log",
".",
"error",
"(",
"'Need a partitionable blockdev for reserved to work'",
")",
"return",
"False",
"_",
",",
"block",
",",
"bucket",
"=",
"_sizes",
"(",
"dev",
")",
"if",
"bucket_size",
"is",
"None",
":",
"bucket_size",
"=",
"bucket",
"# TODO: bucket from _sizes() makes no sense",
"bucket_size",
"=",
"False",
"if",
"block_size",
"is",
"None",
":",
"block_size",
"=",
"block",
"# ---------------- Still here, start doing destructive stuff ----------------",
"if",
"cache",
":",
"if",
"not",
"stop",
"(",
")",
":",
"return",
"False",
"# Wipe the current cache device as well,",
"# forever ruining any chance of it accidentally popping up again",
"elif",
"not",
"_wipe",
"(",
"cache",
")",
":",
"return",
"False",
"# Can't do enough wiping",
"if",
"not",
"_wipe",
"(",
"dev",
")",
":",
"return",
"False",
"if",
"reserved",
":",
"cmd",
"=",
"'parted -m -s -a optimal -- '",
"'/dev/{0} mklabel gpt mkpart bcache-reserved 1M {1} mkpart bcache {1} 100%'",
".",
"format",
"(",
"dev",
",",
"reserved",
")",
"# if wipe was incomplete & part layout remains the same,",
"# this is one condition set where udev would make it accidentally popup again",
"if",
"not",
"_run_all",
"(",
"cmd",
",",
"'error'",
",",
"'Error creating bcache partitions on {0}: %s'",
".",
"format",
"(",
"dev",
")",
")",
":",
"return",
"False",
"dev",
"=",
"'{0}2'",
".",
"format",
"(",
"dev",
")",
"# ---------------- Finally, create a cache ----------------",
"cmd",
"=",
"'make-bcache --cache /dev/{0} --block {1} --wipe-bcache'",
".",
"format",
"(",
"dev",
",",
"block_size",
")",
"# Actually bucket_size should always have a value, but for testing 0 is possible as well",
"if",
"bucket_size",
":",
"cmd",
"+=",
"' --bucket {0}'",
".",
"format",
"(",
"bucket_size",
")",
"if",
"not",
"_run_all",
"(",
"cmd",
",",
"'error'",
",",
"'Error creating cache {0}: %s'",
".",
"format",
"(",
"dev",
")",
")",
":",
"return",
"False",
"elif",
"not",
"_wait",
"(",
"lambda",
":",
"uuid",
"(",
")",
"is",
"not",
"False",
",",
"'error'",
",",
"'Cache {0} seemingly created OK, but FS did not activate'",
".",
"format",
"(",
"dev",
")",
")",
":",
"return",
"False",
"if",
"attach",
":",
"return",
"_alltrue",
"(",
"attach_",
"(",
")",
")",
"else",
":",
"return",
"True"
] | Create BCache cache on a block device.
If blkdiscard is available the entire device will be properly cleared in advance.
CLI example:
.. code-block:: bash
salt '*' bcache.cache_make sdb reserved=10% block_size=4096
:param reserved: if dev is a full device, create a partition table with this size empty.
.. note::
this increases the amount of reserved space available to SSD garbage collectors,
potentially (vastly) increasing performance
:param block_size: Block size of the cache; defaults to devices' logical block size
:param force: Overwrite existing BCache sets
:param attach: Attach all existing backend devices immediately | [
"Create",
"BCache",
"cache",
"on",
"a",
"block",
"device",
".",
"If",
"blkdiscard",
"is",
"available",
"the",
"entire",
"device",
"will",
"be",
"properly",
"cleared",
"in",
"advance",
"."
] | python | train |
hsharrison/pyglet2d | src/pyglet2d.py | https://github.com/hsharrison/pyglet2d/blob/46f610b3c76221bff19e5c0cf3d35d7875ce37a0/src/pyglet2d.py#L336-L343 | def draw(self):
"""Draw the shape in the current OpenGL context.
"""
if self.enabled:
self._vertex_list.colors = self._gl_colors
self._vertex_list.vertices = self._gl_vertices
self._vertex_list.draw(pyglet.gl.GL_TRIANGLES) | [
"def",
"draw",
"(",
"self",
")",
":",
"if",
"self",
".",
"enabled",
":",
"self",
".",
"_vertex_list",
".",
"colors",
"=",
"self",
".",
"_gl_colors",
"self",
".",
"_vertex_list",
".",
"vertices",
"=",
"self",
".",
"_gl_vertices",
"self",
".",
"_vertex_list",
".",
"draw",
"(",
"pyglet",
".",
"gl",
".",
"GL_TRIANGLES",
")"
] | Draw the shape in the current OpenGL context. | [
"Draw",
"the",
"shape",
"in",
"the",
"current",
"OpenGL",
"context",
"."
] | python | valid |
django-userena-ce/django-userena-ce | userena/managers.py | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L220-L234 | def delete_expired_users(self):
"""
Checks for expired users and deletes the ``User`` associated with
it. Skips if the user ``is_staff``.
:return: A list containing the deleted users.
"""
deleted_users = []
for user in get_user_model().objects.filter(is_staff=False,
is_active=False):
if user.userena_signup.activation_key_expired():
deleted_users.append(user)
user.delete()
return deleted_users | [
"def",
"delete_expired_users",
"(",
"self",
")",
":",
"deleted_users",
"=",
"[",
"]",
"for",
"user",
"in",
"get_user_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"is_staff",
"=",
"False",
",",
"is_active",
"=",
"False",
")",
":",
"if",
"user",
".",
"userena_signup",
".",
"activation_key_expired",
"(",
")",
":",
"deleted_users",
".",
"append",
"(",
"user",
")",
"user",
".",
"delete",
"(",
")",
"return",
"deleted_users"
] | Checks for expired users and deletes the ``User`` associated with
it. Skips if the user ``is_staff``.
:return: A list containing the deleted users. | [
"Checks",
"for",
"expired",
"users",
"and",
"delete",
"s",
"the",
"User",
"associated",
"with",
"it",
".",
"Skips",
"if",
"the",
"user",
"is_staff",
"."
] | python | train |
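A hedged sketch of how the manager method above is typically reached, assuming the manager is attached to the UserenaSignup model; the import path is an assumption and the call would normally live in a periodic management command.

from userena.models import UserenaSignup  # assumed model carrying this manager

deleted = UserenaSignup.objects.delete_expired_users()
print('removed %d users with expired activation keys' % len(deleted))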
pip-services3-python/pip-services3-components-python | pip_services3_components/count/CachedCounters.py | https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/count/CachedCounters.py#L142-L167 | def get(self, name, typ):
"""
Gets a counter specified by its name.
If the counter does not exist or its type doesn't match the specified type,
it creates a new one.
:param name: a counter name to retrieve.
:param typ: a counter type.
:return: an existing or newly created counter of the specified type.
"""
if name == None or len(name) == 0:
raise Exception("Counter name was not set")
self._lock.acquire()
try:
counter = self._cache[name] if name in self._cache else None
if counter == None or counter.type != typ:
counter = Counter(name, typ)
self._cache[name] = counter
return counter
finally:
self._lock.release() | [
"def",
"get",
"(",
"self",
",",
"name",
",",
"typ",
")",
":",
"if",
"name",
"==",
"None",
"or",
"len",
"(",
"name",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"\"Counter name was not set\"",
")",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"counter",
"=",
"self",
".",
"_cache",
"[",
"name",
"]",
"if",
"name",
"in",
"self",
".",
"_cache",
"else",
"None",
"if",
"counter",
"==",
"None",
"or",
"counter",
".",
"type",
"!=",
"typ",
":",
"counter",
"=",
"Counter",
"(",
"name",
",",
"typ",
")",
"self",
".",
"_cache",
"[",
"name",
"]",
"=",
"counter",
"return",
"counter",
"finally",
":",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | Gets a counter specified by its name.
If the counter does not exist or its type doesn't match the specified type,
it creates a new one.
:param name: a counter name to retrieve.
:param typ: a counter type.
:return: an existing or newly created counter of the specified type. | [
"Gets",
"a",
"counter",
"specified",
"by",
"its",
"name",
".",
"It",
"counter",
"does",
"not",
"exist",
"or",
"its",
"type",
"doesn",
"t",
"match",
"the",
"specified",
"type",
"it",
"creates",
"a",
"new",
"one",
"."
] | python | train |
markchil/gptools | gptools/utils.py | https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L520-L554 | def random_draw(self, size=None):
"""Draw random samples of the hyperparameters.
Parameters
----------
size : None, int or array-like, optional
The number/shape of samples to draw. If None, only one sample is
returned. Default is None.
"""
if size is None:
size = 1
single_val = True
else:
single_val = False
out_shape = [len(self.bounds)]
try:
out_shape.extend(size)
except TypeError:
out_shape.append(size)
out = scipy.zeros(out_shape)
for j in xrange(0, len(self.bounds)):
if j != 2:
out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
high=self.bounds[j][1],
size=size)
else:
out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
high=out[j - 1, :],
size=size)
if not single_val:
return out
else:
return out.ravel() | [
"def",
"random_draw",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"1",
"single_val",
"=",
"True",
"else",
":",
"single_val",
"=",
"False",
"out_shape",
"=",
"[",
"len",
"(",
"self",
".",
"bounds",
")",
"]",
"try",
":",
"out_shape",
".",
"extend",
"(",
"size",
")",
"except",
"TypeError",
":",
"out_shape",
".",
"append",
"(",
"size",
")",
"out",
"=",
"scipy",
".",
"zeros",
"(",
"out_shape",
")",
"for",
"j",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"self",
".",
"bounds",
")",
")",
":",
"if",
"j",
"!=",
"2",
":",
"out",
"[",
"j",
",",
":",
"]",
"=",
"numpy",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"self",
".",
"bounds",
"[",
"j",
"]",
"[",
"0",
"]",
",",
"high",
"=",
"self",
".",
"bounds",
"[",
"j",
"]",
"[",
"1",
"]",
",",
"size",
"=",
"size",
")",
"else",
":",
"out",
"[",
"j",
",",
":",
"]",
"=",
"numpy",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"self",
".",
"bounds",
"[",
"j",
"]",
"[",
"0",
"]",
",",
"high",
"=",
"out",
"[",
"j",
"-",
"1",
",",
":",
"]",
",",
"size",
"=",
"size",
")",
"if",
"not",
"single_val",
":",
"return",
"out",
"else",
":",
"return",
"out",
".",
"ravel",
"(",
")"
] | Draw random samples of the hyperparameters.
Parameters
----------
size : None, int or array-like, optional
The number/shape of samples to draw. If None, only one sample is
returned. Default is None. | [
"Draw",
"random",
"samples",
"of",
"the",
"hyperparameters",
".",
"Parameters",
"----------",
"size",
":",
"None",
"int",
"or",
"array",
"-",
"like",
"optional",
"The",
"number",
"/",
"shape",
"of",
"samples",
"to",
"draw",
".",
"If",
"None",
"only",
"one",
"sample",
"is",
"returned",
".",
"Default",
"is",
"None",
"."
] | python | train |
72squared/redpipe | redpipe/keyspaces.py | https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L235-L244 | def restorenx(self, name, value, pttl=0):
"""
Restore serialized dump of a key back into redis
:param name: str the name of the redis key
:param value: redis RDB-like serialization
:param pttl: milliseconds till key expires
:return: Future()
"""
return self.eval(lua_restorenx, 1, name, pttl, value) | [
"def",
"restorenx",
"(",
"self",
",",
"name",
",",
"value",
",",
"pttl",
"=",
"0",
")",
":",
"return",
"self",
".",
"eval",
"(",
"lua_restorenx",
",",
"1",
",",
"name",
",",
"pttl",
",",
"value",
")"
] | Restore serialized dump of a key back into redis
:param name: str the name of the redis key
:param value: redis RDB-like serialization
:param pttl: milliseconds till key expires
:return: Future() | [
"Restore",
"serialized",
"dump",
"of",
"a",
"key",
"back",
"into",
"redis"
] | python | train |
blockstack/blockstack-files | blockstack_file/blockstack_file.py | https://github.com/blockstack/blockstack-files/blob/8d88cc48bdf8ed57f17d4bba860e972bde321921/blockstack_file/blockstack_file.py#L442-L470 | def file_verify( sender_blockchain_id, sender_key_id, input_path, sig, config_path=CONFIG_PATH, wallet_keys=None ):
"""
Verify that a file was signed with the given blockchain ID
@config_path should be for the *client*, not blockstack-file
Return {'status': True} on succes
Return {'error': ...} on error
"""
config_dir = os.path.dirname(config_path)
old_key = False
old_key_index = 0
sender_old_key_index = 0
# get the sender key
sender_key_info = file_key_lookup( sender_blockchain_id, None, None, key_id=sender_key_id, config_path=config_path, wallet_keys=wallet_keys )
if 'error' in sender_key_info:
log.error("Failed to look up sender key: %s" % sender_key_info['error'])
return {'error': 'Failed to lookup sender key'}
if 'stale_key_index' in sender_key_info.keys():
old_key = True
sender_old_key_index = sender_key_info['sender_key_index']
# attempt to verify
res = blockstack_gpg.gpg_verify( input_path, sig, sender_key_info, config_dir=config_dir )
if 'error' in res:
log.error("Failed to verify from %s.%s" % (sender_blockchain_id, sender_key_id))
return {'error': 'Failed to verify'}
return {'status': True} | [
"def",
"file_verify",
"(",
"sender_blockchain_id",
",",
"sender_key_id",
",",
"input_path",
",",
"sig",
",",
"config_path",
"=",
"CONFIG_PATH",
",",
"wallet_keys",
"=",
"None",
")",
":",
"config_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_path",
")",
"old_key",
"=",
"False",
"old_key_index",
"=",
"0",
"sender_old_key_index",
"=",
"0",
"# get the sender key ",
"sender_key_info",
"=",
"file_key_lookup",
"(",
"sender_blockchain_id",
",",
"None",
",",
"None",
",",
"key_id",
"=",
"sender_key_id",
",",
"config_path",
"=",
"config_path",
",",
"wallet_keys",
"=",
"wallet_keys",
")",
"if",
"'error'",
"in",
"sender_key_info",
":",
"log",
".",
"error",
"(",
"\"Failed to look up sender key: %s\"",
"%",
"sender_key_info",
"[",
"'error'",
"]",
")",
"return",
"{",
"'error'",
":",
"'Failed to lookup sender key'",
"}",
"if",
"'stale_key_index'",
"in",
"sender_key_info",
".",
"keys",
"(",
")",
":",
"old_key",
"=",
"True",
"sender_old_key_index",
"=",
"sender_key_info",
"[",
"'sender_key_index'",
"]",
"# attempt to verify ",
"res",
"=",
"blockstack_gpg",
".",
"gpg_verify",
"(",
"input_path",
",",
"sig",
",",
"sender_key_info",
",",
"config_dir",
"=",
"config_dir",
")",
"if",
"'error'",
"in",
"res",
":",
"log",
".",
"error",
"(",
"\"Failed to verify from %s.%s\"",
"%",
"(",
"sender_blockchain_id",
",",
"sender_key_id",
")",
")",
"return",
"{",
"'error'",
":",
"'Failed to verify'",
"}",
"return",
"{",
"'status'",
":",
"True",
"}"
] | Verify that a file was signed with the given blockchain ID
@config_path should be for the *client*, not blockstack-file
Return {'status': True} on success
Return {'error': ...} on error | [
"Verify",
"that",
"a",
"file",
"was",
"signed",
"with",
"the",
"given",
"blockchain",
"ID"
] | python | train |
lucasmaystre/choix | choix/lsr.py | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L279-L314 | def ilsr_rankings(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_rankings, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol) | [
"def",
"ilsr_rankings",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"fun",
"=",
"functools",
".",
"partial",
"(",
"lsr_rankings",
",",
"n_items",
"=",
"n_items",
",",
"data",
"=",
"data",
",",
"alpha",
"=",
"alpha",
")",
"return",
"_ilsr",
"(",
"fun",
",",
"initial_params",
",",
"max_iter",
",",
"tol",
")"
] | Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters. | [
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"using",
"I",
"-",
"LSR",
"."
] | python | train |
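A small usage sketch for the estimator above; each ranking lists item indices from best to worst, following the choix ranking-data format referenced in the docstring.

import choix

n_items = 4
# Three partial rankings over items 0..3 (winner first).
data = [
    (0, 1, 2),
    (2, 3),
    (0, 3, 1),
]

params = choix.ilsr_rankings(n_items, data, alpha=0.01)
print(params)  # one strength parameter per item; larger means stronger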
openstack/networking-cisco | networking_cisco/ml2_drivers/ucsm/deprecated_network_driver.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/ucsm/deprecated_network_driver.py#L1009-L1020 | def ucs_manager_disconnect(self, handle, ucsm_ip):
"""Disconnects from the UCS Manager.
After the disconnect, the handle associated with this connection
is no longer valid.
"""
try:
handle.Logout()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmDisconnectFailed(ucsm_ip=ucsm_ip, exc=e) | [
"def",
"ucs_manager_disconnect",
"(",
"self",
",",
"handle",
",",
"ucsm_ip",
")",
":",
"try",
":",
"handle",
".",
"Logout",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# Raise a Neutron exception. Include a description of",
"# the original exception.",
"raise",
"cexc",
".",
"UcsmDisconnectFailed",
"(",
"ucsm_ip",
"=",
"ucsm_ip",
",",
"exc",
"=",
"e",
")"
] | Disconnects from the UCS Manager.
After the disconnect, the handle associated with this connection
is no longer valid. | [
"Disconnects",
"from",
"the",
"UCS",
"Manager",
"."
] | python | train |
hyperledger/indy-plenum | plenum/server/monitor.py | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L308-L326 | def reset(self):
"""
Reset the monitor. Sets all monitored values to defaults.
"""
logger.debug("{}'s Monitor being reset".format(self))
instances_ids = self.instances.started.keys()
self.numOrderedRequests = {inst_id: (0, 0) for inst_id in instances_ids}
self.requestTracker.reset()
self.masterReqLatencies = {}
self.masterReqLatencyTooHigh = False
self.totalViewChanges += 1
self.lastKnownTraffic = self.calculateTraffic()
if self.acc_monitor:
self.acc_monitor.reset()
for i in instances_ids:
rm = self.create_throughput_measurement(self.config)
self.throughputs[i] = rm
lm = self.latency_measurement_cls(self.config)
self.clientAvgReqLatencies[i] = lm | [
"def",
"reset",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"{}'s Monitor being reset\"",
".",
"format",
"(",
"self",
")",
")",
"instances_ids",
"=",
"self",
".",
"instances",
".",
"started",
".",
"keys",
"(",
")",
"self",
".",
"numOrderedRequests",
"=",
"{",
"inst_id",
":",
"(",
"0",
",",
"0",
")",
"for",
"inst_id",
"in",
"instances_ids",
"}",
"self",
".",
"requestTracker",
".",
"reset",
"(",
")",
"self",
".",
"masterReqLatencies",
"=",
"{",
"}",
"self",
".",
"masterReqLatencyTooHigh",
"=",
"False",
"self",
".",
"totalViewChanges",
"+=",
"1",
"self",
".",
"lastKnownTraffic",
"=",
"self",
".",
"calculateTraffic",
"(",
")",
"if",
"self",
".",
"acc_monitor",
":",
"self",
".",
"acc_monitor",
".",
"reset",
"(",
")",
"for",
"i",
"in",
"instances_ids",
":",
"rm",
"=",
"self",
".",
"create_throughput_measurement",
"(",
"self",
".",
"config",
")",
"self",
".",
"throughputs",
"[",
"i",
"]",
"=",
"rm",
"lm",
"=",
"self",
".",
"latency_measurement_cls",
"(",
"self",
".",
"config",
")",
"self",
".",
"clientAvgReqLatencies",
"[",
"i",
"]",
"=",
"lm"
] | Reset the monitor. Sets all monitored values to defaults. | [
"Reset",
"the",
"monitor",
".",
"Sets",
"all",
"monitored",
"values",
"to",
"defaults",
"."
] | python | train |
saltstack/salt | salt/modules/parallels.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/parallels.py#L358-L374 | def reset(name, runas=None):
'''
Reset a VM by performing a hard shutdown and then a restart
:param str name:
Name/ID of VM to reset
:param str runas:
The user that the prlctl command will be run as
Example:
.. code-block:: bash
salt '*' parallels.reset macvm runas=macdev
'''
return prlctl('reset', salt.utils.data.decode(name), runas=runas) | [
"def",
"reset",
"(",
"name",
",",
"runas",
"=",
"None",
")",
":",
"return",
"prlctl",
"(",
"'reset'",
",",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"name",
")",
",",
"runas",
"=",
"runas",
")"
] | Reset a VM by performing a hard shutdown and then a restart
:param str name:
Name/ID of VM to reset
:param str runas:
The user that the prlctl command will be run as
Example:
.. code-block:: bash
salt '*' parallels.reset macvm runas=macdev | [
"Reset",
"a",
"VM",
"by",
"performing",
"a",
"hard",
"shutdown",
"and",
"then",
"a",
"restart"
] | python | train |
aholkner/bacon | native/Vendor/FreeType/src/tools/docmaker/tohtml.py | https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/native/Vendor/FreeType/src/tools/docmaker/tohtml.py#L287-L301 | def make_html_para( self, words ):
""" convert words of a paragraph into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
# convert `...' quotations into real left and right single quotes
line = re.sub( r"(^|\W)`(.*?)'(\W|$)", \
r'\1‘\2’\3', \
line )
# convert tilde into non-breakable space
line = string.replace( line, "~", " " )
return para_header + line + para_footer | [
"def",
"make_html_para",
"(",
"self",
",",
"words",
")",
":",
"line",
"=",
"\"\"",
"if",
"words",
":",
"line",
"=",
"self",
".",
"make_html_word",
"(",
"words",
"[",
"0",
"]",
")",
"for",
"word",
"in",
"words",
"[",
"1",
":",
"]",
":",
"line",
"=",
"line",
"+",
"\" \"",
"+",
"self",
".",
"make_html_word",
"(",
"word",
")",
"# convert `...' quotations into real left and right single quotes",
"line",
"=",
"re",
".",
"sub",
"(",
"r\"(^|\\W)`(.*?)'(\\W|$)\"",
",",
"r'\\1‘\\2’\\3'",
",",
"line",
")",
"# convert tilde into non-breakable space",
"line",
"=",
"string",
".",
"replace",
"(",
"line",
",",
"\"~\"",
",",
"\" \"",
")",
"return",
"para_header",
"+",
"line",
"+",
"para_footer"
] | convert words of a paragraph into tagged HTML text, handle xrefs | [
"convert",
"words",
"of",
"a",
"paragraph",
"into",
"tagged",
"HTML",
"text",
"handle",
"xrefs"
] | python | test |
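The two substitutions in the body above can be exercised on their own; a standalone Python 3 sketch of the quote and tilde handling (the original file is Python 2, hence string.replace).

import re

line = "see `FT_Get_Glyph' for details, use~this"

# `...' quotations become real left/right single quotes (U+2018 / U+2019).
line = re.sub(r"(^|\W)`(.*?)'(\W|$)", "\\1\u2018\\2\u2019\\3", line)

# A tilde becomes a non-breaking space (U+00A0).
line = line.replace("~", "\u00a0")

print(line)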
onelogin/python3-saml | src/onelogin/saml2/settings.py | https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/settings.py#L45-L59 | def validate_url(url):
"""
Auxiliary method to validate an urllib
:param url: An url to be validated
:type url: string
:returns: True if the url is valid
:rtype: bool
"""
scheme = url.split('://')[0].lower()
if scheme not in url_schemes:
return False
if not bool(url_regex.search(url)):
return False
return True | [
"def",
"validate_url",
"(",
"url",
")",
":",
"scheme",
"=",
"url",
".",
"split",
"(",
"'://'",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"if",
"scheme",
"not",
"in",
"url_schemes",
":",
"return",
"False",
"if",
"not",
"bool",
"(",
"url_regex",
".",
"search",
"(",
"url",
")",
")",
":",
"return",
"False",
"return",
"True"
] | Auxiliary method to validate an urllib
:param url: An url to be validated
:type url: string
:returns: True if the url is valid
:rtype: bool | [
"Auxiliary",
"method",
"to",
"validate",
"an",
"urllib",
":",
"param",
"url",
":",
"An",
"url",
"to",
"be",
"validated",
":",
"type",
"url",
":",
"string",
":",
"returns",
":",
"True",
"if",
"the",
"url",
"is",
"valid",
":",
"rtype",
":",
"bool"
] | python | train |
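Usage sketch for the validator above; url_schemes and url_regex are module-level names defined elsewhere in the same settings module, and the import path is an assumption.

from onelogin.saml2.settings import validate_url  # assumed import path

print(validate_url('https://idp.example.com/sso'))  # expected True
print(validate_url('not-a-url'))                    # expected False: no allowed scheme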
Jaza/flask-restplus-patched | flask_restplus_patched/resource.py | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/resource.py#L16-L30 | def _apply_decorator_to_methods(cls, decorator):
"""
This helper can apply a given decorator to all methods on the current
Resource.
NOTE: In contrast to ``Resource.method_decorators``, which has a
similar use-case, this method applies decorators directly and override
methods in-place, while the decorators listed in
``Resource.method_decorators`` are applied on every request which is
quite a waste of resources.
"""
for method in cls.methods:
method_name = method.lower()
decorated_method_func = decorator(getattr(cls, method_name))
setattr(cls, method_name, decorated_method_func) | [
"def",
"_apply_decorator_to_methods",
"(",
"cls",
",",
"decorator",
")",
":",
"for",
"method",
"in",
"cls",
".",
"methods",
":",
"method_name",
"=",
"method",
".",
"lower",
"(",
")",
"decorated_method_func",
"=",
"decorator",
"(",
"getattr",
"(",
"cls",
",",
"method_name",
")",
")",
"setattr",
"(",
"cls",
",",
"method_name",
",",
"decorated_method_func",
")"
] | This helper can apply a given decorator to all methods on the current
Resource.
NOTE: In contrast to ``Resource.method_decorators``, which has a
similar use-case, this method applies decorators directly and override
methods in-place, while the decorators listed in
``Resource.method_decorators`` are applied on every request which is
quite a waste of resources. | [
"This",
"helper",
"can",
"apply",
"a",
"given",
"decorator",
"to",
"all",
"methods",
"on",
"the",
"current",
"Resource",
"."
] | python | train |
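A hypothetical sketch of what the helper above does, using a stand-in decorator; the import path and the assumption that the helper is exposed as a classmethod are not confirmed by the snippet itself.

from functools import wraps
from flask_restplus_patched import Resource  # assumed import path

def log_calls(func):
    # Stand-in decorator applied once per HTTP method.
    @wraps(func)
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper

class UserResource(Resource):
    # Flask populates cls.methods from the verbs defined below.
    def get(self):
        return {'user': 'alice'}

    def post(self):
        return {'created': True}

# Overrides .get and .post in place with their decorated versions.
UserResource._apply_decorator_to_methods(log_calls)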
joshblum/beanstalk-dispatch | beanstalk_dispatch/safe_task.py | https://github.com/joshblum/beanstalk-dispatch/blob/1cc57e5496bb7114dba6de7c7988e5680d791603/beanstalk_dispatch/safe_task.py#L53-L73 | def process(self, *args, **kwargs):
"""
args: list of arguments for the `runnable`
kwargs: dictionary of arguments for the `runnable`
"""
try:
timeout_seconds = self.timeout_timedelta.total_seconds()
with timeout(self.run, timeout_seconds) as run:
run(*args, **kwargs)
except Exception as e:
if self.verbose:
if isinstance(e, TimeoutError):
logger.error('SafeTask timed out: %s', e, exc_info=True)
else:
logger.error('Error running SafeTask: %s',
e, exc_info=True)
self.on_error(e, *args, **kwargs)
else:
self.on_success(*args, **kwargs)
finally:
self.on_completion(*args, **kwargs) | [
"def",
"process",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"timeout_seconds",
"=",
"self",
".",
"timeout_timedelta",
".",
"total_seconds",
"(",
")",
"with",
"timeout",
"(",
"self",
".",
"run",
",",
"timeout_seconds",
")",
"as",
"run",
":",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"self",
".",
"verbose",
":",
"if",
"isinstance",
"(",
"e",
",",
"TimeoutError",
")",
":",
"logger",
".",
"error",
"(",
"'SafeTask timed out: %s'",
",",
"e",
",",
"exc_info",
"=",
"True",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Error running SafeTask: %s'",
",",
"e",
",",
"exc_info",
"=",
"True",
")",
"self",
".",
"on_error",
"(",
"e",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"self",
".",
"on_success",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"self",
".",
"on_completion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | args: list of arguments for the `runnable`
kwargs: dictionary of arguments for the `runnable` | [
"args",
":",
"list",
"of",
"arguments",
"for",
"the",
"runnable",
"kwargs",
":",
"dictionary",
"of",
"arguments",
"for",
"the",
"runnable"
] | python | train |
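A hedged sketch of a concrete task built on the class above; the run/on_success/on_error/on_completion hook names follow the calls visible in process(), while the import path and no-argument constructor are assumptions.

from beanstalk_dispatch.safe_task import SafeTask  # assumed import path

class SendEmailTask(SafeTask):
    def run(self, recipient, subject=''):
        # The actual work; may raise or exceed the timeout.
        print('sending to', recipient, 'subject:', subject)

    def on_success(self, recipient, subject=''):
        print('sent OK')

    def on_error(self, exception, recipient, subject=''):
        print('failed:', exception)

    def on_completion(self, recipient, subject=''):
        print('done, success or not')

task = SendEmailTask()
task.process('user@example.com', subject='hello')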
mckib2/rawdatarinator | rawdatarinator/quickview.py | https://github.com/mckib2/rawdatarinator/blob/03a85fd8f5e380b424027d28e97972bd7a6a3f1b/rawdatarinator/quickview.py#L7-L53 | def quickview(filename,
noIFFT=False):
'''
Display processed MRI data from `.hdf5`, `.npz`, or `.dat` files. No arguments displays the IFFT of the k-space data. The type of file is guessed by the file extension (i.e., if extension is `.dat` then readMeasData15 will be run to get the data).
Command-line Options:
-nifft (no IFFT)
Display k-space data, log magnitude and phase plots.
'''
if filename.endswith('.npz'):
data = np.load(filename)
elif filename.endswith('.dat'):
from rawdatarinator.readMeasDataVB15 import readMeasDataVB15 as rmd
data = rmd(filename)
elif filename.endswith('.mat'):
import scipy.io
data = scipy.io.loadmat('file.mat')
else:
data = h5py.File(filename,'r')
if 'kSpace' in data:
key = 'kSpace'
else:
key = 'imSpace'
noIFFT = not noIFFT
# Average over all the averages, use first coil
coil = 0
num_avgs = data[key].shape[2]
avg = (np.squeeze(np.sum(data[key],axis=2))/num_avgs)[:,:,coil]
if noIFFT is False:
imData = np.fft.ifftshift(np.fft.ifft2(avg))
plt.imshow(np.absolute(imData),cmap='gray')
plt.title('Image Data')
else:
mag = np.log(np.absolute(avg))
phase = np.angle(avg)
f,(ax1,ax2) = plt.subplots(1,2,sharey=True)
ax1.imshow(mag,cmap='gray')
ax2.imshow(phase,cmap='gray')
ax1.set_title('log(Magnitude)')
ax2.set_title('Phase')
plt.show() | [
"def",
"quickview",
"(",
"filename",
",",
"noIFFT",
"=",
"False",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.npz'",
")",
":",
"data",
"=",
"np",
".",
"load",
"(",
"filename",
")",
"elif",
"filename",
".",
"endswith",
"(",
"'.dat'",
")",
":",
"from",
"rawdatarinator",
".",
"readMeasDataVB15",
"import",
"readMeasDataVB15",
"as",
"rmd",
"data",
"=",
"rmd",
"(",
"filename",
")",
"elif",
"filename",
".",
"endswith",
"(",
"'.mat'",
")",
":",
"import",
"scipy",
".",
"io",
"data",
"=",
"scipy",
".",
"io",
".",
"loadmat",
"(",
"'file.mat'",
")",
"else",
":",
"data",
"=",
"h5py",
".",
"File",
"(",
"filename",
",",
"'r'",
")",
"if",
"'kSpace'",
"in",
"data",
":",
"key",
"=",
"'kSpace'",
"else",
":",
"key",
"=",
"'imSpace'",
"noIFFT",
"=",
"not",
"noIFFT",
"# Average over all the averages, use first coil",
"coil",
"=",
"0",
"num_avgs",
"=",
"data",
"[",
"key",
"]",
".",
"shape",
"[",
"2",
"]",
"avg",
"=",
"(",
"np",
".",
"squeeze",
"(",
"np",
".",
"sum",
"(",
"data",
"[",
"key",
"]",
",",
"axis",
"=",
"2",
")",
")",
"/",
"num_avgs",
")",
"[",
":",
",",
":",
",",
"coil",
"]",
"if",
"noIFFT",
"is",
"False",
":",
"imData",
"=",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"np",
".",
"fft",
".",
"ifft2",
"(",
"avg",
")",
")",
"plt",
".",
"imshow",
"(",
"np",
".",
"absolute",
"(",
"imData",
")",
",",
"cmap",
"=",
"'gray'",
")",
"plt",
".",
"title",
"(",
"'Image Data'",
")",
"else",
":",
"mag",
"=",
"np",
".",
"log",
"(",
"np",
".",
"absolute",
"(",
"avg",
")",
")",
"phase",
"=",
"np",
".",
"angle",
"(",
"avg",
")",
"f",
",",
"(",
"ax1",
",",
"ax2",
")",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"2",
",",
"sharey",
"=",
"True",
")",
"ax1",
".",
"imshow",
"(",
"mag",
",",
"cmap",
"=",
"'gray'",
")",
"ax2",
".",
"imshow",
"(",
"phase",
",",
"cmap",
"=",
"'gray'",
")",
"ax1",
".",
"set_title",
"(",
"'log(Magnitude)'",
")",
"ax2",
".",
"set_title",
"(",
"'Phase'",
")",
"plt",
".",
"show",
"(",
")"
] | Display processed MRI data from `.hdf5`, `.npz`, or `.dat` files. No arguments displays the IFFT of the k-space data. The type of file is guessed by the file extension (i.e., if extension is `.dat` then readMeasData15 will be run to get the data).
Command-line Options:
-nifft (no IFFT)
Display k-space data, log magnitude and phase plots. | [
"Display",
"processed",
"MRI",
"data",
"from",
".",
"hdf5",
".",
"npz",
"or",
".",
"dat",
"files",
".",
"No",
"arguments",
"displays",
"the",
"IFFT",
"of",
"the",
"k",
"-",
"space",
"data",
".",
"The",
"type",
"of",
"file",
"is",
"guessed",
"by",
"the",
"file",
"extension",
"(",
"i",
".",
"e",
".",
"if",
"extension",
"is",
".",
"dat",
"then",
"readMeasData15",
"will",
"be",
"run",
"to",
"get",
"the",
"data",
")",
"."
] | python | train |
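Usage sketch for the viewer above; the file name is a placeholder.

from rawdatarinator.quickview import quickview

# Default view of a Siemens raw-data file.
quickview('meas_MID_123.dat')

# Flip the k-space/image display mode (the -nifft option described above).
quickview('meas_MID_123.dat', noIFFT=True)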
googleapis/google-cloud-python | logging/google/cloud/logging/sink.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/sink.py#L182-L206 | def update(self, client=None, unique_writer_identity=False):
"""API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
"""
client = self._require_client(client)
resource = client.sinks_api.sink_update(
self.project,
self.name,
self.filter_,
self.destination,
unique_writer_identity=unique_writer_identity,
)
self._update_from_api_repr(resource) | [
"def",
"update",
"(",
"self",
",",
"client",
"=",
"None",
",",
"unique_writer_identity",
"=",
"False",
")",
":",
"client",
"=",
"self",
".",
"_require_client",
"(",
"client",
")",
"resource",
"=",
"client",
".",
"sinks_api",
".",
"sink_update",
"(",
"self",
".",
"project",
",",
"self",
".",
"name",
",",
"self",
".",
"filter_",
",",
"self",
".",
"destination",
",",
"unique_writer_identity",
"=",
"unique_writer_identity",
",",
")",
"self",
".",
"_update_from_api_repr",
"(",
"resource",
")"
] | API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink. | [
"API",
"call",
":",
"update",
"sink",
"configuration",
"via",
"a",
"PUT",
"request"
] | python | train |
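A usage sketch against the v1-style google-cloud-logging client API this entry belongs to; the sink name, filter and bucket are placeholders.

from google.cloud import logging

client = logging.Client()
sink = client.sink('error-export',
                   filter_='severity>=ERROR',
                   destination='storage.googleapis.com/my-log-bucket')

# Change the filter locally, then push it with a PUT, requesting a
# dedicated writer identity for the sink.
sink.filter_ = 'severity>=WARNING'
sink.update(unique_writer_identity=True)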
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L100-L168 | def main():
"""Script entrypoint."""
# Parse the arguments
parser = argparse.ArgumentParser(
description='Convert MSBuild XML to JSON format')
parser.add_argument(
'-t', '--toolchain', help='The name of the toolchain', required=True)
parser.add_argument(
'-o', '--output', help='The output directory', default='')
parser.add_argument(
'-r',
'--overwrite',
help='Whether previously output should be overwritten',
dest='overwrite',
action='store_true')
parser.set_defaults(overwrite=False)
parser.add_argument(
'-d',
'--debug',
help="Debug tool output",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument(
'-v',
'--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO)
parser.add_argument('input', help='The input files', nargs='+')
args = parser.parse_args()
toolchain = args.toolchain
logging.basicConfig(level=args.loglevel)
logging.info('Creating %s toolchain files', toolchain)
values = {}
# Iterate through the inputs
for input in args.input:
input = __get_path(input)
read_msbuild_xml(input, values)
# Determine if the output directory needs to be created
output_dir = __get_path(args.output)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.info('Created output directory %s', output_dir)
for key, value in values.items():
output_path = __output_path(toolchain, key, output_dir)
if os.path.exists(output_path) and not args.overwrite:
logging.info('Comparing previous output to current')
__merge_json_values(value, read_msbuild_json(output_path))
else:
logging.info('Original output will be overwritten')
logging.info('Writing MS Build JSON file at %s', output_path)
__write_json_file(output_path, value) | [
"def",
"main",
"(",
")",
":",
"# Parse the arguments",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Convert MSBuild XML to JSON format'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--toolchain'",
",",
"help",
"=",
"'The name of the toolchain'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output'",
",",
"help",
"=",
"'The output directory'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--overwrite'",
",",
"help",
"=",
"'Whether previously output should be overwritten'",
",",
"dest",
"=",
"'overwrite'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"set_defaults",
"(",
"overwrite",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"help",
"=",
"\"Debug tool output\"",
",",
"action",
"=",
"\"store_const\"",
",",
"dest",
"=",
"\"loglevel\"",
",",
"const",
"=",
"logging",
".",
"DEBUG",
",",
"default",
"=",
"logging",
".",
"WARNING",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"help",
"=",
"\"Verbose output\"",
",",
"action",
"=",
"\"store_const\"",
",",
"dest",
"=",
"\"loglevel\"",
",",
"const",
"=",
"logging",
".",
"INFO",
")",
"parser",
".",
"add_argument",
"(",
"'input'",
",",
"help",
"=",
"'The input files'",
",",
"nargs",
"=",
"'+'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"toolchain",
"=",
"args",
".",
"toolchain",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"args",
".",
"loglevel",
")",
"logging",
".",
"info",
"(",
"'Creating %s toolchain files'",
",",
"toolchain",
")",
"values",
"=",
"{",
"}",
"# Iterate through the inputs",
"for",
"input",
"in",
"args",
".",
"input",
":",
"input",
"=",
"__get_path",
"(",
"input",
")",
"read_msbuild_xml",
"(",
"input",
",",
"values",
")",
"# Determine if the output directory needs to be created",
"output_dir",
"=",
"__get_path",
"(",
"args",
".",
"output",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"output_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"output_dir",
")",
"logging",
".",
"info",
"(",
"'Created output directory %s'",
",",
"output_dir",
")",
"for",
"key",
",",
"value",
"in",
"values",
".",
"items",
"(",
")",
":",
"output_path",
"=",
"__output_path",
"(",
"toolchain",
",",
"key",
",",
"output_dir",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_path",
")",
"and",
"not",
"args",
".",
"overwrite",
":",
"logging",
".",
"info",
"(",
"'Comparing previous output to current'",
")",
"__merge_json_values",
"(",
"value",
",",
"read_msbuild_json",
"(",
"output_path",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Original output will be overwritten'",
")",
"logging",
".",
"info",
"(",
"'Writing MS Build JSON file at %s'",
",",
"output_path",
")",
"__write_json_file",
"(",
"output_path",
",",
"value",
")"
] | Script entrypoint. | [
"Script",
"entrypoint",
"."
] | python | train |
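An invocation sketch grounded in the argparse definitions above; the module name and input file names are placeholders.

import sys
from cmConvertMSBuildXMLToJSON import main  # assumed module name, from the file above

# Equivalent to: python cmConvertMSBuildXMLToJSON.py -t v141 -o out -v cl.xml link.xml
sys.argv = ['cmConvertMSBuildXMLToJSON', '-t', 'v141', '-o', 'out', '-v', 'cl.xml', 'link.xml']
main()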
abarker/pdfCropMargins | src/pdfCropMargins/main_pdfCropMargins.py | https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L148-L205 | def get_full_page_box_assigning_media_and_crop(page):
"""This returns whatever PDF box was selected (by the user option
'--fullPageBox') to represent the full page size. All cropping is done
relative to this box. The default selection option is the MediaBox
intersected with the CropBox so multiple crops work as expected. The
argument page should be a pyPdf page object. This function also by default
sets the MediaBox and CropBox to the full-page size and saves the old values
in the same page namespace, and so it should only be called once for each
page. It returns a RectangleObject box."""
# Find the page rotation angle (degrees).
# Note rotation is clockwise, and four values are allowed: 0 90 180 270
try:
rotation = page["/Rotate"].getObject() # this works, needs try
#rotation = page.get("/Rotate", 0) # from the PyPDF2 source, default 0
except KeyError:
rotation = 0
while rotation >= 360: rotation -= 360
while rotation < 0: rotation += 360
# Save the rotation value in the page's namespace so we can restore it later.
page.rotationAngle = rotation
# Un-rotate the page, leaving it with a rotation of 0.
page.rotateClockwise(-rotation)
# Save copies of some values in the page's namespace, to possibly restore later.
page.originalMediaBox = page.mediaBox
page.originalCropBox = page.cropBox
first_loop = True
for box_string in args.fullPageBox:
if box_string == "m": f_box = page.mediaBox
if box_string == "c": f_box = page.cropBox
if box_string == "t": f_box = page.trimBox
if box_string == "a": f_box = page.artBox
if box_string == "b": f_box = page.bleedBox
# Take intersection over all chosen boxes.
if first_loop:
full_box = f_box
else:
full_box = intersect_boxes(full_box, f_box)
first_loop = False
# Do any absolute pre-cropping specified for the page (after modifying any
# absolutePreCrop arguments to take into account rotations to the page).
a = mod_box_for_rotation(args.absolutePreCrop, rotation)
full_box = RectangleObject([float(full_box.lowerLeft[0]) + a[0],
float(full_box.lowerLeft[1]) + a[1],
float(full_box.upperRight[0]) - a[2],
float(full_box.upperRight[1]) - a[3]])
page.mediaBox = full_box
page.cropBox = full_box
return full_box | [
"def",
"get_full_page_box_assigning_media_and_crop",
"(",
"page",
")",
":",
"# Find the page rotation angle (degrees).",
"# Note rotation is clockwise, and four values are allowed: 0 90 180 270",
"try",
":",
"rotation",
"=",
"page",
"[",
"\"/Rotate\"",
"]",
".",
"getObject",
"(",
")",
"# this works, needs try",
"#rotation = page.get(\"/Rotate\", 0) # from the PyPDF2 source, default 0",
"except",
"KeyError",
":",
"rotation",
"=",
"0",
"while",
"rotation",
">=",
"360",
":",
"rotation",
"-=",
"360",
"while",
"rotation",
"<",
"0",
":",
"rotation",
"+=",
"360",
"# Save the rotation value in the page's namespace so we can restore it later.",
"page",
".",
"rotationAngle",
"=",
"rotation",
"# Un-rotate the page, leaving it with an rotation of 0.",
"page",
".",
"rotateClockwise",
"(",
"-",
"rotation",
")",
"# Save copies of some values in the page's namespace, to possibly restore later.",
"page",
".",
"originalMediaBox",
"=",
"page",
".",
"mediaBox",
"page",
".",
"originalCropBox",
"=",
"page",
".",
"cropBox",
"first_loop",
"=",
"True",
"for",
"box_string",
"in",
"args",
".",
"fullPageBox",
":",
"if",
"box_string",
"==",
"\"m\"",
":",
"f_box",
"=",
"page",
".",
"mediaBox",
"if",
"box_string",
"==",
"\"c\"",
":",
"f_box",
"=",
"page",
".",
"cropBox",
"if",
"box_string",
"==",
"\"t\"",
":",
"f_box",
"=",
"page",
".",
"trimBox",
"if",
"box_string",
"==",
"\"a\"",
":",
"f_box",
"=",
"page",
".",
"artBox",
"if",
"box_string",
"==",
"\"b\"",
":",
"f_box",
"=",
"page",
".",
"bleedBox",
"# Take intersection over all chosen boxes.",
"if",
"first_loop",
":",
"full_box",
"=",
"f_box",
"else",
":",
"full_box",
"=",
"intersect_boxes",
"(",
"full_box",
",",
"f_box",
")",
"first_loop",
"=",
"False",
"# Do any absolute pre-cropping specified for the page (after modifying any",
"# absolutePreCrop arguments to take into account rotations to the page).",
"a",
"=",
"mod_box_for_rotation",
"(",
"args",
".",
"absolutePreCrop",
",",
"rotation",
")",
"full_box",
"=",
"RectangleObject",
"(",
"[",
"float",
"(",
"full_box",
".",
"lowerLeft",
"[",
"0",
"]",
")",
"+",
"a",
"[",
"0",
"]",
",",
"float",
"(",
"full_box",
".",
"lowerLeft",
"[",
"1",
"]",
")",
"+",
"a",
"[",
"1",
"]",
",",
"float",
"(",
"full_box",
".",
"upperRight",
"[",
"0",
"]",
")",
"-",
"a",
"[",
"2",
"]",
",",
"float",
"(",
"full_box",
".",
"upperRight",
"[",
"1",
"]",
")",
"-",
"a",
"[",
"3",
"]",
"]",
")",
"page",
".",
"mediaBox",
"=",
"full_box",
"page",
".",
"cropBox",
"=",
"full_box",
"return",
"full_box"
] | This returns whatever PDF box was selected (by the user option
'--fullPageBox') to represent the full page size. All cropping is done
relative to this box. The default selection option is the MediaBox
intersected with the CropBox so multiple crops work as expected. The
argument page should be a pyPdf page object. This function also by default
sets the MediaBox and CropBox to the full-page size and saves the old values
in the same page namespace, and so it should only be called once for each
page. It returns a RectangleObject box. | [
"This",
"returns",
"whatever",
"PDF",
"box",
"was",
"selected",
"(",
"by",
"the",
"user",
"option",
"--",
"fullPageBox",
")",
"to",
"represent",
"the",
"full",
"page",
"size",
".",
"All",
"cropping",
"is",
"done",
"relative",
"to",
"this",
"box",
".",
"The",
"default",
"selection",
"option",
"is",
"the",
"MediaBox",
"intersected",
"with",
"the",
"CropBox",
"so",
"multiple",
"crops",
"work",
"as",
"expected",
".",
"The",
"argument",
"page",
"should",
"be",
"a",
"pyPdf",
"page",
"object",
".",
"This",
"function",
"also",
"by",
"default",
"sets",
"the",
"MediaBox",
"and",
"CropBox",
"to",
"the",
"full",
"-",
"page",
"size",
"and",
"saves",
"the",
"old",
"values",
"in",
"the",
"same",
"page",
"namespace",
"and",
"so",
"it",
"should",
"only",
"be",
"called",
"once",
"for",
"each",
"page",
".",
"It",
"returns",
"a",
"RectangleObject",
"box",
"."
] | python | train |
twitterdev/twitter-python-ads-sdk | twitter_ads/client.py | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/client.py#L72-L80 | def trace():
"""Enables and disables request tracing."""
def fget(self):
return self._options.get('trace', None)
def fset(self, value):
self._options['trace'] = value
return locals() | [
"def",
"trace",
"(",
")",
":",
"def",
"fget",
"(",
"self",
")",
":",
"return",
"self",
".",
"_options",
".",
"get",
"(",
"'trace'",
",",
"None",
")",
"def",
"fset",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_options",
"[",
"'trace'",
"]",
"=",
"value",
"return",
"locals",
"(",
")"
] | Enables and disables request tracing. | [
"Enables",
"and",
"disables",
"request",
"tracing",
"."
] | python | train |
maas/python-libmaas | maas/client/viscera/__init__.py | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L488-L518 | def ManagedCreate(super_cls):
"""Dynamically creates a `create` method for a `ObjectSet.Managed` class
that calls the `super_cls.create`.
The first positional argument that is passed to the `super_cls.create` is
the `_manager` that was set using `ObjectSet.Managed`. The created object
is added to the `ObjectSet.Managed` and also placed in the correct
`_data[field]` and `_orig_data[field]` for the `_manager` object.
"""
@wraps(super_cls.create)
async def _create(self, *args, **kwargs):
cls = type(self)
manager = getattr(cls, '_manager', None)
manager_field = getattr(cls, '_manager_field', None)
if manager is not None and manager_field is not None:
args = (manager,) + args
new_obj = await super_cls.create(*args, **kwargs)
self._items = self._items + [new_obj]
manager._data[manager_field.name] = (
manager._data[manager_field.name] +
[new_obj._data])
manager._orig_data[manager_field.name] = (
manager._orig_data[manager_field.name] +
[new_obj._data])
return new_obj
else:
raise AttributeError(
'create is not supported; %s is not a managed set' % (
super_cls.__name__))
return _create | [
"def",
"ManagedCreate",
"(",
"super_cls",
")",
":",
"@",
"wraps",
"(",
"super_cls",
".",
"create",
")",
"async",
"def",
"_create",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
"=",
"type",
"(",
"self",
")",
"manager",
"=",
"getattr",
"(",
"cls",
",",
"'_manager'",
",",
"None",
")",
"manager_field",
"=",
"getattr",
"(",
"cls",
",",
"'_manager_field'",
",",
"None",
")",
"if",
"manager",
"is",
"not",
"None",
"and",
"manager_field",
"is",
"not",
"None",
":",
"args",
"=",
"(",
"manager",
",",
")",
"+",
"args",
"new_obj",
"=",
"await",
"super_cls",
".",
"create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_items",
"=",
"self",
".",
"_items",
"+",
"[",
"new_obj",
"]",
"manager",
".",
"_data",
"[",
"manager_field",
".",
"name",
"]",
"=",
"(",
"manager",
".",
"_data",
"[",
"manager_field",
".",
"name",
"]",
"+",
"[",
"new_obj",
".",
"_data",
"]",
")",
"manager",
".",
"_orig_data",
"[",
"manager_field",
".",
"name",
"]",
"=",
"(",
"manager",
".",
"_orig_data",
"[",
"manager_field",
".",
"name",
"]",
"+",
"[",
"new_obj",
".",
"_data",
"]",
")",
"return",
"new_obj",
"else",
":",
"raise",
"AttributeError",
"(",
"'create is not supported; %s is not a managed set'",
"%",
"(",
"super_cls",
".",
"__name__",
")",
")",
"return",
"_create"
] | Dynamically creates a `create` method for a `ObjectSet.Managed` class
that calls the `super_cls.create`.
The first positional argument that is passed to the `super_cls.create` is
the `_manager` that was set using `ObjectSet.Managed`. The created object
is added to the `ObjectSet.Managed` and also placed in the correct
`_data[field]` and `_orig_data[field]` for the `_manager` object. | [
"Dynamically",
"creates",
"a",
"create",
"method",
"for",
"a",
"ObjectSet",
".",
"Managed",
"class",
"that",
"calls",
"the",
"super_cls",
".",
"create",
"."
] | python | train |
phaethon/kamene | kamene/crypto/cert.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L509-L570 | def verify(self, M, S, t=None, h=None, mgf=None, sLen=None):
"""
Verify alleged signature 'S' is indeed the signature of message 'M' using
't' signature scheme where 't' can be:
- None: the alleged signature 'S' is directly applied the RSAVP1 signature
primitive, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
5.2.1. Simply put, the provided signature is applied a modular
exponentiation using the public key. Then, a comparison of the
result is done against 'M'. On match, True is returned.
Additional method parameters are just ignored.
- 'pkcs': the alleged signature 'S' and message 'M' are applied
RSASSA-PKCS1-v1_5-VERIFY signature verification scheme as
described in Sect. 8.2.2 of RFC 3447. In that context,
the hash function name is passed using 'h'. Possible values are
"md2", "md4", "md5", "sha1", "tls", "sha224", "sha256", "sha384"
and "sha512". If none is provided, sha1 is used. Other additionnal
parameters are ignored.
- 'pss': the alleged signature 'S' and message 'M' are applied
RSASSA-PSS-VERIFY signature scheme as described in Sect. 8.1.2.
of RFC 3447. In that context,
o 'h' parameter provides the name of the hash method to use.
Possible values are "md2", "md4", "md5", "sha1", "tls", "sha224",
"sha256", "sha384" and "sha512". if none is provided, sha1
is used.
o 'mgf' is the mask generation function. By default, mgf
is derived from the provided hash function using the
generic MGF1 (see pkcs_mgf1() for details).
o 'sLen' is the length in octet of the salt. You can overload the
default value (the octet length of the hash value for provided
algorithm) by providing another one with that parameter.
"""
if h is not None:
h = mapHashFunc(h)
if t is None: #RSAVP1
pad_inst = padding.AsymmetricPadding()
elif t == "pkcs": # RSASSA-PKCS1-v1_5-VERIFY
if h is None:
h = hashes.SHA1
pad_inst = padding.PKCS1v15()
elif t == "pss": # RSASSA-PSS-VERIFY
pad_inst = padding.PSS(mgf = mgf, salg_length = sLen)
else:
warning("Key.verify(): Unknown signature type (%s) provided" % t)
return None
try:
self.key.verify(
signature=S,
data=M,
padding=pad_inst,
algorithm=h(),
)
return True
except InvalidSignature:
return False | [
"def",
"verify",
"(",
"self",
",",
"M",
",",
"S",
",",
"t",
"=",
"None",
",",
"h",
"=",
"None",
",",
"mgf",
"=",
"None",
",",
"sLen",
"=",
"None",
")",
":",
"if",
"h",
"is",
"not",
"None",
":",
"h",
"=",
"mapHashFunc",
"(",
"h",
")",
"if",
"t",
"is",
"None",
":",
"#RSAVP1",
"pad_inst",
"=",
"padding",
".",
"AsymmetricPadding",
"(",
")",
"elif",
"t",
"==",
"\"pkcs\"",
":",
"# RSASSA-PKCS1-v1_5-VERIFY",
"if",
"h",
"is",
"None",
":",
"h",
"=",
"hashes",
".",
"SHA1",
"pad_inst",
"=",
"padding",
".",
"PKCS1v15",
"(",
")",
"elif",
"t",
"==",
"\"pss\"",
":",
"# RSASSA-PSS-VERIFY",
"pad_inst",
"=",
"padding",
".",
"PSS",
"(",
"mgf",
"=",
"mgf",
",",
"salg_length",
"=",
"sLen",
")",
"else",
":",
"warning",
"(",
"\"Key.verify(): Unknown signature type (%s) provided\"",
"%",
"t",
")",
"return",
"None",
"try",
":",
"self",
".",
"key",
".",
"verify",
"(",
"signature",
"=",
"S",
",",
"data",
"=",
"M",
",",
"padding",
"=",
"pad_inst",
",",
"algorithm",
"=",
"h",
"(",
")",
",",
")",
"return",
"True",
"except",
"InvalidSignature",
":",
"return",
"False"
] | Verify alleged signature 'S' is indeed the signature of message 'M' using
't' signature scheme where 't' can be:
- None: the alleged signature 'S' is directly applied the RSAVP1 signature
primitive, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
5.2.1. Simply put, the provided signature is applied a modular
exponentiation using the public key. Then, a comparison of the
result is done against 'M'. On match, True is returned.
Additional method parameters are just ignored.
- 'pkcs': the alleged signature 'S' and message 'M' are applied
RSASSA-PKCS1-v1_5-VERIFY signature verification scheme as
described in Sect. 8.2.2 of RFC 3447. In that context,
the hash function name is passed using 'h'. Possible values are
"md2", "md4", "md5", "sha1", "tls", "sha224", "sha256", "sha384"
and "sha512". If none is provided, sha1 is used. Other additionnal
parameters are ignored.
- 'pss': the alleged signature 'S' and message 'M' are applied
RSASSA-PSS-VERIFY signature scheme as described in Sect. 8.1.2.
of RFC 3447. In that context,
o 'h' parameter provides the name of the hash method to use.
Possible values are "md2", "md4", "md5", "sha1", "tls", "sha224",
"sha256", "sha384" and "sha512". if none is provided, sha1
is used.
o 'mgf' is the mask generation function. By default, mgf
is derived from the provided hash function using the
generic MGF1 (see pkcs_mgf1() for details).
o 'sLen' is the length in octet of the salt. You can overload the
default value (the octet length of the hash value for provided
algorithm) by providing another one with that parameter. | [
"Verify",
"alleged",
"signature",
"S",
"is",
"indeed",
"the",
"signature",
"of",
"message",
"M",
"using",
"t",
"signature",
"scheme",
"where",
"t",
"can",
"be",
":"
] | python | train |
rkcosmos/deepcut | deepcut/deepcut.py | https://github.com/rkcosmos/deepcut/blob/9a2729071d01972af805acede85d7aa9e7a6da30/deepcut/deepcut.py#L79-L91 | def _check_stop_list(stop):
"""
Check stop words list
ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95
"""
if stop == "thai":
return THAI_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
# assume it's a collection
return frozenset(stop) | [
"def",
"_check_stop_list",
"(",
"stop",
")",
":",
"if",
"stop",
"==",
"\"thai\"",
":",
"return",
"THAI_STOP_WORDS",
"elif",
"isinstance",
"(",
"stop",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"not a built-in stop list: %s\"",
"%",
"stop",
")",
"elif",
"stop",
"is",
"None",
":",
"return",
"None",
"# assume it's a collection",
"return",
"frozenset",
"(",
"stop",
")"
] | Check stop words list
ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95 | [
"Check",
"stop",
"words",
"list",
"ref",
":",
"https",
":",
"//",
"github",
".",
"com",
"/",
"scikit",
"-",
"learn",
"/",
"scikit",
"-",
"learn",
"/",
"blob",
"/",
"master",
"/",
"sklearn",
"/",
"feature_extraction",
"/",
"text",
".",
"py#L87",
"-",
"L95"
] | python | valid |
bitprophet/ssh | ssh/client.py | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/client.py#L179-L195 | def save_host_keys(self, filename):
"""
Save the host keys back to a file. Only the host keys loaded with
L{load_host_keys} (plus any added directly) will be saved -- not any
host keys loaded with L{load_system_host_keys}.
@param filename: the filename to save to
@type filename: str
@raise IOError: if the file could not be written
"""
f = open(filename, 'w')
f.write('# SSH host keys collected by ssh\n')
for hostname, keys in self._host_keys.iteritems():
for keytype, key in keys.iteritems():
f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
f.close() | [
"def",
"save_host_keys",
"(",
"self",
",",
"filename",
")",
":",
"f",
"=",
"open",
"(",
"filename",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"'# SSH host keys collected by ssh\\n'",
")",
"for",
"hostname",
",",
"keys",
"in",
"self",
".",
"_host_keys",
".",
"iteritems",
"(",
")",
":",
"for",
"keytype",
",",
"key",
"in",
"keys",
".",
"iteritems",
"(",
")",
":",
"f",
".",
"write",
"(",
"'%s %s %s\\n'",
"%",
"(",
"hostname",
",",
"keytype",
",",
"key",
".",
"get_base64",
"(",
")",
")",
")",
"f",
".",
"close",
"(",
")"
] | Save the host keys back to a file. Only the host keys loaded with
L{load_host_keys} (plus any added directly) will be saved -- not any
host keys loaded with L{load_system_host_keys}.
@param filename: the filename to save to
@type filename: str
@raise IOError: if the file could not be written | [
"Save",
"the",
"host",
"keys",
"back",
"to",
"a",
"file",
".",
"Only",
"the",
"host",
"keys",
"loaded",
"with",
"L",
"{",
"load_host_keys",
"}",
"(",
"plus",
"any",
"added",
"directly",
")",
"will",
"be",
"saved",
"--",
"not",
"any",
"host",
"keys",
"loaded",
"with",
"L",
"{",
"load_system_host_keys",
"}",
"."
] | python | train |
pycontribs/python-crowd | crowd.py | https://github.com/pycontribs/python-crowd/blob/a075e45774dd5baecf0217843cda747084268e32/crowd.py#L94-L106 | def _post(self, *args, **kwargs):
"""Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session.post(*args, **kwargs)
return req | [
"def",
"_post",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'timeout'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'timeout'",
"]",
"=",
"self",
".",
"timeout",
"req",
"=",
"self",
".",
"session",
".",
"post",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"req"
] | Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object | [
"Wrapper",
"around",
"Requests",
"for",
"POST",
"requests"
] | python | train |
za-creature/gulpless | gulpless/handlers.py | https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L75-L126 | def _build(self, src, path, dest, mtime):
"""Calls `build` after testing that at least one output file (as
returned by `_outputs()` does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method is called with a larger mtime.
"""
input_path = os.path.join(src, path)
output_paths = [os.path.join(dest, output) for output in
self._outputs(src, path)]
if path in self.failures and mtime <= self.failures[path]:
# the input file was not modified since the last recorded failure
# as such, assume that the task will fail again and skip it
return
for output in output_paths:
try:
if \
os.path.exists(output) and \
mtime <= os.path.getmtime(output):
# output file exists and is up to date; no need to trigger
# build on this file's expense
continue
except EnvironmentError:
# usually happens when the output file has been deleted in
# between the call to exists and the call to getmtime
pass
start = time.time()
try:
self.build(input_path, output_paths)
except Exception as e:
if isinstance(e, EnvironmentError):
# non-zero return code in sub-process; only show message
logging.error("{0} failed after {1:.2f}s: {2}".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start, e.args[0]
))
else:
# probably a bug in the handler; show full trace
logging.exception("{0} failed after {1:.2f}s".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start
))
self.failures[path] = start
else:
logging.info("{0} completed in {1:.2f}s".format(
termcolor.colored(path, "green", attrs=["bold"]),
time.time() - start
))
self.failures.pop(path, None)
break | [
"def",
"_build",
"(",
"self",
",",
"src",
",",
"path",
",",
"dest",
",",
"mtime",
")",
":",
"input_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"path",
")",
"output_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dest",
",",
"output",
")",
"for",
"output",
"in",
"self",
".",
"_outputs",
"(",
"src",
",",
"path",
")",
"]",
"if",
"path",
"in",
"self",
".",
"failures",
"and",
"mtime",
"<=",
"self",
".",
"failures",
"[",
"path",
"]",
":",
"# the input file was not modified since the last recorded failure",
"# as such, assume that the task will fail again and skip it",
"return",
"for",
"output",
"in",
"output_paths",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output",
")",
"and",
"mtime",
"<=",
"os",
".",
"path",
".",
"getmtime",
"(",
"output",
")",
":",
"# output file exists and is up to date; no need to trigger",
"# build on this file's expense",
"continue",
"except",
"EnvironmentError",
":",
"# usually happens when the output file has been deleted in",
"# between the call to exists and the call to getmtime",
"pass",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"self",
".",
"build",
"(",
"input_path",
",",
"output_paths",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"EnvironmentError",
")",
":",
"# non-zero return code in sub-process; only show message",
"logging",
".",
"error",
"(",
"\"{0} failed after {1:.2f}s: {2}\"",
".",
"format",
"(",
"termcolor",
".",
"colored",
"(",
"path",
",",
"\"red\"",
",",
"attrs",
"=",
"[",
"\"bold\"",
"]",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"start",
",",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
"else",
":",
"# probably a bug in the handler; show full trace",
"logging",
".",
"exception",
"(",
"\"{0} failed after {1:.2f}s\"",
".",
"format",
"(",
"termcolor",
".",
"colored",
"(",
"path",
",",
"\"red\"",
",",
"attrs",
"=",
"[",
"\"bold\"",
"]",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"self",
".",
"failures",
"[",
"path",
"]",
"=",
"start",
"else",
":",
"logging",
".",
"info",
"(",
"\"{0} completed in {1:.2f}s\"",
".",
"format",
"(",
"termcolor",
".",
"colored",
"(",
"path",
",",
"\"green\"",
",",
"attrs",
"=",
"[",
"\"bold\"",
"]",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"self",
".",
"failures",
".",
"pop",
"(",
"path",
",",
"None",
")",
"break"
] | Calls `build` after testing that at least one output file (as
returned by `_outputs()` does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method is called with a larger mtime. | [
"Calls",
"build",
"after",
"testing",
"that",
"at",
"least",
"one",
"output",
"file",
"(",
"as",
"returned",
"by",
"_outputs",
"()",
"does",
"not",
"exist",
"or",
"is",
"older",
"than",
"mtime",
".",
"If",
"the",
"build",
"fails",
"the",
"build",
"time",
"is",
"recorded",
"and",
"no",
"other",
"builds",
"will",
"be",
"attempted",
"on",
"input",
"until",
"this",
"method",
"is",
"called",
"with",
"a",
"larger",
"mtime",
"."
] | python | train |
cykl/infoqscraper | infoqscraper/convert.py | https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/convert.py#L92-L113 | def download_video(self):
"""Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache()
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache()
return video_path | [
"def",
"download_video",
"(",
"self",
")",
":",
"rvideo_path",
"=",
"self",
".",
"presentation",
".",
"metadata",
"[",
"'video_path'",
"]",
"if",
"self",
".",
"presentation",
".",
"client",
".",
"cache",
":",
"video_path",
"=",
"self",
".",
"presentation",
".",
"client",
".",
"cache",
".",
"get_path",
"(",
"rvideo_path",
")",
"if",
"not",
"video_path",
":",
"video_path",
"=",
"self",
".",
"download_video_no_cache",
"(",
")",
"self",
".",
"presentation",
".",
"client",
".",
"cache",
".",
"put_path",
"(",
"rvideo_path",
",",
"video_path",
")",
"else",
":",
"video_path",
"=",
"self",
".",
"download_video_no_cache",
"(",
")",
"return",
"video_path"
] | Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded. | [
"Downloads",
"the",
"video",
"."
] | python | train |
NiklasRosenstein-Python/nr-deprecated | nr/stream.py | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/stream.py#L70-L85 | def unique(cls, iterable, key=None):
"""
Yields unique items from *iterable* whilst preserving the original order.
"""
if key is None:
key = lambda x: x
def generator():
seen = set()
seen_add = seen.add
for item in iterable:
key_val = key(item)
if key_val not in seen:
seen_add(key_val)
yield item
return cls(generator()) | [
"def",
"unique",
"(",
"cls",
",",
"iterable",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"None",
":",
"key",
"=",
"lambda",
"x",
":",
"x",
"def",
"generator",
"(",
")",
":",
"seen",
"=",
"set",
"(",
")",
"seen_add",
"=",
"seen",
".",
"add",
"for",
"item",
"in",
"iterable",
":",
"key_val",
"=",
"key",
"(",
"item",
")",
"if",
"key_val",
"not",
"in",
"seen",
":",
"seen_add",
"(",
"key_val",
")",
"yield",
"item",
"return",
"cls",
"(",
"generator",
"(",
")",
")"
] | Yields unique items from *iterable* whilst preserving the original order. | [
"Yields",
"unique",
"items",
"from",
"*",
"iterable",
"*",
"whilst",
"preserving",
"the",
"original",
"order",
"."
] | python | train |
ahtn/python-easyhid | easyhid/easyhid.py | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L112-L126 | def open(self):
"""
Open the HID device for reading and writing.
"""
if self._is_open:
raise HIDException("Failed to open device: HIDDevice already open")
path = self.path.encode('utf-8')
dev = hidapi.hid_open_path(path)
if dev:
self._is_open = True
self._device = dev
else:
raise HIDException("Failed to open device") | [
"def",
"open",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_open",
":",
"raise",
"HIDException",
"(",
"\"Failed to open device: HIDDevice already open\"",
")",
"path",
"=",
"self",
".",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
"dev",
"=",
"hidapi",
".",
"hid_open_path",
"(",
"path",
")",
"if",
"dev",
":",
"self",
".",
"_is_open",
"=",
"True",
"self",
".",
"_device",
"=",
"dev",
"else",
":",
"raise",
"HIDException",
"(",
"\"Failed to open device\"",
")"
] | Open the HID device for reading and writing. | [
"Open",
"the",
"HID",
"device",
"for",
"reading",
"and",
"writing",
"."
] | python | train |
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/window.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/window.py#L590-L604 | def show(self, bAsync = True):
"""
Make the window visible.
@see: L{hide}
@type bAsync: bool
@param bAsync: Perform the request asynchronously.
@raise WindowsError: An error occurred while processing this request.
"""
if bAsync:
win32.ShowWindowAsync( self.get_handle(), win32.SW_SHOW )
else:
win32.ShowWindow( self.get_handle(), win32.SW_SHOW ) | [
"def",
"show",
"(",
"self",
",",
"bAsync",
"=",
"True",
")",
":",
"if",
"bAsync",
":",
"win32",
".",
"ShowWindowAsync",
"(",
"self",
".",
"get_handle",
"(",
")",
",",
"win32",
".",
"SW_SHOW",
")",
"else",
":",
"win32",
".",
"ShowWindow",
"(",
"self",
".",
"get_handle",
"(",
")",
",",
"win32",
".",
"SW_SHOW",
")"
] | Make the window visible.
@see: L{hide}
@type bAsync: bool
@param bAsync: Perform the request asynchronously.
@raise WindowsError: An error occurred while processing this request. | [
"Make",
"the",
"window",
"visible",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/local_env.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L1446-L1490 | def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with O'Keeffe parameters.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self.cutoff)
try:
eln = site.specie.element
except:
eln = site.species_string
reldists_neighs = []
for neigh, dist in neighs_dists:
try:
el2 = neigh.specie.element
except:
el2 = neigh.species_string
reldists_neighs.append([dist / get_okeeffe_distance_prediction(
eln, el2), neigh])
siw = []
min_reldist = min([reldist for reldist, neigh in reldists_neighs])
for reldist, s in reldists_neighs:
if reldist < (1.0 + self.tol) * min_reldist:
w = min_reldist / reldist
siw.append({'site': s,
'image': self._get_image(structure, s),
'weight': w,
'site_index': self._get_original_site(structure,
s)})
return siw | [
"def",
"get_nn_info",
"(",
"self",
",",
"structure",
",",
"n",
")",
":",
"site",
"=",
"structure",
"[",
"n",
"]",
"neighs_dists",
"=",
"structure",
".",
"get_neighbors",
"(",
"site",
",",
"self",
".",
"cutoff",
")",
"try",
":",
"eln",
"=",
"site",
".",
"specie",
".",
"element",
"except",
":",
"eln",
"=",
"site",
".",
"species_string",
"reldists_neighs",
"=",
"[",
"]",
"for",
"neigh",
",",
"dist",
"in",
"neighs_dists",
":",
"try",
":",
"el2",
"=",
"neigh",
".",
"specie",
".",
"element",
"except",
":",
"el2",
"=",
"neigh",
".",
"species_string",
"reldists_neighs",
".",
"append",
"(",
"[",
"dist",
"/",
"get_okeeffe_distance_prediction",
"(",
"eln",
",",
"el2",
")",
",",
"neigh",
"]",
")",
"siw",
"=",
"[",
"]",
"min_reldist",
"=",
"min",
"(",
"[",
"reldist",
"for",
"reldist",
",",
"neigh",
"in",
"reldists_neighs",
"]",
")",
"for",
"reldist",
",",
"s",
"in",
"reldists_neighs",
":",
"if",
"reldist",
"<",
"(",
"1.0",
"+",
"self",
".",
"tol",
")",
"*",
"min_reldist",
":",
"w",
"=",
"min_reldist",
"/",
"reldist",
"siw",
".",
"append",
"(",
"{",
"'site'",
":",
"s",
",",
"'image'",
":",
"self",
".",
"_get_image",
"(",
"structure",
",",
"s",
")",
",",
"'weight'",
":",
"w",
",",
"'site_index'",
":",
"self",
".",
"_get_original_site",
"(",
"structure",
",",
"s",
")",
"}",
")",
"return",
"siw"
] | Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with O'Keeffe parameters.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight. | [
"Get",
"all",
"near",
"-",
"neighbor",
"sites",
"as",
"well",
"as",
"the",
"associated",
"image",
"locations",
"and",
"weights",
"of",
"the",
"site",
"with",
"index",
"n",
"using",
"the",
"closest",
"relative",
"neighbor",
"distance",
"-",
"based",
"method",
"with",
"O",
"Keeffe",
"parameters",
"."
] | python | train |
hobson/pug-dj | pug/dj/db.py | https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1638-L1670 | def write_queryset_to_csv(qs, filename):
"""Write a QuerySet or ValuesListQuerySet to a CSV file
based on djangosnippets by zbyte64 and http://palewi.re
Arguments:
qs (QuerySet or ValuesListQuerySet): The records you want to write to a text file (UTF-8)
filename (str): full path and file name to write to
"""
model = qs.model
with open(filename, 'w') as fp:
writer = csv.writer(fp)
try:
headers = list(qs._fields)
except:
headers = [field.name for field in model._meta.fields]
writer.writerow(headers)
for obj in qs:
row = []
for colnum, field in enumerate(headers):
try:
value = getattr(obj, field, obj[colnum])
except:
value = ''
if callable(value):
value = value()
if isinstance(value, basestring):
value = value.encode("utf-8")
else:
value = str(value).encode("utf-8")
row += [value]
writer.writerow(row) | [
"def",
"write_queryset_to_csv",
"(",
"qs",
",",
"filename",
")",
":",
"model",
"=",
"qs",
".",
"model",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fp",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"fp",
")",
"try",
":",
"headers",
"=",
"list",
"(",
"qs",
".",
"_fields",
")",
"except",
":",
"headers",
"=",
"[",
"field",
".",
"name",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
"]",
"writer",
".",
"writerow",
"(",
"headers",
")",
"for",
"obj",
"in",
"qs",
":",
"row",
"=",
"[",
"]",
"for",
"colnum",
",",
"field",
"in",
"enumerate",
"(",
"headers",
")",
":",
"try",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"field",
",",
"obj",
"[",
"colnum",
"]",
")",
"except",
":",
"value",
"=",
"''",
"if",
"callable",
"(",
"value",
")",
":",
"value",
"=",
"value",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"value",
"=",
"value",
".",
"encode",
"(",
"\"utf-8\"",
")",
"else",
":",
"value",
"=",
"str",
"(",
"value",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"row",
"+=",
"[",
"value",
"]",
"writer",
".",
"writerow",
"(",
"row",
")"
] | Write a QuerySet or ValuesListQuerySet to a CSV file
based on djangosnippets by zbyte64 and http://palewi.re
Arguments:
qs (QuerySet or ValuesListQuerySet): The records you want to write to a text file (UTF-8)
filename (str): full path and file name to write to | [
"Write",
"a",
"QuerySet",
"or",
"ValuesListQuerySet",
"to",
"a",
"CSV",
"file"
] | python | train |
Azure/msrest-for-python | msrest/serialization.py | https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L742-L759 | def serialize_unicode(self, data):
"""Special handling for serializing unicode strings in Py2.
Encode to UTF-8 if unicode, otherwise handle as a str.
:param data: Object to be serialized.
:rtype: str
"""
try:
return data.value
except AttributeError:
pass
try:
if isinstance(data, unicode):
return data.encode(encoding='utf-8')
except NameError:
return str(data)
else:
return str(data) | [
"def",
"serialize_unicode",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"return",
"data",
".",
"value",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"if",
"isinstance",
"(",
"data",
",",
"unicode",
")",
":",
"return",
"data",
".",
"encode",
"(",
"encoding",
"=",
"'utf-8'",
")",
"except",
"NameError",
":",
"return",
"str",
"(",
"data",
")",
"else",
":",
"return",
"str",
"(",
"data",
")"
] | Special handling for serializing unicode strings in Py2.
Encode to UTF-8 if unicode, otherwise handle as a str.
:param data: Object to be serialized.
:rtype: str | [
"Special",
"handling",
"for",
"serializing",
"unicode",
"strings",
"in",
"Py2",
".",
"Encode",
"to",
"UTF",
"-",
"8",
"if",
"unicode",
"otherwise",
"handle",
"as",
"a",
"str",
"."
] | python | train |
wilfilho/BingTranslator | BingTranslator/__init__.py | https://github.com/wilfilho/BingTranslator/blob/6bada6fe1ac4177cc7dc62ff16dab561ba714534/BingTranslator/__init__.py#L35-L49 | def _get_token(self):
"""
Get token for make request. The data obtained herein are used
in the variable header.
Returns:
To perform the request, receive in return a dictionary
with several keys. With this method only return the token
as it will use it for subsequent requests, such as a
sentence translate. Returns one string type.
"""
informations = self._set_format_oauth()
oauth_url = "https://datamarket.accesscontrol.windows.net/v2/OAuth2-13"
token = requests.post(oauth_url, informations).json()
return token["access_token"] | [
"def",
"_get_token",
"(",
"self",
")",
":",
"informations",
"=",
"self",
".",
"_set_format_oauth",
"(",
")",
"oauth_url",
"=",
"\"https://datamarket.accesscontrol.windows.net/v2/OAuth2-13\"",
"token",
"=",
"requests",
".",
"post",
"(",
"oauth_url",
",",
"informations",
")",
".",
"json",
"(",
")",
"return",
"token",
"[",
"\"access_token\"",
"]"
] | Get token for make request. The data obtained herein are used
in the variable header.
Returns:
To perform the request, receive in return a dictionary
with several keys. With this method only return the token
as it will use it for subsequent requests, such as a
sentence translate. Returns one string type. | [
"Get",
"token",
"for",
"make",
"request",
".",
"The",
"The",
"data",
"obtained",
"herein",
"are",
"used",
"in",
"the",
"variable",
"header",
".",
"Returns",
":",
"To",
"perform",
"the",
"request",
"receive",
"in",
"return",
"a",
"dictionary",
"with",
"several",
"keys",
".",
"With",
"this",
"method",
"only",
"return",
"the",
"token",
"as",
"it",
"will",
"use",
"it",
"for",
"subsequent",
"requests",
"such",
"as",
"a",
"sentence",
"translate",
".",
"Returns",
"one",
"string",
"type",
"."
] | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/tracker.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L126-L134 | def print_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
summary.print_(self.diff(summary1=summary1, summary2=summary2)) | [
"def",
"print_diff",
"(",
"self",
",",
"summary1",
"=",
"None",
",",
"summary2",
"=",
"None",
")",
":",
"summary",
".",
"print_",
"(",
"self",
".",
"diff",
"(",
"summary1",
"=",
"summary1",
",",
"summary2",
"=",
"summary2",
")",
")"
] | Compute diff between two summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used. | [
"Compute",
"diff",
"between",
"to",
"summaries",
"and",
"print",
"it",
"."
] | python | train |
kakwa/ldapcherry | ldapcherry/backend/backendLdap.py | https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/backend/backendLdap.py#L250-L306 | def _search(self, searchfilter, attrs, basedn):
"""Generic search"""
if attrs == NO_ATTR:
attrlist = []
elif attrs == DISPLAYED_ATTRS:
# fix me later (to much attributes)
attrlist = self.attrlist
elif attrs == LISTED_ATTRS:
attrlist = self.attrlist
elif attrs == ALL_ATTRS:
attrlist = None
else:
attrlist = None
self._logger(
severity=logging.DEBUG,
msg="%(backend)s: executing search "
"with filter '%(filter)s' in DN '%(dn)s'" % {
'backend': self.backend_name,
'dn': basedn,
'filter': self._uni(searchfilter)
}
)
# bind and search the ldap
ldap_client = self._bind()
try:
r = ldap_client.search_s(
basedn,
ldap.SCOPE_SUBTREE,
searchfilter,
attrlist=attrlist
)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s()
# python-ldap doesn't know utf-8,
# it treats everything as bytes.
# So it's necessary to reencode
# its output in utf-8.
ret = []
for entry in r:
uni_dn = self._uni(entry[0])
uni_attrs = {}
for attr in entry[1]:
if type(entry[1][attr]) is list:
tmp = []
for value in entry[1][attr]:
tmp.append(self._uni(value))
else:
tmp = self._uni(entry[1][attr])
uni_attrs[self._uni(attr)] = tmp
ret.append((uni_dn, uni_attrs))
return ret | [
"def",
"_search",
"(",
"self",
",",
"searchfilter",
",",
"attrs",
",",
"basedn",
")",
":",
"if",
"attrs",
"==",
"NO_ATTR",
":",
"attrlist",
"=",
"[",
"]",
"elif",
"attrs",
"==",
"DISPLAYED_ATTRS",
":",
"# fix me later (to much attributes)",
"attrlist",
"=",
"self",
".",
"attrlist",
"elif",
"attrs",
"==",
"LISTED_ATTRS",
":",
"attrlist",
"=",
"self",
".",
"attrlist",
"elif",
"attrs",
"==",
"ALL_ATTRS",
":",
"attrlist",
"=",
"None",
"else",
":",
"attrlist",
"=",
"None",
"self",
".",
"_logger",
"(",
"severity",
"=",
"logging",
".",
"DEBUG",
",",
"msg",
"=",
"\"%(backend)s: executing search \"",
"\"with filter '%(filter)s' in DN '%(dn)s'\"",
"%",
"{",
"'backend'",
":",
"self",
".",
"backend_name",
",",
"'dn'",
":",
"basedn",
",",
"'filter'",
":",
"self",
".",
"_uni",
"(",
"searchfilter",
")",
"}",
")",
"# bind and search the ldap",
"ldap_client",
"=",
"self",
".",
"_bind",
"(",
")",
"try",
":",
"r",
"=",
"ldap_client",
".",
"search_s",
"(",
"basedn",
",",
"ldap",
".",
"SCOPE_SUBTREE",
",",
"searchfilter",
",",
"attrlist",
"=",
"attrlist",
")",
"except",
"Exception",
"as",
"e",
":",
"ldap_client",
".",
"unbind_s",
"(",
")",
"self",
".",
"_exception_handler",
"(",
"e",
")",
"ldap_client",
".",
"unbind_s",
"(",
")",
"# python-ldap doesn't know utf-8,",
"# it treates everything as bytes.",
"# So it's necessary to reencode",
"# it's output in utf-8.",
"ret",
"=",
"[",
"]",
"for",
"entry",
"in",
"r",
":",
"uni_dn",
"=",
"self",
".",
"_uni",
"(",
"entry",
"[",
"0",
"]",
")",
"uni_attrs",
"=",
"{",
"}",
"for",
"attr",
"in",
"entry",
"[",
"1",
"]",
":",
"if",
"type",
"(",
"entry",
"[",
"1",
"]",
"[",
"attr",
"]",
")",
"is",
"list",
":",
"tmp",
"=",
"[",
"]",
"for",
"value",
"in",
"entry",
"[",
"1",
"]",
"[",
"attr",
"]",
":",
"tmp",
".",
"append",
"(",
"self",
".",
"_uni",
"(",
"value",
")",
")",
"else",
":",
"tmp",
"=",
"self",
".",
"_uni",
"(",
"entry",
"[",
"1",
"]",
"[",
"attr",
"]",
")",
"uni_attrs",
"[",
"self",
".",
"_uni",
"(",
"attr",
")",
"]",
"=",
"tmp",
"ret",
".",
"append",
"(",
"(",
"uni_dn",
",",
"uni_attrs",
")",
")",
"return",
"ret"
] | Generic search | [
"Generic",
"search"
] | python | train |
kensho-technologies/graphql-compiler | graphql_compiler/query_formatting/representations.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/representations.py#L8-L29 | def represent_float_as_str(value):
"""Represent a float as a string without losing precision."""
# In Python 2, calling str() on a float object loses precision:
#
# In [1]: 1.23456789012345678
# Out[1]: 1.2345678901234567
#
# In [2]: 1.2345678901234567
# Out[2]: 1.2345678901234567
#
# In [3]: str(1.2345678901234567)
# Out[3]: '1.23456789012'
#
# The best way to ensure precision is not lost is to convert to string via Decimal:
# https://github.com/mogui/pyorient/pull/226/files
if not isinstance(value, float):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-float as a float: '
u'{}'.format(value))
with decimal.localcontext() as ctx:
ctx.prec = 20 # floats are max 80-bits wide = 20 significant digits
return u'{:f}'.format(decimal.Decimal(value)) | [
"def",
"represent_float_as_str",
"(",
"value",
")",
":",
"# In Python 2, calling str() on a float object loses precision:",
"#",
"# In [1]: 1.23456789012345678",
"# Out[1]: 1.2345678901234567",
"#",
"# In [2]: 1.2345678901234567",
"# Out[2]: 1.2345678901234567",
"#",
"# In [3]: str(1.2345678901234567)",
"# Out[3]: '1.23456789012'",
"#",
"# The best way to ensure precision is not lost is to convert to string via Decimal:",
"# https://github.com/mogui/pyorient/pull/226/files",
"if",
"not",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"raise",
"GraphQLInvalidArgumentError",
"(",
"u'Attempting to represent a non-float as a float: '",
"u'{}'",
".",
"format",
"(",
"value",
")",
")",
"with",
"decimal",
".",
"localcontext",
"(",
")",
"as",
"ctx",
":",
"ctx",
".",
"prec",
"=",
"20",
"# floats are max 80-bits wide = 20 significant digits",
"return",
"u'{:f}'",
".",
"format",
"(",
"decimal",
".",
"Decimal",
"(",
"value",
")",
")"
] | Represent a float as a string without losing precision. | [
"Represent",
"a",
"float",
"as",
"a",
"string",
"without",
"losing",
"precision",
"."
] | python | train |
kennethreitz/flask-sslify | flask_sslify.py | https://github.com/kennethreitz/flask-sslify/blob/425a1deb4a1a8f693319f4b97134196e0235848c/flask_sslify.py#L52-L59 | def hsts_header(self):
"""Returns the proper HSTS policy."""
hsts_policy = 'max-age={0}'.format(self.hsts_age)
if self.hsts_include_subdomains:
hsts_policy += '; includeSubDomains'
return hsts_policy | [
"def",
"hsts_header",
"(",
"self",
")",
":",
"hsts_policy",
"=",
"'max-age={0}'",
".",
"format",
"(",
"self",
".",
"hsts_age",
")",
"if",
"self",
".",
"hsts_include_subdomains",
":",
"hsts_policy",
"+=",
"'; includeSubDomains'",
"return",
"hsts_policy"
] | Returns the proper HSTS policy. | [
"Returns",
"the",
"proper",
"HSTS",
"policy",
"."
] | python | train |
asweigart/pyautogui | pyautogui/__init__.py | https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L955-L975 | def keyDown(key, pause=None, _pause=True):
"""Performs a keyboard key press without the release. This will put that
key in a held down state.
NOTE: For some reason, this does not seem to cause key repeats like would
happen if a keyboard key was held down on a text field.
Args:
key (str): The key to be pressed down. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyDown(key)
_autoPause(pause, _pause) | [
"def",
"keyDown",
"(",
"key",
",",
"pause",
"=",
"None",
",",
"_pause",
"=",
"True",
")",
":",
"if",
"len",
"(",
"key",
")",
">",
"1",
":",
"key",
"=",
"key",
".",
"lower",
"(",
")",
"_failSafeCheck",
"(",
")",
"platformModule",
".",
"_keyDown",
"(",
"key",
")",
"_autoPause",
"(",
"pause",
",",
"_pause",
")"
] | Performs a keyboard key press without the release. This will put that
key in a held down state.
NOTE: For some reason, this does not seem to cause key repeats like would
happen if a keyboard key was held down on a text field.
Args:
key (str): The key to be pressed down. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None | [
"Performs",
"a",
"keyboard",
"key",
"press",
"without",
"the",
"release",
".",
"This",
"will",
"put",
"that",
"key",
"in",
"a",
"held",
"down",
"state",
"."
] | python | train |
ethereum/py-evm | eth/vm/base.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L918-L924 | def validate_seal(cls, header: BlockHeader) -> None:
"""
Validate the seal on the given header.
"""
check_pow(
header.block_number, header.mining_hash,
header.mix_hash, header.nonce, header.difficulty) | [
"def",
"validate_seal",
"(",
"cls",
",",
"header",
":",
"BlockHeader",
")",
"->",
"None",
":",
"check_pow",
"(",
"header",
".",
"block_number",
",",
"header",
".",
"mining_hash",
",",
"header",
".",
"mix_hash",
",",
"header",
".",
"nonce",
",",
"header",
".",
"difficulty",
")"
] | Validate the seal on the given header. | [
"Validate",
"the",
"seal",
"on",
"the",
"given",
"header",
"."
] | python | train |
openid/JWTConnect-Python-CryptoJWT | src/cryptojwt/simple_jwt.py | https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/simple_jwt.py#L87-L106 | def payload(self):
"""
Picks out the payload from the different parts of the signed/encrypted
JSON Web Token. If the content type is said to be 'jwt' deserialize the
payload into a Python object otherwise return as-is.
:return: The payload
"""
_msg = as_unicode(self.part[1])
# If not JSON web token assume JSON
if "cty" in self.headers and self.headers["cty"].lower() != "jwt":
pass
else:
try:
_msg = json.loads(_msg)
except ValueError:
pass
return _msg | [
"def",
"payload",
"(",
"self",
")",
":",
"_msg",
"=",
"as_unicode",
"(",
"self",
".",
"part",
"[",
"1",
"]",
")",
"# If not JSON web token assume JSON",
"if",
"\"cty\"",
"in",
"self",
".",
"headers",
"and",
"self",
".",
"headers",
"[",
"\"cty\"",
"]",
".",
"lower",
"(",
")",
"!=",
"\"jwt\"",
":",
"pass",
"else",
":",
"try",
":",
"_msg",
"=",
"json",
".",
"loads",
"(",
"_msg",
")",
"except",
"ValueError",
":",
"pass",
"return",
"_msg"
] | Picks out the payload from the different parts of the signed/encrypted
JSON Web Token. If the content type is said to be 'jwt' deserialize the
payload into a Python object otherwise return as-is.
:return: The payload | [
"Picks",
"out",
"the",
"payload",
"from",
"the",
"different",
"parts",
"of",
"the",
"signed",
"/",
"encrypted",
"JSON",
"Web",
"Token",
".",
"If",
"the",
"content",
"type",
"is",
"said",
"to",
"be",
"jwt",
"deserialize",
"the",
"payload",
"into",
"a",
"Python",
"object",
"otherwise",
"return",
"as",
"-",
"is",
"."
] | python | train |
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/model_booster/queryable_mixin.py | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/model_booster/queryable_mixin.py#L497-L537 | def get_all(cls, keyvals, key='id', user_id=None):
"""Works like a map function from keyvals to instances.
Args:
keyvals(list): The list of values of the attribute.
key (str, optional): The attribute to search by. By default, it is
'id'.
Returns:
list: A list of model instances, in the same order as the list of
keyvals.
Examples:
>>> User.get_all([2,5,7, 8000, 11])
[email protected], [email protected], [email protected], None, [email protected]
>>> User.get_all(['[email protected]', '[email protected]'], key='email')
[email protected], [email protected]
"""
if len(keyvals) == 0:
return []
original_keyvals = keyvals
keyvals_set = list(set(keyvals))
resultset = cls.query.filter(getattr(cls, key).in_(keyvals_set))
# This is ridiculous. user_id check cannot be here. A hangover
# from the time this lib was inside our app codebase
# if user_id and hasattr(cls, 'user_id'):
# resultset = resultset.filter(cls.user_id == user_id)
# We need the results in the same order as the input keyvals
# So order by field in SQL
key_result_mapping = {getattr(result, key): result for result in resultset.all()}
return [key_result_mapping.get(kv) for kv in original_keyvals] | [
"def",
"get_all",
"(",
"cls",
",",
"keyvals",
",",
"key",
"=",
"'id'",
",",
"user_id",
"=",
"None",
")",
":",
"if",
"len",
"(",
"keyvals",
")",
"==",
"0",
":",
"return",
"[",
"]",
"original_keyvals",
"=",
"keyvals",
"keyvals_set",
"=",
"list",
"(",
"set",
"(",
"keyvals",
")",
")",
"resultset",
"=",
"cls",
".",
"query",
".",
"filter",
"(",
"getattr",
"(",
"cls",
",",
"key",
")",
".",
"in_",
"(",
"keyvals_set",
")",
")",
"# This is ridiculous. user_id check cannot be here. A hangover",
"# from the time this lib was inside our app codebase",
"# if user_id and hasattr(cls, 'user_id'):",
"# resultset = resultset.filter(cls.user_id == user_id)",
"# We need the results in the same order as the input keyvals",
"# So order by field in SQL",
"key_result_mapping",
"=",
"{",
"getattr",
"(",
"result",
",",
"key",
")",
":",
"result",
"for",
"result",
"in",
"resultset",
".",
"all",
"(",
")",
"}",
"return",
"[",
"key_result_mapping",
".",
"get",
"(",
"kv",
")",
"for",
"kv",
"in",
"original_keyvals",
"]"
] | Works like a map function from keyvals to instances.
Args:
keyvals(list): The list of values of the attribute.
key (str, optional): The attribute to search by. By default, it is
'id'.
Returns:
list: A list of model instances, in the same order as the list of
keyvals.
Examples:
>>> User.get_all([2,5,7, 8000, 11])
[email protected], [email protected], [email protected], None, [email protected]
>>> User.get_all(['[email protected]', '[email protected]'], key='email')
[email protected], [email protected] | [
"Works",
"like",
"a",
"map",
"function",
"from",
"keyvals",
"to",
"instances",
"."
] | python | train |
MaxHalford/starboost | starboost/boosting.py | https://github.com/MaxHalford/starboost/blob/59d96dcc983404cbc326878facd8171fd2655ce1/starboost/boosting.py#L343-L367 | def iter_predict_proba(self, X, include_init=False):
"""Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage
"""
utils.validation.check_is_fitted(self, 'init_estimator_')
X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)
for y_pred in super().iter_predict(X, include_init=include_init):
if len(self.classes_) == 2:
probas[:, 1] = sigmoid(y_pred[:, 0])
probas[:, 0] = 1. - probas[:, 1]
else:
probas[:] = softmax(y_pred)
yield probas | [
"def",
"iter_predict_proba",
"(",
"self",
",",
"X",
",",
"include_init",
"=",
"False",
")",
":",
"utils",
".",
"validation",
".",
"check_is_fitted",
"(",
"self",
",",
"'init_estimator_'",
")",
"X",
"=",
"utils",
".",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"[",
"'csr'",
",",
"'csc'",
"]",
",",
"dtype",
"=",
"None",
",",
"force_all_finite",
"=",
"False",
")",
"probas",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"len",
"(",
"X",
")",
",",
"len",
"(",
"self",
".",
"classes_",
")",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"for",
"y_pred",
"in",
"super",
"(",
")",
".",
"iter_predict",
"(",
"X",
",",
"include_init",
"=",
"include_init",
")",
":",
"if",
"len",
"(",
"self",
".",
"classes_",
")",
"==",
"2",
":",
"probas",
"[",
":",
",",
"1",
"]",
"=",
"sigmoid",
"(",
"y_pred",
"[",
":",
",",
"0",
"]",
")",
"probas",
"[",
":",
",",
"0",
"]",
"=",
"1.",
"-",
"probas",
"[",
":",
",",
"1",
"]",
"else",
":",
"probas",
"[",
":",
"]",
"=",
"softmax",
"(",
"y_pred",
")",
"yield",
"probas"
] | Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage | [
"Returns",
"the",
"predicted",
"probabilities",
"for",
"X",
"at",
"every",
"stage",
"of",
"the",
"boosting",
"procedure",
"."
] | python | train |
biolink/ontobio | ontobio/io/ontol_renderers.py | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L70-L75 | def write_subgraph(self, ontol, nodes, **args):
"""
Write an `ontology` object after inducing a subgraph
"""
subont = ontol.subontology(nodes, **args)
self.write(subont, **args) | [
"def",
"write_subgraph",
"(",
"self",
",",
"ontol",
",",
"nodes",
",",
"*",
"*",
"args",
")",
":",
"subont",
"=",
"ontol",
".",
"subontology",
"(",
"nodes",
",",
"*",
"*",
"args",
")",
"self",
".",
"write",
"(",
"subont",
",",
"*",
"*",
"args",
")"
] | Write an `ontology` object after inducing a subgraph | [
"Write",
"a",
"ontology",
"object",
"after",
"inducing",
"a",
"subgraph"
] | python | train |
saltstack/salt | salt/states/marathon_app.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/marathon_app.py#L122-L157 | def running(name, restart=False, force=True):
'''
Ensure that the marathon app with the given id is present and restart if set.
:param name: The app name/id
:param restart: Restart the app
:param force: Override the current deployment
:return: A standard Salt changes dictionary
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not __salt__['marathon.has_app'](name):
ret['result'] = False
ret['comment'] = 'App {0} cannot be restarted because it is absent'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
qualifier = 'is' if restart else 'is not'
ret['comment'] = 'App {0} {1} set to be restarted'.format(name, qualifier)
return ret
restart_result = __salt__['marathon.restart_app'](name, restart, force)
if 'exception' in restart_result:
ret['result'] = False
ret['comment'] = 'Failed to restart app {0}: {1}'.format(
name,
restart_result['exception']
)
return ret
else:
ret['changes'] = restart_result
ret['result'] = True
qualifier = 'Restarted' if restart else 'Did not restart'
ret['comment'] = '{0} app {1}'.format(qualifier, name)
return ret | [
"def",
"running",
"(",
"name",
",",
"restart",
"=",
"False",
",",
"force",
"=",
"True",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"if",
"not",
"__salt__",
"[",
"'marathon.has_app'",
"]",
"(",
"name",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'App {0} cannot be restarted because it is absent'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"qualifier",
"=",
"'is'",
"if",
"restart",
"else",
"'is not'",
"ret",
"[",
"'comment'",
"]",
"=",
"'App {0} {1} set to be restarted'",
".",
"format",
"(",
"name",
",",
"qualifier",
")",
"return",
"ret",
"restart_result",
"=",
"__salt__",
"[",
"'marathon.restart_app'",
"]",
"(",
"name",
",",
"restart",
",",
"force",
")",
"if",
"'exception'",
"in",
"restart_result",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to restart app {0}: {1}'",
".",
"format",
"(",
"name",
",",
"restart_result",
"[",
"'exception'",
"]",
")",
"return",
"ret",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"restart_result",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"qualifier",
"=",
"'Restarted'",
"if",
"restart",
"else",
"'Did not restart'",
"ret",
"[",
"'comment'",
"]",
"=",
"'{0} app {1}'",
".",
"format",
"(",
"qualifier",
",",
"name",
")",
"return",
"ret"
] | Ensure that the marathon app with the given id is present and restart if set.
:param name: The app name/id
:param restart: Restart the app
:param force: Override the current deployment
:return: A standard Salt changes dictionary | [
"Ensure",
"that",
"the",
"marathon",
"app",
"with",
"the",
"given",
"id",
"is",
"present",
"and",
"restart",
"if",
"set",
"."
] | python | train |
grahambell/pymoc | lib/pymoc/util/tool.py | https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/util/tool.py#L136-L206 | def catalog(self):
"""Create MOC from catalog of coordinates.
This command requires that the Healpy and Astropy libraries
be available. It attempts to load the given catalog,
and merges it with the running MOC.
The name of an ASCII catalog file should be given. The file
should contain either "RA" and "Dec" columns (for ICRS coordinates)
or "Lon" and "Lat" columns (for galactic coordinates). The MOC
order and radius (in arcseconds) can be given with additional
options.
::
pymoctool --catalog coords.txt
[order 12]
[radius 3600]
[unit (hour | deg | rad) (deg | rad)]
[format commented_header]
[inclusive]
Units (if not specified) are assumed to be hours and degrees for ICRS
coordinates and degrees for galactic coordinates. The format, if not
specified (as an Astropy ASCII table format name) is assumed to be
commented header, e.g.:
::
# RA Dec
01:30:00 +45:00:00
22:30:00 +45:00:00
"""
from .catalog import catalog_to_moc, read_ascii_catalog
filename = self.params.pop()
order = 12
radius = 3600
unit = None
format_ = 'commented_header'
kwargs = {}
while self.params:
if self.params[-1] == 'order':
self.params.pop()
order = int(self.params.pop())
elif self.params[-1] == 'radius':
self.params.pop()
radius = float(self.params.pop())
elif self.params[-1] == 'unit':
self.params.pop()
unit_x = self.params.pop()
unit_y = self.params.pop()
unit = (unit_x, unit_y)
elif self.params[-1] == 'format':
self.params.pop()
format_ = self.params.pop()
elif self.params[-1] == 'inclusive':
self.params.pop()
kwargs['inclusive'] = True
else:
break
coords = read_ascii_catalog(filename, format_=format_, unit=unit)
catalog_moc = catalog_to_moc(coords, radius, order, **kwargs)
if self.moc is None:
self.moc = catalog_moc
else:
self.moc += catalog_moc | [
"def",
"catalog",
"(",
"self",
")",
":",
"from",
".",
"catalog",
"import",
"catalog_to_moc",
",",
"read_ascii_catalog",
"filename",
"=",
"self",
".",
"params",
".",
"pop",
"(",
")",
"order",
"=",
"12",
"radius",
"=",
"3600",
"unit",
"=",
"None",
"format_",
"=",
"'commented_header'",
"kwargs",
"=",
"{",
"}",
"while",
"self",
".",
"params",
":",
"if",
"self",
".",
"params",
"[",
"-",
"1",
"]",
"==",
"'order'",
":",
"self",
".",
"params",
".",
"pop",
"(",
")",
"order",
"=",
"int",
"(",
"self",
".",
"params",
".",
"pop",
"(",
")",
")",
"elif",
"self",
".",
"params",
"[",
"-",
"1",
"]",
"==",
"'radius'",
":",
"self",
".",
"params",
".",
"pop",
"(",
")",
"radius",
"=",
"float",
"(",
"self",
".",
"params",
".",
"pop",
"(",
")",
")",
"elif",
"self",
".",
"params",
"[",
"-",
"1",
"]",
"==",
"'unit'",
":",
"self",
".",
"params",
".",
"pop",
"(",
")",
"unit_x",
"=",
"self",
".",
"params",
".",
"pop",
"(",
")",
"unit_y",
"=",
"self",
".",
"params",
".",
"pop",
"(",
")",
"unit",
"=",
"(",
"unit_x",
",",
"unit_y",
")",
"elif",
"self",
".",
"params",
"[",
"-",
"1",
"]",
"==",
"'format'",
":",
"self",
".",
"params",
".",
"pop",
"(",
")",
"format_",
"=",
"self",
".",
"params",
".",
"pop",
"(",
")",
"elif",
"self",
".",
"params",
"[",
"-",
"1",
"]",
"==",
"'inclusive'",
":",
"self",
".",
"params",
".",
"pop",
"(",
")",
"kwargs",
"[",
"'inclusive'",
"]",
"=",
"True",
"else",
":",
"break",
"coords",
"=",
"read_ascii_catalog",
"(",
"filename",
",",
"format_",
"=",
"format_",
",",
"unit",
"=",
"unit",
")",
"catalog_moc",
"=",
"catalog_to_moc",
"(",
"coords",
",",
"radius",
",",
"order",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"moc",
"is",
"None",
":",
"self",
".",
"moc",
"=",
"catalog_moc",
"else",
":",
"self",
".",
"moc",
"+=",
"catalog_moc"
] | Create MOC from catalog of coordinates.
This command requires that the Healpy and Astropy libraries
be available. It attempts to load the given catalog,
and merges it with the running MOC.
The name of an ASCII catalog file should be given. The file
should contain either "RA" and "Dec" columns (for ICRS coordinates)
or "Lon" and "Lat" columns (for galactic coordinates). The MOC
order and radius (in arcseconds) can be given with additional
options.
::
pymoctool --catalog coords.txt
[order 12]
[radius 3600]
[unit (hour | deg | rad) (deg | rad)]
[format commented_header]
[inclusive]
Units (if not specified) are assumed to be hours and degrees for ICRS
coordinates and degrees for galactic coordinates. The format, if not
specified (as an Astropy ASCII table format name) is assumed to be
commented header, e.g.:
::
# RA Dec
01:30:00 +45:00:00
22:30:00 +45:00:00 | [
"Create",
"MOC",
"from",
"catalog",
"of",
"coordinates",
"."
] | python | train |
sods/paramz | paramz/core/indexable.py | https://github.com/sods/paramz/blob/ae6fc6274b70fb723d91e48fc5026a9bc5a06508/paramz/core/indexable.py#L150-L162 | def _raveled_index_for(self, param):
"""
get the raveled index for a param
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param) | [
"def",
"_raveled_index_for",
"(",
"self",
",",
"param",
")",
":",
"from",
".",
".",
"param",
"import",
"ParamConcatenation",
"if",
"isinstance",
"(",
"param",
",",
"ParamConcatenation",
")",
":",
"return",
"np",
".",
"hstack",
"(",
"(",
"self",
".",
"_raveled_index_for",
"(",
"p",
")",
"for",
"p",
"in",
"param",
".",
"params",
")",
")",
"return",
"param",
".",
"_raveled_index",
"(",
")",
"+",
"self",
".",
"_offset_for",
"(",
"param",
")"
] | get the raveled index for a param
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work | [
"get",
"the",
"raveled",
"index",
"for",
"a",
"param",
"that",
"is",
"an",
"int",
"array",
"containing",
"the",
"indexes",
"for",
"the",
"flattened",
"param",
"inside",
"this",
"parameterized",
"logic",
"."
] | python | train |
rwl/pylon | pylon/opf.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/opf.py#L782-L818 | def linear_constraints(self):
""" Returns the linear constraints.
"""
if self.lin_N == 0:
return None, array([]), array([])
A = lil_matrix((self.lin_N, self.var_N), dtype=float64)
l = -Inf * ones(self.lin_N)
u = -l
for lin in self.lin_constraints:
if lin.N: # non-zero number of rows to add
Ak = lin.A # A for kth linear constraint set
i1 = lin.i1 # starting row index
iN = lin.iN # ending row index
vsl = lin.vs # var set list
kN = -1 # initialize last col of Ak used
Ai = lil_matrix((lin.N, self.var_N), dtype=float64)
for v in vsl:
var = self.get_var(v)
j1 = var.i1 # starting column in A
jN = var.iN # ending column in A
k1 = kN + 1 # starting column in Ak
kN = kN + var.N # ending column in Ak
if j1 == jN:
# FIXME: Single column slicing broken in lil.
for i in range(Ai.shape[0]):
Ai[i, j1] = Ak[i, k1]
else:
Ai[:, j1:jN + 1] = Ak[:, k1:kN + 1]
A[i1:iN + 1, :] = Ai
l[i1:iN + 1] = lin.l
u[i1:iN + 1] = lin.u
return A.tocsr(), l, u | [
"def",
"linear_constraints",
"(",
"self",
")",
":",
"if",
"self",
".",
"lin_N",
"==",
"0",
":",
"return",
"None",
",",
"array",
"(",
"[",
"]",
")",
",",
"array",
"(",
"[",
"]",
")",
"A",
"=",
"lil_matrix",
"(",
"(",
"self",
".",
"lin_N",
",",
"self",
".",
"var_N",
")",
",",
"dtype",
"=",
"float64",
")",
"l",
"=",
"-",
"Inf",
"*",
"ones",
"(",
"self",
".",
"lin_N",
")",
"u",
"=",
"-",
"l",
"for",
"lin",
"in",
"self",
".",
"lin_constraints",
":",
"if",
"lin",
".",
"N",
":",
"# non-zero number of rows to add",
"Ak",
"=",
"lin",
".",
"A",
"# A for kth linear constrain set",
"i1",
"=",
"lin",
".",
"i1",
"# starting row index",
"iN",
"=",
"lin",
".",
"iN",
"# ending row index",
"vsl",
"=",
"lin",
".",
"vs",
"# var set list",
"kN",
"=",
"-",
"1",
"# initialize last col of Ak used",
"Ai",
"=",
"lil_matrix",
"(",
"(",
"lin",
".",
"N",
",",
"self",
".",
"var_N",
")",
",",
"dtype",
"=",
"float64",
")",
"for",
"v",
"in",
"vsl",
":",
"var",
"=",
"self",
".",
"get_var",
"(",
"v",
")",
"j1",
"=",
"var",
".",
"i1",
"# starting column in A",
"jN",
"=",
"var",
".",
"iN",
"# ending column in A",
"k1",
"=",
"kN",
"+",
"1",
"# starting column in Ak",
"kN",
"=",
"kN",
"+",
"var",
".",
"N",
"# ending column in Ak",
"if",
"j1",
"==",
"jN",
":",
"# FIXME: Single column slicing broken in lil.",
"for",
"i",
"in",
"range",
"(",
"Ai",
".",
"shape",
"[",
"0",
"]",
")",
":",
"Ai",
"[",
"i",
",",
"j1",
"]",
"=",
"Ak",
"[",
"i",
",",
"k1",
"]",
"else",
":",
"Ai",
"[",
":",
",",
"j1",
":",
"jN",
"+",
"1",
"]",
"=",
"Ak",
"[",
":",
",",
"k1",
":",
"kN",
"+",
"1",
"]",
"A",
"[",
"i1",
":",
"iN",
"+",
"1",
",",
":",
"]",
"=",
"Ai",
"l",
"[",
"i1",
":",
"iN",
"+",
"1",
"]",
"=",
"lin",
".",
"l",
"u",
"[",
"i1",
":",
"iN",
"+",
"1",
"]",
"=",
"lin",
".",
"u",
"return",
"A",
".",
"tocsr",
"(",
")",
",",
"l",
",",
"u"
] | Returns the linear constraints. | [
"Returns",
"the",
"linear",
"constraints",
"."
] | python | train |
CalebBell/thermo | thermo/electrochem.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/electrochem.py#L190-L235 | def Laliberte_viscosity(T, ws, CASRNs):
r'''Calculate the viscosity of an aqueous mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and Pa*s.
.. math::
\mu_m = \mu_w^{w_w} \Pi\mu_i^{w_i}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
mu_i : float
Solute partial viscosity, Pa*s
Notes
-----
Temperature range check is not used here.
Check is performed using NaCl at 5 degC from the first value in [1]_'s spreadsheet.
Examples
--------
>>> Laliberte_viscosity(273.15+5, [0.005810], ['7647-14-5'])
0.0015285828581961414
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
mu_w = Laliberte_viscosity_w(T)*1000.
w_w = 1 - sum(ws)
mu = mu_w**(w_w)
for i in range(len(CASRNs)):
d = _Laliberte_Viscosity_ParametersDict[CASRNs[i]]
mu_i = Laliberte_viscosity_i(T, w_w, d["V1"], d["V2"], d["V3"], d["V4"], d["V5"], d["V6"])*1000.
mu = mu_i**(ws[i])*mu
return mu/1000. | [
"def",
"Laliberte_viscosity",
"(",
"T",
",",
"ws",
",",
"CASRNs",
")",
":",
"mu_w",
"=",
"Laliberte_viscosity_w",
"(",
"T",
")",
"*",
"1000.",
"w_w",
"=",
"1",
"-",
"sum",
"(",
"ws",
")",
"mu",
"=",
"mu_w",
"**",
"(",
"w_w",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"CASRNs",
")",
")",
":",
"d",
"=",
"_Laliberte_Viscosity_ParametersDict",
"[",
"CASRNs",
"[",
"i",
"]",
"]",
"mu_i",
"=",
"Laliberte_viscosity_i",
"(",
"T",
",",
"w_w",
",",
"d",
"[",
"\"V1\"",
"]",
",",
"d",
"[",
"\"V2\"",
"]",
",",
"d",
"[",
"\"V3\"",
"]",
",",
"d",
"[",
"\"V4\"",
"]",
",",
"d",
"[",
"\"V5\"",
"]",
",",
"d",
"[",
"\"V6\"",
"]",
")",
"*",
"1000.",
"mu",
"=",
"mu_i",
"**",
"(",
"ws",
"[",
"i",
"]",
")",
"*",
"mu",
"return",
"mu",
"/",
"1000."
] | r'''Calculate the viscosity of an aqueous mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and Pa*s.
.. math::
\mu_m = \mu_w^{w_w} \Pi\mu_i^{w_i}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
mu_i : float
Solute partial viscosity, Pa*s
Notes
-----
Temperature range check is not used here.
Check is performed using NaCl at 5 degC from the first value in [1]_'s spreadsheet.
Examples
--------
>>> Laliberte_viscosity(273.15+5, [0.005810], ['7647-14-5'])
0.0015285828581961414
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123 | [
"r",
"Calculate",
"the",
"viscosity",
"of",
"an",
"aqueous",
"mixture",
"using",
"the",
"form",
"proposed",
"by",
"[",
"1",
"]",
"_",
".",
"Parameters",
"are",
"loaded",
"by",
"the",
"function",
"as",
"needed",
".",
"Units",
"are",
"Kelvin",
"and",
"Pa",
"*",
"s",
"."
] | python | valid |
jtwhite79/pyemu | pyemu/utils/gw_utils.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/gw_utils.py#L462-L538 | def setup_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",start_datetime="1-1'1970",prefix='',
save_setup_file=False):
""" setup observations of budget volume and flux from modflow list file. writes
an instruction file and also a _setup_.csv to use when constructing a pest
control file
Parameters
----------
list_filename : str
modflow list file
flx_filename : str
output filename that will contain the budget flux observations. Default is
"flux.dat"
vol_filename : str
output filename that will contain the budget volume observations. Default
is "vol.dat"
start_datetime : str
an str that can be parsed into a pandas.TimeStamp. used to give budget
observations meaningful names
prefix : str
a prefix to add to the water budget observations. Useful if processing
more than one list file as part of the forward run process. Default is ''.
save_setup_file : (boolean)
a flag to save _setup_<list_filename>.csv file that contains useful
control file information
Returns
-------
df : pandas.DataFrame
a dataframe with information for constructing a control file. If INSCHEK fails
to run, returns None
Note
----
This function uses INSCHEK to get observation values; the observation values are
the values of the list file list_filename. If INSCHEK fails to run, the observation
values are set to 1.0E+10
the instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file.
"""
flx,vol = apply_mflist_budget_obs(list_filename,flx_filename,vol_filename,
start_datetime)
_write_mflist_ins(flx_filename+".ins",flx,prefix+"flx")
_write_mflist_ins(vol_filename+".ins",vol, prefix+"vol")
#run("inschek {0}.ins {0}".format(flx_filename))
#run("inschek {0}.ins {0}".format(vol_filename))
try:
#os.system("inschek {0}.ins {0}".format(flx_filename))
#os.system("inschek {0}.ins {0}".format(vol_filename))
run("inschek {0}.ins {0}".format(flx_filename))
run("inschek {0}.ins {0}".format(vol_filename))
except:
print("error running inschek")
return None
flx_obf = flx_filename+".obf"
vol_obf = vol_filename + ".obf"
if os.path.exists(flx_obf) and os.path.exists(vol_obf):
df = pd.read_csv(flx_obf,delim_whitespace=True,header=None,names=["obsnme","obsval"])
df.loc[:,"obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df2 = pd.read_csv(vol_obf, delim_whitespace=True, header=None, names=["obsnme", "obsval"])
df2.loc[:, "obgnme"] = df2.obsnme.apply(lambda x: x[:-9])
df = df.append(df2)
if save_setup_file:
df.to_csv("_setup_"+os.path.split(list_filename)[-1]+'.csv',index=False)
df.index = df.obsnme
return df | [
"def",
"setup_mflist_budget_obs",
"(",
"list_filename",
",",
"flx_filename",
"=",
"\"flux.dat\"",
",",
"vol_filename",
"=",
"\"vol.dat\"",
",",
"start_datetime",
"=",
"\"1-1'1970\"",
",",
"prefix",
"=",
"''",
",",
"save_setup_file",
"=",
"False",
")",
":",
"flx",
",",
"vol",
"=",
"apply_mflist_budget_obs",
"(",
"list_filename",
",",
"flx_filename",
",",
"vol_filename",
",",
"start_datetime",
")",
"_write_mflist_ins",
"(",
"flx_filename",
"+",
"\".ins\"",
",",
"flx",
",",
"prefix",
"+",
"\"flx\"",
")",
"_write_mflist_ins",
"(",
"vol_filename",
"+",
"\".ins\"",
",",
"vol",
",",
"prefix",
"+",
"\"vol\"",
")",
"#run(\"inschek {0}.ins {0}\".format(flx_filename))",
"#run(\"inschek {0}.ins {0}\".format(vol_filename))",
"try",
":",
"#os.system(\"inschek {0}.ins {0}\".format(flx_filename))",
"#os.system(\"inschek {0}.ins {0}\".format(vol_filename))",
"run",
"(",
"\"inschek {0}.ins {0}\"",
".",
"format",
"(",
"flx_filename",
")",
")",
"run",
"(",
"\"inschek {0}.ins {0}\"",
".",
"format",
"(",
"vol_filename",
")",
")",
"except",
":",
"print",
"(",
"\"error running inschek\"",
")",
"return",
"None",
"flx_obf",
"=",
"flx_filename",
"+",
"\".obf\"",
"vol_obf",
"=",
"vol_filename",
"+",
"\".obf\"",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"flx_obf",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"vol_obf",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"flx_obf",
",",
"delim_whitespace",
"=",
"True",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"\"obsnme\"",
",",
"\"obsval\"",
"]",
")",
"df",
".",
"loc",
"[",
":",
",",
"\"obgnme\"",
"]",
"=",
"df",
".",
"obsnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"[",
":",
"-",
"9",
"]",
")",
"df2",
"=",
"pd",
".",
"read_csv",
"(",
"vol_obf",
",",
"delim_whitespace",
"=",
"True",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"\"obsnme\"",
",",
"\"obsval\"",
"]",
")",
"df2",
".",
"loc",
"[",
":",
",",
"\"obgnme\"",
"]",
"=",
"df2",
".",
"obsnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"[",
":",
"-",
"9",
"]",
")",
"df",
"=",
"df",
".",
"append",
"(",
"df2",
")",
"if",
"save_setup_file",
":",
"df",
".",
"to_csv",
"(",
"\"_setup_\"",
"+",
"os",
".",
"path",
".",
"split",
"(",
"list_filename",
")",
"[",
"-",
"1",
"]",
"+",
"'.csv'",
",",
"index",
"=",
"False",
")",
"df",
".",
"index",
"=",
"df",
".",
"obsnme",
"return",
"df"
] | setup observations of budget volume and flux from modflow list file. writes
an instruction file and also a _setup_.csv to use when constructing a pest
control file
Parameters
----------
list_filename : str
modflow list file
flx_filename : str
output filename that will contain the budget flux observations. Default is
"flux.dat"
vol_filename : str
output filename that will contain the budget volume observations. Default
is "vol.dat"
start_datetime : str
an str that can be parsed into a pandas.TimeStamp. used to give budget
observations meaningful names
prefix : str
a prefix to add to the water budget observations. Useful if processing
more than one list file as part of the forward run process. Default is ''.
save_setup_file : (boolean)
a flag to save _setup_<list_filename>.csv file that contains useful
control file information
Returns
-------
df : pandas.DataFrame
a dataframe with information for constructing a control file. If INSCHEK fails
to run, returns None
Note
----
This function uses INSCHEK to get observation values; the observation values are
the values of the list file list_filename. If INSCHEK fails to run, the observation
values are set to 1.0E+10
the instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file. | [
"setup",
"observations",
"of",
"budget",
"volume",
"and",
"flux",
"from",
"modflow",
"list",
"file",
".",
"writes",
"an",
"instruction",
"file",
"and",
"also",
"a",
"_setup_",
".",
"csv",
"to",
"use",
"when",
"constructing",
"a",
"pest",
"control",
"file"
] | python | train |
QualiSystems/vCenterShell | package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py | https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py#L413-L491 | def clone_vm(self, clone_params, logger, cancellation_context):
"""
Clone a VM from a template/VM and return the vm object or throws argument is not valid
:param cancellation_context:
:param clone_params: CloneVmParameters =
:param logger:
"""
result = self.CloneVmResult()
if not isinstance(clone_params.si, self.vim.ServiceInstance):
result.error = 'si must be init as ServiceInstance'
return result
if clone_params.template_name is None:
result.error = 'template_name param cannot be None'
return result
if clone_params.vm_name is None:
result.error = 'vm_name param cannot be None'
return result
if clone_params.vm_folder is None:
result.error = 'vm_folder param cannot be None'
return result
datacenter = self.get_datacenter(clone_params)
dest_folder = self._get_destination_folder(clone_params)
vm_location = VMLocation.create_from_full_path(clone_params.template_name)
template = self._get_template(clone_params, vm_location)
snapshot = self._get_snapshot(clone_params, template)
resource_pool, host = self._get_resource_pool(datacenter.name, clone_params)
if not resource_pool and not host:
raise ValueError('The specifed host, cluster or resource pool could not be found')
'# set relo_spec'
placement = self.vim.vm.RelocateSpec()
if resource_pool:
placement.pool = resource_pool
if host:
placement.host = host
clone_spec = self.vim.vm.CloneSpec()
if snapshot:
clone_spec.snapshot = snapshot
clone_spec.template = False
placement.diskMoveType = 'createNewChildDiskBacking'
placement.datastore = self._get_datastore(clone_params)
# after deployment the vm must be powered off and will be powered on if needed by orchestration driver
clone_spec.location = placement
# clone_params.power_on
# due to hotfix 1 for release 1.0,
clone_spec.powerOn = False
logger.info("cloning VM...")
try:
task = template.Clone(folder=dest_folder, name=clone_params.vm_name, spec=clone_spec)
vm = self.task_waiter.wait_for_task(task=task, logger=logger, action_name='Clone VM',
cancellation_context=cancellation_context)
except TaskFaultException:
raise
except vim.fault.NoPermission as error:
logger.error("vcenter returned - no permission: {0}".format(error))
raise Exception('Permissions is not set correctly, please check the log for more info.')
except Exception as e:
logger.error("error deploying: {0}".format(e))
raise Exception('Error has occurred while deploying, please look at the log for more info.')
result.vm = vm
return result | [
"def",
"clone_vm",
"(",
"self",
",",
"clone_params",
",",
"logger",
",",
"cancellation_context",
")",
":",
"result",
"=",
"self",
".",
"CloneVmResult",
"(",
")",
"if",
"not",
"isinstance",
"(",
"clone_params",
".",
"si",
",",
"self",
".",
"vim",
".",
"ServiceInstance",
")",
":",
"result",
".",
"error",
"=",
"'si must be init as ServiceInstance'",
"return",
"result",
"if",
"clone_params",
".",
"template_name",
"is",
"None",
":",
"result",
".",
"error",
"=",
"'template_name param cannot be None'",
"return",
"result",
"if",
"clone_params",
".",
"vm_name",
"is",
"None",
":",
"result",
".",
"error",
"=",
"'vm_name param cannot be None'",
"return",
"result",
"if",
"clone_params",
".",
"vm_folder",
"is",
"None",
":",
"result",
".",
"error",
"=",
"'vm_folder param cannot be None'",
"return",
"result",
"datacenter",
"=",
"self",
".",
"get_datacenter",
"(",
"clone_params",
")",
"dest_folder",
"=",
"self",
".",
"_get_destination_folder",
"(",
"clone_params",
")",
"vm_location",
"=",
"VMLocation",
".",
"create_from_full_path",
"(",
"clone_params",
".",
"template_name",
")",
"template",
"=",
"self",
".",
"_get_template",
"(",
"clone_params",
",",
"vm_location",
")",
"snapshot",
"=",
"self",
".",
"_get_snapshot",
"(",
"clone_params",
",",
"template",
")",
"resource_pool",
",",
"host",
"=",
"self",
".",
"_get_resource_pool",
"(",
"datacenter",
".",
"name",
",",
"clone_params",
")",
"if",
"not",
"resource_pool",
"and",
"not",
"host",
":",
"raise",
"ValueError",
"(",
"'The specifed host, cluster or resource pool could not be found'",
")",
"'# set relo_spec'",
"placement",
"=",
"self",
".",
"vim",
".",
"vm",
".",
"RelocateSpec",
"(",
")",
"if",
"resource_pool",
":",
"placement",
".",
"pool",
"=",
"resource_pool",
"if",
"host",
":",
"placement",
".",
"host",
"=",
"host",
"clone_spec",
"=",
"self",
".",
"vim",
".",
"vm",
".",
"CloneSpec",
"(",
")",
"if",
"snapshot",
":",
"clone_spec",
".",
"snapshot",
"=",
"snapshot",
"clone_spec",
".",
"template",
"=",
"False",
"placement",
".",
"diskMoveType",
"=",
"'createNewChildDiskBacking'",
"placement",
".",
"datastore",
"=",
"self",
".",
"_get_datastore",
"(",
"clone_params",
")",
"# after deployment the vm must be powered off and will be powered on if needed by orchestration driver",
"clone_spec",
".",
"location",
"=",
"placement",
"# clone_params.power_on",
"# due to hotfix 1 for release 1.0,",
"clone_spec",
".",
"powerOn",
"=",
"False",
"logger",
".",
"info",
"(",
"\"cloning VM...\"",
")",
"try",
":",
"task",
"=",
"template",
".",
"Clone",
"(",
"folder",
"=",
"dest_folder",
",",
"name",
"=",
"clone_params",
".",
"vm_name",
",",
"spec",
"=",
"clone_spec",
")",
"vm",
"=",
"self",
".",
"task_waiter",
".",
"wait_for_task",
"(",
"task",
"=",
"task",
",",
"logger",
"=",
"logger",
",",
"action_name",
"=",
"'Clone VM'",
",",
"cancellation_context",
"=",
"cancellation_context",
")",
"except",
"TaskFaultException",
":",
"raise",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"vcenter returned - no permission: {0}\"",
".",
"format",
"(",
"error",
")",
")",
"raise",
"Exception",
"(",
"'Permissions is not set correctly, please check the log for more info.'",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"error deploying: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"raise",
"Exception",
"(",
"'Error has occurred while deploying, please look at the log for more info.'",
")",
"result",
".",
"vm",
"=",
"vm",
"return",
"result"
] | Clone a VM from a template/VM and return the vm object or throws argument is not valid
:param cancellation_context:
:param clone_params: CloneVmParameters =
:param logger: | [
"Clone",
"a",
"VM",
"from",
"a",
"template",
"/",
"VM",
"and",
"return",
"the",
"vm",
"oject",
"or",
"throws",
"argument",
"is",
"not",
"valid"
] | python | train |
cokelaer/spectrum | src/spectrum/covar.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/covar.py#L260-L328 | def arcovar(x, order):
r"""Simple and fast implementation of the covariance AR estimate
This code is 10 times faster than :func:`arcovar_marple` and more importantly
only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple`
:param array X: Array of complex data samples
:param int oder: Order of linear prediction model
:return:
* a - Array of complex forward linear prediction coefficients
* e - error
The covariance method fits a Pth order autoregressive (AR) model to the
input signal, which is assumed to be the output of
an AR system driven by white noise. This method minimizes the forward
prediction error in the least-squares sense. The output vector
contains the normalized estimate of the AR system parameters
The white noise input variance estimate is also returned.
If is the power spectral density of y(n), then:
.. math:: \frac{e}{\left| A(e^{jw}) \right|^2} = \frac{e}{\left| 1+\sum_{k=1}^P a(k)e^{-jwk}\right|^2}
Because the method characterizes the input data using an all-pole model,
the correct choice of the model order p is important.
.. plot::
:width: 80%
:include-source:
from spectrum import arcovar, marple_data, arma2psd
from pylab import plot, log10, linspace, axis
ar_values, error = arcovar(marple_data, 15)
psd = arma2psd(ar_values, sides='centerdc')
plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))
axis([-0.5, 0.5, -60, 0])
.. seealso:: :class:`pcovar`
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar_marple`.
:References: [Mathworks]_
"""
from spectrum import corrmtx
import scipy.linalg
X = corrmtx(x, order, 'covariance')
Xc = np.matrix(X[:, 1:])
X1 = np.array(X[:, 0])
# Coefficients estimated via the covariance method
# Here we use lstsq rather than solve function because Xc is not square
# matrix
a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1)
# Estimate the input white noise variance
Cz = np.dot(X1.conj().transpose(), Xc)
e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)
assert e.imag < 1e-4, 'weird behaviour'
e = float(e.real) # ignore imag part that should be small
return a, e | [
"def",
"arcovar",
"(",
"x",
",",
"order",
")",
":",
"from",
"spectrum",
"import",
"corrmtx",
"import",
"scipy",
".",
"linalg",
"X",
"=",
"corrmtx",
"(",
"x",
",",
"order",
",",
"'covariance'",
")",
"Xc",
"=",
"np",
".",
"matrix",
"(",
"X",
"[",
":",
",",
"1",
":",
"]",
")",
"X1",
"=",
"np",
".",
"array",
"(",
"X",
"[",
":",
",",
"0",
"]",
")",
"# Coefficients estimated via the covariance method",
"# Here we use lstsq rathre than solve function because Xc is not square",
"# matrix",
"a",
",",
"_residues",
",",
"_rank",
",",
"_singular_values",
"=",
"scipy",
".",
"linalg",
".",
"lstsq",
"(",
"-",
"Xc",
",",
"X1",
")",
"# Estimate the input white noise variance",
"Cz",
"=",
"np",
".",
"dot",
"(",
"X1",
".",
"conj",
"(",
")",
".",
"transpose",
"(",
")",
",",
"Xc",
")",
"e",
"=",
"np",
".",
"dot",
"(",
"X1",
".",
"conj",
"(",
")",
".",
"transpose",
"(",
")",
",",
"X1",
")",
"+",
"np",
".",
"dot",
"(",
"Cz",
",",
"a",
")",
"assert",
"e",
".",
"imag",
"<",
"1e-4",
",",
"'wierd behaviour'",
"e",
"=",
"float",
"(",
"e",
".",
"real",
")",
"# ignore imag part that should be small",
"return",
"a",
",",
"e"
] | r"""Simple and fast implementation of the covariance AR estimate
This code is 10 times faster than :func:`arcovar_marple` and more importantly
only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple`
:param array X: Array of complex data samples
:param int oder: Order of linear prediction model
:return:
* a - Array of complex forward linear prediction coefficients
* e - error
The covariance method fits a Pth order autoregressive (AR) model to the
input signal, which is assumed to be the output of
an AR system driven by white noise. This method minimizes the forward
prediction error in the least-squares sense. The output vector
contains the normalized estimate of the AR system parameters
The white noise input variance estimate is also returned.
If is the power spectral density of y(n), then:
.. math:: \frac{e}{\left| A(e^{jw}) \right|^2} = \frac{e}{\left| 1+\sum_{k=1}^P a(k)e^{-jwk}\right|^2}
Because the method characterizes the input data using an all-pole model,
the correct choice of the model order p is important.
.. plot::
:width: 80%
:include-source:
from spectrum import arcovar, marple_data, arma2psd
from pylab import plot, log10, linspace, axis
ar_values, error = arcovar(marple_data, 15)
psd = arma2psd(ar_values, sides='centerdc')
plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))
axis([-0.5, 0.5, -60, 0])
.. seealso:: :class:`pcovar`
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar_marple`.
:References: [Mathworks]_ | [
"r",
"Simple",
"and",
"fast",
"implementation",
"of",
"the",
"covariance",
"AR",
"estimate"
] | python | valid |
lltk/lltk | lltk/scrapers/verbix.py | https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/scrapers/verbix.py#L22-L27 | def _normalize(self, string):
''' Returns a sanitized string. '''
string = string.replace(u'\xa0', '')
string = string.strip()
return string | [
"def",
"_normalize",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"u'\\xa0'",
",",
"''",
")",
"string",
"=",
"string",
".",
"strip",
"(",
")",
"return",
"string"
] | Returns a sanitized string. | [
"Returns",
"a",
"sanitized",
"string",
"."
] | python | train |
ChargePoint/pydnp3 | examples/master_cmd.py | https://github.com/ChargePoint/pydnp3/blob/5bcd8240d1fc0aa1579e71f2efcab63b4c61c547/examples/master_cmd.py#L150-L152 | def do_scan_range(self, line):
"""Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range"""
self.application.master.ScanRange(opendnp3.GroupVariationID(1, 2), 0, 3, opendnp3.TaskConfig().Default()) | [
"def",
"do_scan_range",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"application",
".",
"master",
".",
"ScanRange",
"(",
"opendnp3",
".",
"GroupVariationID",
"(",
"1",
",",
"2",
")",
",",
"0",
",",
"3",
",",
"opendnp3",
".",
"TaskConfig",
"(",
")",
".",
"Default",
"(",
")",
")"
] | Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range | [
"Do",
"an",
"ad",
"-",
"hoc",
"scan",
"of",
"a",
"range",
"of",
"points",
"(",
"group",
"1",
"variation",
"2",
"indexes",
"0",
"-",
"3",
")",
".",
"Command",
"syntax",
"is",
":",
"scan_range"
] | python | valid |
sorgerlab/indra | indra/assemblers/pybel/assembler.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pybel/assembler.py#L320-L344 | def _assemble_conversion(self, stmt):
"""Example: p(HGNC:HK1) => rxn(reactants(a(CHEBI:"CHEBI:17634")),
products(a(CHEBI:"CHEBI:4170")))"""
pybel_lists = ([], [])
for pybel_list, agent_list in \
zip(pybel_lists, (stmt.obj_from, stmt.obj_to)):
for agent in agent_list:
node = _get_agent_grounding(agent)
# TODO check for missing grounding?
pybel_list.append(node)
rxn_node_data = reaction(
reactants=pybel_lists[0],
products=pybel_lists[1],
)
obj_node = self.model.add_node_from_data(rxn_node_data)
obj_edge = None # TODO: Any edge information possible here?
# Add node for controller, if there is one
if stmt.subj is not None:
subj_attr, subj_edge = _get_agent_node(stmt.subj)
subj_node = self.model.add_node_from_data(subj_attr)
edge_data_list = _combine_edge_data(pc.DIRECTLY_INCREASES,
subj_edge, obj_edge, stmt.evidence)
for edge_data in edge_data_list:
self.model.add_edge(subj_node, obj_node, **edge_data) | [
"def",
"_assemble_conversion",
"(",
"self",
",",
"stmt",
")",
":",
"pybel_lists",
"=",
"(",
"[",
"]",
",",
"[",
"]",
")",
"for",
"pybel_list",
",",
"agent_list",
"in",
"zip",
"(",
"pybel_lists",
",",
"(",
"stmt",
".",
"obj_from",
",",
"stmt",
".",
"obj_to",
")",
")",
":",
"for",
"agent",
"in",
"agent_list",
":",
"node",
"=",
"_get_agent_grounding",
"(",
"agent",
")",
"# TODO check for missing grounding?",
"pybel_list",
".",
"append",
"(",
"node",
")",
"rxn_node_data",
"=",
"reaction",
"(",
"reactants",
"=",
"pybel_lists",
"[",
"0",
"]",
",",
"products",
"=",
"pybel_lists",
"[",
"1",
"]",
",",
")",
"obj_node",
"=",
"self",
".",
"model",
".",
"add_node_from_data",
"(",
"rxn_node_data",
")",
"obj_edge",
"=",
"None",
"# TODO: Any edge information possible here?",
"# Add node for controller, if there is one",
"if",
"stmt",
".",
"subj",
"is",
"not",
"None",
":",
"subj_attr",
",",
"subj_edge",
"=",
"_get_agent_node",
"(",
"stmt",
".",
"subj",
")",
"subj_node",
"=",
"self",
".",
"model",
".",
"add_node_from_data",
"(",
"subj_attr",
")",
"edge_data_list",
"=",
"_combine_edge_data",
"(",
"pc",
".",
"DIRECTLY_INCREASES",
",",
"subj_edge",
",",
"obj_edge",
",",
"stmt",
".",
"evidence",
")",
"for",
"edge_data",
"in",
"edge_data_list",
":",
"self",
".",
"model",
".",
"add_edge",
"(",
"subj_node",
",",
"obj_node",
",",
"*",
"*",
"edge_data",
")"
] | Example: p(HGNC:HK1) => rxn(reactants(a(CHEBI:"CHEBI:17634")),
products(a(CHEBI:"CHEBI:4170"))) | [
"Example",
":",
"p",
"(",
"HGNC",
":",
"HK1",
")",
"=",
">",
"rxn",
"(",
"reactants",
"(",
"a",
"(",
"CHEBI",
":",
"CHEBI",
":",
"17634",
"))",
"products",
"(",
"a",
"(",
"CHEBI",
":",
"CHEBI",
":",
"4170",
")))"
] | python | train |
aetros/aetros-cli | aetros/git.py | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L941-L955 | def commit_index(self, message):
"""
Commit the current index.
:param message: str
:return: str the generated commit sha
"""
tree_id = self.write_tree()
args = ['commit-tree', tree_id, '-p', self.ref_head]
# todo, this can end in a race-condition with other processes adding commits
commit = self.command_exec(args, message)[0].decode('utf-8').strip()
self.command_exec(['update-ref', self.ref_head, commit])
return commit | [
"def",
"commit_index",
"(",
"self",
",",
"message",
")",
":",
"tree_id",
"=",
"self",
".",
"write_tree",
"(",
")",
"args",
"=",
"[",
"'commit-tree'",
",",
"tree_id",
",",
"'-p'",
",",
"self",
".",
"ref_head",
"]",
"# todo, this can end in a race-condition with other processes adding commits",
"commit",
"=",
"self",
".",
"command_exec",
"(",
"args",
",",
"message",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"self",
".",
"command_exec",
"(",
"[",
"'update-ref'",
",",
"self",
".",
"ref_head",
",",
"commit",
"]",
")",
"return",
"commit"
] | Commit the current index.
:param message: str
:return: str the generated commit sha | [
"Commit",
"the",
"current",
"index",
".",
":",
"param",
"message",
":",
"str",
":",
"return",
":",
"str",
"the",
"generated",
"commit",
"sha"
] | python | train |
mseclab/PyJFuzz | pyjfuzz/core/pjf_mutators.py | https://github.com/mseclab/PyJFuzz/blob/f777067076f62c9ab74ffea6e90fd54402b7a1b4/pyjfuzz/core/pjf_mutators.py#L127-L131 | def _get_random(self, obj_type):
"""
Get a random mutator from a list of mutators
"""
return self.mutator[obj_type][random.randint(0, self.config.level)] | [
"def",
"_get_random",
"(",
"self",
",",
"obj_type",
")",
":",
"return",
"self",
".",
"mutator",
"[",
"obj_type",
"]",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"self",
".",
"config",
".",
"level",
")",
"]"
] | Get a random mutator from a list of mutators | [
"Get",
"a",
"random",
"mutator",
"from",
"a",
"list",
"of",
"mutators"
] | python | test |
MisterY/price-database | pricedb/csv.py | https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L50-L75 | def parse_line(self, line: str) -> PriceModel:
""" Parse a CSV line into a price element """
line = line.rstrip()
parts = line.split(',')
result = PriceModel()
# symbol
result.symbol = self.translate_symbol(parts[0])
# value
result.value = Decimal(parts[1])
# date
date_str = parts[2]
date_str = date_str.replace('"', '')
date_parts = date_str.split('/')
year_str = date_parts[2]
month_str = date_parts[1]
day_str = date_parts[0]
logging.debug(f"parsing {date_parts} into date")
result.datetime = datetime(int(year_str), int(month_str), int(day_str))
return result | [
"def",
"parse_line",
"(",
"self",
",",
"line",
":",
"str",
")",
"->",
"PriceModel",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"parts",
"=",
"line",
".",
"split",
"(",
"','",
")",
"result",
"=",
"PriceModel",
"(",
")",
"# symbol",
"result",
".",
"symbol",
"=",
"self",
".",
"translate_symbol",
"(",
"parts",
"[",
"0",
"]",
")",
"# value",
"result",
".",
"value",
"=",
"Decimal",
"(",
"parts",
"[",
"1",
"]",
")",
"# date",
"date_str",
"=",
"parts",
"[",
"2",
"]",
"date_str",
"=",
"date_str",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
"date_parts",
"=",
"date_str",
".",
"split",
"(",
"'/'",
")",
"year_str",
"=",
"date_parts",
"[",
"2",
"]",
"month_str",
"=",
"date_parts",
"[",
"1",
"]",
"day_str",
"=",
"date_parts",
"[",
"0",
"]",
"logging",
".",
"debug",
"(",
"f\"parsing {date_parts} into date\"",
")",
"result",
".",
"datetime",
"=",
"datetime",
"(",
"int",
"(",
"year_str",
")",
",",
"int",
"(",
"month_str",
")",
",",
"int",
"(",
"day_str",
")",
")",
"return",
"result"
] | Parse a CSV line into a price element | [
"Parse",
"a",
"CSV",
"line",
"into",
"a",
"price",
"element"
] | python | test |
Rockhopper-Technologies/pluginlib | pluginlib/_objects.py | https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L119-L165 | def _filter(self, blacklist=None, newest_only=False, type_filter=None, **kwargs):
"""
Args:
blacklist(tuple): Iterable of BlacklistEntry objects
newest_only(bool): Only the newest version of each plugin is returned
type(str): Plugin type to retrieve
name(str): Plugin name to retrieve
version(str): Plugin version to retrieve
Returns nested dictionary of plugins
If a blacklist is supplied, plugins are evaluated against the blacklist entries
"""
plugins = DictWithDotNotation()
filtered_name = kwargs.get(self._key_attr, None)
for key, val in self._items(type_filter, filtered_name):
plugin_blacklist = None
skip = False
if blacklist:
# Assume blacklist is correct format since it is checked by PluginLoader
plugin_blacklist = []
for entry in blacklist:
if getattr(entry, self._key_attr) not in (key, None):
continue
if all(getattr(entry, attr) is None for attr in self._bl_skip_attrs):
if not self._skip_empty:
plugins[key] = None if filtered_name else self._bl_empty()
skip = True
break
plugin_blacklist.append(entry)
if not skip:
# pylint: disable=protected-access
result = val._filter(plugin_blacklist, newest_only=newest_only, **kwargs)
if result or not self._skip_empty:
plugins[key] = result
if filtered_name:
return plugins.get(filtered_name, None)
return plugins | [
"def",
"_filter",
"(",
"self",
",",
"blacklist",
"=",
"None",
",",
"newest_only",
"=",
"False",
",",
"type_filter",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"plugins",
"=",
"DictWithDotNotation",
"(",
")",
"filtered_name",
"=",
"kwargs",
".",
"get",
"(",
"self",
".",
"_key_attr",
",",
"None",
")",
"for",
"key",
",",
"val",
"in",
"self",
".",
"_items",
"(",
"type_filter",
",",
"filtered_name",
")",
":",
"plugin_blacklist",
"=",
"None",
"skip",
"=",
"False",
"if",
"blacklist",
":",
"# Assume blacklist is correct format since it is checked by PluginLoade",
"plugin_blacklist",
"=",
"[",
"]",
"for",
"entry",
"in",
"blacklist",
":",
"if",
"getattr",
"(",
"entry",
",",
"self",
".",
"_key_attr",
")",
"not",
"in",
"(",
"key",
",",
"None",
")",
":",
"continue",
"if",
"all",
"(",
"getattr",
"(",
"entry",
",",
"attr",
")",
"is",
"None",
"for",
"attr",
"in",
"self",
".",
"_bl_skip_attrs",
")",
":",
"if",
"not",
"self",
".",
"_skip_empty",
":",
"plugins",
"[",
"key",
"]",
"=",
"None",
"if",
"filtered_name",
"else",
"self",
".",
"_bl_empty",
"(",
")",
"skip",
"=",
"True",
"break",
"plugin_blacklist",
".",
"append",
"(",
"entry",
")",
"if",
"not",
"skip",
":",
"# pylint: disable=protected-access",
"result",
"=",
"val",
".",
"_filter",
"(",
"plugin_blacklist",
",",
"newest_only",
"=",
"newest_only",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
"or",
"not",
"self",
".",
"_skip_empty",
":",
"plugins",
"[",
"key",
"]",
"=",
"result",
"if",
"filtered_name",
":",
"return",
"plugins",
".",
"get",
"(",
"filtered_name",
",",
"None",
")",
"return",
"plugins"
] | Args:
blacklist(tuple): Iterable of BlacklistEntry objects
newest_only(bool): Only the newest version of each plugin is returned
type(str): Plugin type to retrieve
name(str): Plugin name to retrieve
version(str): Plugin version to retrieve
Returns nested dictionary of plugins
If a blacklist is supplied, plugins are evaluated against the blacklist entries | [
"Args",
":",
"blacklist",
"(",
"tuple",
")",
":",
"Iterable",
"of",
"of",
"BlacklistEntry",
"objects",
"newest_only",
"(",
"bool",
")",
":",
"Only",
"the",
"newest",
"version",
"of",
"each",
"plugin",
"is",
"returned",
"type",
"(",
"str",
")",
":",
"Plugin",
"type",
"to",
"retrieve",
"name",
"(",
"str",
")",
":",
"Plugin",
"name",
"to",
"retrieve",
"version",
"(",
"str",
")",
":",
"Plugin",
"version",
"to",
"retrieve"
] | python | train |
Min-ops/cruddy | cruddy/lambdaclient.py | https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/lambdaclient.py#L62-L68 | def call_operation(self, operation, **kwargs):
"""
A generic method to call any operation supported by the Lambda handler
"""
data = {'operation': operation}
data.update(kwargs)
return self.invoke(data) | [
"def",
"call_operation",
"(",
"self",
",",
"operation",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"'operation'",
":",
"operation",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"invoke",
"(",
"data",
")"
] | A generic method to call any operation supported by the Lambda handler | [
"A",
"generic",
"method",
"to",
"call",
"any",
"operation",
"supported",
"by",
"the",
"Lambda",
"handler"
] | python | train |
jgillick/LendingClub | lendingclub/__init__.py | https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/__init__.py#L712-L808 | def search_my_notes(self, loan_id=None, order_id=None, grade=None, portfolio_name=None, status=None, term=None):
"""
Search for notes you are invested in. Use the parameters to define how to search.
Passing no parameters is the same as calling `my_notes(get_all=True)`
Parameters
----------
loan_id : int, optional
Search for notes for a specific loan. Since a loan is broken up into a pool of notes, it's possible
to invest multiple notes in a single loan
order_id : int, optional
Search for notes from a particular investment order.
grade : {A, B, C, D, E, F, G}, optional
Match by a particular loan grade
portfolio_name : string, optional
Search for notes in a portfolio with this name (case sensitive)
status : string, {issued, in-review, in-funding, current, charged-off, late, in-grace-period, fully-paid}, optional
The funding status string.
term : {60, 36}, optional
Term length, either 60 or 36 (for 5 year and 3 year, respectively)
Returns
-------
dict
A dictionary with a list of matching notes on the `loans` key
"""
assert grade is None or type(grade) is str, 'grade must be a string'
assert portfolio_name is None or type(portfolio_name) is str, 'portfolio_name must be a string'
index = 0
found = []
sort_by = 'orderId' if order_id is not None else 'loanId'
group_id = order_id if order_id is not None else loan_id # first match by order, then by loan
# Normalize grade
if grade is not None:
grade = grade[0].upper()
# Normalize status
if status is not None:
status = re.sub('[^a-zA-Z\-]', ' ', status.lower()) # remove all non alpha characters
status = re.sub('days', ' ', status) # remove days
status = re.sub('\s+', '-', status.strip()) # replace spaces with dash
status = re.sub('(^-+)|(-+$)', '', status)
while True:
notes = self.my_notes(start_index=index, sort_by=sort_by)
if notes['result'] != 'success':
break
# If the first note has a higher ID, we've passed it
if group_id is not None and notes['loans'][0][sort_by] > group_id:
break
# If the last note has a higher ID, it could be in this record set
if group_id is None or notes['loans'][-1][sort_by] >= group_id:
for note in notes['loans']:
# Order ID, no match
if order_id is not None and note['orderId'] != order_id:
continue
# Loan ID, no match
if loan_id is not None and note['loanId'] != loan_id:
continue
# Grade, no match
if grade is not None and note['rate'][0] != grade:
continue
# Portfolio, no match
if portfolio_name is not None and note['portfolioName'][0] != portfolio_name:
continue
# Term, no match
if term is not None and note['loanLength'] != term:
continue
# Status
if status is not None:
# Normalize status message
nstatus = re.sub('[^a-zA-Z\-]', ' ', note['status'].lower()) # remove all non alpha characters
nstatus = re.sub('days', ' ', nstatus) # remove days
nstatus = re.sub('\s+', '-', nstatus.strip()) # replace spaces with dash
nstatus = re.sub('(^-+)|(-+$)', '', nstatus)
# No match
if nstatus != status:
continue
# Must be a match
found.append(note)
index += 100
return found | [
"def",
"search_my_notes",
"(",
"self",
",",
"loan_id",
"=",
"None",
",",
"order_id",
"=",
"None",
",",
"grade",
"=",
"None",
",",
"portfolio_name",
"=",
"None",
",",
"status",
"=",
"None",
",",
"term",
"=",
"None",
")",
":",
"assert",
"grade",
"is",
"None",
"or",
"type",
"(",
"grade",
")",
"is",
"str",
",",
"'grade must be a string'",
"assert",
"portfolio_name",
"is",
"None",
"or",
"type",
"(",
"portfolio_name",
")",
"is",
"str",
",",
"'portfolio_name must be a string'",
"index",
"=",
"0",
"found",
"=",
"[",
"]",
"sort_by",
"=",
"'orderId'",
"if",
"order_id",
"is",
"not",
"None",
"else",
"'loanId'",
"group_id",
"=",
"order_id",
"if",
"order_id",
"is",
"not",
"None",
"else",
"loan_id",
"# first match by order, then by loan",
"# Normalize grade",
"if",
"grade",
"is",
"not",
"None",
":",
"grade",
"=",
"grade",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"# Normalize status",
"if",
"status",
"is",
"not",
"None",
":",
"status",
"=",
"re",
".",
"sub",
"(",
"'[^a-zA-Z\\-]'",
",",
"' '",
",",
"status",
".",
"lower",
"(",
")",
")",
"# remove all non alpha characters",
"status",
"=",
"re",
".",
"sub",
"(",
"'days'",
",",
"' '",
",",
"status",
")",
"# remove days",
"status",
"=",
"re",
".",
"sub",
"(",
"'\\s+'",
",",
"'-'",
",",
"status",
".",
"strip",
"(",
")",
")",
"# replace spaces with dash",
"status",
"=",
"re",
".",
"sub",
"(",
"'(^-+)|(-+$)'",
",",
"''",
",",
"status",
")",
"while",
"True",
":",
"notes",
"=",
"self",
".",
"my_notes",
"(",
"start_index",
"=",
"index",
",",
"sort_by",
"=",
"sort_by",
")",
"if",
"notes",
"[",
"'result'",
"]",
"!=",
"'success'",
":",
"break",
"# If the first note has a higher ID, we've passed it",
"if",
"group_id",
"is",
"not",
"None",
"and",
"notes",
"[",
"'loans'",
"]",
"[",
"0",
"]",
"[",
"sort_by",
"]",
">",
"group_id",
":",
"break",
"# If the last note has a higher ID, it could be in this record set",
"if",
"group_id",
"is",
"None",
"or",
"notes",
"[",
"'loans'",
"]",
"[",
"-",
"1",
"]",
"[",
"sort_by",
"]",
">=",
"group_id",
":",
"for",
"note",
"in",
"notes",
"[",
"'loans'",
"]",
":",
"# Order ID, no match",
"if",
"order_id",
"is",
"not",
"None",
"and",
"note",
"[",
"'orderId'",
"]",
"!=",
"order_id",
":",
"continue",
"# Loan ID, no match",
"if",
"loan_id",
"is",
"not",
"None",
"and",
"note",
"[",
"'loanId'",
"]",
"!=",
"loan_id",
":",
"continue",
"# Grade, no match",
"if",
"grade",
"is",
"not",
"None",
"and",
"note",
"[",
"'rate'",
"]",
"[",
"0",
"]",
"!=",
"grade",
":",
"continue",
"# Portfolio, no match",
"if",
"portfolio_name",
"is",
"not",
"None",
"and",
"note",
"[",
"'portfolioName'",
"]",
"[",
"0",
"]",
"!=",
"portfolio_name",
":",
"continue",
"# Term, no match",
"if",
"term",
"is",
"not",
"None",
"and",
"note",
"[",
"'loanLength'",
"]",
"!=",
"term",
":",
"continue",
"# Status",
"if",
"status",
"is",
"not",
"None",
":",
"# Normalize status message",
"nstatus",
"=",
"re",
".",
"sub",
"(",
"'[^a-zA-Z\\-]'",
",",
"' '",
",",
"note",
"[",
"'status'",
"]",
".",
"lower",
"(",
")",
")",
"# remove all non alpha characters",
"nstatus",
"=",
"re",
".",
"sub",
"(",
"'days'",
",",
"' '",
",",
"nstatus",
")",
"# remove days",
"nstatus",
"=",
"re",
".",
"sub",
"(",
"'\\s+'",
",",
"'-'",
",",
"nstatus",
".",
"strip",
"(",
")",
")",
"# replace spaces with dash",
"nstatus",
"=",
"re",
".",
"sub",
"(",
"'(^-+)|(-+$)'",
",",
"''",
",",
"nstatus",
")",
"# No match",
"if",
"nstatus",
"!=",
"status",
":",
"continue",
"# Must be a match",
"found",
".",
"append",
"(",
"note",
")",
"index",
"+=",
"100",
"return",
"found"
] | Search for notes you are invested in. Use the parameters to define how to search.
Passing no parameters is the same as calling `my_notes(get_all=True)`
Parameters
----------
loan_id : int, optional
Search for notes for a specific loan. Since a loan is broken up into a pool of notes, it's possible
to invest multiple notes in a single loan
order_id : int, optional
Search for notes from a particular investment order.
grade : {A, B, C, D, E, F, G}, optional
Match by a particular loan grade
portfolio_name : string, optional
Search for notes in a portfolio with this name (case sensitive)
status : string, {issued, in-review, in-funding, current, charged-off, late, in-grace-period, fully-paid}, optional
The funding status string.
term : {60, 36}, optional
Term length, either 60 or 36 (for 5 year and 3 year, respectively)
Returns
-------
dict
A dictionary with a list of matching notes on the `loans` key | [
"Search",
"for",
"notes",
"you",
"are",
"invested",
"in",
".",
"Use",
"the",
"parameters",
"to",
"define",
"how",
"to",
"search",
".",
"Passing",
"no",
"parameters",
"is",
"the",
"same",
"as",
"calling",
"my_notes",
"(",
"get_all",
"=",
"True",
")"
] | python | train |
SylvanasSun/python-common-cache | common_cache/__init__.py | https://github.com/SylvanasSun/python-common-cache/blob/f113eb3cd751eed5ab5373e8610a31a444220cf8/common_cache/__init__.py#L355-L371 | def replace_evict_func(self, func, only_read=False):
"""
>>> cache = Cache(log_level=logging.WARNING)
>>> def evict(dict, evict_number=10): pass
>>> cache.replace_evict_func(evict)
True
>>> def evict_b(dict): pass
>>> cache.replace_evict_func(evict_b)
False
>>> def evict_c(dict, a, b): pass
>>> cache.replace_evict_func(evict_c)
False
"""
self.logger.info('Replace the evict function %s ---> %s' % (
get_function_signature(self.evict_func), get_function_signature(func)))
self.evict_func = func
return True | [
"def",
"replace_evict_func",
"(",
"self",
",",
"func",
",",
"only_read",
"=",
"False",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Replace the evict function %s ---> %s'",
"%",
"(",
"get_function_signature",
"(",
"self",
".",
"evict_func",
")",
",",
"get_function_signature",
"(",
"func",
")",
")",
")",
"self",
".",
"evict_func",
"=",
"func",
"return",
"True"
] | >>> cache = Cache(log_level=logging.WARNING)
>>> def evict(dict, evict_number=10): pass
>>> cache.replace_evict_func(evict)
True
>>> def evict_b(dict): pass
>>> cache.replace_evict_func(evict_b)
False
>>> def evict_c(dict, a, b): pass
>>> cache.replace_evict_func(evict_c)
False | [
">>>",
"cache",
"=",
"Cache",
"(",
"log_level",
"=",
"logging",
".",
"WARNING",
")",
">>>",
"def",
"evict",
"(",
"dict",
"evict_number",
"=",
"10",
")",
":",
"pass",
">>>",
"cache",
".",
"replace_evict_func",
"(",
"evict",
")",
"True",
">>>",
"def",
"evict_b",
"(",
"dict",
")",
":",
"pass",
">>>",
"cache",
".",
"replace_evict_func",
"(",
"evict_b",
")",
"False",
">>>",
"def",
"evict_c",
"(",
"dict",
"a",
"b",
")",
":",
"pass",
">>>",
"cache",
".",
"replace_evict_func",
"(",
"evict_c",
")",
"False"
] | python | train |
nbedi/typecaster | typecaster/utils.py | https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/utils.py#L11-L53 | def text_to_speech(text, synthesizer, synth_args, sentence_break):
"""
Converts given text to a pydub AudioSegment using a specified speech
synthesizer. At the moment, IBM Watson's text-to-speech API is the only
available synthesizer.
:param text:
The text that will be synthesized to audio.
:param synthesizer:
The text-to-speech synthesizer to use. At the moment, 'watson' is the
only available input.
:param synth_args:
A dictionary of arguments to pass to the synthesizer. Parameters for
authorization (username/password) should be passed here.
:param sentence_break:
A string that identifies a sentence break or another logical break in
the text. Necessary for text longer than 50 words. Defaults to '. '.
"""
if len(text.split()) < 50:
if synthesizer == 'watson':
with open('.temp.wav', 'wb') as temp:
temp.write(watson_request(text=text, synth_args=synth_args).content)
response = AudioSegment.from_wav('.temp.wav')
os.remove('.temp.wav')
return response
else:
raise ValueError('"' + synthesizer + '" synthesizer not found.')
else:
segments = []
for i, sentence in enumerate(text.split(sentence_break)):
if synthesizer == 'watson':
with open('.temp' + str(i) + '.wav', 'wb') as temp:
temp.write(watson_request(text=sentence, synth_args=synth_args).content)
segments.append(AudioSegment.from_wav('.temp' + str(i) + '.wav'))
os.remove('.temp' + str(i) + '.wav')
else:
raise ValueError('"' + synthesizer + '" synthesizer not found.')
response = segments[0]
for segment in segments[1:]:
response = response + segment
return response | [
"def",
"text_to_speech",
"(",
"text",
",",
"synthesizer",
",",
"synth_args",
",",
"sentence_break",
")",
":",
"if",
"len",
"(",
"text",
".",
"split",
"(",
")",
")",
"<",
"50",
":",
"if",
"synthesizer",
"==",
"'watson'",
":",
"with",
"open",
"(",
"'.temp.wav'",
",",
"'wb'",
")",
"as",
"temp",
":",
"temp",
".",
"write",
"(",
"watson_request",
"(",
"text",
"=",
"text",
",",
"synth_args",
"=",
"synth_args",
")",
".",
"content",
")",
"response",
"=",
"AudioSegment",
".",
"from_wav",
"(",
"'.temp.wav'",
")",
"os",
".",
"remove",
"(",
"'.temp.wav'",
")",
"return",
"response",
"else",
":",
"raise",
"ValueError",
"(",
"'\"'",
"+",
"synthesizer",
"+",
"'\" synthesizer not found.'",
")",
"else",
":",
"segments",
"=",
"[",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"text",
".",
"split",
"(",
"sentence_break",
")",
")",
":",
"if",
"synthesizer",
"==",
"'watson'",
":",
"with",
"open",
"(",
"'.temp'",
"+",
"str",
"(",
"i",
")",
"+",
"'.wav'",
",",
"'wb'",
")",
"as",
"temp",
":",
"temp",
".",
"write",
"(",
"watson_request",
"(",
"text",
"=",
"sentence",
",",
"synth_args",
"=",
"synth_args",
")",
".",
"content",
")",
"segments",
".",
"append",
"(",
"AudioSegment",
".",
"from_wav",
"(",
"'.temp'",
"+",
"str",
"(",
"i",
")",
"+",
"'.wav'",
")",
")",
"os",
".",
"remove",
"(",
"'.temp'",
"+",
"str",
"(",
"i",
")",
"+",
"'.wav'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'\"'",
"+",
"synthesizer",
"+",
"'\" synthesizer not found.'",
")",
"response",
"=",
"segments",
"[",
"0",
"]",
"for",
"segment",
"in",
"segments",
"[",
"1",
":",
"]",
":",
"response",
"=",
"response",
"+",
"segment",
"return",
"response"
] | Converts given text to a pydub AudioSegment using a specified speech
synthesizer. At the moment, IBM Watson's text-to-speech API is the only
available synthesizer.
:param text:
The text that will be synthesized to audio.
:param synthesizer:
The text-to-speech synthesizer to use. At the moment, 'watson' is the
only available input.
:param synth_args:
A dictionary of arguments to pass to the synthesizer. Parameters for
authorization (username/password) should be passed here.
:param sentence_break:
A string that identifies a sentence break or another logical break in
the text. Necessary for text longer than 50 words. Defaults to '. '. | [
"Converts",
"given",
"text",
"to",
"a",
"pydub",
"AudioSegment",
"using",
"a",
"specified",
"speech",
"synthesizer",
".",
"At",
"the",
"moment",
"IBM",
"Watson",
"s",
"text",
"-",
"to",
"-",
"speech",
"API",
"is",
"the",
"only",
"available",
"synthesizer",
"."
] | python | train |
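A hedged usage sketch for the `text_to_speech` record above. The import path comes from the record; the Watson credential keys inside `synth_args` are assumptions, since the docstring only says authorization parameters go there.

```python
from typecaster.utils import text_to_speech  # module path taken from the record

# Key names below are assumptions; the docstring only states that
# username/password for authorization belong in synth_args.
synth_args = {'username': 'WATSON_USERNAME', 'password': 'WATSON_PASSWORD'}

audio = text_to_speech(
    text='Hello from typecaster. This is a short demo.',
    synthesizer='watson',   # the only synthesizer the docstring lists
    synth_args=synth_args,
    sentence_break='. ',    # documented default used to split text over 50 words
)
audio.export('demo.wav', format='wav')  # the return value is a pydub AudioSegment
```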
bitesofcode/projexui | projexui/xcommands.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xcommands.py#L567-L579 | def stylize(obj, style='plastique', theme='projexui'):
"""
Styles the inputed object with the given options.
:param obj | <QtGui.QWidget> || <QtGui.QApplication>
style | <str>
base | <str>
"""
obj.setStyle(style)
if theme:
sheet = resources.read('styles/{0}/style.css'.format(theme))
if sheet:
obj.setStyleSheet(sheet) | [
"def",
"stylize",
"(",
"obj",
",",
"style",
"=",
"'plastique'",
",",
"theme",
"=",
"'projexui'",
")",
":",
"obj",
".",
"setStyle",
"(",
"style",
")",
"if",
"theme",
":",
"sheet",
"=",
"resources",
".",
"read",
"(",
"'styles/{0}/style.css'",
".",
"format",
"(",
"theme",
")",
")",
"if",
"sheet",
":",
"obj",
".",
"setStyleSheet",
"(",
"sheet",
")"
] | Styles the inputed object with the given options.
:param obj | <QtGui.QWidget> || <QtGui.QApplication>
style | <str>
base | <str> | [
"Styles",
"the",
"inputed",
"object",
"with",
"the",
"given",
"options",
".",
":",
"param",
"obj",
"|",
"<QtGui",
".",
"QWidget",
">",
"||",
"<QtGui",
".",
"QApplication",
">",
"style",
"|",
"<str",
">",
"base",
"|",
"<str",
">"
] | python | train |
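A small sketch of calling `stylize` from the record above on an application object. Which Qt binding provides `QApplication` is an assumption (the docstring only names `QtGui.QApplication`); the style names are standard Qt4-era identifiers.

```python
import sys

from PyQt4.QtGui import QApplication  # assumption: a Qt4-era binding matching the docstring's QtGui.QApplication
from projexui import xcommands        # module path taken from the record

app = QApplication(sys.argv)
xcommands.stylize(app)                                  # defaults: style='plastique', theme='projexui'
xcommands.stylize(app, style='cleanlooks', theme=None)  # theme=None skips loading the bundled stylesheet
```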
googleapis/oauth2client | oauth2client/contrib/flask_util.py | https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/flask_util.py#L456-L469 | def email(self):
"""Returns the user's email address or None if there are no credentials.
The email address is provided by the current credentials' id_token.
This should not be used as unique identifier as the user can change
their email. If you need a unique identifier, use user_id.
"""
if not self.credentials:
return None
try:
return self.credentials.id_token['email']
except KeyError:
current_app.logger.error(
'Invalid id_token {0}'.format(self.credentials.id_token)) | [
"def",
"email",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"credentials",
":",
"return",
"None",
"try",
":",
"return",
"self",
".",
"credentials",
".",
"id_token",
"[",
"'email'",
"]",
"except",
"KeyError",
":",
"current_app",
".",
"logger",
".",
"error",
"(",
"'Invalid id_token {0}'",
".",
"format",
"(",
"self",
".",
"credentials",
".",
"id_token",
")",
")"
] | Returns the user's email address or None if there are no credentials.
The email address is provided by the current credentials' id_token.
This should not be used as unique identifier as the user can change
their email. If you need a unique identifier, use user_id. | [
"Returns",
"the",
"user",
"s",
"email",
"address",
"or",
"None",
"if",
"there",
"are",
"no",
"credentials",
"."
] | python | valid |
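A short sketch of how the `email` property above is usually reached through `oauth2client.contrib.flask_util`; the `UserOAuth2` wiring and config keys follow that module's documented pattern, but the route and secret values are placeholders.

```python
from flask import Flask
from oauth2client.contrib.flask_util import UserOAuth2

app = Flask(__name__)
app.secret_key = 'replace-with-a-real-secret'                     # placeholder
app.config['GOOGLE_OAUTH2_CLIENT_ID'] = 'your-client-id'          # placeholder
app.config['GOOGLE_OAUTH2_CLIENT_SECRET'] = 'your-client-secret'  # placeholder

oauth2 = UserOAuth2(app)


@app.route('/profile')
@oauth2.required
def profile():
    # email comes from the id_token of the current credentials;
    # it is None when the user has not yet authorized.
    return 'Signed in as {0}'.format(oauth2.email)
```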