repo
stringlengths 7
54
| path
stringlengths 4
192
| url
stringlengths 87
284
| code
stringlengths 78
104k
| code_tokens
sequence | docstring
stringlengths 1
46.9k
| docstring_tokens
sequence | language
stringclasses 1
value | partition
stringclasses 3
values |
---|---|---|---|---|---|---|---|---|
ev3dev/ev3dev-lang-python | ev3dev2/motor.py | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1479-L1487 | def max_pulse_sp(self):
"""
Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the maximum (clockwise) position_sp. Default value is 2400.
Valid values are 2300 to 2700. You must write to the position_sp attribute for
changes to this attribute to take effect.
"""
self._max_pulse_sp, value = self.get_attr_int(self._max_pulse_sp, 'max_pulse_sp')
return value | [
"def",
"max_pulse_sp",
"(",
"self",
")",
":",
"self",
".",
"_max_pulse_sp",
",",
"value",
"=",
"self",
".",
"get_attr_int",
"(",
"self",
".",
"_max_pulse_sp",
",",
"'max_pulse_sp'",
")",
"return",
"value"
] | Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the maximum (clockwise) position_sp. Default value is 2400.
Valid values are 2300 to 2700. You must write to the position_sp attribute for
changes to this attribute to take effect. | [
"Used",
"to",
"set",
"the",
"pulse",
"size",
"in",
"milliseconds",
"for",
"the",
"signal",
"that",
"tells",
"the",
"servo",
"to",
"drive",
"to",
"the",
"maximum",
"(",
"clockwise",
")",
"position_sp",
".",
"Default",
"value",
"is",
"2400",
".",
"Valid",
"values",
"are",
"2300",
"to",
"2700",
".",
"You",
"must",
"write",
"to",
"the",
"position_sp",
"attribute",
"for",
"changes",
"to",
"this",
"attribute",
"to",
"take",
"effect",
"."
] | python | train |
pantsbuild/pants | src/python/pants/util/contextutil.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/contextutil.py#L210-L233 | def temporary_file(root_dir=None, cleanup=True, suffix='', permissions=None, binary_mode=True):
"""
A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
:param str suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param int permissions: If provided, sets the file to use these permissions.
:param bool binary_mode: Whether file opens in binary or text mode.
"""
mode = 'w+b' if binary_mode else 'w+' # tempfile's default is 'w+b'
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
try:
if permissions is not None:
os.chmod(fd.name, permissions)
yield fd
finally:
if cleanup:
safe_delete(fd.name) | [
"def",
"temporary_file",
"(",
"root_dir",
"=",
"None",
",",
"cleanup",
"=",
"True",
",",
"suffix",
"=",
"''",
",",
"permissions",
"=",
"None",
",",
"binary_mode",
"=",
"True",
")",
":",
"mode",
"=",
"'w+b'",
"if",
"binary_mode",
"else",
"'w+'",
"# tempfile's default is 'w+b'",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"suffix",
",",
"dir",
"=",
"root_dir",
",",
"delete",
"=",
"False",
",",
"mode",
"=",
"mode",
")",
"as",
"fd",
":",
"try",
":",
"if",
"permissions",
"is",
"not",
"None",
":",
"os",
".",
"chmod",
"(",
"fd",
".",
"name",
",",
"permissions",
")",
"yield",
"fd",
"finally",
":",
"if",
"cleanup",
":",
"safe_delete",
"(",
"fd",
".",
"name",
")"
] | A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
:param str suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param int permissions: If provided, sets the file to use these permissions.
:param bool binary_mode: Whether file opens in binary or text mode. | [
"A",
"with",
"-",
"context",
"that",
"creates",
"a",
"temporary",
"file",
"and",
"returns",
"a",
"writeable",
"file",
"descriptor",
"to",
"it",
"."
] | python | train |
geoadmin/lib-gatilegrid | gatilegrid/tilegrids.py | https://github.com/geoadmin/lib-gatilegrid/blob/28e39cba22451f6ef0ddcb93cbc0838f06815505/gatilegrid/tilegrids.py#L298-L301 | def getZoom(self, resolution):
"Return the zoom level for a given resolution"
assert resolution in self.RESOLUTIONS
return self.RESOLUTIONS.index(resolution) | [
"def",
"getZoom",
"(",
"self",
",",
"resolution",
")",
":",
"assert",
"resolution",
"in",
"self",
".",
"RESOLUTIONS",
"return",
"self",
".",
"RESOLUTIONS",
".",
"index",
"(",
"resolution",
")"
] | Return the zoom level for a given resolution | [
"Return",
"the",
"zoom",
"level",
"for",
"a",
"given",
"resolution"
] | python | train |
aloetesting/aloe_webdriver | aloe_webdriver/css.py | https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/css.py#L64-L98 | def load_jquery(func):
"""
A decorator to ensure a function is run with jQuery available.
If an exception from a function indicates jQuery is missing, it is loaded
and the function re-executed.
The browser to load jQuery into must be the first argument of the function.
"""
@wraps(func)
def wrapped(browser, *args, **kwargs):
"""Run the function, loading jQuery if needed."""
try:
return func(browser, *args, **kwargs)
except WebDriverException as ex:
if not is_jquery_not_defined_error(ex.msg):
raise
load_script(browser, JQUERY)
@wait_for
def jquery_available():
"""Assert that jQuery has loaded."""
try:
return browser.execute_script('return $')
except WebDriverException:
raise AssertionError("jQuery is not loaded")
jquery_available()
return func(browser, *args, **kwargs)
return wrapped | [
"def",
"load_jquery",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"browser",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Run the function, loading jQuery if needed.\"\"\"",
"try",
":",
"return",
"func",
"(",
"browser",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"WebDriverException",
"as",
"ex",
":",
"if",
"not",
"is_jquery_not_defined_error",
"(",
"ex",
".",
"msg",
")",
":",
"raise",
"load_script",
"(",
"browser",
",",
"JQUERY",
")",
"@",
"wait_for",
"def",
"jquery_available",
"(",
")",
":",
"\"\"\"Assert that jQuery has loaded.\"\"\"",
"try",
":",
"return",
"browser",
".",
"execute_script",
"(",
"'return $'",
")",
"except",
"WebDriverException",
":",
"raise",
"AssertionError",
"(",
"\"jQuery is not loaded\"",
")",
"jquery_available",
"(",
")",
"return",
"func",
"(",
"browser",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | A decorator to ensure a function is run with jQuery available.
If an exception from a function indicates jQuery is missing, it is loaded
and the function re-executed.
The browser to load jQuery into must be the first argument of the function. | [
"A",
"decorator",
"to",
"ensure",
"a",
"function",
"is",
"run",
"with",
"jQuery",
"available",
"."
] | python | train |
awslabs/serverless-application-model | samtranslator/sdk/parameter.py | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/sdk/parameter.py#L61-L67 | def add_pseudo_parameter_values(self):
"""
Add pseudo parameter values
:return: parameter values that have pseudo parameter in it
"""
if 'AWS::Region' not in self.parameter_values:
self.parameter_values['AWS::Region'] = boto3.session.Session().region_name | [
"def",
"add_pseudo_parameter_values",
"(",
"self",
")",
":",
"if",
"'AWS::Region'",
"not",
"in",
"self",
".",
"parameter_values",
":",
"self",
".",
"parameter_values",
"[",
"'AWS::Region'",
"]",
"=",
"boto3",
".",
"session",
".",
"Session",
"(",
")",
".",
"region_name"
] | Add pseudo parameter values
:return: parameter values that have pseudo parameter in it | [
"Add",
"pseudo",
"parameter",
"values",
":",
"return",
":",
"parameter",
"values",
"that",
"have",
"pseudo",
"parameter",
"in",
"it"
] | python | train |
serhatbolsu/robotframework-appiumlibrary | AppiumLibrary/keywords/_applicationmanagement.py | https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_applicationmanagement.py#L317-L325 | def get_capability(self, capability_name):
"""
Return the desired capability value by desired capability name
"""
try:
capability = self._current_application().capabilities[capability_name]
except Exception as e:
raise e
return capability | [
"def",
"get_capability",
"(",
"self",
",",
"capability_name",
")",
":",
"try",
":",
"capability",
"=",
"self",
".",
"_current_application",
"(",
")",
".",
"capabilities",
"[",
"capability_name",
"]",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"capability"
] | Return the desired capability value by desired capability name | [
"Return",
"the",
"desired",
"capability",
"value",
"by",
"desired",
"capability",
"name"
] | python | train |
mcs07/ChemDataExtractor | chemdataextractor/cli/evaluate.py | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/evaluate.py#L83-L88 | def get_labels(cs):
"""Return list of every label."""
records = []
for c in cs:
records.extend(c.get('labels', []))
return records | [
"def",
"get_labels",
"(",
"cs",
")",
":",
"records",
"=",
"[",
"]",
"for",
"c",
"in",
"cs",
":",
"records",
".",
"extend",
"(",
"c",
".",
"get",
"(",
"'labels'",
",",
"[",
"]",
")",
")",
"return",
"records"
] | Return list of every label. | [
"Return",
"list",
"of",
"every",
"label",
"."
] | python | train |
woolfson-group/isambard | isambard/ampal/base_ampal.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/base_ampal.py#L543-L555 | def relabel_atoms(self, start=1):
"""Relabels all `Atoms` in numerical order.
Parameters
----------
start : int, optional
Offset the labelling by `start` residues.
"""
counter = start
for atom in self.get_atoms():
atom.id = counter
counter += 1
return | [
"def",
"relabel_atoms",
"(",
"self",
",",
"start",
"=",
"1",
")",
":",
"counter",
"=",
"start",
"for",
"atom",
"in",
"self",
".",
"get_atoms",
"(",
")",
":",
"atom",
".",
"id",
"=",
"counter",
"counter",
"+=",
"1",
"return"
] | Relabels all `Atoms` in numerical order.
Parameters
----------
start : int, optional
Offset the labelling by `start` residues. | [
"Relabels",
"all",
"Atoms",
"in",
"numerical",
"order",
"."
] | python | train |
pandas-dev/pandas | pandas/core/arrays/categorical.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1477-L1495 | def get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
numpy.array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes,
fill_value=np.nan)
return np.array(self) | [
"def",
"get_values",
"(",
"self",
")",
":",
"# if we are a datetime and period index, return Index to keep metadata",
"if",
"is_datetimelike",
"(",
"self",
".",
"categories",
")",
":",
"return",
"self",
".",
"categories",
".",
"take",
"(",
"self",
".",
"_codes",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
"elif",
"is_integer_dtype",
"(",
"self",
".",
"categories",
")",
"and",
"-",
"1",
"in",
"self",
".",
"_codes",
":",
"return",
"self",
".",
"categories",
".",
"astype",
"(",
"\"object\"",
")",
".",
"take",
"(",
"self",
".",
"_codes",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
"return",
"np",
".",
"array",
"(",
"self",
")"
] | Return the values.
For internal compatibility with pandas formatting.
Returns
-------
numpy.array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods. | [
"Return",
"the",
"values",
"."
] | python | train |
Clinical-Genomics/scout | scout/server/blueprints/institutes/views.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/institutes/views.py#L17-L41 | def institutes():
"""Display a list of all user institutes."""
institute_objs = user_institutes(store, current_user)
institutes = []
for ins_obj in institute_objs:
sanger_recipients = []
for user_mail in ins_obj.get('sanger_recipients',[]):
user_obj = store.user(user_mail)
if not user_obj:
continue
sanger_recipients.append(user_obj['name'])
institutes.append(
{
'display_name': ins_obj['display_name'],
'internal_id': ins_obj['_id'],
'coverage_cutoff': ins_obj.get('coverage_cutoff', 'None'),
'sanger_recipients': sanger_recipients,
'frequency_cutoff': ins_obj.get('frequency_cutoff', 'None'),
'phenotype_groups': ins_obj.get('phenotype_groups', PHENOTYPE_GROUPS)
}
)
data = dict(institutes=institutes)
return render_template(
'overview/institutes.html', **data) | [
"def",
"institutes",
"(",
")",
":",
"institute_objs",
"=",
"user_institutes",
"(",
"store",
",",
"current_user",
")",
"institutes",
"=",
"[",
"]",
"for",
"ins_obj",
"in",
"institute_objs",
":",
"sanger_recipients",
"=",
"[",
"]",
"for",
"user_mail",
"in",
"ins_obj",
".",
"get",
"(",
"'sanger_recipients'",
",",
"[",
"]",
")",
":",
"user_obj",
"=",
"store",
".",
"user",
"(",
"user_mail",
")",
"if",
"not",
"user_obj",
":",
"continue",
"sanger_recipients",
".",
"append",
"(",
"user_obj",
"[",
"'name'",
"]",
")",
"institutes",
".",
"append",
"(",
"{",
"'display_name'",
":",
"ins_obj",
"[",
"'display_name'",
"]",
",",
"'internal_id'",
":",
"ins_obj",
"[",
"'_id'",
"]",
",",
"'coverage_cutoff'",
":",
"ins_obj",
".",
"get",
"(",
"'coverage_cutoff'",
",",
"'None'",
")",
",",
"'sanger_recipients'",
":",
"sanger_recipients",
",",
"'frequency_cutoff'",
":",
"ins_obj",
".",
"get",
"(",
"'frequency_cutoff'",
",",
"'None'",
")",
",",
"'phenotype_groups'",
":",
"ins_obj",
".",
"get",
"(",
"'phenotype_groups'",
",",
"PHENOTYPE_GROUPS",
")",
"}",
")",
"data",
"=",
"dict",
"(",
"institutes",
"=",
"institutes",
")",
"return",
"render_template",
"(",
"'overview/institutes.html'",
",",
"*",
"*",
"data",
")"
] | Display a list of all user institutes. | [
"Display",
"a",
"list",
"of",
"all",
"user",
"institutes",
"."
] | python | test |
rigetti/quantumflow | quantumflow/qaoa.py | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/qaoa.py#L22-L64 | def qubo_circuit(
graph: nx.Graph,
steps: int,
beta: Sequence,
gamma: Sequence) -> Circuit:
"""
A QAOA circuit for the Quadratic Unconstrained Binary Optimization
problem (i.e. an Ising model).
Args:
graph : a networkx graph instance with optional edge and node weights
steps : number of QAOA steps
beta : driver parameters (One per step)
gamma : cost parameters (One per step)
"""
qubits = list(graph.nodes())
# Initialization
circ = Circuit()
for q0 in qubits:
circ += H(q0)
# Run for given number of QAOA steps
for p in range(0, steps):
# Cost
for q0, q1 in graph.edges():
weight = graph[q0][q1].get('weight', 1.0)
# Note factor of pi due to parameterization of ZZ gate
circ += ZZ(-weight * gamma[p] / np.pi, q0, q1)
for q0 in qubits:
node_weight = graph.nodes[q0].get('weight', None)
if node_weight is not None:
circ += RZ(node_weight, q0)
# Drive
for q0 in qubits:
circ += RX(beta[p], q0)
return circ | [
"def",
"qubo_circuit",
"(",
"graph",
":",
"nx",
".",
"Graph",
",",
"steps",
":",
"int",
",",
"beta",
":",
"Sequence",
",",
"gamma",
":",
"Sequence",
")",
"->",
"Circuit",
":",
"qubits",
"=",
"list",
"(",
"graph",
".",
"nodes",
"(",
")",
")",
"# Initialization",
"circ",
"=",
"Circuit",
"(",
")",
"for",
"q0",
"in",
"qubits",
":",
"circ",
"+=",
"H",
"(",
"q0",
")",
"# Run for given number of QAOA steps",
"for",
"p",
"in",
"range",
"(",
"0",
",",
"steps",
")",
":",
"# Cost",
"for",
"q0",
",",
"q1",
"in",
"graph",
".",
"edges",
"(",
")",
":",
"weight",
"=",
"graph",
"[",
"q0",
"]",
"[",
"q1",
"]",
".",
"get",
"(",
"'weight'",
",",
"1.0",
")",
"# Note factor of pi due to parameterization of ZZ gate",
"circ",
"+=",
"ZZ",
"(",
"-",
"weight",
"*",
"gamma",
"[",
"p",
"]",
"/",
"np",
".",
"pi",
",",
"q0",
",",
"q1",
")",
"for",
"q0",
"in",
"qubits",
":",
"node_weight",
"=",
"graph",
".",
"nodes",
"[",
"q0",
"]",
".",
"get",
"(",
"'weight'",
",",
"None",
")",
"if",
"node_weight",
"is",
"not",
"None",
":",
"circ",
"+=",
"RZ",
"(",
"node_weight",
",",
"q0",
")",
"# Drive",
"for",
"q0",
"in",
"qubits",
":",
"circ",
"+=",
"RX",
"(",
"beta",
"[",
"p",
"]",
",",
"q0",
")",
"return",
"circ"
] | A QAOA circuit for the Quadratic Unconstrained Binary Optimization
problem (i.e. an Ising model).
Args:
graph : a networkx graph instance with optional edge and node weights
steps : number of QAOA steps
beta : driver parameters (One per step)
gamma : cost parameters (One per step) | [
"A",
"QAOA",
"circuit",
"for",
"the",
"Quadratic",
"Unconstrained",
"Binary",
"Optimization",
"problem",
"(",
"i",
".",
"e",
".",
"an",
"Ising",
"model",
")",
"."
] | python | train |
bootphon/h5features | h5features/properties.py | https://github.com/bootphon/h5features/blob/d5f95db0f1cee58ac1ba4575d1212e796c39e1f9/h5features/properties.py#L57-L72 | def _eq_dicts(d1, d2):
"""Returns True if d1 == d2, False otherwise"""
if not d1.keys() == d2.keys():
return False
for k, v1 in d1.items():
v2 = d2[k]
if not type(v1) == type(v2):
return False
if isinstance(v1, np.ndarray):
if not np.array_equal(v1, v2):
return False
else:
if not v1 == v2:
return False
return True | [
"def",
"_eq_dicts",
"(",
"d1",
",",
"d2",
")",
":",
"if",
"not",
"d1",
".",
"keys",
"(",
")",
"==",
"d2",
".",
"keys",
"(",
")",
":",
"return",
"False",
"for",
"k",
",",
"v1",
"in",
"d1",
".",
"items",
"(",
")",
":",
"v2",
"=",
"d2",
"[",
"k",
"]",
"if",
"not",
"type",
"(",
"v1",
")",
"==",
"type",
"(",
"v2",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"v1",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"not",
"np",
".",
"array_equal",
"(",
"v1",
",",
"v2",
")",
":",
"return",
"False",
"else",
":",
"if",
"not",
"v1",
"==",
"v2",
":",
"return",
"False",
"return",
"True"
] | Returns True if d1 == d2, False otherwise | [
"Returns",
"True",
"if",
"d1",
"==",
"d2",
"False",
"otherwise"
] | python | train |
christophertbrown/bioscripts | ctbBio/rax.py | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L205-L241 | def rax(a, boot, threads, \
fast = False, run_rax = False, run_iq = False, model = False, cluster = False, node = False):
"""
run raxml on 'a' (alignment) with 'boot' (bootstraps) and 'threads' (threads)
store all files in raxml_a_b
1. give every sequence a short identifier
2. convert fasta to phylip
3. run raxml
4. convert ids in raxml tree to original names
"""
a = os.path.abspath(a)
a_base = a.rsplit('/', 1)[1]
out_dir = '%s/%s_rax_boots_%s' % \
(a.rsplit('/', 1)[0], a_base.rsplit('.', 1)[0], boot)
os.system('mkdir -p %s' % (out_dir))
os.system('ln -sf %s %s/%s' % (os.path.abspath(a), out_dir, a.rsplit('/', 1)[1]))
os.chdir(out_dir)
a_id, a_id_lookup = get_ids(a_base)
a_id_phylip = convert2phylip(a_id)
rax_out = '%s.raxml.txt' % (a_id_phylip)
if fast is True:
final_fast = '%s.fasttree.tree' % (a_id_lookup.rsplit('.', 2)[0])
fast_tree = run_fast(a_id, threads, cluster, node)
good_fast = fix_tree(fast_tree, a_id_lookup, final_fast)
yield '%s/%s' % (out_dir, final_fast)
# run IQ-Tree or RAxML
if run_iq is True:
final_iq = '%s.iq.tree' % (a_id_lookup.rsplit('.', 2)[0])
iq_out = '%s.iq.out' % (a_id_phylip)
iq_tree = run_iqtree(a_id_phylip, model, threads, cluster, node)
good_tree = fix_tree(iq_tree, a_id_lookup, final_iq)
yield '%s/%s' % (out_dir, final_iq)
elif run_rax is True:
final_rax = '%s.raxml.tree' % (a_id_lookup.rsplit('.', 2)[0])
rax_tree = run_raxml(rax_out, boot, a_id_phylip, threads, a_id, model, cluster, node)
good_tree = fix_tree(rax_tree, a_id_lookup, final_rax)
yield '%s/%s' % (out_dir, final_rax) | [
"def",
"rax",
"(",
"a",
",",
"boot",
",",
"threads",
",",
"fast",
"=",
"False",
",",
"run_rax",
"=",
"False",
",",
"run_iq",
"=",
"False",
",",
"model",
"=",
"False",
",",
"cluster",
"=",
"False",
",",
"node",
"=",
"False",
")",
":",
"a",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"a",
")",
"a_base",
"=",
"a",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"1",
"]",
"out_dir",
"=",
"'%s/%s_rax_boots_%s'",
"%",
"(",
"a",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"0",
"]",
",",
"a_base",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
",",
"boot",
")",
"os",
".",
"system",
"(",
"'mkdir -p %s'",
"%",
"(",
"out_dir",
")",
")",
"os",
".",
"system",
"(",
"'ln -sf %s %s/%s'",
"%",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"a",
")",
",",
"out_dir",
",",
"a",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"1",
"]",
")",
")",
"os",
".",
"chdir",
"(",
"out_dir",
")",
"a_id",
",",
"a_id_lookup",
"=",
"get_ids",
"(",
"a_base",
")",
"a_id_phylip",
"=",
"convert2phylip",
"(",
"a_id",
")",
"rax_out",
"=",
"'%s.raxml.txt'",
"%",
"(",
"a_id_phylip",
")",
"if",
"fast",
"is",
"True",
":",
"final_fast",
"=",
"'%s.fasttree.tree'",
"%",
"(",
"a_id_lookup",
".",
"rsplit",
"(",
"'.'",
",",
"2",
")",
"[",
"0",
"]",
")",
"fast_tree",
"=",
"run_fast",
"(",
"a_id",
",",
"threads",
",",
"cluster",
",",
"node",
")",
"good_fast",
"=",
"fix_tree",
"(",
"fast_tree",
",",
"a_id_lookup",
",",
"final_fast",
")",
"yield",
"'%s/%s'",
"%",
"(",
"out_dir",
",",
"final_fast",
")",
"# run IQ-Tree or RAxML",
"if",
"run_iq",
"is",
"True",
":",
"final_iq",
"=",
"'%s.iq.tree'",
"%",
"(",
"a_id_lookup",
".",
"rsplit",
"(",
"'.'",
",",
"2",
")",
"[",
"0",
"]",
")",
"iq_out",
"=",
"'%s.iq.out'",
"%",
"(",
"a_id_phylip",
")",
"iq_tree",
"=",
"run_iqtree",
"(",
"a_id_phylip",
",",
"model",
",",
"threads",
",",
"cluster",
",",
"node",
")",
"good_tree",
"=",
"fix_tree",
"(",
"iq_tree",
",",
"a_id_lookup",
",",
"final_iq",
")",
"yield",
"'%s/%s'",
"%",
"(",
"out_dir",
",",
"final_iq",
")",
"elif",
"run_rax",
"is",
"True",
":",
"final_rax",
"=",
"'%s.raxml.tree'",
"%",
"(",
"a_id_lookup",
".",
"rsplit",
"(",
"'.'",
",",
"2",
")",
"[",
"0",
"]",
")",
"rax_tree",
"=",
"run_raxml",
"(",
"rax_out",
",",
"boot",
",",
"a_id_phylip",
",",
"threads",
",",
"a_id",
",",
"model",
",",
"cluster",
",",
"node",
")",
"good_tree",
"=",
"fix_tree",
"(",
"rax_tree",
",",
"a_id_lookup",
",",
"final_rax",
")",
"yield",
"'%s/%s'",
"%",
"(",
"out_dir",
",",
"final_rax",
")"
] | run raxml on 'a' (alignment) with 'boot' (bootstraps) and 'threads' (threads)
store all files in raxml_a_b
1. give every sequence a short identifier
2. convert fasta to phylip
3. run raxml
4. convert ids in raxml tree to original names | [
"run",
"raxml",
"on",
"a",
"(",
"alignment",
")",
"with",
"boot",
"(",
"bootstraps",
")",
"and",
"threads",
"(",
"threads",
")",
"store",
"all",
"files",
"in",
"raxml_a_b",
"1",
".",
"give",
"every",
"sequence",
"a",
"short",
"identifier",
"2",
".",
"convert",
"fasta",
"to",
"phylip",
"3",
".",
"run",
"raxml",
"4",
".",
"convert",
"ids",
"in",
"raxml",
"tree",
"to",
"original",
"names"
] | python | train |
googleapis/google-cloud-python | api_core/google/api_core/path_template.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/path_template.py#L131-L159 | def _replace_variable_with_pattern(match):
"""Replace a variable match with a pattern that can be used to validate it.
Args:
match (re.Match): A regular expression match
Returns:
str: A regular expression pattern that can be used to validate the
variable in an expanded path.
Raises:
ValueError: If an unexpected template expression is encountered.
"""
positional = match.group("positional")
name = match.group("name")
template = match.group("template")
if name is not None:
if not template:
return _SINGLE_SEGMENT_PATTERN.format(name)
elif template == "**":
return _MULTI_SEGMENT_PATTERN.format(name)
else:
return _generate_pattern_for_template(template)
elif positional == "*":
return _SINGLE_SEGMENT_PATTERN
elif positional == "**":
return _MULTI_SEGMENT_PATTERN
else:
raise ValueError("Unknown template expression {}".format(match.group(0))) | [
"def",
"_replace_variable_with_pattern",
"(",
"match",
")",
":",
"positional",
"=",
"match",
".",
"group",
"(",
"\"positional\"",
")",
"name",
"=",
"match",
".",
"group",
"(",
"\"name\"",
")",
"template",
"=",
"match",
".",
"group",
"(",
"\"template\"",
")",
"if",
"name",
"is",
"not",
"None",
":",
"if",
"not",
"template",
":",
"return",
"_SINGLE_SEGMENT_PATTERN",
".",
"format",
"(",
"name",
")",
"elif",
"template",
"==",
"\"**\"",
":",
"return",
"_MULTI_SEGMENT_PATTERN",
".",
"format",
"(",
"name",
")",
"else",
":",
"return",
"_generate_pattern_for_template",
"(",
"template",
")",
"elif",
"positional",
"==",
"\"*\"",
":",
"return",
"_SINGLE_SEGMENT_PATTERN",
"elif",
"positional",
"==",
"\"**\"",
":",
"return",
"_MULTI_SEGMENT_PATTERN",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown template expression {}\"",
".",
"format",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
")"
] | Replace a variable match with a pattern that can be used to validate it.
Args:
match (re.Match): A regular expression match
Returns:
str: A regular expression pattern that can be used to validate the
variable in an expanded path.
Raises:
ValueError: If an unexpected template expression is encountered. | [
"Replace",
"a",
"variable",
"match",
"with",
"a",
"pattern",
"that",
"can",
"be",
"used",
"to",
"validate",
"it",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/parse.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/parse.py#L1201-L1228 | def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False):
'''Using "format" attempt to pull values from "string".
The format must match the string contents exactly. If the value
you're looking for is instead just a part of the string use
search().
If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
.fixed - tuple of fixed-position values from the string
.named - dict of named values from the string
If ``evaluate_result`` is False the return value will be a Match instance with one method:
.evaluate_result() - This will return a Result instance like you would get
with ``evaluate_result`` set to True
The default behaviour is to match strings case insensitively. You may match with
case by specifying case_sensitive=True.
If the format is invalid a ValueError will be raised.
See the module documentation for the use of "extra_types".
In the case there is no match parse() will return None.
'''
p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
return p.parse(string, evaluate_result=evaluate_result) | [
"def",
"parse",
"(",
"format",
",",
"string",
",",
"extra_types",
"=",
"None",
",",
"evaluate_result",
"=",
"True",
",",
"case_sensitive",
"=",
"False",
")",
":",
"p",
"=",
"Parser",
"(",
"format",
",",
"extra_types",
"=",
"extra_types",
",",
"case_sensitive",
"=",
"case_sensitive",
")",
"return",
"p",
".",
"parse",
"(",
"string",
",",
"evaluate_result",
"=",
"evaluate_result",
")"
] | Using "format" attempt to pull values from "string".
The format must match the string contents exactly. If the value
you're looking for is instead just a part of the string use
search().
If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
.fixed - tuple of fixed-position values from the string
.named - dict of named values from the string
If ``evaluate_result`` is False the return value will be a Match instance with one method:
.evaluate_result() - This will return a Result instance like you would get
with ``evaluate_result`` set to True
The default behaviour is to match strings case insensitively. You may match with
case by specifying case_sensitive=True.
If the format is invalid a ValueError will be raised.
See the module documentation for the use of "extra_types".
In the case there is no match parse() will return None. | [
"Using",
"format",
"attempt",
"to",
"pull",
"values",
"from",
"string",
"."
] | python | train |
lvjiyong/configreset | configreset/__init__.py | https://github.com/lvjiyong/configreset/blob/cde0a426e993a6aa483d6934358e61750c944de9/configreset/__init__.py#L90-L108 | def load(items, default_section=_DEFAULT_SECTION):
"""
从混合类型组中读取配置
:param default_section:
:param items:
:return:
"""
settings = []
assert isinstance(items, list), 'items必须为list'
logger.debug(items)
for item in items:
if _is_conf(item):
settings.append(load_from_ini(item, default_section))
else:
settings.append(load_from_name(item))
logger.debug(settings)
return merge(settings) | [
"def",
"load",
"(",
"items",
",",
"default_section",
"=",
"_DEFAULT_SECTION",
")",
":",
"settings",
"=",
"[",
"]",
"assert",
"isinstance",
"(",
"items",
",",
"list",
")",
",",
"'items必须为list'",
"logger",
".",
"debug",
"(",
"items",
")",
"for",
"item",
"in",
"items",
":",
"if",
"_is_conf",
"(",
"item",
")",
":",
"settings",
".",
"append",
"(",
"load_from_ini",
"(",
"item",
",",
"default_section",
")",
")",
"else",
":",
"settings",
".",
"append",
"(",
"load_from_name",
"(",
"item",
")",
")",
"logger",
".",
"debug",
"(",
"settings",
")",
"return",
"merge",
"(",
"settings",
")"
] | 从混合类型组中读取配置
:param default_section:
:param items:
:return: | [
"从混合类型组中读取配置",
":",
"param",
"default_section",
":",
":",
"param",
"items",
":",
":",
"return",
":"
] | python | train |
pymc-devs/pymc | pymc/StepMethods.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L303-L312 | def logp_plus_loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics.
'''
sum = logp_of_set(self.markov_blanket)
if self.verbose > 2:
print_('\t' + self._id +
' Current log-likelihood plus current log-probability', sum)
return sum | [
"def",
"logp_plus_loglike",
"(",
"self",
")",
":",
"sum",
"=",
"logp_of_set",
"(",
"self",
".",
"markov_blanket",
")",
"if",
"self",
".",
"verbose",
">",
"2",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"_id",
"+",
"' Current log-likelihood plus current log-probability'",
",",
"sum",
")",
"return",
"sum"
] | The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics. | [
"The",
"summed",
"log",
"-",
"probability",
"of",
"all",
"stochastic",
"variables",
"that",
"depend",
"on",
"self",
".",
"stochastics",
"and",
"self",
".",
"stochastics",
"."
] | python | train |
iotile/coretools | iotilebuild/iotile/build/config/site_scons/autobuild.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/autobuild.py#L152-L176 | def autobuild_arm_program(elfname, test_dir=os.path.join('firmware', 'test'), patch=True):
"""
Build the an ARM module for all targets and build all unit tests. If pcb files are given, also build those.
"""
try:
#Build for all targets
family = utilities.get_family('module_settings.json')
family.for_all_targets(family.tile.short_name, lambda x: arm.build_program(family.tile, elfname, x, patch=patch))
#Build all unit tests
unit_test.build_units(os.path.join('firmware','test'), family.targets(family.tile.short_name))
Alias('release', os.path.join('build', 'output'))
Alias('test', os.path.join('build', 'test', 'output'))
Default(['release', 'test'])
autobuild_release(family)
if os.path.exists('doc'):
autobuild_documentation(family.tile)
except IOTileException as e:
print(e.format())
sys.exit(1) | [
"def",
"autobuild_arm_program",
"(",
"elfname",
",",
"test_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'firmware'",
",",
"'test'",
")",
",",
"patch",
"=",
"True",
")",
":",
"try",
":",
"#Build for all targets",
"family",
"=",
"utilities",
".",
"get_family",
"(",
"'module_settings.json'",
")",
"family",
".",
"for_all_targets",
"(",
"family",
".",
"tile",
".",
"short_name",
",",
"lambda",
"x",
":",
"arm",
".",
"build_program",
"(",
"family",
".",
"tile",
",",
"elfname",
",",
"x",
",",
"patch",
"=",
"patch",
")",
")",
"#Build all unit tests",
"unit_test",
".",
"build_units",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'firmware'",
",",
"'test'",
")",
",",
"family",
".",
"targets",
"(",
"family",
".",
"tile",
".",
"short_name",
")",
")",
"Alias",
"(",
"'release'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'build'",
",",
"'output'",
")",
")",
"Alias",
"(",
"'test'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'build'",
",",
"'test'",
",",
"'output'",
")",
")",
"Default",
"(",
"[",
"'release'",
",",
"'test'",
"]",
")",
"autobuild_release",
"(",
"family",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"'doc'",
")",
":",
"autobuild_documentation",
"(",
"family",
".",
"tile",
")",
"except",
"IOTileException",
"as",
"e",
":",
"print",
"(",
"e",
".",
"format",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Build the an ARM module for all targets and build all unit tests. If pcb files are given, also build those. | [
"Build",
"the",
"an",
"ARM",
"module",
"for",
"all",
"targets",
"and",
"build",
"all",
"unit",
"tests",
".",
"If",
"pcb",
"files",
"are",
"given",
"also",
"build",
"those",
"."
] | python | train |
pokerregion/poker | poker/room/pokerstars.py | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L372-L375 | def prepend_note(self, player, text):
"""Prepend text to an already existing note."""
note = self._find_note(player)
note.text = text + note.text | [
"def",
"prepend_note",
"(",
"self",
",",
"player",
",",
"text",
")",
":",
"note",
"=",
"self",
".",
"_find_note",
"(",
"player",
")",
"note",
".",
"text",
"=",
"text",
"+",
"note",
".",
"text"
] | Prepend text to an already existing note. | [
"Prepend",
"text",
"to",
"an",
"already",
"existing",
"note",
"."
] | python | train |
LEMS/pylems | lems/model/structure.py | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/structure.py#L86-L95 | def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<Tunnel name="{0}"'.format(self.name) + \
' endA="{0}"'.format(self.end_a) + \
' endB="{0}"'.format(self.end_b) + \
' componentA="{0}"'.format(self.component_a) + \
' componentB="{0}"'.format(self.component_b) + '/>' | [
"def",
"toxml",
"(",
"self",
")",
":",
"return",
"'<Tunnel name=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"name",
")",
"+",
"' endA=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"end_a",
")",
"+",
"' endB=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"end_b",
")",
"+",
"' componentA=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"component_a",
")",
"+",
"' componentB=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"component_b",
")",
"+",
"'/>'"
] | Exports this object into a LEMS XML object | [
"Exports",
"this",
"object",
"into",
"a",
"LEMS",
"XML",
"object"
] | python | train |
google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow.py#L795-L810 | def NotifyAboutEnd(self):
"""Send out a final notification about the end of this flow."""
flow_ref = None
if self.runner_args.client_id:
flow_ref = rdf_objects.FlowReference(
client_id=self.client_id, flow_id=self.urn.Basename())
num_results = len(self.ResultCollection())
notification_lib.Notify(
self.creator, rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
"Flow %s completed with %d %s" %
(self.__class__.__name__, num_results, num_results == 1 and "result" or
"results"),
rdf_objects.ObjectReference(
reference_type=rdf_objects.ObjectReference.Type.FLOW,
flow=flow_ref)) | [
"def",
"NotifyAboutEnd",
"(",
"self",
")",
":",
"flow_ref",
"=",
"None",
"if",
"self",
".",
"runner_args",
".",
"client_id",
":",
"flow_ref",
"=",
"rdf_objects",
".",
"FlowReference",
"(",
"client_id",
"=",
"self",
".",
"client_id",
",",
"flow_id",
"=",
"self",
".",
"urn",
".",
"Basename",
"(",
")",
")",
"num_results",
"=",
"len",
"(",
"self",
".",
"ResultCollection",
"(",
")",
")",
"notification_lib",
".",
"Notify",
"(",
"self",
".",
"creator",
",",
"rdf_objects",
".",
"UserNotification",
".",
"Type",
".",
"TYPE_FLOW_RUN_COMPLETED",
",",
"\"Flow %s completed with %d %s\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"num_results",
",",
"num_results",
"==",
"1",
"and",
"\"result\"",
"or",
"\"results\"",
")",
",",
"rdf_objects",
".",
"ObjectReference",
"(",
"reference_type",
"=",
"rdf_objects",
".",
"ObjectReference",
".",
"Type",
".",
"FLOW",
",",
"flow",
"=",
"flow_ref",
")",
")"
] | Send out a final notification about the end of this flow. | [
"Send",
"out",
"a",
"final",
"notification",
"about",
"the",
"end",
"of",
"this",
"flow",
"."
] | python | train |
pypa/pipenv | pipenv/patched/notpip/_vendor/cachecontrol/caches/redis_cache.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/cachecontrol/caches/redis_cache.py#L25-L29 | def clear(self):
"""Helper for clearing all the keys in a database. Use with
caution!"""
for key in self.conn.keys():
self.conn.delete(key) | [
"def",
"clear",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
".",
"conn",
".",
"keys",
"(",
")",
":",
"self",
".",
"conn",
".",
"delete",
"(",
"key",
")"
] | Helper for clearing all the keys in a database. Use with
caution! | [
"Helper",
"for",
"clearing",
"all",
"the",
"keys",
"in",
"a",
"database",
".",
"Use",
"with",
"caution!"
] | python | train |
openstax/cnx-publishing | cnxpublishing/session.py | https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/session.py#L5-L9 | def includeme(config):
"""Configures the session manager"""
settings = config.registry.settings
session_factory = SignedCookieSessionFactory(settings['session_key'])
config.set_session_factory(session_factory) | [
"def",
"includeme",
"(",
"config",
")",
":",
"settings",
"=",
"config",
".",
"registry",
".",
"settings",
"session_factory",
"=",
"SignedCookieSessionFactory",
"(",
"settings",
"[",
"'session_key'",
"]",
")",
"config",
".",
"set_session_factory",
"(",
"session_factory",
")"
] | Configures the session manager | [
"Configures",
"the",
"session",
"manager"
] | python | valid |
Kitware/tangelo | tangelo/tangelo/util.py | https://github.com/Kitware/tangelo/blob/470034ee9b3d7a01becc1ce5fddc7adc1d5263ef/tangelo/tangelo/util.py#L170-L214 | def module_cache_get(cache, module):
"""
Import a module with an optional yaml config file, but only if we haven't
imported it already.
:param cache: object which holds information on which modules and config
files have been loaded and whether config files should be
loaded.
:param module: the path of the module to load.
:returns: the loaded module.
"""
if getattr(cache, "config", False):
config_file = module[:-2] + "yaml"
if config_file not in cache.config_files and os.path.exists(config_file):
try:
config = yaml_safe_load(config_file, type=dict)
except TypeError as e:
tangelo.log_warning("TANGELO", "Bad configuration in file %s: %s" % (config_file, e))
raise
except IOError:
tangelo.log_warning("TANGELO", "Could not open config file %s" % (config_file))
raise
except ValueError as e:
tangelo.log_warning("TANGELO", "Error reading config file %s: %s" % (config_file, e))
raise
cache.config_files[config_file] = True
else:
config = {}
cherrypy.config["module-config"][module] = config
cherrypy.config["module-store"].setdefault(module, {})
# If two threads are importing the same module nearly concurrently, we
# could load it twice unless we use the import lock.
imp.acquire_lock()
try:
if module not in cache.modules:
name = module[:-3]
# load the module.
service = imp.load_source(name, module)
cache.modules[module] = service
else:
service = cache.modules[module]
finally:
imp.release_lock()
return service | [
"def",
"module_cache_get",
"(",
"cache",
",",
"module",
")",
":",
"if",
"getattr",
"(",
"cache",
",",
"\"config\"",
",",
"False",
")",
":",
"config_file",
"=",
"module",
"[",
":",
"-",
"2",
"]",
"+",
"\"yaml\"",
"if",
"config_file",
"not",
"in",
"cache",
".",
"config_files",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"config_file",
")",
":",
"try",
":",
"config",
"=",
"yaml_safe_load",
"(",
"config_file",
",",
"type",
"=",
"dict",
")",
"except",
"TypeError",
"as",
"e",
":",
"tangelo",
".",
"log_warning",
"(",
"\"TANGELO\"",
",",
"\"Bad configuration in file %s: %s\"",
"%",
"(",
"config_file",
",",
"e",
")",
")",
"raise",
"except",
"IOError",
":",
"tangelo",
".",
"log_warning",
"(",
"\"TANGELO\"",
",",
"\"Could not open config file %s\"",
"%",
"(",
"config_file",
")",
")",
"raise",
"except",
"ValueError",
"as",
"e",
":",
"tangelo",
".",
"log_warning",
"(",
"\"TANGELO\"",
",",
"\"Error reading config file %s: %s\"",
"%",
"(",
"config_file",
",",
"e",
")",
")",
"raise",
"cache",
".",
"config_files",
"[",
"config_file",
"]",
"=",
"True",
"else",
":",
"config",
"=",
"{",
"}",
"cherrypy",
".",
"config",
"[",
"\"module-config\"",
"]",
"[",
"module",
"]",
"=",
"config",
"cherrypy",
".",
"config",
"[",
"\"module-store\"",
"]",
".",
"setdefault",
"(",
"module",
",",
"{",
"}",
")",
"# If two threads are importing the same module nearly concurrently, we",
"# could load it twice unless we use the import lock.",
"imp",
".",
"acquire_lock",
"(",
")",
"try",
":",
"if",
"module",
"not",
"in",
"cache",
".",
"modules",
":",
"name",
"=",
"module",
"[",
":",
"-",
"3",
"]",
"# load the module.",
"service",
"=",
"imp",
".",
"load_source",
"(",
"name",
",",
"module",
")",
"cache",
".",
"modules",
"[",
"module",
"]",
"=",
"service",
"else",
":",
"service",
"=",
"cache",
".",
"modules",
"[",
"module",
"]",
"finally",
":",
"imp",
".",
"release_lock",
"(",
")",
"return",
"service"
] | Import a module with an optional yaml config file, but only if we haven't
imported it already.
:param cache: object which holds information on which modules and config
files have been loaded and whether config files should be
loaded.
:param module: the path of the module to load.
:returns: the loaded module. | [
"Import",
"a",
"module",
"with",
"an",
"optional",
"yaml",
"config",
"file",
"but",
"only",
"if",
"we",
"haven",
"t",
"imported",
"it",
"already",
"."
] | python | train |
tornadoweb/tornado | tornado/web.py | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L1424-L1448 | def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]:
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, "_raw_xsrf_token"):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
assert token is not None
assert timestamp is not None
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token | [
"def",
"_get_raw_xsrf_token",
"(",
"self",
")",
"->",
"Tuple",
"[",
"Optional",
"[",
"int",
"]",
",",
"bytes",
",",
"float",
"]",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_raw_xsrf_token\"",
")",
":",
"cookie",
"=",
"self",
".",
"get_cookie",
"(",
"\"_xsrf\"",
")",
"if",
"cookie",
":",
"version",
",",
"token",
",",
"timestamp",
"=",
"self",
".",
"_decode_xsrf_token",
"(",
"cookie",
")",
"else",
":",
"version",
",",
"token",
",",
"timestamp",
"=",
"None",
",",
"None",
",",
"None",
"if",
"token",
"is",
"None",
":",
"version",
"=",
"None",
"token",
"=",
"os",
".",
"urandom",
"(",
"16",
")",
"timestamp",
"=",
"time",
".",
"time",
"(",
")",
"assert",
"token",
"is",
"not",
"None",
"assert",
"timestamp",
"is",
"not",
"None",
"self",
".",
"_raw_xsrf_token",
"=",
"(",
"version",
",",
"token",
",",
"timestamp",
")",
"return",
"self",
".",
"_raw_xsrf_token"
] | Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies) | [
"Read",
"or",
"generate",
"the",
"xsrf",
"token",
"in",
"its",
"raw",
"form",
"."
] | python | train |
BlackEarth/bxml | bxml/xml.py | https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xml.py#L476-L480 | def tag_name(cls, tag):
"""return the name of the tag, with the namespace removed"""
while isinstance(tag, etree._Element):
tag = tag.tag
return tag.split('}')[-1] | [
"def",
"tag_name",
"(",
"cls",
",",
"tag",
")",
":",
"while",
"isinstance",
"(",
"tag",
",",
"etree",
".",
"_Element",
")",
":",
"tag",
"=",
"tag",
".",
"tag",
"return",
"tag",
".",
"split",
"(",
"'}'",
")",
"[",
"-",
"1",
"]"
] | return the name of the tag, with the namespace removed | [
"return",
"the",
"name",
"of",
"the",
"tag",
"with",
"the",
"namespace",
"removed"
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10750-L10771 | def mag_cal_report_encode(self, compass_id, cal_mask, cal_status, autosaved, fitness, ofs_x, ofs_y, ofs_z, diag_x, diag_y, diag_z, offdiag_x, offdiag_y, offdiag_z):
'''
Reports results of completed compass calibration. Sent until
MAG_CAL_ACK received.
compass_id : Compass being calibrated (uint8_t)
cal_mask : Bitmask of compasses being calibrated (uint8_t)
cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t)
autosaved : 0=requires a MAV_CMD_DO_ACCEPT_MAG_CAL, 1=saved to parameters (uint8_t)
fitness : RMS milligauss residuals (float)
ofs_x : X offset (float)
ofs_y : Y offset (float)
ofs_z : Z offset (float)
diag_x : X diagonal (matrix 11) (float)
diag_y : Y diagonal (matrix 22) (float)
diag_z : Z diagonal (matrix 33) (float)
offdiag_x : X off-diagonal (matrix 12 and 21) (float)
offdiag_y : Y off-diagonal (matrix 13 and 31) (float)
offdiag_z : Z off-diagonal (matrix 32 and 23) (float)
'''
return MAVLink_mag_cal_report_message(compass_id, cal_mask, cal_status, autosaved, fitness, ofs_x, ofs_y, ofs_z, diag_x, diag_y, diag_z, offdiag_x, offdiag_y, offdiag_z) | [
"def",
"mag_cal_report_encode",
"(",
"self",
",",
"compass_id",
",",
"cal_mask",
",",
"cal_status",
",",
"autosaved",
",",
"fitness",
",",
"ofs_x",
",",
"ofs_y",
",",
"ofs_z",
",",
"diag_x",
",",
"diag_y",
",",
"diag_z",
",",
"offdiag_x",
",",
"offdiag_y",
",",
"offdiag_z",
")",
":",
"return",
"MAVLink_mag_cal_report_message",
"(",
"compass_id",
",",
"cal_mask",
",",
"cal_status",
",",
"autosaved",
",",
"fitness",
",",
"ofs_x",
",",
"ofs_y",
",",
"ofs_z",
",",
"diag_x",
",",
"diag_y",
",",
"diag_z",
",",
"offdiag_x",
",",
"offdiag_y",
",",
"offdiag_z",
")"
] | Reports results of completed compass calibration. Sent until
MAG_CAL_ACK received.
compass_id : Compass being calibrated (uint8_t)
cal_mask : Bitmask of compasses being calibrated (uint8_t)
cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t)
autosaved : 0=requires a MAV_CMD_DO_ACCEPT_MAG_CAL, 1=saved to parameters (uint8_t)
fitness : RMS milligauss residuals (float)
ofs_x : X offset (float)
ofs_y : Y offset (float)
ofs_z : Z offset (float)
diag_x : X diagonal (matrix 11) (float)
diag_y : Y diagonal (matrix 22) (float)
diag_z : Z diagonal (matrix 33) (float)
offdiag_x : X off-diagonal (matrix 12 and 21) (float)
offdiag_y : Y off-diagonal (matrix 13 and 31) (float)
offdiag_z : Z off-diagonal (matrix 32 and 23) (float) | [
"Reports",
"results",
"of",
"completed",
"compass",
"calibration",
".",
"Sent",
"until",
"MAG_CAL_ACK",
"received",
"."
] | python | train |
apache/spark | python/pyspark/heapq3.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L595-L673 | def merge(iterables, key=None, reverse=False):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo']
'''
h = []
h_append = h.append
if reverse:
_heapify = _heapify_max
_heappop = _heappop_max
_heapreplace = _heapreplace_max
direction = -1
else:
_heapify = heapify
_heappop = heappop
_heapreplace = heapreplace
direction = 1
if key is None:
for order, it in enumerate(map(iter, iterables)):
try:
h_append([next(it), order * direction, it])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
value, order, it = s = h[0]
yield value
s[0] = next(it) # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
value, order, it = h[0]
yield value
for value in it:
yield value
return
for order, it in enumerate(map(iter, iterables)):
try:
value = next(it)
h_append([key(value), order * direction, value, it])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
key_value, order, value, it = s = h[0]
yield value
value = next(it)
s[0] = key(value)
s[2] = value
_heapreplace(h, s)
except StopIteration:
_heappop(h)
if h:
key_value, order, value, it = h[0]
yield value
for value in it:
yield value | [
"def",
"merge",
"(",
"iterables",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
")",
":",
"h",
"=",
"[",
"]",
"h_append",
"=",
"h",
".",
"append",
"if",
"reverse",
":",
"_heapify",
"=",
"_heapify_max",
"_heappop",
"=",
"_heappop_max",
"_heapreplace",
"=",
"_heapreplace_max",
"direction",
"=",
"-",
"1",
"else",
":",
"_heapify",
"=",
"heapify",
"_heappop",
"=",
"heappop",
"_heapreplace",
"=",
"heapreplace",
"direction",
"=",
"1",
"if",
"key",
"is",
"None",
":",
"for",
"order",
",",
"it",
"in",
"enumerate",
"(",
"map",
"(",
"iter",
",",
"iterables",
")",
")",
":",
"try",
":",
"h_append",
"(",
"[",
"next",
"(",
"it",
")",
",",
"order",
"*",
"direction",
",",
"it",
"]",
")",
"except",
"StopIteration",
":",
"pass",
"_heapify",
"(",
"h",
")",
"while",
"len",
"(",
"h",
")",
">",
"1",
":",
"try",
":",
"while",
"True",
":",
"value",
",",
"order",
",",
"it",
"=",
"s",
"=",
"h",
"[",
"0",
"]",
"yield",
"value",
"s",
"[",
"0",
"]",
"=",
"next",
"(",
"it",
")",
"# raises StopIteration when exhausted",
"_heapreplace",
"(",
"h",
",",
"s",
")",
"# restore heap condition",
"except",
"StopIteration",
":",
"_heappop",
"(",
"h",
")",
"# remove empty iterator",
"if",
"h",
":",
"# fast case when only a single iterator remains",
"value",
",",
"order",
",",
"it",
"=",
"h",
"[",
"0",
"]",
"yield",
"value",
"for",
"value",
"in",
"it",
":",
"yield",
"value",
"return",
"for",
"order",
",",
"it",
"in",
"enumerate",
"(",
"map",
"(",
"iter",
",",
"iterables",
")",
")",
":",
"try",
":",
"value",
"=",
"next",
"(",
"it",
")",
"h_append",
"(",
"[",
"key",
"(",
"value",
")",
",",
"order",
"*",
"direction",
",",
"value",
",",
"it",
"]",
")",
"except",
"StopIteration",
":",
"pass",
"_heapify",
"(",
"h",
")",
"while",
"len",
"(",
"h",
")",
">",
"1",
":",
"try",
":",
"while",
"True",
":",
"key_value",
",",
"order",
",",
"value",
",",
"it",
"=",
"s",
"=",
"h",
"[",
"0",
"]",
"yield",
"value",
"value",
"=",
"next",
"(",
"it",
")",
"s",
"[",
"0",
"]",
"=",
"key",
"(",
"value",
")",
"s",
"[",
"2",
"]",
"=",
"value",
"_heapreplace",
"(",
"h",
",",
"s",
")",
"except",
"StopIteration",
":",
"_heappop",
"(",
"h",
")",
"if",
"h",
":",
"key_value",
",",
"order",
",",
"value",
",",
"it",
"=",
"h",
"[",
"0",
"]",
"yield",
"value",
"for",
"value",
"in",
"it",
":",
"yield",
"value"
] | Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo'] | [
"Merge",
"multiple",
"sorted",
"inputs",
"into",
"a",
"single",
"sorted",
"output",
"."
] | python | train |
pantsbuild/pants | src/python/pants/base/exception_sink.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/exception_sink.py#L377-L404 | def _log_unhandled_exception_and_exit(cls, exc_class=None, exc=None, tb=None, add_newline=False):
"""A sys.excepthook implementation which logs the error and exits with failure."""
exc_class = exc_class or sys.exc_info()[0]
exc = exc or sys.exc_info()[1]
tb = tb or sys.exc_info()[2]
# This exception was raised by a signal handler with the intent to exit the program.
if exc_class == SignalHandler.SignalHandledNonLocalExit:
return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)
extra_err_msg = None
try:
# Always output the unhandled exception details into a log file, including the traceback.
exception_log_entry = cls._format_unhandled_exception_log(exc, tb, add_newline,
should_print_backtrace=True)
cls.log_exception(exception_log_entry)
except Exception as e:
extra_err_msg = 'Additional error logging unhandled exception {}: {}'.format(exc, e)
logger.error(extra_err_msg)
# Generate an unhandled exception report fit to be printed to the terminal (respecting the
# Exiter's should_print_backtrace field).
stderr_printed_error = cls._format_unhandled_exception_log(
exc, tb, add_newline,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
if extra_err_msg:
stderr_printed_error = '{}\n{}'.format(stderr_printed_error, extra_err_msg)
cls._exit_with_failure(stderr_printed_error) | [
"def",
"_log_unhandled_exception_and_exit",
"(",
"cls",
",",
"exc_class",
"=",
"None",
",",
"exc",
"=",
"None",
",",
"tb",
"=",
"None",
",",
"add_newline",
"=",
"False",
")",
":",
"exc_class",
"=",
"exc_class",
"or",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
"exc",
"=",
"exc",
"or",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"tb",
"=",
"tb",
"or",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
"# This exception was raised by a signal handler with the intent to exit the program.",
"if",
"exc_class",
"==",
"SignalHandler",
".",
"SignalHandledNonLocalExit",
":",
"return",
"cls",
".",
"_handle_signal_gracefully",
"(",
"exc",
".",
"signum",
",",
"exc",
".",
"signame",
",",
"exc",
".",
"traceback_lines",
")",
"extra_err_msg",
"=",
"None",
"try",
":",
"# Always output the unhandled exception details into a log file, including the traceback.",
"exception_log_entry",
"=",
"cls",
".",
"_format_unhandled_exception_log",
"(",
"exc",
",",
"tb",
",",
"add_newline",
",",
"should_print_backtrace",
"=",
"True",
")",
"cls",
".",
"log_exception",
"(",
"exception_log_entry",
")",
"except",
"Exception",
"as",
"e",
":",
"extra_err_msg",
"=",
"'Additional error logging unhandled exception {}: {}'",
".",
"format",
"(",
"exc",
",",
"e",
")",
"logger",
".",
"error",
"(",
"extra_err_msg",
")",
"# Generate an unhandled exception report fit to be printed to the terminal (respecting the",
"# Exiter's should_print_backtrace field).",
"stderr_printed_error",
"=",
"cls",
".",
"_format_unhandled_exception_log",
"(",
"exc",
",",
"tb",
",",
"add_newline",
",",
"should_print_backtrace",
"=",
"cls",
".",
"_should_print_backtrace_to_terminal",
")",
"if",
"extra_err_msg",
":",
"stderr_printed_error",
"=",
"'{}\\n{}'",
".",
"format",
"(",
"stderr_printed_error",
",",
"extra_err_msg",
")",
"cls",
".",
"_exit_with_failure",
"(",
"stderr_printed_error",
")"
] | A sys.excepthook implementation which logs the error and exits with failure. | [
"A",
"sys",
".",
"excepthook",
"implementation",
"which",
"logs",
"the",
"error",
"and",
"exits",
"with",
"failure",
"."
] | python | train |
blockstack/virtualchain | virtualchain/lib/config.py | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/config.py#L64-L91 | def get_logger(name=None):
"""
Get virtualchain's logger
"""
level = logging.CRITICAL
if DEBUG:
logging.disable(logging.NOTSET)
level = logging.DEBUG
if name is None:
name = "<unknown>"
log = logging.getLogger(name=name)
log.setLevel( level )
console = logging.StreamHandler()
console.setLevel( level )
log_format = ('[%(asctime)s] [%(levelname)s] [%(module)s:%(lineno)d] (' + str(os.getpid()) + '.%(thread)d) %(message)s' if DEBUG else '%(message)s')
formatter = logging.Formatter( log_format )
console.setFormatter(formatter)
log.propagate = False
if len(log.handlers) > 0:
for i in xrange(0, len(log.handlers)):
log.handlers.pop(0)
log.addHandler(console)
return log | [
"def",
"get_logger",
"(",
"name",
"=",
"None",
")",
":",
"level",
"=",
"logging",
".",
"CRITICAL",
"if",
"DEBUG",
":",
"logging",
".",
"disable",
"(",
"logging",
".",
"NOTSET",
")",
"level",
"=",
"logging",
".",
"DEBUG",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"<unknown>\"",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"name",
"=",
"name",
")",
"log",
".",
"setLevel",
"(",
"level",
")",
"console",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console",
".",
"setLevel",
"(",
"level",
")",
"log_format",
"=",
"(",
"'[%(asctime)s] [%(levelname)s] [%(module)s:%(lineno)d] ('",
"+",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"+",
"'.%(thread)d) %(message)s'",
"if",
"DEBUG",
"else",
"'%(message)s'",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"log_format",
")",
"console",
".",
"setFormatter",
"(",
"formatter",
")",
"log",
".",
"propagate",
"=",
"False",
"if",
"len",
"(",
"log",
".",
"handlers",
")",
">",
"0",
":",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"log",
".",
"handlers",
")",
")",
":",
"log",
".",
"handlers",
".",
"pop",
"(",
"0",
")",
"log",
".",
"addHandler",
"(",
"console",
")",
"return",
"log"
] | Get virtualchain's logger | [
"Get",
"virtualchain",
"s",
"logger"
] | python | train |
rigetti/pyquil | pyquil/reference_simulator.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/reference_simulator.py#L198-L209 | def do_gate_matrix(self, matrix: np.ndarray,
qubits: Sequence[int]) -> 'AbstractQuantumSimulator':
"""
Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done
:param qubits: A list of qubits to apply the unitary to.
:return: ``self`` to support method chaining.
"""
unitary = lifted_gate_matrix(matrix=matrix, qubit_inds=qubits, n_qubits=self.n_qubits)
self.density = unitary.dot(self.density).dot(np.conj(unitary).T)
return self | [
"def",
"do_gate_matrix",
"(",
"self",
",",
"matrix",
":",
"np",
".",
"ndarray",
",",
"qubits",
":",
"Sequence",
"[",
"int",
"]",
")",
"->",
"'AbstractQuantumSimulator'",
":",
"unitary",
"=",
"lifted_gate_matrix",
"(",
"matrix",
"=",
"matrix",
",",
"qubit_inds",
"=",
"qubits",
",",
"n_qubits",
"=",
"self",
".",
"n_qubits",
")",
"self",
".",
"density",
"=",
"unitary",
".",
"dot",
"(",
"self",
".",
"density",
")",
".",
"dot",
"(",
"np",
".",
"conj",
"(",
"unitary",
")",
".",
"T",
")",
"return",
"self"
] | Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done
:param qubits: A list of qubits to apply the unitary to.
:return: ``self`` to support method chaining. | [
"Apply",
"an",
"arbitrary",
"unitary",
";",
"not",
"necessarily",
"a",
"named",
"gate",
"."
] | python | train |
juju/charm-helpers | charmhelpers/contrib/openstack/ip.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/ip.py#L117-L187 | def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoing type
:param override: Accept hostname overrides or not
"""
resolved_address = None
if override:
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except (NotImplementedError, NoNetworkBinding):
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except (NotImplementedError, NoNetworkBinding):
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address | [
"def",
"resolve_address",
"(",
"endpoint_type",
"=",
"PUBLIC",
",",
"override",
"=",
"True",
")",
":",
"resolved_address",
"=",
"None",
"if",
"override",
":",
"resolved_address",
"=",
"_get_address_override",
"(",
"endpoint_type",
")",
"if",
"resolved_address",
":",
"return",
"resolved_address",
"vips",
"=",
"config",
"(",
"'vip'",
")",
"if",
"vips",
":",
"vips",
"=",
"vips",
".",
"split",
"(",
")",
"net_type",
"=",
"ADDRESS_MAP",
"[",
"endpoint_type",
"]",
"[",
"'config'",
"]",
"net_addr",
"=",
"config",
"(",
"net_type",
")",
"net_fallback",
"=",
"ADDRESS_MAP",
"[",
"endpoint_type",
"]",
"[",
"'fallback'",
"]",
"binding",
"=",
"ADDRESS_MAP",
"[",
"endpoint_type",
"]",
"[",
"'binding'",
"]",
"clustered",
"=",
"is_clustered",
"(",
")",
"if",
"clustered",
"and",
"vips",
":",
"if",
"net_addr",
":",
"for",
"vip",
"in",
"vips",
":",
"if",
"is_address_in_network",
"(",
"net_addr",
",",
"vip",
")",
":",
"resolved_address",
"=",
"vip",
"break",
"else",
":",
"# NOTE: endeavour to check vips against network space",
"# bindings",
"try",
":",
"bound_cidr",
"=",
"resolve_network_cidr",
"(",
"network_get_primary_address",
"(",
"binding",
")",
")",
"for",
"vip",
"in",
"vips",
":",
"if",
"is_address_in_network",
"(",
"bound_cidr",
",",
"vip",
")",
":",
"resolved_address",
"=",
"vip",
"break",
"except",
"(",
"NotImplementedError",
",",
"NoNetworkBinding",
")",
":",
"# If no net-splits configured and no support for extra",
"# bindings/network spaces so we expect a single vip",
"resolved_address",
"=",
"vips",
"[",
"0",
"]",
"else",
":",
"if",
"config",
"(",
"'prefer-ipv6'",
")",
":",
"fallback_addr",
"=",
"get_ipv6_addr",
"(",
"exc_list",
"=",
"vips",
")",
"[",
"0",
"]",
"else",
":",
"fallback_addr",
"=",
"unit_get",
"(",
"net_fallback",
")",
"if",
"net_addr",
":",
"resolved_address",
"=",
"get_address_in_network",
"(",
"net_addr",
",",
"fallback_addr",
")",
"else",
":",
"# NOTE: only try to use extra bindings if legacy network",
"# configuration is not in use",
"try",
":",
"resolved_address",
"=",
"network_get_primary_address",
"(",
"binding",
")",
"except",
"(",
"NotImplementedError",
",",
"NoNetworkBinding",
")",
":",
"resolved_address",
"=",
"fallback_addr",
"if",
"resolved_address",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Unable to resolve a suitable IP address based on \"",
"\"charm state and configuration. (net_type=%s, \"",
"\"clustered=%s)\"",
"%",
"(",
"net_type",
",",
"clustered",
")",
")",
"return",
"resolved_address"
] | Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoing type
:param override: Accept hostname overrides or not | [
"Return",
"unit",
"address",
"depending",
"on",
"net",
"config",
"."
] | python | train |
yatiml/yatiml | yatiml/constructors.py | https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/constructors.py#L206-L231 | def __check_no_missing_attributes(self, node: yaml.Node,
mapping: CommentedMap) -> None:
"""Checks that all required attributes are present.
Also checks that they're of the correct type.
Args:
mapping: The mapping with subobjects of this object.
Raises:
RecognitionError: if an attribute is missing or the type \
is incorrect.
"""
logger.debug('Checking presence of required attributes')
for name, type_, required in class_subobjects(self.class_):
if required and name not in mapping:
raise RecognitionError(('{}{}Missing attribute {} needed for'
' constructing a {}').format(
node.start_mark, os.linesep, name,
self.class_.__name__))
if name in mapping and not self.__type_matches(
mapping[name], type_):
raise RecognitionError(('{}{}Attribute {} has incorrect type'
' {}, expecting a {}').format(
node.start_mark, os.linesep, name,
type(mapping[name]), type_)) | [
"def",
"__check_no_missing_attributes",
"(",
"self",
",",
"node",
":",
"yaml",
".",
"Node",
",",
"mapping",
":",
"CommentedMap",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"'Checking presence of required attributes'",
")",
"for",
"name",
",",
"type_",
",",
"required",
"in",
"class_subobjects",
"(",
"self",
".",
"class_",
")",
":",
"if",
"required",
"and",
"name",
"not",
"in",
"mapping",
":",
"raise",
"RecognitionError",
"(",
"(",
"'{}{}Missing attribute {} needed for'",
"' constructing a {}'",
")",
".",
"format",
"(",
"node",
".",
"start_mark",
",",
"os",
".",
"linesep",
",",
"name",
",",
"self",
".",
"class_",
".",
"__name__",
")",
")",
"if",
"name",
"in",
"mapping",
"and",
"not",
"self",
".",
"__type_matches",
"(",
"mapping",
"[",
"name",
"]",
",",
"type_",
")",
":",
"raise",
"RecognitionError",
"(",
"(",
"'{}{}Attribute {} has incorrect type'",
"' {}, expecting a {}'",
")",
".",
"format",
"(",
"node",
".",
"start_mark",
",",
"os",
".",
"linesep",
",",
"name",
",",
"type",
"(",
"mapping",
"[",
"name",
"]",
")",
",",
"type_",
")",
")"
] | Checks that all required attributes are present.
Also checks that they're of the correct type.
Args:
mapping: The mapping with subobjects of this object.
Raises:
RecognitionError: if an attribute is missing or the type \
is incorrect. | [
"Checks",
"that",
"all",
"required",
"attributes",
"are",
"present",
"."
] | python | train |
liamw9534/bt-manager | bt_manager/codecs.py | https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/codecs.py#L111-L157 | def _init_sbc_config(self, config):
"""
Translator from namedtuple config representation to
the sbc_t type.
:param namedtuple config: See :py:class:`.SBCCodecConfig`
:returns:
"""
if (config.channel_mode == SBCChannelMode.CHANNEL_MODE_MONO):
self.config.mode = self.codec.SBC_MODE_MONO
elif (config.channel_mode == SBCChannelMode.CHANNEL_MODE_STEREO):
self.config.mode = self.codec.SBC_MODE_STEREO
elif (config.channel_mode == SBCChannelMode.CHANNEL_MODE_DUAL):
self.config.mode = self.codec.SBC_MODE_DUAL_CHANNEL
elif (config.channel_mode == SBCChannelMode.CHANNEL_MODE_JOINT_STEREO):
self.config.mode = self.codec.SBC_MODE_JOINT_STEREO
if (config.frequency == SBCSamplingFrequency.FREQ_16KHZ):
self.config.frequency = self.codec.SBC_FREQ_16000
elif (config.frequency == SBCSamplingFrequency.FREQ_32KHZ):
self.config.frequency = self.codec.SBC_FREQ_32000
elif (config.frequency == SBCSamplingFrequency.FREQ_44_1KHZ):
self.config.frequency = self.codec.SBC_FREQ_44100
elif (config.frequency == SBCSamplingFrequency.FREQ_48KHZ):
self.config.frequency = self.codec.SBC_FREQ_48000
if (config.allocation_method == SBCAllocationMethod.LOUDNESS):
self.config.allocation = self.codec.SBC_AM_LOUDNESS
elif (config.allocation_method == SBCAllocationMethod.SNR):
self.config.allocation = self.codec.SBC_AM_SNR
if (config.subbands == SBCSubbands.SUBBANDS_4):
self.config.subbands = self.codec.SBC_SB_4
elif (config.subbands == SBCSubbands.SUBBANDS_8):
self.config.subbands = self.codec.SBC_SB_8
if (config.block_length == SBCBlocks.BLOCKS_4):
self.config.blocks = self.codec.SBC_BLK_4
elif (config.block_length == SBCBlocks.BLOCKS_8):
self.config.blocks = self.codec.SBC_BLK_8
elif (config.block_length == SBCBlocks.BLOCKS_12):
self.config.blocks = self.codec.SBC_BLK_12
elif (config.block_length == SBCBlocks.BLOCKS_16):
self.config.blocks = self.codec.SBC_BLK_16
self.config.bitpool = config.max_bitpool
self.config.endian = self.codec.SBC_LE | [
"def",
"_init_sbc_config",
"(",
"self",
",",
"config",
")",
":",
"if",
"(",
"config",
".",
"channel_mode",
"==",
"SBCChannelMode",
".",
"CHANNEL_MODE_MONO",
")",
":",
"self",
".",
"config",
".",
"mode",
"=",
"self",
".",
"codec",
".",
"SBC_MODE_MONO",
"elif",
"(",
"config",
".",
"channel_mode",
"==",
"SBCChannelMode",
".",
"CHANNEL_MODE_STEREO",
")",
":",
"self",
".",
"config",
".",
"mode",
"=",
"self",
".",
"codec",
".",
"SBC_MODE_STEREO",
"elif",
"(",
"config",
".",
"channel_mode",
"==",
"SBCChannelMode",
".",
"CHANNEL_MODE_DUAL",
")",
":",
"self",
".",
"config",
".",
"mode",
"=",
"self",
".",
"codec",
".",
"SBC_MODE_DUAL_CHANNEL",
"elif",
"(",
"config",
".",
"channel_mode",
"==",
"SBCChannelMode",
".",
"CHANNEL_MODE_JOINT_STEREO",
")",
":",
"self",
".",
"config",
".",
"mode",
"=",
"self",
".",
"codec",
".",
"SBC_MODE_JOINT_STEREO",
"if",
"(",
"config",
".",
"frequency",
"==",
"SBCSamplingFrequency",
".",
"FREQ_16KHZ",
")",
":",
"self",
".",
"config",
".",
"frequency",
"=",
"self",
".",
"codec",
".",
"SBC_FREQ_16000",
"elif",
"(",
"config",
".",
"frequency",
"==",
"SBCSamplingFrequency",
".",
"FREQ_32KHZ",
")",
":",
"self",
".",
"config",
".",
"frequency",
"=",
"self",
".",
"codec",
".",
"SBC_FREQ_32000",
"elif",
"(",
"config",
".",
"frequency",
"==",
"SBCSamplingFrequency",
".",
"FREQ_44_1KHZ",
")",
":",
"self",
".",
"config",
".",
"frequency",
"=",
"self",
".",
"codec",
".",
"SBC_FREQ_44100",
"elif",
"(",
"config",
".",
"frequency",
"==",
"SBCSamplingFrequency",
".",
"FREQ_48KHZ",
")",
":",
"self",
".",
"config",
".",
"frequency",
"=",
"self",
".",
"codec",
".",
"SBC_FREQ_48000",
"if",
"(",
"config",
".",
"allocation_method",
"==",
"SBCAllocationMethod",
".",
"LOUDNESS",
")",
":",
"self",
".",
"config",
".",
"allocation",
"=",
"self",
".",
"codec",
".",
"SBC_AM_LOUDNESS",
"elif",
"(",
"config",
".",
"allocation_method",
"==",
"SBCAllocationMethod",
".",
"SNR",
")",
":",
"self",
".",
"config",
".",
"allocation",
"=",
"self",
".",
"codec",
".",
"SBC_AM_SNR",
"if",
"(",
"config",
".",
"subbands",
"==",
"SBCSubbands",
".",
"SUBBANDS_4",
")",
":",
"self",
".",
"config",
".",
"subbands",
"=",
"self",
".",
"codec",
".",
"SBC_SB_4",
"elif",
"(",
"config",
".",
"subbands",
"==",
"SBCSubbands",
".",
"SUBBANDS_8",
")",
":",
"self",
".",
"config",
".",
"subbands",
"=",
"self",
".",
"codec",
".",
"SBC_SB_8",
"if",
"(",
"config",
".",
"block_length",
"==",
"SBCBlocks",
".",
"BLOCKS_4",
")",
":",
"self",
".",
"config",
".",
"blocks",
"=",
"self",
".",
"codec",
".",
"SBC_BLK_4",
"elif",
"(",
"config",
".",
"block_length",
"==",
"SBCBlocks",
".",
"BLOCKS_8",
")",
":",
"self",
".",
"config",
".",
"blocks",
"=",
"self",
".",
"codec",
".",
"SBC_BLK_8",
"elif",
"(",
"config",
".",
"block_length",
"==",
"SBCBlocks",
".",
"BLOCKS_12",
")",
":",
"self",
".",
"config",
".",
"blocks",
"=",
"self",
".",
"codec",
".",
"SBC_BLK_12",
"elif",
"(",
"config",
".",
"block_length",
"==",
"SBCBlocks",
".",
"BLOCKS_16",
")",
":",
"self",
".",
"config",
".",
"blocks",
"=",
"self",
".",
"codec",
".",
"SBC_BLK_16",
"self",
".",
"config",
".",
"bitpool",
"=",
"config",
".",
"max_bitpool",
"self",
".",
"config",
".",
"endian",
"=",
"self",
".",
"codec",
".",
"SBC_LE"
] | Translator from namedtuple config representation to
the sbc_t type.
:param namedtuple config: See :py:class:`.SBCCodecConfig`
:returns: | [
"Translator",
"from",
"namedtuple",
"config",
"representation",
"to",
"the",
"sbc_t",
"type",
"."
] | python | train |
raphaelvallat/pingouin | pingouin/correlation.py | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L15-L107 | def skipped(x, y, method='spearman'):
"""
Skipped correlation (Rousselet and Pernet 2012).
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
method : str
Method used to compute the correlation after outlier removal. Can be
either 'spearman' (default) or 'pearson'.
Returns
-------
r : float
Skipped correlation coefficient.
pval : float
Two-tailed p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
The skipped correlation involves multivariate outlier detection using a
projection technique (Wilcox, 2004, 2005). First, a robust estimator of
multivariate location and scatter, for instance the minimum covariance
determinant estimator (MCD; Rousseeuw, 1984; Rousseeuw and van Driessen,
1999; Hubert et al., 2008) is computed. Second, data points are
orthogonally projected on lines joining each of the data point to the
location estimator. Third, outliers are detected using a robust technique.
Finally, Spearman correlations are computed on the remaining data points
and calculations are adjusted by taking into account the dependency among
the remaining data points.
Code inspired by Matlab code from Cyril Pernet and Guillaume
Rousselet [1]_.
Requires scikit-learn.
References
----------
.. [1] Pernet CR, Wilcox R, Rousselet GA. Robust Correlation Analyses:
False Positive and Power Validation Using a New Open Source Matlab
Toolbox. Frontiers in Psychology. 2012;3:606.
doi:10.3389/fpsyg.2012.00606.
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from scipy.stats import chi2
from sklearn.covariance import MinCovDet
X = np.column_stack((x, y))
nrows, ncols = X.shape
gval = np.sqrt(chi2.ppf(0.975, 2))
# Compute center and distance to center
center = MinCovDet(random_state=42).fit(X).location_
B = X - center
B2 = B**2
bot = B2.sum(axis=1)
# Loop over rows
dis = np.zeros(shape=(nrows, nrows))
for i in np.arange(nrows):
if bot[i] != 0:
dis[i, :] = np.linalg.norm(B * B2[i, :] / bot[i], axis=1)
# Detect outliers
def idealf(x):
"""Compute the ideal fourths IQR (Wilcox 2012).
"""
n = len(x)
j = int(np.floor(n / 4 + 5 / 12))
y = np.sort(x)
g = (n / 4) - j + (5 / 12)
low = (1 - g) * y[j - 1] + g * y[j]
k = n - j + 1
up = (1 - g) * y[k - 1] + g * y[k - 2]
return up - low
# One can either use the MAD or the IQR (see Wilcox 2012)
# MAD = mad(dis, axis=1)
iqr = np.apply_along_axis(idealf, 1, dis)
thresh = (np.median(dis, axis=1) + gval * iqr)
outliers = np.apply_along_axis(np.greater, 0, dis, thresh).any(axis=0)
# Compute correlation on remaining data
if method == 'spearman':
r, pval = spearmanr(X[~outliers, 0], X[~outliers, 1])
else:
r, pval = pearsonr(X[~outliers, 0], X[~outliers, 1])
return r, pval, outliers | [
"def",
"skipped",
"(",
"x",
",",
"y",
",",
"method",
"=",
"'spearman'",
")",
":",
"# Check that sklearn is installed",
"from",
"pingouin",
".",
"utils",
"import",
"_is_sklearn_installed",
"_is_sklearn_installed",
"(",
"raise_error",
"=",
"True",
")",
"from",
"scipy",
".",
"stats",
"import",
"chi2",
"from",
"sklearn",
".",
"covariance",
"import",
"MinCovDet",
"X",
"=",
"np",
".",
"column_stack",
"(",
"(",
"x",
",",
"y",
")",
")",
"nrows",
",",
"ncols",
"=",
"X",
".",
"shape",
"gval",
"=",
"np",
".",
"sqrt",
"(",
"chi2",
".",
"ppf",
"(",
"0.975",
",",
"2",
")",
")",
"# Compute center and distance to center",
"center",
"=",
"MinCovDet",
"(",
"random_state",
"=",
"42",
")",
".",
"fit",
"(",
"X",
")",
".",
"location_",
"B",
"=",
"X",
"-",
"center",
"B2",
"=",
"B",
"**",
"2",
"bot",
"=",
"B2",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"# Loop over rows",
"dis",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"nrows",
",",
"nrows",
")",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"nrows",
")",
":",
"if",
"bot",
"[",
"i",
"]",
"!=",
"0",
":",
"dis",
"[",
"i",
",",
":",
"]",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"B",
"*",
"B2",
"[",
"i",
",",
":",
"]",
"/",
"bot",
"[",
"i",
"]",
",",
"axis",
"=",
"1",
")",
"# Detect outliers",
"def",
"idealf",
"(",
"x",
")",
":",
"\"\"\"Compute the ideal fourths IQR (Wilcox 2012).\n \"\"\"",
"n",
"=",
"len",
"(",
"x",
")",
"j",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"n",
"/",
"4",
"+",
"5",
"/",
"12",
")",
")",
"y",
"=",
"np",
".",
"sort",
"(",
"x",
")",
"g",
"=",
"(",
"n",
"/",
"4",
")",
"-",
"j",
"+",
"(",
"5",
"/",
"12",
")",
"low",
"=",
"(",
"1",
"-",
"g",
")",
"*",
"y",
"[",
"j",
"-",
"1",
"]",
"+",
"g",
"*",
"y",
"[",
"j",
"]",
"k",
"=",
"n",
"-",
"j",
"+",
"1",
"up",
"=",
"(",
"1",
"-",
"g",
")",
"*",
"y",
"[",
"k",
"-",
"1",
"]",
"+",
"g",
"*",
"y",
"[",
"k",
"-",
"2",
"]",
"return",
"up",
"-",
"low",
"# One can either use the MAD or the IQR (see Wilcox 2012)",
"# MAD = mad(dis, axis=1)",
"iqr",
"=",
"np",
".",
"apply_along_axis",
"(",
"idealf",
",",
"1",
",",
"dis",
")",
"thresh",
"=",
"(",
"np",
".",
"median",
"(",
"dis",
",",
"axis",
"=",
"1",
")",
"+",
"gval",
"*",
"iqr",
")",
"outliers",
"=",
"np",
".",
"apply_along_axis",
"(",
"np",
".",
"greater",
",",
"0",
",",
"dis",
",",
"thresh",
")",
".",
"any",
"(",
"axis",
"=",
"0",
")",
"# Compute correlation on remaining data",
"if",
"method",
"==",
"'spearman'",
":",
"r",
",",
"pval",
"=",
"spearmanr",
"(",
"X",
"[",
"~",
"outliers",
",",
"0",
"]",
",",
"X",
"[",
"~",
"outliers",
",",
"1",
"]",
")",
"else",
":",
"r",
",",
"pval",
"=",
"pearsonr",
"(",
"X",
"[",
"~",
"outliers",
",",
"0",
"]",
",",
"X",
"[",
"~",
"outliers",
",",
"1",
"]",
")",
"return",
"r",
",",
"pval",
",",
"outliers"
] | Skipped correlation (Rousselet and Pernet 2012).
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
method : str
Method used to compute the correlation after outlier removal. Can be
either 'spearman' (default) or 'pearson'.
Returns
-------
r : float
Skipped correlation coefficient.
pval : float
Two-tailed p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
The skipped correlation involves multivariate outlier detection using a
projection technique (Wilcox, 2004, 2005). First, a robust estimator of
multivariate location and scatter, for instance the minimum covariance
determinant estimator (MCD; Rousseeuw, 1984; Rousseeuw and van Driessen,
1999; Hubert et al., 2008) is computed. Second, data points are
orthogonally projected on lines joining each of the data point to the
location estimator. Third, outliers are detected using a robust technique.
Finally, Spearman correlations are computed on the remaining data points
and calculations are adjusted by taking into account the dependency among
the remaining data points.
Code inspired by Matlab code from Cyril Pernet and Guillaume
Rousselet [1]_.
Requires scikit-learn.
References
----------
.. [1] Pernet CR, Wilcox R, Rousselet GA. Robust Correlation Analyses:
False Positive and Power Validation Using a New Open Source Matlab
Toolbox. Frontiers in Psychology. 2012;3:606.
doi:10.3389/fpsyg.2012.00606. | [
"Skipped",
"correlation",
"(",
"Rousselet",
"and",
"Pernet",
"2012",
")",
"."
] | python | train |
IdentityPython/SATOSA | src/satosa/util.py | https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/util.py#L81-L93 | def rndstr(size=16, alphabet=""):
"""
Returns a string of random ascii characters or digits
:type size: int
:type alphabet: str
:param size: The length of the string
:param alphabet: A string with characters.
:return: string
"""
rng = random.SystemRandom()
if not alphabet:
alphabet = string.ascii_letters[0:52] + string.digits
return type(alphabet)().join(rng.choice(alphabet) for _ in range(size)) | [
"def",
"rndstr",
"(",
"size",
"=",
"16",
",",
"alphabet",
"=",
"\"\"",
")",
":",
"rng",
"=",
"random",
".",
"SystemRandom",
"(",
")",
"if",
"not",
"alphabet",
":",
"alphabet",
"=",
"string",
".",
"ascii_letters",
"[",
"0",
":",
"52",
"]",
"+",
"string",
".",
"digits",
"return",
"type",
"(",
"alphabet",
")",
"(",
")",
".",
"join",
"(",
"rng",
".",
"choice",
"(",
"alphabet",
")",
"for",
"_",
"in",
"range",
"(",
"size",
")",
")"
] | Returns a string of random ascii characters or digits
:type size: int
:type alphabet: str
:param size: The length of the string
:param alphabet: A string with characters.
:return: string | [
"Returns",
"a",
"string",
"of",
"random",
"ascii",
"characters",
"or",
"digits",
":",
"type",
"size",
":",
"int",
":",
"type",
"alphabet",
":",
"str",
":",
"param",
"size",
":",
"The",
"length",
"of",
"the",
"string",
":",
"param",
"alphabet",
":",
"A",
"string",
"with",
"characters",
".",
":",
"return",
":",
"string"
] | python | train |
the01/paps-settings | setup.py | https://github.com/the01/paps-settings/blob/48fb65eb0fa7929a0bb381c6dad28d0197b44c83/setup.py#L29-L42 | def get_version():
"""
Parse the version information from the init file
"""
version_file = os.path.join("paps_settings", "__init__.py")
initfile_lines = open(version_file, 'rt').readlines()
version_reg = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in initfile_lines:
mo = re.search(version_reg, line, re.M)
if mo:
return mo.group(1)
raise RuntimeError(
"Unable to find version string in {}".format(version_file)
) | [
"def",
"get_version",
"(",
")",
":",
"version_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"paps_settings\"",
",",
"\"__init__.py\"",
")",
"initfile_lines",
"=",
"open",
"(",
"version_file",
",",
"'rt'",
")",
".",
"readlines",
"(",
")",
"version_reg",
"=",
"r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"",
"for",
"line",
"in",
"initfile_lines",
":",
"mo",
"=",
"re",
".",
"search",
"(",
"version_reg",
",",
"line",
",",
"re",
".",
"M",
")",
"if",
"mo",
":",
"return",
"mo",
".",
"group",
"(",
"1",
")",
"raise",
"RuntimeError",
"(",
"\"Unable to find version string in {}\"",
".",
"format",
"(",
"version_file",
")",
")"
] | Parse the version information from the init file | [
"Parse",
"the",
"version",
"information",
"from",
"the",
"init",
"file"
] | python | train |
vertexproject/synapse | synapse/cryotank.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cryotank.py#L285-L312 | async def init(self, name, conf=None):
'''
Generate a new CryoTank with a given name or get an reference to an existing CryoTank.
Args:
name (str): Name of the CryoTank.
Returns:
CryoTank: A CryoTank instance.
'''
tank = self.tanks.get(name)
if tank is not None:
return tank
iden = s_common.guid()
logger.info('Creating new tank: %s', name)
path = s_common.genpath(self.dirn, 'tanks', iden)
tank = await CryoTank.anit(path, conf)
node = await self.names.open((name,))
await node.set((iden, conf))
self.tanks.put(name, tank)
return tank | [
"async",
"def",
"init",
"(",
"self",
",",
"name",
",",
"conf",
"=",
"None",
")",
":",
"tank",
"=",
"self",
".",
"tanks",
".",
"get",
"(",
"name",
")",
"if",
"tank",
"is",
"not",
"None",
":",
"return",
"tank",
"iden",
"=",
"s_common",
".",
"guid",
"(",
")",
"logger",
".",
"info",
"(",
"'Creating new tank: %s'",
",",
"name",
")",
"path",
"=",
"s_common",
".",
"genpath",
"(",
"self",
".",
"dirn",
",",
"'tanks'",
",",
"iden",
")",
"tank",
"=",
"await",
"CryoTank",
".",
"anit",
"(",
"path",
",",
"conf",
")",
"node",
"=",
"await",
"self",
".",
"names",
".",
"open",
"(",
"(",
"name",
",",
")",
")",
"await",
"node",
".",
"set",
"(",
"(",
"iden",
",",
"conf",
")",
")",
"self",
".",
"tanks",
".",
"put",
"(",
"name",
",",
"tank",
")",
"return",
"tank"
] | Generate a new CryoTank with a given name or get an reference to an existing CryoTank.
Args:
name (str): Name of the CryoTank.
Returns:
CryoTank: A CryoTank instance. | [
"Generate",
"a",
"new",
"CryoTank",
"with",
"a",
"given",
"name",
"or",
"get",
"an",
"reference",
"to",
"an",
"existing",
"CryoTank",
"."
] | python | train |
CivicSpleen/ambry | ambry/run.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/run.py#L90-L129 | def find_config_file(file_name, extra_path=None, load_user=True):
"""
Find a configuration file in one of these directories, tried in this order:
- A path provided as an argument
- A path specified by the AMBRY_CONFIG environmenal variable
- ambry in a path specified by the VIRTUAL_ENV environmental variable
- ~/ambry
- /etc/ambry
:param file_name:
:param extra_path:
:param load_user:
:param path:
:return:
"""
paths = []
if extra_path is not None:
paths.append(extra_path)
if os.getenv(ENVAR.CONFIG):
paths.append(os.getenv(ENVAR.CONFIG))
if os.getenv(ENVAR.VIRT):
paths.append(os.path.join(os.getenv(ENVAR.VIRT), USER_DIR))
if load_user:
paths.append(os.path.expanduser('~/' + USER_DIR))
paths.append(ROOT_DIR)
for path in paths:
if os.path.isdir(path) and os.path.exists(os.path.join(path, file_name)):
f = os.path.join(path, file_name)
return f
raise ConfigurationError(
"Failed to find configuration file '{}'. Looked for : {} ".format(file_name, paths)) | [
"def",
"find_config_file",
"(",
"file_name",
",",
"extra_path",
"=",
"None",
",",
"load_user",
"=",
"True",
")",
":",
"paths",
"=",
"[",
"]",
"if",
"extra_path",
"is",
"not",
"None",
":",
"paths",
".",
"append",
"(",
"extra_path",
")",
"if",
"os",
".",
"getenv",
"(",
"ENVAR",
".",
"CONFIG",
")",
":",
"paths",
".",
"append",
"(",
"os",
".",
"getenv",
"(",
"ENVAR",
".",
"CONFIG",
")",
")",
"if",
"os",
".",
"getenv",
"(",
"ENVAR",
".",
"VIRT",
")",
":",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getenv",
"(",
"ENVAR",
".",
"VIRT",
")",
",",
"USER_DIR",
")",
")",
"if",
"load_user",
":",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/'",
"+",
"USER_DIR",
")",
")",
"paths",
".",
"append",
"(",
"ROOT_DIR",
")",
"for",
"path",
"in",
"paths",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file_name",
")",
")",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file_name",
")",
"return",
"f",
"raise",
"ConfigurationError",
"(",
"\"Failed to find configuration file '{}'. Looked for : {} \"",
".",
"format",
"(",
"file_name",
",",
"paths",
")",
")"
] | Find a configuration file in one of these directories, tried in this order:
- A path provided as an argument
- A path specified by the AMBRY_CONFIG environmenal variable
- ambry in a path specified by the VIRTUAL_ENV environmental variable
- ~/ambry
- /etc/ambry
:param file_name:
:param extra_path:
:param load_user:
:param path:
:return: | [
"Find",
"a",
"configuration",
"file",
"in",
"one",
"of",
"these",
"directories",
"tried",
"in",
"this",
"order",
":"
] | python | train |
openego/ding0 | ding0/examples/example_parallel_multiple_grid_districts.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/examples/example_parallel_multiple_grid_districts.py#L191-L217 | def process_metadata(meta):
"""
Merge metadata of run on multiple grid districts
Parameters
----------
meta: list of dict
Metadata of run of each MV grid district
Returns
-------
dict
Single metadata dict including merge metadata
"""
mvgds = []
metadata = meta[0]
for mvgd in meta:
if isinstance(mvgd['mv_grid_districts'], list):
mvgds.extend(mvgd['mv_grid_districts'])
else:
mvgds.append(mvgd['mv_grid_districts'])
metadata['mv_grid_districts'] = mvgds
return metadata | [
"def",
"process_metadata",
"(",
"meta",
")",
":",
"mvgds",
"=",
"[",
"]",
"metadata",
"=",
"meta",
"[",
"0",
"]",
"for",
"mvgd",
"in",
"meta",
":",
"if",
"isinstance",
"(",
"mvgd",
"[",
"'mv_grid_districts'",
"]",
",",
"list",
")",
":",
"mvgds",
".",
"extend",
"(",
"mvgd",
"[",
"'mv_grid_districts'",
"]",
")",
"else",
":",
"mvgds",
".",
"append",
"(",
"mvgd",
"[",
"'mv_grid_districts'",
"]",
")",
"metadata",
"[",
"'mv_grid_districts'",
"]",
"=",
"mvgds",
"return",
"metadata"
] | Merge metadata of run on multiple grid districts
Parameters
----------
meta: list of dict
Metadata of run of each MV grid district
Returns
-------
dict
Single metadata dict including merge metadata | [
"Merge",
"metadata",
"of",
"run",
"on",
"multiple",
"grid",
"districts"
] | python | train |
spyder-ide/spyder | spyder/utils/stringmatching.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/stringmatching.py#L179-L230 | def get_search_scores(query, choices, ignore_case=True, template='{}',
valid_only=False, sort=False):
"""Search for query inside choices and return a list of tuples.
Returns a list of tuples of text with the enriched text (if a template is
provided) and a score for the match. Lower scores imply a better match.
Parameters
----------
query : str
String with letters to search in each choice (in order of appearance).
choices : list of str
List of sentences/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value perform a case insensitive search (True by default).
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : list of tuples
List of tuples where the first item is the text (enriched if a
template was used) and a search score. Lower scores means better match.
"""
# First remove spaces from query
query = query.replace(' ', '')
pattern = get_search_regex(query, ignore_case)
results = []
for choice in choices:
r = re.search(pattern, choice)
if query and r:
result = get_search_score(query, choice, ignore_case=ignore_case,
apply_regex=False, template=template)
else:
if query:
result = (choice, choice, NOT_FOUND_SCORE)
else:
result = (choice, choice, NO_SCORE)
if valid_only:
if result[-1] != NOT_FOUND_SCORE:
results.append(result)
else:
results.append(result)
if sort:
results = sorted(results, key=lambda row: row[-1])
return results | [
"def",
"get_search_scores",
"(",
"query",
",",
"choices",
",",
"ignore_case",
"=",
"True",
",",
"template",
"=",
"'{}'",
",",
"valid_only",
"=",
"False",
",",
"sort",
"=",
"False",
")",
":",
"# First remove spaces from query",
"query",
"=",
"query",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"pattern",
"=",
"get_search_regex",
"(",
"query",
",",
"ignore_case",
")",
"results",
"=",
"[",
"]",
"for",
"choice",
"in",
"choices",
":",
"r",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"choice",
")",
"if",
"query",
"and",
"r",
":",
"result",
"=",
"get_search_score",
"(",
"query",
",",
"choice",
",",
"ignore_case",
"=",
"ignore_case",
",",
"apply_regex",
"=",
"False",
",",
"template",
"=",
"template",
")",
"else",
":",
"if",
"query",
":",
"result",
"=",
"(",
"choice",
",",
"choice",
",",
"NOT_FOUND_SCORE",
")",
"else",
":",
"result",
"=",
"(",
"choice",
",",
"choice",
",",
"NO_SCORE",
")",
"if",
"valid_only",
":",
"if",
"result",
"[",
"-",
"1",
"]",
"!=",
"NOT_FOUND_SCORE",
":",
"results",
".",
"append",
"(",
"result",
")",
"else",
":",
"results",
".",
"append",
"(",
"result",
")",
"if",
"sort",
":",
"results",
"=",
"sorted",
"(",
"results",
",",
"key",
"=",
"lambda",
"row",
":",
"row",
"[",
"-",
"1",
"]",
")",
"return",
"results"
] | Search for query inside choices and return a list of tuples.
Returns a list of tuples of text with the enriched text (if a template is
provided) and a score for the match. Lower scores imply a better match.
Parameters
----------
query : str
String with letters to search in each choice (in order of appearance).
choices : list of str
List of sentences/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value perform a case insensitive search (True by default).
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : list of tuples
List of tuples where the first item is the text (enriched if a
template was used) and a search score. Lower scores means better match. | [
"Search",
"for",
"query",
"inside",
"choices",
"and",
"return",
"a",
"list",
"of",
"tuples",
"."
] | python | train |
planetlabs/datalake-common | datalake_common/record.py | https://github.com/planetlabs/datalake-common/blob/f0864732ac8cf26df4bea62600aee13b19321a93/datalake_common/record.py#L65-L71 | def list_from_url(cls, url):
'''return a list of DatalakeRecords for the specified url'''
key = cls._get_key(url)
metadata = cls._get_metadata_from_key(key)
ct = cls._get_create_time(key)
time_buckets = cls.get_time_buckets_from_metadata(metadata)
return [cls(url, metadata, t, ct, key.size) for t in time_buckets] | [
"def",
"list_from_url",
"(",
"cls",
",",
"url",
")",
":",
"key",
"=",
"cls",
".",
"_get_key",
"(",
"url",
")",
"metadata",
"=",
"cls",
".",
"_get_metadata_from_key",
"(",
"key",
")",
"ct",
"=",
"cls",
".",
"_get_create_time",
"(",
"key",
")",
"time_buckets",
"=",
"cls",
".",
"get_time_buckets_from_metadata",
"(",
"metadata",
")",
"return",
"[",
"cls",
"(",
"url",
",",
"metadata",
",",
"t",
",",
"ct",
",",
"key",
".",
"size",
")",
"for",
"t",
"in",
"time_buckets",
"]"
] | return a list of DatalakeRecords for the specified url | [
"return",
"a",
"list",
"of",
"DatalakeRecords",
"for",
"the",
"specified",
"url"
] | python | train |
uw-it-aca/uw-restclients | restclients/r25/reservations.py | https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/r25/reservations.py#L12-L23 | def get_reservations(**kwargs):
"""
Return a list of reservations matching the passed filter.
Supported kwargs are listed at
http://knowledge25.collegenet.com/display/WSW/reservations.xml
"""
kwargs["scope"] = "extended"
url = "/r25ws/servlet/wrd/run/reservations.xml"
if len(kwargs):
url += "?%s" % urlencode(kwargs)
return reservations_from_xml(get_resource(url)) | [
"def",
"get_reservations",
"(",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"scope\"",
"]",
"=",
"\"extended\"",
"url",
"=",
"\"/r25ws/servlet/wrd/run/reservations.xml\"",
"if",
"len",
"(",
"kwargs",
")",
":",
"url",
"+=",
"\"?%s\"",
"%",
"urlencode",
"(",
"kwargs",
")",
"return",
"reservations_from_xml",
"(",
"get_resource",
"(",
"url",
")",
")"
] | Return a list of reservations matching the passed filter.
Supported kwargs are listed at
http://knowledge25.collegenet.com/display/WSW/reservations.xml | [
"Return",
"a",
"list",
"of",
"reservations",
"matching",
"the",
"passed",
"filter",
".",
"Supported",
"kwargs",
"are",
"listed",
"at",
"http",
":",
"//",
"knowledge25",
".",
"collegenet",
".",
"com",
"/",
"display",
"/",
"WSW",
"/",
"reservations",
".",
"xml"
] | python | train |
has2k1/plotnine | plotnine/utils.py | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/utils.py#L386-L395 | def uniquecols(df):
"""
Return unique columns
This is used for figuring out which columns are
constant within a group
"""
bool_idx = df.apply(lambda col: len(np.unique(col)) == 1, axis=0)
df = df.loc[:, bool_idx].iloc[0:1, :].reset_index(drop=True)
return df | [
"def",
"uniquecols",
"(",
"df",
")",
":",
"bool_idx",
"=",
"df",
".",
"apply",
"(",
"lambda",
"col",
":",
"len",
"(",
"np",
".",
"unique",
"(",
"col",
")",
")",
"==",
"1",
",",
"axis",
"=",
"0",
")",
"df",
"=",
"df",
".",
"loc",
"[",
":",
",",
"bool_idx",
"]",
".",
"iloc",
"[",
"0",
":",
"1",
",",
":",
"]",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"return",
"df"
] | Return unique columns
This is used for figuring out which columns are
constant within a group | [
"Return",
"unique",
"columns"
] | python | train |
JukeboxPipeline/jukebox-core | src/jukeboxcore/plugins.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/plugins.py#L369-L397 | def load_plugin(self, p):
"""Load the specified plugin
:param p: The plugin to load
:type p: Subclass of JB_Plugin
:returns: None
:rtype: None
:raises: errors.PluginInitError
"""
if p.is_loaded():
return
# load required plugins first
reqnames = p.required
reqplugins = []
for name in reqnames:
try:
reqplugins.append(self.__plugins[name])
except KeyError as e:
log.error("Required Plugin %s not found. Cannot load %s." % (name, p))
raise errors.PluginInitError('Required Plugin %s not found. Cannot load %s. Reason: %s' % (name, p, e))
for plug in reqplugins:
try:
self.load_plugin(plug)
except errors.PluginInitError as e:
log.error("Required Plugin %s could not be loaded. Cannot load %s" % (plug, p))
raise errors.PluginInitError('Required Plugin %s could not be loaded. Cannot load %s. Reason: %s' % (plug,p, e))
# load the actual plugin
p._load()
log.info('Initialized the plugin: %s' % p) | [
"def",
"load_plugin",
"(",
"self",
",",
"p",
")",
":",
"if",
"p",
".",
"is_loaded",
"(",
")",
":",
"return",
"# load required plugins first",
"reqnames",
"=",
"p",
".",
"required",
"reqplugins",
"=",
"[",
"]",
"for",
"name",
"in",
"reqnames",
":",
"try",
":",
"reqplugins",
".",
"append",
"(",
"self",
".",
"__plugins",
"[",
"name",
"]",
")",
"except",
"KeyError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Required Plugin %s not found. Cannot load %s.\"",
"%",
"(",
"name",
",",
"p",
")",
")",
"raise",
"errors",
".",
"PluginInitError",
"(",
"'Required Plugin %s not found. Cannot load %s. Reason: %s'",
"%",
"(",
"name",
",",
"p",
",",
"e",
")",
")",
"for",
"plug",
"in",
"reqplugins",
":",
"try",
":",
"self",
".",
"load_plugin",
"(",
"plug",
")",
"except",
"errors",
".",
"PluginInitError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Required Plugin %s could not be loaded. Cannot load %s\"",
"%",
"(",
"plug",
",",
"p",
")",
")",
"raise",
"errors",
".",
"PluginInitError",
"(",
"'Required Plugin %s could not be loaded. Cannot load %s. Reason: %s'",
"%",
"(",
"plug",
",",
"p",
",",
"e",
")",
")",
"# load the actual plugin",
"p",
".",
"_load",
"(",
")",
"log",
".",
"info",
"(",
"'Initialized the plugin: %s'",
"%",
"p",
")"
] | Load the specified plugin
:param p: The plugin to load
:type p: Subclass of JB_Plugin
:returns: None
:rtype: None
:raises: errors.PluginInitError | [
"Load",
"the",
"specified",
"plugin"
] | python | train |
ladybug-tools/ladybug | ladybug/sunpath.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/sunpath.py#L481-L488 | def _calculate_solar_time(self, hour, eq_of_time, is_solar_time):
"""Calculate Solar time for an hour."""
if is_solar_time:
return hour
return (
(hour * 60 + eq_of_time + 4 * math.degrees(self._longitude) -
60 * self.time_zone) % 1440) / 60 | [
"def",
"_calculate_solar_time",
"(",
"self",
",",
"hour",
",",
"eq_of_time",
",",
"is_solar_time",
")",
":",
"if",
"is_solar_time",
":",
"return",
"hour",
"return",
"(",
"(",
"hour",
"*",
"60",
"+",
"eq_of_time",
"+",
"4",
"*",
"math",
".",
"degrees",
"(",
"self",
".",
"_longitude",
")",
"-",
"60",
"*",
"self",
".",
"time_zone",
")",
"%",
"1440",
")",
"/",
"60"
] | Calculate Solar time for an hour. | [
"Calculate",
"Solar",
"time",
"for",
"an",
"hour",
"."
] | python | train |
bokeh/bokeh | bokeh/embed/util.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/embed/util.py#L305-L315 | def submodel_has_python_callbacks(models):
''' Traverses submodels to check for Python (event) callbacks
'''
has_python_callback = False
for model in collect_models(models):
if len(model._callbacks) > 0 or len(model._event_callbacks) > 0:
has_python_callback = True
break
return has_python_callback | [
"def",
"submodel_has_python_callbacks",
"(",
"models",
")",
":",
"has_python_callback",
"=",
"False",
"for",
"model",
"in",
"collect_models",
"(",
"models",
")",
":",
"if",
"len",
"(",
"model",
".",
"_callbacks",
")",
">",
"0",
"or",
"len",
"(",
"model",
".",
"_event_callbacks",
")",
">",
"0",
":",
"has_python_callback",
"=",
"True",
"break",
"return",
"has_python_callback"
] | Traverses submodels to check for Python (event) callbacks | [
"Traverses",
"submodels",
"to",
"check",
"for",
"Python",
"(",
"event",
")",
"callbacks"
] | python | train |
MakerReduxCorp/PLOD | PLOD/internal.py | https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/internal.py#L204-L209 | def get_member(row, key):
''' properly detects if a an attribute exists '''
(target, tkey, tvalue) = dict_crawl(row, key)
if target:
return tvalue
return None | [
"def",
"get_member",
"(",
"row",
",",
"key",
")",
":",
"(",
"target",
",",
"tkey",
",",
"tvalue",
")",
"=",
"dict_crawl",
"(",
"row",
",",
"key",
")",
"if",
"target",
":",
"return",
"tvalue",
"return",
"None"
] | properly detects if a an attribute exists | [
"properly",
"detects",
"if",
"a",
"an",
"attribute",
"exists"
] | python | train |
paylogic/pip-accel | pip_accel/config.py | https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/config.py#L226-L237 | def data_directory(self):
"""
The absolute pathname of the directory where pip-accel's data files are stored (a string).
- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
"""
return expand_path(self.get(property_name='data_directory',
environment_variable='PIP_ACCEL_CACHE',
configuration_option='data-directory',
default='/var/cache/pip-accel' if is_root() else '~/.pip-accel')) | [
"def",
"data_directory",
"(",
"self",
")",
":",
"return",
"expand_path",
"(",
"self",
".",
"get",
"(",
"property_name",
"=",
"'data_directory'",
",",
"environment_variable",
"=",
"'PIP_ACCEL_CACHE'",
",",
"configuration_option",
"=",
"'data-directory'",
",",
"default",
"=",
"'/var/cache/pip-accel'",
"if",
"is_root",
"(",
")",
"else",
"'~/.pip-accel'",
")",
")"
] | The absolute pathname of the directory where pip-accel's data files are stored (a string).
- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise | [
"The",
"absolute",
"pathname",
"of",
"the",
"directory",
"where",
"pip",
"-",
"accel",
"s",
"data",
"files",
"are",
"stored",
"(",
"a",
"string",
")",
"."
] | python | train |
PyHDI/Pyverilog | pyverilog/vparser/parser.py | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1291-L1294 | def p_negedgesig(self, p):
'edgesig : NEGEDGE edgesig_base'
p[0] = Sens(p[2], 'negedge', lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | [
"def",
"p_negedgesig",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"Sens",
"(",
"p",
"[",
"2",
"]",
",",
"'negedge'",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | edgesig : NEGEDGE edgesig_base | [
"edgesig",
":",
"NEGEDGE",
"edgesig_base"
] | python | train |
mozilla/treeherder | treeherder/auth/backends.py | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/auth/backends.py#L169-L182 | def _calculate_session_expiry(self, request, user_info):
"""Returns the number of seconds after which the Django session should expire."""
access_token_expiry_timestamp = self._get_access_token_expiry(request)
id_token_expiry_timestamp = self._get_id_token_expiry(user_info)
now_in_seconds = int(time.time())
# The session length is set to match whichever token expiration time is closer.
earliest_expiration_timestamp = min(access_token_expiry_timestamp, id_token_expiry_timestamp)
seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds
if seconds_until_expiry <= 0:
raise AuthError('Session expiry time has already passed!')
return seconds_until_expiry | [
"def",
"_calculate_session_expiry",
"(",
"self",
",",
"request",
",",
"user_info",
")",
":",
"access_token_expiry_timestamp",
"=",
"self",
".",
"_get_access_token_expiry",
"(",
"request",
")",
"id_token_expiry_timestamp",
"=",
"self",
".",
"_get_id_token_expiry",
"(",
"user_info",
")",
"now_in_seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"# The session length is set to match whichever token expiration time is closer.",
"earliest_expiration_timestamp",
"=",
"min",
"(",
"access_token_expiry_timestamp",
",",
"id_token_expiry_timestamp",
")",
"seconds_until_expiry",
"=",
"earliest_expiration_timestamp",
"-",
"now_in_seconds",
"if",
"seconds_until_expiry",
"<=",
"0",
":",
"raise",
"AuthError",
"(",
"'Session expiry time has already passed!'",
")",
"return",
"seconds_until_expiry"
] | Returns the number of seconds after which the Django session should expire. | [
"Returns",
"the",
"number",
"of",
"seconds",
"after",
"which",
"the",
"Django",
"session",
"should",
"expire",
"."
] | python | train |
wuher/devil | devil/resource.py | https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/resource.py#L173-L186 | def _format_response(self, request, response):
""" Format response using appropriate datamapper.
Take the devil response and turn it into django response, ready to
be returned to the client.
"""
res = datamapper.format(request, response, self)
# data is now formatted, let's check if the status_code is set
if res.status_code is 0:
res.status_code = 200
# apply headers
self._add_resposne_headers(res, response)
return res | [
"def",
"_format_response",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"res",
"=",
"datamapper",
".",
"format",
"(",
"request",
",",
"response",
",",
"self",
")",
"# data is now formatted, let's check if the status_code is set",
"if",
"res",
".",
"status_code",
"is",
"0",
":",
"res",
".",
"status_code",
"=",
"200",
"# apply headers",
"self",
".",
"_add_resposne_headers",
"(",
"res",
",",
"response",
")",
"return",
"res"
] | Format response using appropriate datamapper.
Take the devil response and turn it into django response, ready to
be returned to the client. | [
"Format",
"response",
"using",
"appropriate",
"datamapper",
"."
] | python | train |
ga4gh/ga4gh-server | ga4gh/server/datamodel/genotype_phenotype.py | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/genotype_phenotype.py#L102-L113 | def _extractAssociationsDetails(self, associations):
"""
Given a set of results from our search query, return the
`details` (feature,environment,phenotype)
"""
detailedURIRef = []
for row in associations.bindings:
if 'feature' in row:
detailedURIRef.append(row['feature'])
detailedURIRef.append(row['environment'])
detailedURIRef.append(row['phenotype'])
return detailedURIRef | [
"def",
"_extractAssociationsDetails",
"(",
"self",
",",
"associations",
")",
":",
"detailedURIRef",
"=",
"[",
"]",
"for",
"row",
"in",
"associations",
".",
"bindings",
":",
"if",
"'feature'",
"in",
"row",
":",
"detailedURIRef",
".",
"append",
"(",
"row",
"[",
"'feature'",
"]",
")",
"detailedURIRef",
".",
"append",
"(",
"row",
"[",
"'environment'",
"]",
")",
"detailedURIRef",
".",
"append",
"(",
"row",
"[",
"'phenotype'",
"]",
")",
"return",
"detailedURIRef"
] | Given a set of results from our search query, return the
`details` (feature,environment,phenotype) | [
"Given",
"a",
"set",
"of",
"results",
"from",
"our",
"search",
"query",
"return",
"the",
"details",
"(",
"feature",
"environment",
"phenotype",
")"
] | python | train |
ianmiell/shutit | shutit_pexpect.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L2159-L2241 | def multisend(self, sendspec):
"""Multisend. Same as send, except it takes multiple sends and expects in a dict that are
processed while waiting for the end "expect" argument supplied.
@param send: See send()
@param send_dict: See ShutItSendSpec
@param expect: See send()
@param timeout: See send()
@param check_exit: See send()
@param fail_on_empty_before: See send()
@param record_command: See send()
@param exit_values: See send()
@param escape: See send()
@param echo: See send()
@param note: See send()
@param secret: See send()
@param check_sudo: See send()
@param remove_on_match See ShutItSendSpec
@param loglevel: See send()
@return: The pexpect return value (ie which expected
string in the list matched).
If return is -1, the task was backgrounded. See also multisend.
@rtype: int
"""
shutit = self.shutit
shutit.handle_note(sendspec.note)
expect = sendspec.expect or self.default_expect
send_iteration = sendspec.send
expect_list = list(sendspec.send_dict)
# Put breakout item(s) in last.
n_breakout_items = 0
shutit.log('In multisend, send: ' + sendspec.send, level=logging.DEBUG)
if isinstance(expect, str):
shutit.log('Adding: "' + expect + '" to expect list.', level=logging.DEBUG)
expect_list.append(expect)
n_breakout_items = 1
elif isinstance(expect, list):
shutit.log('Adding: "' + str(expect) + '" to expect list.', level=logging.DEBUG)
for item in expect:
expect_list.append(item)
n_breakout_items += 1
shutit.log('Number of breakout items: ' + str(n_breakout_items), level=logging.DEBUG)
while True:
# If it's the last n items in the list, it's the breakout one.
# Must be a separate sendspec object each time, must be run .
res = self.send(ShutItSendSpec(self,
send=send_iteration,
expect=expect_list,
check_exit=sendspec.check_exit,
fail_on_empty_before=sendspec.fail_on_empty_before,
timeout=sendspec.timeout,
record_command=sendspec.record_command,
exit_values=sendspec.exit_values,
echo=self.shutit.get_echo_override(sendspec.echo),
escape=sendspec.escape,
secret=sendspec.secret,
check_sudo=sendspec.check_sudo,
nonewline=sendspec.nonewline,
ignore_background=sendspec.ignore_background,
run_in_background=False,
block_other_commands=True,
loglevel=sendspec.loglevel))
if res == -1:
# Will be run in the background later.
shutit.log('Multisend will be run in the background: ' + str(send_iteration), level=logging.INFO)
return -1
if res >= len(expect_list) - n_breakout_items:
break
else:
next_send = sendspec.send_dict[expect_list[res]][0]
if next_send is None:
shutit.log('None found in next_send - is there no password in the send_dict (first item in array in referenced res)?', level=logging.WARNING)
remove_items = sendspec.send_dict[expect_list[res]][1]
send_iteration = next_send
if sendspec.remove_on_match and remove_items:
shutit.log('Have matched a password (' + expect_list[res] + '), removing password expects from list in readiness of a prompt', level=logging.DEBUG)
if isinstance(expect, str):
expect_list = [expect]
elif isinstance(expect, list):
expect_list = expect
self.shutit.handle_note_after(note=sendspec.note)
return res | [
"def",
"multisend",
"(",
"self",
",",
"sendspec",
")",
":",
"shutit",
"=",
"self",
".",
"shutit",
"shutit",
".",
"handle_note",
"(",
"sendspec",
".",
"note",
")",
"expect",
"=",
"sendspec",
".",
"expect",
"or",
"self",
".",
"default_expect",
"send_iteration",
"=",
"sendspec",
".",
"send",
"expect_list",
"=",
"list",
"(",
"sendspec",
".",
"send_dict",
")",
"# Put breakout item(s) in last.",
"n_breakout_items",
"=",
"0",
"shutit",
".",
"log",
"(",
"'In multisend, send: '",
"+",
"sendspec",
".",
"send",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"if",
"isinstance",
"(",
"expect",
",",
"str",
")",
":",
"shutit",
".",
"log",
"(",
"'Adding: \"'",
"+",
"expect",
"+",
"'\" to expect list.'",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"expect_list",
".",
"append",
"(",
"expect",
")",
"n_breakout_items",
"=",
"1",
"elif",
"isinstance",
"(",
"expect",
",",
"list",
")",
":",
"shutit",
".",
"log",
"(",
"'Adding: \"'",
"+",
"str",
"(",
"expect",
")",
"+",
"'\" to expect list.'",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"for",
"item",
"in",
"expect",
":",
"expect_list",
".",
"append",
"(",
"item",
")",
"n_breakout_items",
"+=",
"1",
"shutit",
".",
"log",
"(",
"'Number of breakout items: '",
"+",
"str",
"(",
"n_breakout_items",
")",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"while",
"True",
":",
"# If it's the last n items in the list, it's the breakout one.",
"# Must be a separate sendspec object each time, must be run .",
"res",
"=",
"self",
".",
"send",
"(",
"ShutItSendSpec",
"(",
"self",
",",
"send",
"=",
"send_iteration",
",",
"expect",
"=",
"expect_list",
",",
"check_exit",
"=",
"sendspec",
".",
"check_exit",
",",
"fail_on_empty_before",
"=",
"sendspec",
".",
"fail_on_empty_before",
",",
"timeout",
"=",
"sendspec",
".",
"timeout",
",",
"record_command",
"=",
"sendspec",
".",
"record_command",
",",
"exit_values",
"=",
"sendspec",
".",
"exit_values",
",",
"echo",
"=",
"self",
".",
"shutit",
".",
"get_echo_override",
"(",
"sendspec",
".",
"echo",
")",
",",
"escape",
"=",
"sendspec",
".",
"escape",
",",
"secret",
"=",
"sendspec",
".",
"secret",
",",
"check_sudo",
"=",
"sendspec",
".",
"check_sudo",
",",
"nonewline",
"=",
"sendspec",
".",
"nonewline",
",",
"ignore_background",
"=",
"sendspec",
".",
"ignore_background",
",",
"run_in_background",
"=",
"False",
",",
"block_other_commands",
"=",
"True",
",",
"loglevel",
"=",
"sendspec",
".",
"loglevel",
")",
")",
"if",
"res",
"==",
"-",
"1",
":",
"# Will be run in the background later.",
"shutit",
".",
"log",
"(",
"'Multisend will be run in the background: '",
"+",
"str",
"(",
"send_iteration",
")",
",",
"level",
"=",
"logging",
".",
"INFO",
")",
"return",
"-",
"1",
"if",
"res",
">=",
"len",
"(",
"expect_list",
")",
"-",
"n_breakout_items",
":",
"break",
"else",
":",
"next_send",
"=",
"sendspec",
".",
"send_dict",
"[",
"expect_list",
"[",
"res",
"]",
"]",
"[",
"0",
"]",
"if",
"next_send",
"is",
"None",
":",
"shutit",
".",
"log",
"(",
"'None found in next_send - is there no password in the send_dict (first item in array in referenced res)?'",
",",
"level",
"=",
"logging",
".",
"WARNING",
")",
"remove_items",
"=",
"sendspec",
".",
"send_dict",
"[",
"expect_list",
"[",
"res",
"]",
"]",
"[",
"1",
"]",
"send_iteration",
"=",
"next_send",
"if",
"sendspec",
".",
"remove_on_match",
"and",
"remove_items",
":",
"shutit",
".",
"log",
"(",
"'Have matched a password ('",
"+",
"expect_list",
"[",
"res",
"]",
"+",
"'), removing password expects from list in readiness of a prompt'",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"if",
"isinstance",
"(",
"expect",
",",
"str",
")",
":",
"expect_list",
"=",
"[",
"expect",
"]",
"elif",
"isinstance",
"(",
"expect",
",",
"list",
")",
":",
"expect_list",
"=",
"expect",
"self",
".",
"shutit",
".",
"handle_note_after",
"(",
"note",
"=",
"sendspec",
".",
"note",
")",
"return",
"res"
] | Multisend. Same as send, except it takes multiple sends and expects in a dict that are
processed while waiting for the end "expect" argument supplied.
@param send: See send()
@param send_dict: See ShutItSendSpec
@param expect: See send()
@param timeout: See send()
@param check_exit: See send()
@param fail_on_empty_before: See send()
@param record_command: See send()
@param exit_values: See send()
@param escape: See send()
@param echo: See send()
@param note: See send()
@param secret: See send()
@param check_sudo: See send()
@param remove_on_match See ShutItSendSpec
@param loglevel: See send()
@return: The pexpect return value (ie which expected
string in the list matched).
If return is -1, the task was backgrounded. See also multisend.
@rtype: int | [
"Multisend",
".",
"Same",
"as",
"send",
"except",
"it",
"takes",
"multiple",
"sends",
"and",
"expects",
"in",
"a",
"dict",
"that",
"are",
"processed",
"while",
"waiting",
"for",
"the",
"end",
"expect",
"argument",
"supplied",
"."
] | python | train |
dangunter/smoqe | smoqe/query.py | https://github.com/dangunter/smoqe/blob/70aa8ec1e9df875b9d21c71cbded95c595fe2aad/smoqe/query.py#L472-L481 | def get_conflicts(self):
"""Get conflicts in constraints, if any.
:return: Description of each conflict, empty if none.
:rtype: list(str)
"""
conflicts = []
if self._array and self._range:
conflicts.append('cannot use range expressions on arrays')
return conflicts | [
"def",
"get_conflicts",
"(",
"self",
")",
":",
"conflicts",
"=",
"[",
"]",
"if",
"self",
".",
"_array",
"and",
"self",
".",
"_range",
":",
"conflicts",
".",
"append",
"(",
"'cannot use range expressions on arrays'",
")",
"return",
"conflicts"
] | Get conflicts in constraints, if any.
:return: Description of each conflict, empty if none.
:rtype: list(str) | [
"Get",
"conflicts",
"in",
"constraints",
"if",
"any",
"."
] | python | train |
sirfoga/pyhal | hal/streams/user.py | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/user.py#L116-L151 | def get_number(self, question, min_i=float("-inf"), max_i=float("inf"),
just_these=None):
"""Parses answer and gets number
:param question: Question: to ask user
:param min_i: min acceptable number
:param max_i: max acceptable number
:param just_these: Accept only these numbers
:return: User answer
"""
try:
user_answer = self.get_answer(question)
user_answer = float(user_answer)
if min_i < user_answer < max_i:
if just_these:
if user_answer in just_these:
return user_answer
exc = "Number cannot be accepted. Just these: "
exc += str(just_these)
raise Exception(exc)
return user_answer
exc = "Number is not within limits. "
exc += "Min is " + str(min_i) + ". Max is " + str(max_i) + ""
raise Exception(exc)
except Exception as exc:
print(str(exc))
return self.get_number(
self.last_question,
min_i=min_i,
max_i=max_i,
just_these=just_these
) | [
"def",
"get_number",
"(",
"self",
",",
"question",
",",
"min_i",
"=",
"float",
"(",
"\"-inf\"",
")",
",",
"max_i",
"=",
"float",
"(",
"\"inf\"",
")",
",",
"just_these",
"=",
"None",
")",
":",
"try",
":",
"user_answer",
"=",
"self",
".",
"get_answer",
"(",
"question",
")",
"user_answer",
"=",
"float",
"(",
"user_answer",
")",
"if",
"min_i",
"<",
"user_answer",
"<",
"max_i",
":",
"if",
"just_these",
":",
"if",
"user_answer",
"in",
"just_these",
":",
"return",
"user_answer",
"exc",
"=",
"\"Number cannot be accepted. Just these: \"",
"exc",
"+=",
"str",
"(",
"just_these",
")",
"raise",
"Exception",
"(",
"exc",
")",
"return",
"user_answer",
"exc",
"=",
"\"Number is not within limits. \"",
"exc",
"+=",
"\"Min is \"",
"+",
"str",
"(",
"min_i",
")",
"+",
"\". Max is \"",
"+",
"str",
"(",
"max_i",
")",
"+",
"\"\"",
"raise",
"Exception",
"(",
"exc",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"str",
"(",
"exc",
")",
")",
"return",
"self",
".",
"get_number",
"(",
"self",
".",
"last_question",
",",
"min_i",
"=",
"min_i",
",",
"max_i",
"=",
"max_i",
",",
"just_these",
"=",
"just_these",
")"
] | Parses answer and gets number
:param question: Question: to ask user
:param min_i: min acceptable number
:param max_i: max acceptable number
:param just_these: Accept only these numbers
:return: User answer | [
"Parses",
"answer",
"and",
"gets",
"number"
] | python | train |
IdentityPython/fedoidcmsg | src/fedoidcmsg/file_system.py | https://github.com/IdentityPython/fedoidcmsg/blob/d30107be02521fa6cdfe285da3b6b0cdd153c8cc/src/fedoidcmsg/file_system.py#L170-L188 | def sync(self):
"""
Goes through the directory and builds a local cache based on
the content of the directory.
"""
if not os.path.isdir(self.fdir):
os.makedirs(self.fdir)
for f in os.listdir(self.fdir):
fname = os.path.join(self.fdir, f)
if not os.path.isfile(fname):
continue
if f in self.fmtime:
if self.is_changed(f):
self.db[f] = self._read_info(fname)
else:
mtime = self.get_mtime(fname)
self.db[f] = self._read_info(fname)
self.fmtime[f] = mtime | [
"def",
"sync",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"fdir",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"fdir",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"fdir",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"fdir",
",",
"f",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fname",
")",
":",
"continue",
"if",
"f",
"in",
"self",
".",
"fmtime",
":",
"if",
"self",
".",
"is_changed",
"(",
"f",
")",
":",
"self",
".",
"db",
"[",
"f",
"]",
"=",
"self",
".",
"_read_info",
"(",
"fname",
")",
"else",
":",
"mtime",
"=",
"self",
".",
"get_mtime",
"(",
"fname",
")",
"self",
".",
"db",
"[",
"f",
"]",
"=",
"self",
".",
"_read_info",
"(",
"fname",
")",
"self",
".",
"fmtime",
"[",
"f",
"]",
"=",
"mtime"
] | Goes through the directory and builds a local cache based on
the content of the directory. | [
"Goes",
"through",
"the",
"directory",
"and",
"builds",
"a",
"local",
"cache",
"based",
"on",
"the",
"content",
"of",
"the",
"directory",
"."
] | python | test |
AustralianSynchrotron/lightflow | lightflow/scripts/cli.py | https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/scripts/cli.py#L313-L327 | def worker_stop(obj, worker_ids):
""" Stop running workers.
\b
WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all.
"""
if len(worker_ids) == 0:
msg = 'Would you like to stop all workers?'
else:
msg = '\n{}\n\n{}'.format('\n'.join(worker_ids),
'Would you like to stop these workers?')
if click.confirm(msg, default=True, abort=True):
stop_worker(obj['config'],
worker_ids=list(worker_ids) if len(worker_ids) > 0 else None) | [
"def",
"worker_stop",
"(",
"obj",
",",
"worker_ids",
")",
":",
"if",
"len",
"(",
"worker_ids",
")",
"==",
"0",
":",
"msg",
"=",
"'Would you like to stop all workers?'",
"else",
":",
"msg",
"=",
"'\\n{}\\n\\n{}'",
".",
"format",
"(",
"'\\n'",
".",
"join",
"(",
"worker_ids",
")",
",",
"'Would you like to stop these workers?'",
")",
"if",
"click",
".",
"confirm",
"(",
"msg",
",",
"default",
"=",
"True",
",",
"abort",
"=",
"True",
")",
":",
"stop_worker",
"(",
"obj",
"[",
"'config'",
"]",
",",
"worker_ids",
"=",
"list",
"(",
"worker_ids",
")",
"if",
"len",
"(",
"worker_ids",
")",
">",
"0",
"else",
"None",
")"
] | Stop running workers.
\b
WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all. | [
"Stop",
"running",
"workers",
"."
] | python | train |
mwouts/jupytext | demo/Matplotlib example.py | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/demo/Matplotlib example.py#L84-L178 | def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
hist_func=None, labels=None,
plot_func=None, plot_kwargs=None):
"""
ax : axes.Axes
The axes to add artists too
stacked_data : array or Mapping
A (N, M) shaped array. The first dimension will be iterated over to
compute histograms row-wise
sty_cycle : Cycler or operable of dict
Style to apply to each set
bottoms : array, optional
The initial positions of the bottoms, defaults to 0
hist_func : callable, optional
Must have signature `bin_vals, bin_edges = f(data)`.
`bin_edges` expected to be one longer than `bin_vals`
labels : list of str, optional
The label for each set.
If not given and stacked data is an array defaults to 'default set {n}'
If stacked_data is a mapping, and labels is None, default to the keys
(which may come out in a random order).
If stacked_data is a mapping and labels is given then only
the columns listed by be plotted.
plot_func : callable, optional
Function to call to draw the histogram must have signature:
ret = plot_func(ax, edges, top, bottoms=bottoms,
label=label, **kwargs)
plot_kwargs : dict, optional
Any extra kwargs to pass through to the plotting function. This
will be the same for all calls to the plotting function and will
over-ride the values in cycle.
Returns
-------
arts : dict
Dictionary of artists keyed on their labels
"""
# deal with default binning function
if hist_func is None:
hist_func = np.histogram
# deal with default plotting function
if plot_func is None:
plot_func = filled_hist
# deal with default
if plot_kwargs is None:
plot_kwargs = {}
print(plot_kwargs)
try:
l_keys = stacked_data.keys()
label_data = True
if labels is None:
labels = l_keys
except AttributeError:
label_data = False
if labels is None:
labels = itertools.repeat(None)
if label_data:
loop_iter = enumerate((stacked_data[lab], lab, s) for lab, s in
zip(labels, sty_cycle))
else:
loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))
arts = {}
for j, (data, label, sty) in loop_iter:
if label is None:
label = 'dflt set {n}'.format(n=j)
label = sty.pop('label', label)
vals, edges = hist_func(data)
if bottoms is None:
bottoms = np.zeros_like(vals)
top = bottoms + vals
print(sty)
sty.update(plot_kwargs)
print(sty)
ret = plot_func(ax, edges, top, bottoms=bottoms,
label=label, **sty)
bottoms = top
arts[label] = ret
ax.legend(fontsize=10)
return arts | [
"def",
"stack_hist",
"(",
"ax",
",",
"stacked_data",
",",
"sty_cycle",
",",
"bottoms",
"=",
"None",
",",
"hist_func",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"plot_func",
"=",
"None",
",",
"plot_kwargs",
"=",
"None",
")",
":",
"# deal with default binning function",
"if",
"hist_func",
"is",
"None",
":",
"hist_func",
"=",
"np",
".",
"histogram",
"# deal with default plotting function",
"if",
"plot_func",
"is",
"None",
":",
"plot_func",
"=",
"filled_hist",
"# deal with default",
"if",
"plot_kwargs",
"is",
"None",
":",
"plot_kwargs",
"=",
"{",
"}",
"print",
"(",
"plot_kwargs",
")",
"try",
":",
"l_keys",
"=",
"stacked_data",
".",
"keys",
"(",
")",
"label_data",
"=",
"True",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"l_keys",
"except",
"AttributeError",
":",
"label_data",
"=",
"False",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"itertools",
".",
"repeat",
"(",
"None",
")",
"if",
"label_data",
":",
"loop_iter",
"=",
"enumerate",
"(",
"(",
"stacked_data",
"[",
"lab",
"]",
",",
"lab",
",",
"s",
")",
"for",
"lab",
",",
"s",
"in",
"zip",
"(",
"labels",
",",
"sty_cycle",
")",
")",
"else",
":",
"loop_iter",
"=",
"enumerate",
"(",
"zip",
"(",
"stacked_data",
",",
"labels",
",",
"sty_cycle",
")",
")",
"arts",
"=",
"{",
"}",
"for",
"j",
",",
"(",
"data",
",",
"label",
",",
"sty",
")",
"in",
"loop_iter",
":",
"if",
"label",
"is",
"None",
":",
"label",
"=",
"'dflt set {n}'",
".",
"format",
"(",
"n",
"=",
"j",
")",
"label",
"=",
"sty",
".",
"pop",
"(",
"'label'",
",",
"label",
")",
"vals",
",",
"edges",
"=",
"hist_func",
"(",
"data",
")",
"if",
"bottoms",
"is",
"None",
":",
"bottoms",
"=",
"np",
".",
"zeros_like",
"(",
"vals",
")",
"top",
"=",
"bottoms",
"+",
"vals",
"print",
"(",
"sty",
")",
"sty",
".",
"update",
"(",
"plot_kwargs",
")",
"print",
"(",
"sty",
")",
"ret",
"=",
"plot_func",
"(",
"ax",
",",
"edges",
",",
"top",
",",
"bottoms",
"=",
"bottoms",
",",
"label",
"=",
"label",
",",
"*",
"*",
"sty",
")",
"bottoms",
"=",
"top",
"arts",
"[",
"label",
"]",
"=",
"ret",
"ax",
".",
"legend",
"(",
"fontsize",
"=",
"10",
")",
"return",
"arts"
] | ax : axes.Axes
The axes to add artists too
stacked_data : array or Mapping
A (N, M) shaped array. The first dimension will be iterated over to
compute histograms row-wise
sty_cycle : Cycler or operable of dict
Style to apply to each set
bottoms : array, optional
The initial positions of the bottoms, defaults to 0
hist_func : callable, optional
Must have signature `bin_vals, bin_edges = f(data)`.
`bin_edges` expected to be one longer than `bin_vals`
labels : list of str, optional
The label for each set.
If not given and stacked data is an array defaults to 'default set {n}'
If stacked_data is a mapping, and labels is None, default to the keys
(which may come out in a random order).
If stacked_data is a mapping and labels is given then only
the columns listed by be plotted.
plot_func : callable, optional
Function to call to draw the histogram must have signature:
ret = plot_func(ax, edges, top, bottoms=bottoms,
label=label, **kwargs)
plot_kwargs : dict, optional
Any extra kwargs to pass through to the plotting function. This
will be the same for all calls to the plotting function and will
over-ride the values in cycle.
Returns
-------
arts : dict
Dictionary of artists keyed on their labels | [
"ax",
":",
"axes",
".",
"Axes",
"The",
"axes",
"to",
"add",
"artists",
"too"
] | python | train |
twisted/mantissa | xmantissa/people.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L514-L522 | def getEmailAddresses(self):
"""
Return an iterator of all email addresses associated with this person.
@return: an iterator of unicode strings in RFC2822 address format.
"""
return self.store.query(
EmailAddress,
EmailAddress.person == self).getColumn('address') | [
"def",
"getEmailAddresses",
"(",
"self",
")",
":",
"return",
"self",
".",
"store",
".",
"query",
"(",
"EmailAddress",
",",
"EmailAddress",
".",
"person",
"==",
"self",
")",
".",
"getColumn",
"(",
"'address'",
")"
] | Return an iterator of all email addresses associated with this person.
@return: an iterator of unicode strings in RFC2822 address format. | [
"Return",
"an",
"iterator",
"of",
"all",
"email",
"addresses",
"associated",
"with",
"this",
"person",
"."
] | python | train |
olitheolix/qtmacs | qtmacs/qtmacsmain_macros.py | https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/qtmacsmain_macros.py#L293-L307 | def qteReplayKeysequenceHook(self, msgObj):
"""
Replay the macro sequence.
"""
# Quit if there is nothing to replay.
if self.recorded_keysequence.toString() == '':
return
# Stop the recording before the replay, if necessary.
if self.qteRecording:
return
# Simulate the key presses.
self.qteMain.qteEmulateKeypresses(self.recorded_keysequence) | [
"def",
"qteReplayKeysequenceHook",
"(",
"self",
",",
"msgObj",
")",
":",
"# Quit if there is nothing to replay.",
"if",
"self",
".",
"recorded_keysequence",
".",
"toString",
"(",
")",
"==",
"''",
":",
"return",
"# Stop the recording before the replay, if necessary.",
"if",
"self",
".",
"qteRecording",
":",
"return",
"# Simulate the key presses.",
"self",
".",
"qteMain",
".",
"qteEmulateKeypresses",
"(",
"self",
".",
"recorded_keysequence",
")"
] | Replay the macro sequence. | [
"Replay",
"the",
"macro",
"sequence",
"."
] | python | train |
lago-project/lago | lago/templates.py | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/templates.py#L325-L352 | def from_url(cls, path):
"""
Instantiate a :class:`TemplateRepository` instance from the data in a
file or url
Args:
path (str): Path or url to the json file to load
Returns:
TemplateRepository: A new instance
"""
if os.path.isfile(path):
with open(path) as fd:
data = fd.read()
else:
try:
response = urllib.urlopen(path)
if response.code >= 300:
raise RuntimeError('Unable to load repo from %s' % path)
data = response.read()
response.close()
except IOError:
raise RuntimeError(
'Unable to load repo from %s (IO error)' % path
)
return cls(json.loads(data), path) | [
"def",
"from_url",
"(",
"cls",
",",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"fd",
":",
"data",
"=",
"fd",
".",
"read",
"(",
")",
"else",
":",
"try",
":",
"response",
"=",
"urllib",
".",
"urlopen",
"(",
"path",
")",
"if",
"response",
".",
"code",
">=",
"300",
":",
"raise",
"RuntimeError",
"(",
"'Unable to load repo from %s'",
"%",
"path",
")",
"data",
"=",
"response",
".",
"read",
"(",
")",
"response",
".",
"close",
"(",
")",
"except",
"IOError",
":",
"raise",
"RuntimeError",
"(",
"'Unable to load repo from %s (IO error)'",
"%",
"path",
")",
"return",
"cls",
"(",
"json",
".",
"loads",
"(",
"data",
")",
",",
"path",
")"
] | Instantiate a :class:`TemplateRepository` instance from the data in a
file or url
Args:
path (str): Path or url to the json file to load
Returns:
TemplateRepository: A new instance | [
"Instantiate",
"a",
":",
"class",
":",
"TemplateRepository",
"instance",
"from",
"the",
"data",
"in",
"a",
"file",
"or",
"url"
] | python | train |
bunq/sdk_python | bunq/sdk/exception_factory.py | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/exception_factory.py#L101-L119 | def _generate_message_error(cls, response_code, messages, response_id):
"""
:type response_code: int
:type messages: list[str]
:type response_id: str
:rtype: str
"""
line_response_code = cls._FORMAT_RESPONSE_CODE_LINE \
.format(response_code)
line_response_id = cls._FORMAT_RESPONSE_ID_LINE.format(response_id)
line_error_message = cls._FORMAT_ERROR_MESSAGE_LINE.format(
cls._GLUE_ERROR_MESSAGE_STRING_EMPTY.join(messages)
)
return cls._glue_all_error_message(
[line_response_code, line_response_id, line_error_message]
) | [
"def",
"_generate_message_error",
"(",
"cls",
",",
"response_code",
",",
"messages",
",",
"response_id",
")",
":",
"line_response_code",
"=",
"cls",
".",
"_FORMAT_RESPONSE_CODE_LINE",
".",
"format",
"(",
"response_code",
")",
"line_response_id",
"=",
"cls",
".",
"_FORMAT_RESPONSE_ID_LINE",
".",
"format",
"(",
"response_id",
")",
"line_error_message",
"=",
"cls",
".",
"_FORMAT_ERROR_MESSAGE_LINE",
".",
"format",
"(",
"cls",
".",
"_GLUE_ERROR_MESSAGE_STRING_EMPTY",
".",
"join",
"(",
"messages",
")",
")",
"return",
"cls",
".",
"_glue_all_error_message",
"(",
"[",
"line_response_code",
",",
"line_response_id",
",",
"line_error_message",
"]",
")"
] | :type response_code: int
:type messages: list[str]
:type response_id: str
:rtype: str | [
":",
"type",
"response_code",
":",
"int",
":",
"type",
"messages",
":",
"list",
"[",
"str",
"]",
":",
"type",
"response_id",
":",
"str"
] | python | train |
prompt-toolkit/pyvim | pyvim/window_arrangement.py | https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/window_arrangement.py#L254-L261 | def get_editor_buffer_for_buffer_name(self, buffer_name):
"""
Return the `EditorBuffer` for this buffer_name.
When not found, return None
"""
for eb in self.editor_buffers:
if eb.buffer_name == buffer_name:
return eb | [
"def",
"get_editor_buffer_for_buffer_name",
"(",
"self",
",",
"buffer_name",
")",
":",
"for",
"eb",
"in",
"self",
".",
"editor_buffers",
":",
"if",
"eb",
".",
"buffer_name",
"==",
"buffer_name",
":",
"return",
"eb"
] | Return the `EditorBuffer` for this buffer_name.
When not found, return None | [
"Return",
"the",
"EditorBuffer",
"for",
"this",
"buffer_name",
".",
"When",
"not",
"found",
"return",
"None"
] | python | train |
flatangle/flatlib | flatlib/ephem/tools.py | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/ephem/tools.py#L56-L73 | def syzygyJD(jd):
""" Finds the latest new or full moon and
returns the julian date of that event.
"""
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
dist = angle.distance(sun, moon)
# Offset represents the Syzygy type.
# Zero is conjunction and 180 is opposition.
offset = 180 if (dist >= 180) else 0
while abs(dist) > MAX_ERROR:
jd = jd - dist / 13.1833 # Moon mean daily motion
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
dist = angle.closestdistance(sun - offset, moon)
return jd | [
"def",
"syzygyJD",
"(",
"jd",
")",
":",
"sun",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"SUN",
",",
"jd",
")",
"moon",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"MOON",
",",
"jd",
")",
"dist",
"=",
"angle",
".",
"distance",
"(",
"sun",
",",
"moon",
")",
"# Offset represents the Syzygy type. ",
"# Zero is conjunction and 180 is opposition.",
"offset",
"=",
"180",
"if",
"(",
"dist",
">=",
"180",
")",
"else",
"0",
"while",
"abs",
"(",
"dist",
")",
">",
"MAX_ERROR",
":",
"jd",
"=",
"jd",
"-",
"dist",
"/",
"13.1833",
"# Moon mean daily motion",
"sun",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"SUN",
",",
"jd",
")",
"moon",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"MOON",
",",
"jd",
")",
"dist",
"=",
"angle",
".",
"closestdistance",
"(",
"sun",
"-",
"offset",
",",
"moon",
")",
"return",
"jd"
] | Finds the latest new or full moon and
returns the julian date of that event. | [
"Finds",
"the",
"latest",
"new",
"or",
"full",
"moon",
"and",
"returns",
"the",
"julian",
"date",
"of",
"that",
"event",
"."
] | python | train |
facelessuser/wcmatch | wcmatch/glob.py | https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/glob.py#L235-L315 | def _glob(self, curdir, this, rest):
"""
Handle glob flow.
There are really only a couple of cases:
- File name.
- File name pattern (magic).
- Directory.
- Directory name pattern (magic).
- Extra slashes `////`.
- `globstar` `**`.
"""
is_magic = this.is_magic
dir_only = this.dir_only
target = this.pattern
is_globstar = this.is_globstar
if is_magic and is_globstar:
# Glob star directory `**`.
# Throw away multiple consecutive `globstars`
# and acquire the pattern after the `globstars` if available.
this = rest.pop(0) if rest else None
globstar_end = this is None
while this and not globstar_end:
if this:
dir_only = this.dir_only
target = this.pattern
if this and this.is_globstar:
this = rest.pop(0) if rest else None
if this is None:
globstar_end = True
else:
break
if globstar_end:
target = None
# We match `**/next` during a deep glob, so what ever comes back,
# we will send back through `_glob` with pattern after `next` (`**/next/after`).
# So grab `after` if available.
this = rest.pop(0) if rest else None
# Deep searching is the unique case where we
# might feed in a `None` for the next pattern to match.
# Deep glob will account for this.
matcher = self._get_matcher(target)
# If our pattern ends with `curdir/**`, but does not start with `**` it matches zero or more,
# so it should return `curdir/`, signifying `curdir` + no match.
# If a pattern follows `**/something`, we always get the appropriate
# return already, so this isn't needed in that case.
# There is one quirk though with Bash, if `curdir` had magic before `**`, Bash
# omits the trailing `/`. We don't worry about that.
if globstar_end and curdir:
yield os.path.join(curdir, self.empty)
# Search
for path in self._glob_dir(curdir, matcher, dir_only, deep=True):
if this:
yield from self._glob(path, this, rest[:])
else:
yield path
elif not dir_only:
# Files: no need to recursively search at this point as we are done.
matcher = self._get_matcher(target)
yield from self._glob_dir(curdir, matcher)
else:
# Directory: search current directory against pattern
# and feed the results back through with the next pattern.
this = rest.pop(0) if rest else None
matcher = self._get_matcher(target)
for path in self._glob_dir(curdir, matcher, True):
if this:
yield from self._glob(path, this, rest[:])
else:
yield path | [
"def",
"_glob",
"(",
"self",
",",
"curdir",
",",
"this",
",",
"rest",
")",
":",
"is_magic",
"=",
"this",
".",
"is_magic",
"dir_only",
"=",
"this",
".",
"dir_only",
"target",
"=",
"this",
".",
"pattern",
"is_globstar",
"=",
"this",
".",
"is_globstar",
"if",
"is_magic",
"and",
"is_globstar",
":",
"# Glob star directory `**`.",
"# Throw away multiple consecutive `globstars`",
"# and acquire the pattern after the `globstars` if available.",
"this",
"=",
"rest",
".",
"pop",
"(",
"0",
")",
"if",
"rest",
"else",
"None",
"globstar_end",
"=",
"this",
"is",
"None",
"while",
"this",
"and",
"not",
"globstar_end",
":",
"if",
"this",
":",
"dir_only",
"=",
"this",
".",
"dir_only",
"target",
"=",
"this",
".",
"pattern",
"if",
"this",
"and",
"this",
".",
"is_globstar",
":",
"this",
"=",
"rest",
".",
"pop",
"(",
"0",
")",
"if",
"rest",
"else",
"None",
"if",
"this",
"is",
"None",
":",
"globstar_end",
"=",
"True",
"else",
":",
"break",
"if",
"globstar_end",
":",
"target",
"=",
"None",
"# We match `**/next` during a deep glob, so what ever comes back,",
"# we will send back through `_glob` with pattern after `next` (`**/next/after`).",
"# So grab `after` if available.",
"this",
"=",
"rest",
".",
"pop",
"(",
"0",
")",
"if",
"rest",
"else",
"None",
"# Deep searching is the unique case where we",
"# might feed in a `None` for the next pattern to match.",
"# Deep glob will account for this.",
"matcher",
"=",
"self",
".",
"_get_matcher",
"(",
"target",
")",
"# If our pattern ends with `curdir/**`, but does not start with `**` it matches zero or more,",
"# so it should return `curdir/`, signifying `curdir` + no match.",
"# If a pattern follows `**/something`, we always get the appropriate",
"# return already, so this isn't needed in that case.",
"# There is one quirk though with Bash, if `curdir` had magic before `**`, Bash",
"# omits the trailing `/`. We don't worry about that.",
"if",
"globstar_end",
"and",
"curdir",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"curdir",
",",
"self",
".",
"empty",
")",
"# Search",
"for",
"path",
"in",
"self",
".",
"_glob_dir",
"(",
"curdir",
",",
"matcher",
",",
"dir_only",
",",
"deep",
"=",
"True",
")",
":",
"if",
"this",
":",
"yield",
"from",
"self",
".",
"_glob",
"(",
"path",
",",
"this",
",",
"rest",
"[",
":",
"]",
")",
"else",
":",
"yield",
"path",
"elif",
"not",
"dir_only",
":",
"# Files: no need to recursively search at this point as we are done.",
"matcher",
"=",
"self",
".",
"_get_matcher",
"(",
"target",
")",
"yield",
"from",
"self",
".",
"_glob_dir",
"(",
"curdir",
",",
"matcher",
")",
"else",
":",
"# Directory: search current directory against pattern",
"# and feed the results back through with the next pattern.",
"this",
"=",
"rest",
".",
"pop",
"(",
"0",
")",
"if",
"rest",
"else",
"None",
"matcher",
"=",
"self",
".",
"_get_matcher",
"(",
"target",
")",
"for",
"path",
"in",
"self",
".",
"_glob_dir",
"(",
"curdir",
",",
"matcher",
",",
"True",
")",
":",
"if",
"this",
":",
"yield",
"from",
"self",
".",
"_glob",
"(",
"path",
",",
"this",
",",
"rest",
"[",
":",
"]",
")",
"else",
":",
"yield",
"path"
] | Handle glob flow.
There are really only a couple of cases:
- File name.
- File name pattern (magic).
- Directory.
- Directory name pattern (magic).
- Extra slashes `////`.
- `globstar` `**`. | [
"Handle",
"glob",
"flow",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/util/fetching.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/fetching.py#L113-L153 | def update(self, cur_value, mesg=None):
"""Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
# Ensure floating-point division so we can get fractions of a percent
# for the progressbar.
self.cur_value = cur_value
progress = float(self.cur_value) / self.max_value
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
# Update the message
if mesg is not None:
self.mesg = mesg
# The \r tells the cursor to return to the beginning of the line rather
# than starting a new line. This allows us to have a progressbar-style
# display in the console window.
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
sys.stdout.write(bar)
# Increament the spinner
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
# Force a flush because sometimes when using bash scripts and pipes,
# the output is not printed until after the program exits.
sys.stdout.flush() | [
"def",
"update",
"(",
"self",
",",
"cur_value",
",",
"mesg",
"=",
"None",
")",
":",
"# Ensure floating-point division so we can get fractions of a percent",
"# for the progressbar.",
"self",
".",
"cur_value",
"=",
"cur_value",
"progress",
"=",
"float",
"(",
"self",
".",
"cur_value",
")",
"/",
"self",
".",
"max_value",
"num_chars",
"=",
"int",
"(",
"progress",
"*",
"self",
".",
"max_chars",
")",
"num_left",
"=",
"self",
".",
"max_chars",
"-",
"num_chars",
"# Update the message",
"if",
"mesg",
"is",
"not",
"None",
":",
"self",
".",
"mesg",
"=",
"mesg",
"# The \\r tells the cursor to return to the beginning of the line rather",
"# than starting a new line. This allows us to have a progressbar-style",
"# display in the console window.",
"bar",
"=",
"self",
".",
"template",
".",
"format",
"(",
"self",
".",
"progress_character",
"*",
"num_chars",
",",
"' '",
"*",
"num_left",
",",
"progress",
"*",
"100",
",",
"self",
".",
"spinner_symbols",
"[",
"self",
".",
"spinner_index",
"]",
",",
"self",
".",
"mesg",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"bar",
")",
"# Increament the spinner",
"if",
"self",
".",
"spinner",
":",
"self",
".",
"spinner_index",
"=",
"(",
"self",
".",
"spinner_index",
"+",
"1",
")",
"%",
"self",
".",
"n_spinner",
"# Force a flush because sometimes when using bash scripts and pipes,",
"# the output is not printed until after the program exits.",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''. | [
"Update",
"progressbar",
"with",
"current",
"value",
"of",
"process"
] | python | train |
peri-source/peri | peri/opt/optimize.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2408-L2435 | def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
"""
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
"""
# normal = direction / np.sqrt(np.dot(direction, direction))
normals = np.array([d/np.sqrt(np.dot(d,d)) for d in directions])
if np.isnan(normals).any():
raise ValueError('`directions` must not be 0s or contain nan')
obj = OptState(s, normals)
lo = LMOptObj(obj, max_iter=max_iter, run_length=run_length, damping=
damping, marquardt_damping=marquardt_damping, **kwargs)
lo.do_run_1()
if collect_stats:
return lo.get_termination_stats() | [
"def",
"do_levmarq_n_directions",
"(",
"s",
",",
"directions",
",",
"max_iter",
"=",
"2",
",",
"run_length",
"=",
"2",
",",
"damping",
"=",
"1e-3",
",",
"collect_stats",
"=",
"False",
",",
"marquardt_damping",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# normal = direction / np.sqrt(np.dot(direction, direction))",
"normals",
"=",
"np",
".",
"array",
"(",
"[",
"d",
"/",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"d",
",",
"d",
")",
")",
"for",
"d",
"in",
"directions",
"]",
")",
"if",
"np",
".",
"isnan",
"(",
"normals",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'`directions` must not be 0s or contain nan'",
")",
"obj",
"=",
"OptState",
"(",
"s",
",",
"normals",
")",
"lo",
"=",
"LMOptObj",
"(",
"obj",
",",
"max_iter",
"=",
"max_iter",
",",
"run_length",
"=",
"run_length",
",",
"damping",
"=",
"damping",
",",
"marquardt_damping",
"=",
"marquardt_damping",
",",
"*",
"*",
"kwargs",
")",
"lo",
".",
"do_run_1",
"(",
")",
"if",
"collect_stats",
":",
"return",
"lo",
".",
"get_termination_stats",
"(",
")"
] | Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine. | [
"Optimization",
"of",
"a",
"state",
"along",
"a",
"specific",
"set",
"of",
"directions",
"in",
"parameter",
"space",
"."
] | python | valid |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L1087-L1113 | def pre_message(self):
'''read timestamp if needed'''
# read the timestamp
if self.filesize != 0:
self.percent = (100.0 * self.f.tell()) / self.filesize
if self.notimestamps:
return
if self.planner_format:
tbuf = self.f.read(21)
if len(tbuf) != 21 or tbuf[0] != '-' or tbuf[20] != ':':
raise RuntimeError('bad planner timestamp %s' % tbuf)
hnsec = self._two64 + float(tbuf[0:20])
t = hnsec * 1.0e-7 # convert to seconds
t -= 719163 * 24 * 60 * 60 # convert to 1970 base
self._link = 0
else:
tbuf = self.f.read(8)
if len(tbuf) != 8:
return
(tusec,) = struct.unpack('>Q', tbuf)
t = tusec * 1.0e-6
if (self._last_timestamp is not None and
self._last_message.get_type() == "BAD_DATA" and
abs(t - self._last_timestamp) > 3*24*60*60):
t = self.scan_timestamp(tbuf)
self._link = tusec & 0x3
self._timestamp = t | [
"def",
"pre_message",
"(",
"self",
")",
":",
"# read the timestamp",
"if",
"self",
".",
"filesize",
"!=",
"0",
":",
"self",
".",
"percent",
"=",
"(",
"100.0",
"*",
"self",
".",
"f",
".",
"tell",
"(",
")",
")",
"/",
"self",
".",
"filesize",
"if",
"self",
".",
"notimestamps",
":",
"return",
"if",
"self",
".",
"planner_format",
":",
"tbuf",
"=",
"self",
".",
"f",
".",
"read",
"(",
"21",
")",
"if",
"len",
"(",
"tbuf",
")",
"!=",
"21",
"or",
"tbuf",
"[",
"0",
"]",
"!=",
"'-'",
"or",
"tbuf",
"[",
"20",
"]",
"!=",
"':'",
":",
"raise",
"RuntimeError",
"(",
"'bad planner timestamp %s'",
"%",
"tbuf",
")",
"hnsec",
"=",
"self",
".",
"_two64",
"+",
"float",
"(",
"tbuf",
"[",
"0",
":",
"20",
"]",
")",
"t",
"=",
"hnsec",
"*",
"1.0e-7",
"# convert to seconds",
"t",
"-=",
"719163",
"*",
"24",
"*",
"60",
"*",
"60",
"# convert to 1970 base",
"self",
".",
"_link",
"=",
"0",
"else",
":",
"tbuf",
"=",
"self",
".",
"f",
".",
"read",
"(",
"8",
")",
"if",
"len",
"(",
"tbuf",
")",
"!=",
"8",
":",
"return",
"(",
"tusec",
",",
")",
"=",
"struct",
".",
"unpack",
"(",
"'>Q'",
",",
"tbuf",
")",
"t",
"=",
"tusec",
"*",
"1.0e-6",
"if",
"(",
"self",
".",
"_last_timestamp",
"is",
"not",
"None",
"and",
"self",
".",
"_last_message",
".",
"get_type",
"(",
")",
"==",
"\"BAD_DATA\"",
"and",
"abs",
"(",
"t",
"-",
"self",
".",
"_last_timestamp",
")",
">",
"3",
"*",
"24",
"*",
"60",
"*",
"60",
")",
":",
"t",
"=",
"self",
".",
"scan_timestamp",
"(",
"tbuf",
")",
"self",
".",
"_link",
"=",
"tusec",
"&",
"0x3",
"self",
".",
"_timestamp",
"=",
"t"
] | read timestamp if needed | [
"read",
"timestamp",
"if",
"needed"
] | python | train |
estnltk/estnltk | estnltk/syntax/utils.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L669-L743 | def get_children( self, **kwargs ):
''' Recursively collects and returns all subtrees of given tree (if no
arguments are given), or, alternatively, collects and returns subtrees
satisfying some specific criteria (pre-specified in the arguments);
Parameters
-----------
depth_limit : int
Specifies how deep into the subtrees of this tree the search goes;
Examples:
depth_limit=2 -- children of this node, and also children's
direct children are considered as collectibles;
depth_limit=1 -- only children of this node are considered;
depth_limit=0 -- the end of search (only this node is considered);
Default: unbounded ( the search is not limited by depth )
include_self : bool
Specifies whether this tree should also be included as a collectible
subtree. If this tree is includes, it still must satisfy all the
criteria before it is included in the collection;
Default: False
sorted : bool
Specifies returned trees should be sorted in the ascending order of
word_ids (basically: by the order of words in the text);
If sorting is not applied, there is no guarantee that resulting trees
follow the order of words in text;
Default: False
Following parameters can be used to set conditions for subtrees:
-----------------------------------------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
to the word of the node;
If the word's morphological features do not match the template,
the node will be discarded;
'''
depth_limit = kwargs.get('depth_limit', 922337203685477580) # Just a nice big number to
# assure that by default,
# there is no depth limit ...
include_self = kwargs.get('include_self', False)
sorted_by_word_ids = kwargs.get('sorted', False)
subtrees = []
if include_self:
if self._satisfies_conditions( self, **kwargs ):
subtrees.append( self )
if depth_limit >= 1 and self.children:
# 1) Add children of given tree
for child in self.children:
if self._satisfies_conditions( child, **kwargs ):
subtrees.append(child)
# 2) Collect children of given tree's children
kwargs['include_self'] = False
kwargs['depth_limit'] = depth_limit - 1
for child in self.children:
childs_results = child.get_children( **kwargs )
if childs_results:
subtrees.extend(childs_results)
if sorted_by_word_ids:
# Sort by word_id-s, in ascending order
subtrees = sorted(subtrees, key=lambda x: x.word_id)
return subtrees | [
"def",
"get_children",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"depth_limit",
"=",
"kwargs",
".",
"get",
"(",
"'depth_limit'",
",",
"922337203685477580",
")",
"# Just a nice big number to",
"# assure that by default, ",
"# there is no depth limit ...",
"include_self",
"=",
"kwargs",
".",
"get",
"(",
"'include_self'",
",",
"False",
")",
"sorted_by_word_ids",
"=",
"kwargs",
".",
"get",
"(",
"'sorted'",
",",
"False",
")",
"subtrees",
"=",
"[",
"]",
"if",
"include_self",
":",
"if",
"self",
".",
"_satisfies_conditions",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"subtrees",
".",
"append",
"(",
"self",
")",
"if",
"depth_limit",
">=",
"1",
"and",
"self",
".",
"children",
":",
"# 1) Add children of given tree",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"self",
".",
"_satisfies_conditions",
"(",
"child",
",",
"*",
"*",
"kwargs",
")",
":",
"subtrees",
".",
"append",
"(",
"child",
")",
"# 2) Collect children of given tree's children",
"kwargs",
"[",
"'include_self'",
"]",
"=",
"False",
"kwargs",
"[",
"'depth_limit'",
"]",
"=",
"depth_limit",
"-",
"1",
"for",
"child",
"in",
"self",
".",
"children",
":",
"childs_results",
"=",
"child",
".",
"get_children",
"(",
"*",
"*",
"kwargs",
")",
"if",
"childs_results",
":",
"subtrees",
".",
"extend",
"(",
"childs_results",
")",
"if",
"sorted_by_word_ids",
":",
"# Sort by word_id-s, in ascending order",
"subtrees",
"=",
"sorted",
"(",
"subtrees",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"word_id",
")",
"return",
"subtrees"
] | Recursively collects and returns all subtrees of given tree (if no
arguments are given), or, alternatively, collects and returns subtrees
satisfying some specific criteria (pre-specified in the arguments);
Parameters
-----------
depth_limit : int
Specifies how deep into the subtrees of this tree the search goes;
Examples:
depth_limit=2 -- children of this node, and also children's
direct children are considered as collectibles;
depth_limit=1 -- only children of this node are considered;
depth_limit=0 -- the end of search (only this node is considered);
Default: unbounded ( the search is not limited by depth )
include_self : bool
Specifies whether this tree should also be included as a collectible
subtree. If this tree is includes, it still must satisfy all the
criteria before it is included in the collection;
Default: False
sorted : bool
Specifies returned trees should be sorted in the ascending order of
word_ids (basically: by the order of words in the text);
If sorting is not applied, there is no guarantee that resulting trees
follow the order of words in text;
Default: False
Following parameters can be used to set conditions for subtrees:
-----------------------------------------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
to the word of the node;
If the word's morphological features do not match the template,
the node will be discarded; | [
"Recursively",
"collects",
"and",
"returns",
"all",
"subtrees",
"of",
"given",
"tree",
"(",
"if",
"no",
"arguments",
"are",
"given",
")",
"or",
"alternatively",
"collects",
"and",
"returns",
"subtrees",
"satisfying",
"some",
"specific",
"criteria",
"(",
"pre",
"-",
"specified",
"in",
"the",
"arguments",
")",
";",
"Parameters",
"-----------",
"depth_limit",
":",
"int",
"Specifies",
"how",
"deep",
"into",
"the",
"subtrees",
"of",
"this",
"tree",
"the",
"search",
"goes",
";",
"Examples",
":",
"depth_limit",
"=",
"2",
"--",
"children",
"of",
"this",
"node",
"and",
"also",
"children",
"s",
"direct",
"children",
"are",
"considered",
"as",
"collectibles",
";",
"depth_limit",
"=",
"1",
"--",
"only",
"children",
"of",
"this",
"node",
"are",
"considered",
";",
"depth_limit",
"=",
"0",
"--",
"the",
"end",
"of",
"search",
"(",
"only",
"this",
"node",
"is",
"considered",
")",
";",
"Default",
":",
"unbounded",
"(",
"the",
"search",
"is",
"not",
"limited",
"by",
"depth",
")",
"include_self",
":",
"bool",
"Specifies",
"whether",
"this",
"tree",
"should",
"also",
"be",
"included",
"as",
"a",
"collectible",
"subtree",
".",
"If",
"this",
"tree",
"is",
"includes",
"it",
"still",
"must",
"satisfy",
"all",
"the",
"criteria",
"before",
"it",
"is",
"included",
"in",
"the",
"collection",
";",
"Default",
":",
"False",
"sorted",
":",
"bool",
"Specifies",
"returned",
"trees",
"should",
"be",
"sorted",
"in",
"the",
"ascending",
"order",
"of",
"word_ids",
"(",
"basically",
":",
"by",
"the",
"order",
"of",
"words",
"in",
"the",
"text",
")",
";",
"If",
"sorting",
"is",
"not",
"applied",
"there",
"is",
"no",
"guarantee",
"that",
"resulting",
"trees",
"follow",
"the",
"order",
"of",
"words",
"in",
"text",
";",
"Default",
":",
"False",
"Following",
"parameters",
"can",
"be",
"used",
"to",
"set",
"conditions",
"for",
"subtrees",
":",
"-----------------------------------------------------------------",
"label",
":",
"str",
"Syntactic",
"label",
"(",
"e",
".",
"g",
"."
] | python | train |
jantman/pypi-download-stats | pypi_download_stats/outputgenerator.py | https://github.com/jantman/pypi-download-stats/blob/44a7a6bbcd61a9e7f02bd02c52584a98183f80c5/pypi_download_stats/outputgenerator.py#L197-L223 | def _generate_graph(self, name, title, stats_data, y_name):
"""
Generate a downloads graph; append it to ``self._graphs``.
:param name: HTML name of the graph, also used in ``self.GRAPH_KEYS``
:type name: str
:param title: human-readable title for the graph
:type title: str
:param stats_data: data dict from ``self._stats``
:type stats_data: dict
:param y_name: Y axis metric name
:type y_name: str
"""
logger.debug('Generating chart data for %s graph', name)
orig_data, labels = self._data_dict_to_bokeh_chart_data(stats_data)
data = self._limit_data(orig_data)
logger.debug('Generating %s graph', name)
script, div = FancyAreaGraph(
name, '%s %s' % (self.project_name, title), data, labels,
y_name).generate_graph()
logger.debug('%s graph generated', name)
self._graphs[name] = {
'title': title,
'script': script,
'div': div,
'raw_data': stats_data
} | [
"def",
"_generate_graph",
"(",
"self",
",",
"name",
",",
"title",
",",
"stats_data",
",",
"y_name",
")",
":",
"logger",
".",
"debug",
"(",
"'Generating chart data for %s graph'",
",",
"name",
")",
"orig_data",
",",
"labels",
"=",
"self",
".",
"_data_dict_to_bokeh_chart_data",
"(",
"stats_data",
")",
"data",
"=",
"self",
".",
"_limit_data",
"(",
"orig_data",
")",
"logger",
".",
"debug",
"(",
"'Generating %s graph'",
",",
"name",
")",
"script",
",",
"div",
"=",
"FancyAreaGraph",
"(",
"name",
",",
"'%s %s'",
"%",
"(",
"self",
".",
"project_name",
",",
"title",
")",
",",
"data",
",",
"labels",
",",
"y_name",
")",
".",
"generate_graph",
"(",
")",
"logger",
".",
"debug",
"(",
"'%s graph generated'",
",",
"name",
")",
"self",
".",
"_graphs",
"[",
"name",
"]",
"=",
"{",
"'title'",
":",
"title",
",",
"'script'",
":",
"script",
",",
"'div'",
":",
"div",
",",
"'raw_data'",
":",
"stats_data",
"}"
] | Generate a downloads graph; append it to ``self._graphs``.
:param name: HTML name of the graph, also used in ``self.GRAPH_KEYS``
:type name: str
:param title: human-readable title for the graph
:type title: str
:param stats_data: data dict from ``self._stats``
:type stats_data: dict
:param y_name: Y axis metric name
:type y_name: str | [
"Generate",
"a",
"downloads",
"graph",
";",
"append",
"it",
"to",
"self",
".",
"_graphs",
"."
] | python | train |
svartalf/python-2gis | dgis/__init__.py | https://github.com/svartalf/python-2gis/blob/6eccd6073c99494b7abf20b38a5455cbd55d6420/dgis/__init__.py#L144-L158 | def geo_search(self, **kwargs):
"""Geo search
http://api.2gis.ru/doc/geo/search/
"""
if 'types' in kwargs:
kwargs['types'] = ','.join(kwargs['types'])
bound = kwargs.pop('bound', False)
if bound:
kwargs['bound[point1]'] = bound[0]
kwargs['bound[point2]'] = bound[1]
return self._geo_search(**kwargs) | [
"def",
"geo_search",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'types'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'types'",
"]",
"=",
"','",
".",
"join",
"(",
"kwargs",
"[",
"'types'",
"]",
")",
"bound",
"=",
"kwargs",
".",
"pop",
"(",
"'bound'",
",",
"False",
")",
"if",
"bound",
":",
"kwargs",
"[",
"'bound[point1]'",
"]",
"=",
"bound",
"[",
"0",
"]",
"kwargs",
"[",
"'bound[point2]'",
"]",
"=",
"bound",
"[",
"1",
"]",
"return",
"self",
".",
"_geo_search",
"(",
"*",
"*",
"kwargs",
")"
] | Geo search
http://api.2gis.ru/doc/geo/search/ | [
"Geo",
"search"
] | python | train |
globus/globus-cli | globus_cli/parsing/detect_and_decorate.py | https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/detect_and_decorate.py#L1-L27 | def detect_and_decorate(decorator, args, kwargs):
"""
Helper for applying a decorator when it is applied directly, and also
applying it when it is given arguments and then applied to a function.
"""
# special behavior when invoked with only one non-keyword argument: act as
# a normal decorator, decorating and returning that argument with
# click.option
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return decorator(args[0])
# if we're not doing that, we should see no positional args
# the alternative behavior is to fall through and discard *args, but this
# will probably confuse someone in the future when their arguments are
# silently discarded
elif len(args) != 0:
raise ValueError("this decorator cannot take positional args")
# final case: got 0 or more kwargs, no positionals
# do the function-which-returns-a-decorator dance to produce a
# new decorator based on the arguments given
else:
def inner_decorator(f):
return decorator(f, **kwargs)
return inner_decorator | [
"def",
"detect_and_decorate",
"(",
"decorator",
",",
"args",
",",
"kwargs",
")",
":",
"# special behavior when invoked with only one non-keyword argument: act as",
"# a normal decorator, decorating and returning that argument with",
"# click.option",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"len",
"(",
"kwargs",
")",
"==",
"0",
"and",
"callable",
"(",
"args",
"[",
"0",
"]",
")",
":",
"return",
"decorator",
"(",
"args",
"[",
"0",
"]",
")",
"# if we're not doing that, we should see no positional args",
"# the alternative behavior is to fall through and discard *args, but this",
"# will probably confuse someone in the future when their arguments are",
"# silently discarded",
"elif",
"len",
"(",
"args",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"this decorator cannot take positional args\"",
")",
"# final case: got 0 or more kwargs, no positionals",
"# do the function-which-returns-a-decorator dance to produce a",
"# new decorator based on the arguments given",
"else",
":",
"def",
"inner_decorator",
"(",
"f",
")",
":",
"return",
"decorator",
"(",
"f",
",",
"*",
"*",
"kwargs",
")",
"return",
"inner_decorator"
] | Helper for applying a decorator when it is applied directly, and also
applying it when it is given arguments and then applied to a function. | [
"Helper",
"for",
"applying",
"a",
"decorator",
"when",
"it",
"is",
"applied",
"directly",
"and",
"also",
"applying",
"it",
"when",
"it",
"is",
"given",
"arguments",
"and",
"then",
"applied",
"to",
"a",
"function",
"."
] | python | train |
MisterY/gnucash-portfolio | gnucash_portfolio/accounts.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/accounts.py#L244-L256 | def find_by_name(self, term: str, include_placeholders: bool = False) -> List[Account]:
""" Search for account by part of the name """
query = (
self.query
.filter(Account.name.like('%' + term + '%'))
.order_by(Account.name)
)
# Exclude placeholder accounts?
if not include_placeholders:
query = query.filter(Account.placeholder == 0)
# print(generic.get_sql(query))
return query.all() | [
"def",
"find_by_name",
"(",
"self",
",",
"term",
":",
"str",
",",
"include_placeholders",
":",
"bool",
"=",
"False",
")",
"->",
"List",
"[",
"Account",
"]",
":",
"query",
"=",
"(",
"self",
".",
"query",
".",
"filter",
"(",
"Account",
".",
"name",
".",
"like",
"(",
"'%'",
"+",
"term",
"+",
"'%'",
")",
")",
".",
"order_by",
"(",
"Account",
".",
"name",
")",
")",
"# Exclude placeholder accounts?",
"if",
"not",
"include_placeholders",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"Account",
".",
"placeholder",
"==",
"0",
")",
"# print(generic.get_sql(query))",
"return",
"query",
".",
"all",
"(",
")"
] | Search for account by part of the name | [
"Search",
"for",
"account",
"by",
"part",
"of",
"the",
"name"
] | python | train |
saltstack/salt | salt/utils/data.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/data.py#L557-L596 | def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
ptr = data
for each in key.split(delimiter):
if isinstance(ptr, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in ptr if isinstance(x, dict)):
try:
ptr = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
ptr = ptr[idx]
except IndexError:
return default
else:
try:
ptr = ptr[each]
except (KeyError, TypeError):
return default
return ptr | [
"def",
"traverse_dict_and_list",
"(",
"data",
",",
"key",
",",
"default",
"=",
"None",
",",
"delimiter",
"=",
"DEFAULT_TARGET_DELIM",
")",
":",
"ptr",
"=",
"data",
"for",
"each",
"in",
"key",
".",
"split",
"(",
"delimiter",
")",
":",
"if",
"isinstance",
"(",
"ptr",
",",
"list",
")",
":",
"try",
":",
"idx",
"=",
"int",
"(",
"each",
")",
"except",
"ValueError",
":",
"embed_match",
"=",
"False",
"# Index was not numeric, lets look at any embedded dicts",
"for",
"embedded",
"in",
"(",
"x",
"for",
"x",
"in",
"ptr",
"if",
"isinstance",
"(",
"x",
",",
"dict",
")",
")",
":",
"try",
":",
"ptr",
"=",
"embedded",
"[",
"each",
"]",
"embed_match",
"=",
"True",
"break",
"except",
"KeyError",
":",
"pass",
"if",
"not",
"embed_match",
":",
"# No embedded dicts matched, return the default",
"return",
"default",
"else",
":",
"try",
":",
"ptr",
"=",
"ptr",
"[",
"idx",
"]",
"except",
"IndexError",
":",
"return",
"default",
"else",
":",
"try",
":",
"ptr",
"=",
"ptr",
"[",
"each",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"return",
"default",
"return",
"ptr"
] | Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0'] | [
"Traverse",
"a",
"dict",
"or",
"list",
"using",
"a",
"colon",
"-",
"delimited",
"(",
"or",
"otherwise",
"delimited",
"using",
"the",
"delimiter",
"param",
")",
"target",
"string",
".",
"The",
"target",
"foo",
":",
"bar",
":",
"0",
"will",
"return",
"data",
"[",
"foo",
"]",
"[",
"bar",
"]",
"[",
"0",
"]",
"if",
"this",
"value",
"exists",
"and",
"will",
"otherwise",
"return",
"the",
"dict",
"in",
"the",
"default",
"argument",
".",
"Function",
"will",
"automatically",
"determine",
"the",
"target",
"type",
".",
"The",
"target",
"foo",
":",
"bar",
":",
"0",
"will",
"return",
"data",
"[",
"foo",
"]",
"[",
"bar",
"]",
"[",
"0",
"]",
"if",
"data",
"like",
"{",
"foo",
":",
"{",
"bar",
":",
"[",
"baz",
"]",
"}}",
"if",
"data",
"like",
"{",
"foo",
":",
"{",
"bar",
":",
"{",
"0",
":",
"baz",
"}}}",
"then",
"return",
"data",
"[",
"foo",
"]",
"[",
"bar",
"]",
"[",
"0",
"]"
] | python | train |
gwastro/pycbc-glue | pycbc_glue/iterutils.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/iterutils.py#L252-L325 | def inorder(*iterables, **kwargs):
"""
A generator that yields the values from several ordered iterables
in order.
Example:
>>> x = [0, 1, 2, 3]
>>> y = [1.5, 2.5, 3.5, 4.5]
>>> z = [1.75, 2.25, 3.75, 4.25]
>>> list(inorder(x, y, z))
[0, 1, 1.5, 1.75, 2, 2.25, 2.5, 3, 3.5, 3.75, 4.25, 4.5]
>>> list(inorder(x, y, z, key=lambda x: x * x))
[0, 1, 1.5, 1.75, 2, 2.25, 2.5, 3, 3.5, 3.75, 4.25, 4.5]
>>> x.sort(key=lambda x: abs(x-3))
>>> y.sort(key=lambda x: abs(x-3))
>>> z.sort(key=lambda x: abs(x-3))
>>> list(inorder(x, y, z, key=lambda x: abs(x - 3)))
[3, 2.5, 3.5, 2.25, 3.75, 2, 1.75, 4.25, 1.5, 4.5, 1, 0]
>>> x = [3, 2, 1, 0]
>>> y = [4.5, 3.5, 2.5, 1.5]
>>> z = [4.25, 3.75, 2.25, 1.75]
>>> list(inorder(x, y, z, reverse = True))
[4.5, 4.25, 3.75, 3.5, 3, 2.5, 2.25, 2, 1.75, 1.5, 1, 0]
>>> list(inorder(x, y, z, key = lambda x: -x))
[4.5, 4.25, 3.75, 3.5, 3, 2.5, 2.25, 2, 1.75, 1.5, 1, 0]
NOTE: this function will never reverse the order of elements in
the input iterables. If the reverse keyword argument is False (the
default) then the input sequences must yield elements in increasing
order, likewise if the keyword argument is True then the input
sequences must yield elements in decreasing order. Failure to
adhere to this yields undefined results, and for performance
reasons no check is performed to validate the element order in the
input sequences.
"""
reverse = kwargs.pop("reverse", False)
keyfunc = kwargs.pop("key", lambda x: x) # default = identity
if kwargs:
raise TypeError("invalid keyword argument '%s'" % kwargs.keys()[0])
nextvals = {}
for iterable in iterables:
next = iter(iterable).next
try:
nextval = next()
nextvals[next] = keyfunc(nextval), nextval, next
except StopIteration:
pass
if not nextvals:
# all sequences are empty
return
if reverse:
select = max
else:
select = min
values = nextvals.itervalues
if len(nextvals) > 1:
while 1:
_, val, next = select(values())
yield val
try:
nextval = next()
nextvals[next] = keyfunc(nextval), nextval, next
except StopIteration:
del nextvals[next]
if len(nextvals) < 2:
break
# exactly one sequence remains, short circuit and drain it
(_, val, next), = values()
yield val
while 1:
yield next() | [
"def",
"inorder",
"(",
"*",
"iterables",
",",
"*",
"*",
"kwargs",
")",
":",
"reverse",
"=",
"kwargs",
".",
"pop",
"(",
"\"reverse\"",
",",
"False",
")",
"keyfunc",
"=",
"kwargs",
".",
"pop",
"(",
"\"key\"",
",",
"lambda",
"x",
":",
"x",
")",
"# default = identity",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"\"invalid keyword argument '%s'\"",
"%",
"kwargs",
".",
"keys",
"(",
")",
"[",
"0",
"]",
")",
"nextvals",
"=",
"{",
"}",
"for",
"iterable",
"in",
"iterables",
":",
"next",
"=",
"iter",
"(",
"iterable",
")",
".",
"next",
"try",
":",
"nextval",
"=",
"next",
"(",
")",
"nextvals",
"[",
"next",
"]",
"=",
"keyfunc",
"(",
"nextval",
")",
",",
"nextval",
",",
"next",
"except",
"StopIteration",
":",
"pass",
"if",
"not",
"nextvals",
":",
"# all sequences are empty",
"return",
"if",
"reverse",
":",
"select",
"=",
"max",
"else",
":",
"select",
"=",
"min",
"values",
"=",
"nextvals",
".",
"itervalues",
"if",
"len",
"(",
"nextvals",
")",
">",
"1",
":",
"while",
"1",
":",
"_",
",",
"val",
",",
"next",
"=",
"select",
"(",
"values",
"(",
")",
")",
"yield",
"val",
"try",
":",
"nextval",
"=",
"next",
"(",
")",
"nextvals",
"[",
"next",
"]",
"=",
"keyfunc",
"(",
"nextval",
")",
",",
"nextval",
",",
"next",
"except",
"StopIteration",
":",
"del",
"nextvals",
"[",
"next",
"]",
"if",
"len",
"(",
"nextvals",
")",
"<",
"2",
":",
"break",
"# exactly one sequence remains, short circuit and drain it",
"(",
"_",
",",
"val",
",",
"next",
")",
",",
"=",
"values",
"(",
")",
"yield",
"val",
"while",
"1",
":",
"yield",
"next",
"(",
")"
] | A generator that yields the values from several ordered iterables
in order.
Example:
>>> x = [0, 1, 2, 3]
>>> y = [1.5, 2.5, 3.5, 4.5]
>>> z = [1.75, 2.25, 3.75, 4.25]
>>> list(inorder(x, y, z))
[0, 1, 1.5, 1.75, 2, 2.25, 2.5, 3, 3.5, 3.75, 4.25, 4.5]
>>> list(inorder(x, y, z, key=lambda x: x * x))
[0, 1, 1.5, 1.75, 2, 2.25, 2.5, 3, 3.5, 3.75, 4.25, 4.5]
>>> x.sort(key=lambda x: abs(x-3))
>>> y.sort(key=lambda x: abs(x-3))
>>> z.sort(key=lambda x: abs(x-3))
>>> list(inorder(x, y, z, key=lambda x: abs(x - 3)))
[3, 2.5, 3.5, 2.25, 3.75, 2, 1.75, 4.25, 1.5, 4.5, 1, 0]
>>> x = [3, 2, 1, 0]
>>> y = [4.5, 3.5, 2.5, 1.5]
>>> z = [4.25, 3.75, 2.25, 1.75]
>>> list(inorder(x, y, z, reverse = True))
[4.5, 4.25, 3.75, 3.5, 3, 2.5, 2.25, 2, 1.75, 1.5, 1, 0]
>>> list(inorder(x, y, z, key = lambda x: -x))
[4.5, 4.25, 3.75, 3.5, 3, 2.5, 2.25, 2, 1.75, 1.5, 1, 0]
NOTE: this function will never reverse the order of elements in
the input iterables. If the reverse keyword argument is False (the
default) then the input sequences must yield elements in increasing
order, likewise if the keyword argument is True then the input
sequences must yield elements in decreasing order. Failure to
adhere to this yields undefined results, and for performance
reasons no check is performed to validate the element order in the
input sequences. | [
"A",
"generator",
"that",
"yields",
"the",
"values",
"from",
"several",
"ordered",
"iterables",
"in",
"order",
"."
] | python | train |
gijzelaerr/python-snap7 | snap7/common.py | https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/common.py#L68-L87 | def error_text(error, context="client"):
"""Returns a textual explanation of a given error number
:param error: an error integer
:param context: server, client or partner
:returns: the error string
"""
assert context in ("client", "server", "partner")
logger.debug("error text for %s" % hex(error))
len_ = 1024
text_type = c_char * len_
text = text_type()
library = load_library()
if context == "client":
library.Cli_ErrorText(error, text, len_)
elif context == "server":
library.Srv_ErrorText(error, text, len_)
elif context == "partner":
library.Par_ErrorText(error, text, len_)
return text.value | [
"def",
"error_text",
"(",
"error",
",",
"context",
"=",
"\"client\"",
")",
":",
"assert",
"context",
"in",
"(",
"\"client\"",
",",
"\"server\"",
",",
"\"partner\"",
")",
"logger",
".",
"debug",
"(",
"\"error text for %s\"",
"%",
"hex",
"(",
"error",
")",
")",
"len_",
"=",
"1024",
"text_type",
"=",
"c_char",
"*",
"len_",
"text",
"=",
"text_type",
"(",
")",
"library",
"=",
"load_library",
"(",
")",
"if",
"context",
"==",
"\"client\"",
":",
"library",
".",
"Cli_ErrorText",
"(",
"error",
",",
"text",
",",
"len_",
")",
"elif",
"context",
"==",
"\"server\"",
":",
"library",
".",
"Srv_ErrorText",
"(",
"error",
",",
"text",
",",
"len_",
")",
"elif",
"context",
"==",
"\"partner\"",
":",
"library",
".",
"Par_ErrorText",
"(",
"error",
",",
"text",
",",
"len_",
")",
"return",
"text",
".",
"value"
] | Returns a textual explanation of a given error number
:param error: an error integer
:param context: server, client or partner
:returns: the error string | [
"Returns",
"a",
"textual",
"explanation",
"of",
"a",
"given",
"error",
"number"
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/isoline.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/isoline.py#L14-L91 | def iso_mesh_line(vertices, tris, vertex_data, levels):
"""Generate an isocurve from vertex data in a surface mesh.
Parameters
----------
vertices : ndarray, shape (Nv, 3)
Vertex coordinates.
tris : ndarray, shape (Nf, 3)
Indices of triangular element into the vertices array.
vertex_data : ndarray, shape (Nv,)
data at vertex.
levels : ndarray, shape (Nl,)
Levels at which to generate an isocurve
Returns
-------
lines : ndarray, shape (Nvout, 3)
Vertex coordinates for lines points
connects : ndarray, shape (Ne, 2)
Indices of line element into the vertex array.
vertex_level: ndarray, shape (Nvout,)
level for vertex in lines
Notes
-----
Uses a marching squares algorithm to generate the isolines.
"""
lines = None
connects = None
vertex_level = None
level_index = None
if not all([isinstance(x, np.ndarray) for x in (vertices, tris,
vertex_data, levels)]):
raise ValueError('all inputs must be numpy arrays')
if vertices.shape[1] <= 3:
verts = vertices
elif vertices.shape[1] == 4:
verts = vertices[:, :-1]
else:
verts = None
if (verts is not None and tris.shape[1] == 3 and
vertex_data.shape[0] == verts.shape[0]):
edges = np.vstack((tris.reshape((-1)),
np.roll(tris, -1, axis=1).reshape((-1)))).T
edge_datas = vertex_data[edges]
edge_coors = verts[edges].reshape(tris.shape[0]*3, 2, 3)
for lev in levels:
# index for select edges with vertices have only False - True
# or True - False at extremity
index = (edge_datas >= lev)
index = index[:, 0] ^ index[:, 1] # xor calculation
# Selectect edge
edge_datas_Ok = edge_datas[index, :]
xyz = edge_coors[index]
# Linear interpolation
ratio = np.array([(lev - edge_datas_Ok[:, 0]) /
(edge_datas_Ok[:, 1] - edge_datas_Ok[:, 0])])
point = xyz[:, 0, :] + ratio.T * (xyz[:, 1, :] - xyz[:, 0, :])
nbr = point.shape[0]//2
if connects is not None:
connect = np.arange(0, nbr*2).reshape((nbr, 2)) + \
len(lines)
connects = np.append(connects, connect, axis=0)
lines = np.append(lines, point, axis=0)
vertex_level = np.append(vertex_level,
np.zeros(len(point)) +
lev)
level_index = np.append(level_index, np.array(len(point)))
else:
lines = point
connects = np.arange(0, nbr*2).reshape((nbr, 2))
vertex_level = np.zeros(len(point)) + lev
level_index = np.array(len(point))
vertex_level = vertex_level.reshape((vertex_level.size, 1))
return lines, connects, vertex_level, level_index | [
"def",
"iso_mesh_line",
"(",
"vertices",
",",
"tris",
",",
"vertex_data",
",",
"levels",
")",
":",
"lines",
"=",
"None",
"connects",
"=",
"None",
"vertex_level",
"=",
"None",
"level_index",
"=",
"None",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"for",
"x",
"in",
"(",
"vertices",
",",
"tris",
",",
"vertex_data",
",",
"levels",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'all inputs must be numpy arrays'",
")",
"if",
"vertices",
".",
"shape",
"[",
"1",
"]",
"<=",
"3",
":",
"verts",
"=",
"vertices",
"elif",
"vertices",
".",
"shape",
"[",
"1",
"]",
"==",
"4",
":",
"verts",
"=",
"vertices",
"[",
":",
",",
":",
"-",
"1",
"]",
"else",
":",
"verts",
"=",
"None",
"if",
"(",
"verts",
"is",
"not",
"None",
"and",
"tris",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
"and",
"vertex_data",
".",
"shape",
"[",
"0",
"]",
"==",
"verts",
".",
"shape",
"[",
"0",
"]",
")",
":",
"edges",
"=",
"np",
".",
"vstack",
"(",
"(",
"tris",
".",
"reshape",
"(",
"(",
"-",
"1",
")",
")",
",",
"np",
".",
"roll",
"(",
"tris",
",",
"-",
"1",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
")",
")",
")",
")",
".",
"T",
"edge_datas",
"=",
"vertex_data",
"[",
"edges",
"]",
"edge_coors",
"=",
"verts",
"[",
"edges",
"]",
".",
"reshape",
"(",
"tris",
".",
"shape",
"[",
"0",
"]",
"*",
"3",
",",
"2",
",",
"3",
")",
"for",
"lev",
"in",
"levels",
":",
"# index for select edges with vertices have only False - True",
"# or True - False at extremity",
"index",
"=",
"(",
"edge_datas",
">=",
"lev",
")",
"index",
"=",
"index",
"[",
":",
",",
"0",
"]",
"^",
"index",
"[",
":",
",",
"1",
"]",
"# xor calculation",
"# Selectect edge",
"edge_datas_Ok",
"=",
"edge_datas",
"[",
"index",
",",
":",
"]",
"xyz",
"=",
"edge_coors",
"[",
"index",
"]",
"# Linear interpolation",
"ratio",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"lev",
"-",
"edge_datas_Ok",
"[",
":",
",",
"0",
"]",
")",
"/",
"(",
"edge_datas_Ok",
"[",
":",
",",
"1",
"]",
"-",
"edge_datas_Ok",
"[",
":",
",",
"0",
"]",
")",
"]",
")",
"point",
"=",
"xyz",
"[",
":",
",",
"0",
",",
":",
"]",
"+",
"ratio",
".",
"T",
"*",
"(",
"xyz",
"[",
":",
",",
"1",
",",
":",
"]",
"-",
"xyz",
"[",
":",
",",
"0",
",",
":",
"]",
")",
"nbr",
"=",
"point",
".",
"shape",
"[",
"0",
"]",
"//",
"2",
"if",
"connects",
"is",
"not",
"None",
":",
"connect",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"nbr",
"*",
"2",
")",
".",
"reshape",
"(",
"(",
"nbr",
",",
"2",
")",
")",
"+",
"len",
"(",
"lines",
")",
"connects",
"=",
"np",
".",
"append",
"(",
"connects",
",",
"connect",
",",
"axis",
"=",
"0",
")",
"lines",
"=",
"np",
".",
"append",
"(",
"lines",
",",
"point",
",",
"axis",
"=",
"0",
")",
"vertex_level",
"=",
"np",
".",
"append",
"(",
"vertex_level",
",",
"np",
".",
"zeros",
"(",
"len",
"(",
"point",
")",
")",
"+",
"lev",
")",
"level_index",
"=",
"np",
".",
"append",
"(",
"level_index",
",",
"np",
".",
"array",
"(",
"len",
"(",
"point",
")",
")",
")",
"else",
":",
"lines",
"=",
"point",
"connects",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"nbr",
"*",
"2",
")",
".",
"reshape",
"(",
"(",
"nbr",
",",
"2",
")",
")",
"vertex_level",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"point",
")",
")",
"+",
"lev",
"level_index",
"=",
"np",
".",
"array",
"(",
"len",
"(",
"point",
")",
")",
"vertex_level",
"=",
"vertex_level",
".",
"reshape",
"(",
"(",
"vertex_level",
".",
"size",
",",
"1",
")",
")",
"return",
"lines",
",",
"connects",
",",
"vertex_level",
",",
"level_index"
] | Generate an isocurve from vertex data in a surface mesh.
Parameters
----------
vertices : ndarray, shape (Nv, 3)
Vertex coordinates.
tris : ndarray, shape (Nf, 3)
Indices of triangular element into the vertices array.
vertex_data : ndarray, shape (Nv,)
data at vertex.
levels : ndarray, shape (Nl,)
Levels at which to generate an isocurve
Returns
-------
lines : ndarray, shape (Nvout, 3)
Vertex coordinates for lines points
connects : ndarray, shape (Ne, 2)
Indices of line element into the vertex array.
vertex_level: ndarray, shape (Nvout,)
level for vertex in lines
Notes
-----
Uses a marching squares algorithm to generate the isolines. | [
"Generate",
"an",
"isocurve",
"from",
"vertex",
"data",
"in",
"a",
"surface",
"mesh",
"."
] | python | train |
espressif/esptool | esptool.py | https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/esptool.py#L1185-L1199 | def get_flash_crypt_config(self):
""" bit 3 in efuse_rd_disable[3:0] is mapped to flash_crypt_config
this bit is at position 19 in EFUSE_BLK0_RDATA0_REG """
word0 = self.read_efuse(0)
rd_disable = (word0 >> 19) & 0x1
if rd_disable == 0:
""" we can read the flash_crypt_config efuse value
so go & read it (EFUSE_BLK0_RDATA5_REG[31:28]) """
word5 = self.read_efuse(5)
word5 = (word5 >> 28) & 0xF
return word5
else:
# if read of the efuse is disabled we assume it is set correctly
return 0xF | [
"def",
"get_flash_crypt_config",
"(",
"self",
")",
":",
"word0",
"=",
"self",
".",
"read_efuse",
"(",
"0",
")",
"rd_disable",
"=",
"(",
"word0",
">>",
"19",
")",
"&",
"0x1",
"if",
"rd_disable",
"==",
"0",
":",
"\"\"\" we can read the flash_crypt_config efuse value\n so go & read it (EFUSE_BLK0_RDATA5_REG[31:28]) \"\"\"",
"word5",
"=",
"self",
".",
"read_efuse",
"(",
"5",
")",
"word5",
"=",
"(",
"word5",
">>",
"28",
")",
"&",
"0xF",
"return",
"word5",
"else",
":",
"# if read of the efuse is disabled we assume it is set correctly",
"return",
"0xF"
] | bit 3 in efuse_rd_disable[3:0] is mapped to flash_crypt_config
this bit is at position 19 in EFUSE_BLK0_RDATA0_REG | [
"bit",
"3",
"in",
"efuse_rd_disable",
"[",
"3",
":",
"0",
"]",
"is",
"mapped",
"to",
"flash_crypt_config",
"this",
"bit",
"is",
"at",
"position",
"19",
"in",
"EFUSE_BLK0_RDATA0_REG"
] | python | train |
tBuLi/symfit | symfit/core/support.py | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L108-L160 | def sympy_to_py(func, args):
"""
Turn a symbolic expression into a Python lambda function,
which has the names of the variables and parameters as it's argument names.
:param func: sympy expression
:param args: variables and parameters in this model
:return: lambda function to be used for numerical evaluation of the model.
"""
# replace the derivatives with printable variables.
derivatives = {var: Variable(var.name) for var in args
if isinstance(var, sympy.Derivative)}
func = func.xreplace(derivatives)
args = [derivatives[var] if isinstance(var, sympy.Derivative) else var
for var in args]
lambdafunc = lambdify(args, func, printer=SymfitNumPyPrinter,
dummify=False)
# Check if the names of the lambda function are what we expect
signature = inspect_sig.signature(lambdafunc)
sig_parameters = OrderedDict(signature.parameters)
for arg, lambda_arg in zip(args, sig_parameters):
if arg.name != lambda_arg:
break
else: # Lambdifying succesful!
return lambdafunc
# If we are here (very rare), then one of the lambda arg is still a Dummy.
# In this case we will manually handle the naming.
lambda_names = sig_parameters.keys()
arg_names = [arg.name for arg in args]
conversion = dict(zip(arg_names, lambda_names))
# Wrap the lambda such that arg names are translated into the correct dummy
# symbol names
@wraps(lambdafunc)
def wrapped_lambdafunc(*ordered_args, **kwargs):
converted_kwargs = {conversion[k]: v for k, v in kwargs.items()}
return lambdafunc(*ordered_args, **converted_kwargs)
# Update the signature of wrapped_lambdafunc to math our args
new_sig_parameters = OrderedDict()
for arg_name, dummy_name in conversion.items():
if arg_name == dummy_name: # Already has the correct name
new_sig_parameters[arg_name] = sig_parameters[arg_name]
else: # Change the dummy inspect.Parameter to the correct name
param = sig_parameters[dummy_name]
param = param.replace(name=arg_name)
new_sig_parameters[arg_name] = param
wrapped_lambdafunc.__signature__ = signature.replace(
parameters=new_sig_parameters.values()
)
return wrapped_lambdafunc | [
"def",
"sympy_to_py",
"(",
"func",
",",
"args",
")",
":",
"# replace the derivatives with printable variables.",
"derivatives",
"=",
"{",
"var",
":",
"Variable",
"(",
"var",
".",
"name",
")",
"for",
"var",
"in",
"args",
"if",
"isinstance",
"(",
"var",
",",
"sympy",
".",
"Derivative",
")",
"}",
"func",
"=",
"func",
".",
"xreplace",
"(",
"derivatives",
")",
"args",
"=",
"[",
"derivatives",
"[",
"var",
"]",
"if",
"isinstance",
"(",
"var",
",",
"sympy",
".",
"Derivative",
")",
"else",
"var",
"for",
"var",
"in",
"args",
"]",
"lambdafunc",
"=",
"lambdify",
"(",
"args",
",",
"func",
",",
"printer",
"=",
"SymfitNumPyPrinter",
",",
"dummify",
"=",
"False",
")",
"# Check if the names of the lambda function are what we expect",
"signature",
"=",
"inspect_sig",
".",
"signature",
"(",
"lambdafunc",
")",
"sig_parameters",
"=",
"OrderedDict",
"(",
"signature",
".",
"parameters",
")",
"for",
"arg",
",",
"lambda_arg",
"in",
"zip",
"(",
"args",
",",
"sig_parameters",
")",
":",
"if",
"arg",
".",
"name",
"!=",
"lambda_arg",
":",
"break",
"else",
":",
"# Lambdifying succesful!",
"return",
"lambdafunc",
"# If we are here (very rare), then one of the lambda arg is still a Dummy.",
"# In this case we will manually handle the naming.",
"lambda_names",
"=",
"sig_parameters",
".",
"keys",
"(",
")",
"arg_names",
"=",
"[",
"arg",
".",
"name",
"for",
"arg",
"in",
"args",
"]",
"conversion",
"=",
"dict",
"(",
"zip",
"(",
"arg_names",
",",
"lambda_names",
")",
")",
"# Wrap the lambda such that arg names are translated into the correct dummy",
"# symbol names",
"@",
"wraps",
"(",
"lambdafunc",
")",
"def",
"wrapped_lambdafunc",
"(",
"*",
"ordered_args",
",",
"*",
"*",
"kwargs",
")",
":",
"converted_kwargs",
"=",
"{",
"conversion",
"[",
"k",
"]",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"lambdafunc",
"(",
"*",
"ordered_args",
",",
"*",
"*",
"converted_kwargs",
")",
"# Update the signature of wrapped_lambdafunc to math our args",
"new_sig_parameters",
"=",
"OrderedDict",
"(",
")",
"for",
"arg_name",
",",
"dummy_name",
"in",
"conversion",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"==",
"dummy_name",
":",
"# Already has the correct name",
"new_sig_parameters",
"[",
"arg_name",
"]",
"=",
"sig_parameters",
"[",
"arg_name",
"]",
"else",
":",
"# Change the dummy inspect.Parameter to the correct name",
"param",
"=",
"sig_parameters",
"[",
"dummy_name",
"]",
"param",
"=",
"param",
".",
"replace",
"(",
"name",
"=",
"arg_name",
")",
"new_sig_parameters",
"[",
"arg_name",
"]",
"=",
"param",
"wrapped_lambdafunc",
".",
"__signature__",
"=",
"signature",
".",
"replace",
"(",
"parameters",
"=",
"new_sig_parameters",
".",
"values",
"(",
")",
")",
"return",
"wrapped_lambdafunc"
] | Turn a symbolic expression into a Python lambda function,
which has the names of the variables and parameters as it's argument names.
:param func: sympy expression
:param args: variables and parameters in this model
:return: lambda function to be used for numerical evaluation of the model. | [
"Turn",
"a",
"symbolic",
"expression",
"into",
"a",
"Python",
"lambda",
"function",
"which",
"has",
"the",
"names",
"of",
"the",
"variables",
"and",
"parameters",
"as",
"it",
"s",
"argument",
"names",
"."
] | python | train |
inveniosoftware-contrib/json-merger | json_merger/conflict.py | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/conflict.py#L95-L97 | def with_prefix(self, root_path):
"""Returns a new conflict with a prepended prefix as a path."""
return Conflict(self.conflict_type, root_path + self.path, self.body) | [
"def",
"with_prefix",
"(",
"self",
",",
"root_path",
")",
":",
"return",
"Conflict",
"(",
"self",
".",
"conflict_type",
",",
"root_path",
"+",
"self",
".",
"path",
",",
"self",
".",
"body",
")"
] | Returns a new conflict with a prepended prefix as a path. | [
"Returns",
"a",
"new",
"conflict",
"with",
"a",
"prepended",
"prefix",
"as",
"a",
"path",
"."
] | python | train |
AndrewIngram/django-extra-views | extra_views/formsets.py | https://github.com/AndrewIngram/django-extra-views/blob/188e1bf1f15a44d9a599028d020083af9fb43ea7/extra_views/formsets.py#L206-L219 | def get_formset_kwargs(self):
"""
Returns the keyword arguments for instantiating the formset.
"""
# Perform deprecation check
if hasattr(self, 'save_as_new'):
klass = type(self).__name__
raise DeprecationWarning(
'Setting `{0}.save_as_new` at the class level is now '
'deprecated. Set `{0}.formset_kwargs` instead.'.format(klass)
)
kwargs = super(BaseInlineFormSetFactory, self).get_formset_kwargs()
kwargs['instance'] = self.object
return kwargs | [
"def",
"get_formset_kwargs",
"(",
"self",
")",
":",
"# Perform deprecation check",
"if",
"hasattr",
"(",
"self",
",",
"'save_as_new'",
")",
":",
"klass",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"raise",
"DeprecationWarning",
"(",
"'Setting `{0}.save_as_new` at the class level is now '",
"'deprecated. Set `{0}.formset_kwargs` instead.'",
".",
"format",
"(",
"klass",
")",
")",
"kwargs",
"=",
"super",
"(",
"BaseInlineFormSetFactory",
",",
"self",
")",
".",
"get_formset_kwargs",
"(",
")",
"kwargs",
"[",
"'instance'",
"]",
"=",
"self",
".",
"object",
"return",
"kwargs"
] | Returns the keyword arguments for instantiating the formset. | [
"Returns",
"the",
"keyword",
"arguments",
"for",
"instantiating",
"the",
"formset",
"."
] | python | valid |
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L151-L180 | def _add_default_entries(input_dict, defaults_dict):
"""
Add the entries in defaults dict into input_dict if they don't exist in input_dict
This is based on the accepted answer at
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
:param dict input_dict: The dict to be updated
:param dict defaults_dict: Dict containing the defaults for entries in input_dict
:return: updated dict
:rtype: dict
"""
for key, value in defaults_dict.iteritems():
if key == 'patients':
print('Cannot default `patients`.')
continue
if isinstance(value, dict):
if key not in input_dict or input_dict[key] is None:
# User didn't specify anython for the tool, but the entry was still in there so we
# just copy over the whole defaults dict
input_dict[key] = value
else:
r = _add_default_entries(input_dict.get(key, {}), value)
input_dict[key] = r
else:
# Only write if not in input_dict
if key not in input_dict or input_dict[key] is None:
# Either the user didn't have the entry, or had it without a value
input_dict[key] = value
return input_dict | [
"def",
"_add_default_entries",
"(",
"input_dict",
",",
"defaults_dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"defaults_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"==",
"'patients'",
":",
"print",
"(",
"'Cannot default `patients`.'",
")",
"continue",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"key",
"not",
"in",
"input_dict",
"or",
"input_dict",
"[",
"key",
"]",
"is",
"None",
":",
"# User didn't specify anython for the tool, but the entry was still in there so we",
"# just copy over the whole defaults dict",
"input_dict",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"r",
"=",
"_add_default_entries",
"(",
"input_dict",
".",
"get",
"(",
"key",
",",
"{",
"}",
")",
",",
"value",
")",
"input_dict",
"[",
"key",
"]",
"=",
"r",
"else",
":",
"# Only write if not in input_dict",
"if",
"key",
"not",
"in",
"input_dict",
"or",
"input_dict",
"[",
"key",
"]",
"is",
"None",
":",
"# Either the user didn't have the entry, or had it without a value",
"input_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"input_dict"
] | Add the entries in defaults dict into input_dict if they don't exist in input_dict
This is based on the accepted answer at
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
:param dict input_dict: The dict to be updated
:param dict defaults_dict: Dict containing the defaults for entries in input_dict
:return: updated dict
:rtype: dict | [
"Add",
"the",
"entries",
"in",
"defaults",
"dict",
"into",
"input_dict",
"if",
"they",
"don",
"t",
"exist",
"in",
"input_dict"
] | python | train |
LLNL/scraper | scraper/util.py | https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/util.py#L161-L196 | def compute_labor_hours(sloc, month_hours='cocomo_book'):
"""
Compute the labor hours, given a count of source lines of code
The intention is to use the COCOMO II model to compute this value.
References:
- http://csse.usc.edu/tools/cocomoii.php
- http://docs.python-guide.org/en/latest/scenarios/scrape/
"""
# Calculation of hours in a month
if month_hours == 'hours_per_year':
# Use number of working hours in a year:
# (40 Hours / week) * (52 weeks / year) / (12 months / year) ~= 173.33
HOURS_PER_PERSON_MONTH = 40.0 * 52 / 12
else:
# Use value from COCOMO II Book (month_hours=='cocomo_book'):
# Reference: https://dl.acm.org/citation.cfm?id=557000
# This is the value used by the Code.gov team:
# https://github.com/GSA/code-gov/blob/master/LABOR_HOUR_CALC.md
HOURS_PER_PERSON_MONTH = 152.0
cocomo_url = 'http://csse.usc.edu/tools/cocomoii.php'
page = requests.post(cocomo_url, data={'new_size': sloc})
try:
person_months = float(EFFORT_REGEX.search(page.text).group(1))
except AttributeError:
logger.error('Unable to find Person Months in page text: sloc=%s', sloc)
# If there is no match, and .search(..) returns None
person_months = 0
labor_hours = person_months * HOURS_PER_PERSON_MONTH
logger.debug('sloc=%d labor_hours=%d', sloc, labor_hours)
return labor_hours | [
"def",
"compute_labor_hours",
"(",
"sloc",
",",
"month_hours",
"=",
"'cocomo_book'",
")",
":",
"# Calculation of hours in a month",
"if",
"month_hours",
"==",
"'hours_per_year'",
":",
"# Use number of working hours in a year:",
"# (40 Hours / week) * (52 weeks / year) / (12 months / year) ~= 173.33",
"HOURS_PER_PERSON_MONTH",
"=",
"40.0",
"*",
"52",
"/",
"12",
"else",
":",
"# Use value from COCOMO II Book (month_hours=='cocomo_book'):",
"# Reference: https://dl.acm.org/citation.cfm?id=557000",
"# This is the value used by the Code.gov team:",
"# https://github.com/GSA/code-gov/blob/master/LABOR_HOUR_CALC.md",
"HOURS_PER_PERSON_MONTH",
"=",
"152.0",
"cocomo_url",
"=",
"'http://csse.usc.edu/tools/cocomoii.php'",
"page",
"=",
"requests",
".",
"post",
"(",
"cocomo_url",
",",
"data",
"=",
"{",
"'new_size'",
":",
"sloc",
"}",
")",
"try",
":",
"person_months",
"=",
"float",
"(",
"EFFORT_REGEX",
".",
"search",
"(",
"page",
".",
"text",
")",
".",
"group",
"(",
"1",
")",
")",
"except",
"AttributeError",
":",
"logger",
".",
"error",
"(",
"'Unable to find Person Months in page text: sloc=%s'",
",",
"sloc",
")",
"# If there is no match, and .search(..) returns None",
"person_months",
"=",
"0",
"labor_hours",
"=",
"person_months",
"*",
"HOURS_PER_PERSON_MONTH",
"logger",
".",
"debug",
"(",
"'sloc=%d labor_hours=%d'",
",",
"sloc",
",",
"labor_hours",
")",
"return",
"labor_hours"
] | Compute the labor hours, given a count of source lines of code
The intention is to use the COCOMO II model to compute this value.
References:
- http://csse.usc.edu/tools/cocomoii.php
- http://docs.python-guide.org/en/latest/scenarios/scrape/ | [
"Compute",
"the",
"labor",
"hours",
"given",
"a",
"count",
"of",
"source",
"lines",
"of",
"code"
] | python | test |
CivicSpleen/ambry | ambry/orm/database.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L470-L474 | def root_dataset(self):
"""Return the root dataset, which hold configuration values for the library"""
ds = self.dataset(ROOT_CONFIG_NAME_V)
ds._database = self
return ds | [
"def",
"root_dataset",
"(",
"self",
")",
":",
"ds",
"=",
"self",
".",
"dataset",
"(",
"ROOT_CONFIG_NAME_V",
")",
"ds",
".",
"_database",
"=",
"self",
"return",
"ds"
] | Return the root dataset, which hold configuration values for the library | [
"Return",
"the",
"root",
"dataset",
"which",
"hold",
"configuration",
"values",
"for",
"the",
"library"
] | python | train |
pvlib/pvlib-python | pvlib/spa.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/spa.py#L1263-L1294 | def earthsun_distance(unixtime, delta_t, numthreads):
"""
Calculates the distance from the earth to the sun using the
NREL SPA algorithm described in [1].
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
numthreads : int
Number to threads to use for calculation (if using numba)
Returns
-------
R : array
Earth-Sun distance in AU.
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
R = solar_position(unixtime, 0, 0, 0, 0, 0, delta_t,
0, numthreads, esd=True)[0]
return R | [
"def",
"earthsun_distance",
"(",
"unixtime",
",",
"delta_t",
",",
"numthreads",
")",
":",
"R",
"=",
"solar_position",
"(",
"unixtime",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"delta_t",
",",
"0",
",",
"numthreads",
",",
"esd",
"=",
"True",
")",
"[",
"0",
"]",
"return",
"R"
] | Calculates the distance from the earth to the sun using the
NREL SPA algorithm described in [1].
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
numthreads : int
Number to threads to use for calculation (if using numba)
Returns
-------
R : array
Earth-Sun distance in AU.
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov. | [
"Calculates",
"the",
"distance",
"from",
"the",
"earth",
"to",
"the",
"sun",
"using",
"the",
"NREL",
"SPA",
"algorithm",
"described",
"in",
"[",
"1",
"]",
"."
] | python | train |
PythonCharmers/python-future | src/future/backports/http/cookiejar.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L618-L627 | def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn | [
"def",
"eff_request_host",
"(",
"request",
")",
":",
"erhn",
"=",
"req_host",
"=",
"request_host",
"(",
"request",
")",
"if",
"req_host",
".",
"find",
"(",
"\".\"",
")",
"==",
"-",
"1",
"and",
"not",
"IPV4_RE",
".",
"search",
"(",
"req_host",
")",
":",
"erhn",
"=",
"req_host",
"+",
"\".local\"",
"return",
"req_host",
",",
"erhn"
] | Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased. | [
"Return",
"a",
"tuple",
"(",
"request",
"-",
"host",
"effective",
"request",
"-",
"host",
"name",
")",
"."
] | python | train |
quantmind/pulsar-cloud | cloud/pusher.py | https://github.com/quantmind/pulsar-cloud/blob/dc2ff8ab5c9a1c2cfb1270581d30454d1a606cf9/cloud/pusher.py#L149-L175 | def on_message(self, websocket, message):
'''Handle websocket incoming messages
'''
waiter = self._waiter
self._waiter = None
encoded = json.loads(message)
event = encoded.get('event')
channel = encoded.get('channel')
data = json.loads(encoded.get('data'))
try:
if event == PUSHER_ERROR:
raise PusherError(data['message'], data['code'])
elif event == PUSHER_CONNECTION:
self.socket_id = data.get('socket_id')
self.logger.info('Succesfully connected on socket %s',
self.socket_id)
waiter.set_result(self.socket_id)
elif event == PUSHER_SUBSCRIBED:
self.logger.info('Succesfully subscribed to %s',
encoded.get('channel'))
elif channel:
self[channel]._event(event, data)
except Exception as exc:
if waiter:
waiter.set_exception(exc)
else:
self.logger.exception('pusher error') | [
"def",
"on_message",
"(",
"self",
",",
"websocket",
",",
"message",
")",
":",
"waiter",
"=",
"self",
".",
"_waiter",
"self",
".",
"_waiter",
"=",
"None",
"encoded",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"event",
"=",
"encoded",
".",
"get",
"(",
"'event'",
")",
"channel",
"=",
"encoded",
".",
"get",
"(",
"'channel'",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"encoded",
".",
"get",
"(",
"'data'",
")",
")",
"try",
":",
"if",
"event",
"==",
"PUSHER_ERROR",
":",
"raise",
"PusherError",
"(",
"data",
"[",
"'message'",
"]",
",",
"data",
"[",
"'code'",
"]",
")",
"elif",
"event",
"==",
"PUSHER_CONNECTION",
":",
"self",
".",
"socket_id",
"=",
"data",
".",
"get",
"(",
"'socket_id'",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Succesfully connected on socket %s'",
",",
"self",
".",
"socket_id",
")",
"waiter",
".",
"set_result",
"(",
"self",
".",
"socket_id",
")",
"elif",
"event",
"==",
"PUSHER_SUBSCRIBED",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Succesfully subscribed to %s'",
",",
"encoded",
".",
"get",
"(",
"'channel'",
")",
")",
"elif",
"channel",
":",
"self",
"[",
"channel",
"]",
".",
"_event",
"(",
"event",
",",
"data",
")",
"except",
"Exception",
"as",
"exc",
":",
"if",
"waiter",
":",
"waiter",
".",
"set_exception",
"(",
"exc",
")",
"else",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'pusher error'",
")"
] | Handle websocket incoming messages | [
"Handle",
"websocket",
"incoming",
"messages"
] | python | valid |
gbowerman/azurerm | azurerm/amsrp.py | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/amsrp.py#L600-L629 | def create_streaming_endpoint(access_token, name, description="New Streaming Endpoint", \
scale_units="1"):
'''Create Media Service Streaming Endpoint.
Args:
access_token (str): A valid Azure authentication token.
name (str): A Media Service Streaming Endpoint Name.
description (str): A Media Service Streaming Endpoint Description.
scale_units (str): A Media Service Scale Units Number.
Returns:
HTTP response. JSON body.
'''
path = '/StreamingEndpoints'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"Id":null, \
"Name":"' + name + '", \
"Description":"' + description + '", \
"Created":"0001-01-01T00:00:00", \
"LastModified":"0001-01-01T00:00:00", \
"State":null, \
"HostName":null, \
"ScaleUnits":"' + scale_units + '", \
"CrossSiteAccessPolicies":{ \
"ClientAccessPolicy":"<access-policy><cross-domain-access><policy><allow-from http-request-headers=\\"*\\"><domain uri=\\"http://*\\" /></allow-from><grant-to><resource path=\\"/\\" include-subpaths=\\"false\\" /></grant-to></policy></cross-domain-access></access-policy>", \
"CrossDomainPolicy":"<?xml version=\\"1.0\\"?><!DOCTYPE cross-domain-policy SYSTEM \\"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\\"><cross-domain-policy><allow-access-from domain=\\"*\\" /></cross-domain-policy>" \
} \
}'
return do_ams_post(endpoint, path, body, access_token) | [
"def",
"create_streaming_endpoint",
"(",
"access_token",
",",
"name",
",",
"description",
"=",
"\"New Streaming Endpoint\"",
",",
"scale_units",
"=",
"\"1\"",
")",
":",
"path",
"=",
"'/StreamingEndpoints'",
"endpoint",
"=",
"''",
".",
"join",
"(",
"[",
"ams_rest_endpoint",
",",
"path",
"]",
")",
"body",
"=",
"'{ \\\n\t\t\"Id\":null, \\\n\t\t\"Name\":\"'",
"+",
"name",
"+",
"'\", \\\n\t\t\"Description\":\"'",
"+",
"description",
"+",
"'\", \\\n\t\t\"Created\":\"0001-01-01T00:00:00\", \\\n\t\t\"LastModified\":\"0001-01-01T00:00:00\", \\\n\t\t\"State\":null, \\\n\t\t\"HostName\":null, \\\n\t\t\"ScaleUnits\":\"'",
"+",
"scale_units",
"+",
"'\", \\\n\t\t\"CrossSiteAccessPolicies\":{ \\\n\t\t\t\"ClientAccessPolicy\":\"<access-policy><cross-domain-access><policy><allow-from http-request-headers=\\\\\"*\\\\\"><domain uri=\\\\\"http://*\\\\\" /></allow-from><grant-to><resource path=\\\\\"/\\\\\" include-subpaths=\\\\\"false\\\\\" /></grant-to></policy></cross-domain-access></access-policy>\", \\\n\t\t\t\"CrossDomainPolicy\":\"<?xml version=\\\\\"1.0\\\\\"?><!DOCTYPE cross-domain-policy SYSTEM \\\\\"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\\\\\"><cross-domain-policy><allow-access-from domain=\\\\\"*\\\\\" /></cross-domain-policy>\" \\\n\t\t} \\\n\t}'",
"return",
"do_ams_post",
"(",
"endpoint",
",",
"path",
",",
"body",
",",
"access_token",
")"
] | Create Media Service Streaming Endpoint.
Args:
access_token (str): A valid Azure authentication token.
name (str): A Media Service Streaming Endpoint Name.
description (str): A Media Service Streaming Endpoint Description.
scale_units (str): A Media Service Scale Units Number.
Returns:
HTTP response. JSON body. | [
"Create",
"Media",
"Service",
"Streaming",
"Endpoint",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/learning/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L5183-L5206 | def assign_proficiency_to_objective_bank(self, proficiency_id, objective_bank_id):
"""Adds an existing ``Proficiency`` to a ``ObjectiveBank``.
arg: proficiency_id (osid.id.Id): the ``Id`` of the
``Proficiency``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: AlreadyExists - ``proficiency_id`` is already mapped to
``objective_bank_id``
raise: NotFound - ``proficiency_id`` or ``objective_bank_id``
not found
raise: NullArgument - ``proficiency_id`` or
``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
lookup_session.get_objective_bank(objective_bank_id) # to raise NotFound
self._assign_object_to_catalog(proficiency_id, objective_bank_id) | [
"def",
"assign_proficiency_to_objective_bank",
"(",
"self",
",",
"proficiency_id",
",",
"objective_bank_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'LEARNING'",
",",
"local",
"=",
"True",
")",
"lookup_session",
"=",
"mgr",
".",
"get_objective_bank_lookup_session",
"(",
"proxy",
"=",
"self",
".",
"_proxy",
")",
"lookup_session",
".",
"get_objective_bank",
"(",
"objective_bank_id",
")",
"# to raise NotFound",
"self",
".",
"_assign_object_to_catalog",
"(",
"proficiency_id",
",",
"objective_bank_id",
")"
] | Adds an existing ``Proficiency`` to a ``ObjectiveBank``.
arg: proficiency_id (osid.id.Id): the ``Id`` of the
``Proficiency``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: AlreadyExists - ``proficiency_id`` is already mapped to
``objective_bank_id``
raise: NotFound - ``proficiency_id`` or ``objective_bank_id``
not found
raise: NullArgument - ``proficiency_id`` or
``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Adds",
"an",
"existing",
"Proficiency",
"to",
"a",
"ObjectiveBank",
"."
] | python | train |
rfk/tnetstring | tnetstring/__init__.py | https://github.com/rfk/tnetstring/blob/146381498a07d6053e044375562be08ef16017c2/tnetstring/__init__.py#L86-L171 | def _rdumpq(q,size,value,encoding=None):
"""Dump value as a tnetstring, to a deque instance, last chunks first.
This function generates the tnetstring representation of the given value,
pushing chunks of the output onto the given deque instance. It pushes
the last chunk first, then recursively generates more chunks.
When passed in the current size of the string in the queue, it will return
the new size of the string in the queue.
Operating last-chunk-first makes it easy to calculate the size written
for recursive structures without having to build their representation as
a string. This is measurably faster than generating the intermediate
strings, especially on deeply nested structures.
"""
write = q.appendleft
if value is None:
write("0:~")
return size + 3
if value is True:
write("4:true!")
return size + 7
if value is False:
write("5:false!")
return size + 8
if isinstance(value,(int,long)):
data = str(value)
ldata = len(data)
span = str(ldata)
write("#")
write(data)
write(":")
write(span)
return size + 2 + len(span) + ldata
if isinstance(value,(float,)):
# Use repr() for float rather than str().
# It round-trips more accurately.
# Probably unnecessary in later python versions that
# use David Gay's ftoa routines.
data = repr(value)
ldata = len(data)
span = str(ldata)
write("^")
write(data)
write(":")
write(span)
return size + 2 + len(span) + ldata
if isinstance(value,str):
lvalue = len(value)
span = str(lvalue)
write(",")
write(value)
write(":")
write(span)
return size + 2 + len(span) + lvalue
if isinstance(value,(list,tuple,)):
write("]")
init_size = size = size + 1
for item in reversed(value):
size = _rdumpq(q,size,item,encoding)
span = str(size - init_size)
write(":")
write(span)
return size + 1 + len(span)
if isinstance(value,dict):
write("}")
init_size = size = size + 1
for (k,v) in value.iteritems():
size = _rdumpq(q,size,v,encoding)
size = _rdumpq(q,size,k,encoding)
span = str(size - init_size)
write(":")
write(span)
return size + 1 + len(span)
if isinstance(value,unicode):
if encoding is None:
raise ValueError("must specify encoding to dump unicode strings")
value = value.encode(encoding)
lvalue = len(value)
span = str(lvalue)
write(",")
write(value)
write(":")
write(span)
return size + 2 + len(span) + lvalue
raise ValueError("unserializable object") | [
"def",
"_rdumpq",
"(",
"q",
",",
"size",
",",
"value",
",",
"encoding",
"=",
"None",
")",
":",
"write",
"=",
"q",
".",
"appendleft",
"if",
"value",
"is",
"None",
":",
"write",
"(",
"\"0:~\"",
")",
"return",
"size",
"+",
"3",
"if",
"value",
"is",
"True",
":",
"write",
"(",
"\"4:true!\"",
")",
"return",
"size",
"+",
"7",
"if",
"value",
"is",
"False",
":",
"write",
"(",
"\"5:false!\"",
")",
"return",
"size",
"+",
"8",
"if",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"data",
"=",
"str",
"(",
"value",
")",
"ldata",
"=",
"len",
"(",
"data",
")",
"span",
"=",
"str",
"(",
"ldata",
")",
"write",
"(",
"\"#\"",
")",
"write",
"(",
"data",
")",
"write",
"(",
"\":\"",
")",
"write",
"(",
"span",
")",
"return",
"size",
"+",
"2",
"+",
"len",
"(",
"span",
")",
"+",
"ldata",
"if",
"isinstance",
"(",
"value",
",",
"(",
"float",
",",
")",
")",
":",
"# Use repr() for float rather than str().",
"# It round-trips more accurately.",
"# Probably unnecessary in later python versions that",
"# use David Gay's ftoa routines.",
"data",
"=",
"repr",
"(",
"value",
")",
"ldata",
"=",
"len",
"(",
"data",
")",
"span",
"=",
"str",
"(",
"ldata",
")",
"write",
"(",
"\"^\"",
")",
"write",
"(",
"data",
")",
"write",
"(",
"\":\"",
")",
"write",
"(",
"span",
")",
"return",
"size",
"+",
"2",
"+",
"len",
"(",
"span",
")",
"+",
"ldata",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"lvalue",
"=",
"len",
"(",
"value",
")",
"span",
"=",
"str",
"(",
"lvalue",
")",
"write",
"(",
"\",\"",
")",
"write",
"(",
"value",
")",
"write",
"(",
"\":\"",
")",
"write",
"(",
"span",
")",
"return",
"size",
"+",
"2",
"+",
"len",
"(",
"span",
")",
"+",
"lvalue",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
",",
")",
")",
":",
"write",
"(",
"\"]\"",
")",
"init_size",
"=",
"size",
"=",
"size",
"+",
"1",
"for",
"item",
"in",
"reversed",
"(",
"value",
")",
":",
"size",
"=",
"_rdumpq",
"(",
"q",
",",
"size",
",",
"item",
",",
"encoding",
")",
"span",
"=",
"str",
"(",
"size",
"-",
"init_size",
")",
"write",
"(",
"\":\"",
")",
"write",
"(",
"span",
")",
"return",
"size",
"+",
"1",
"+",
"len",
"(",
"span",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"write",
"(",
"\"}\"",
")",
"init_size",
"=",
"size",
"=",
"size",
"+",
"1",
"for",
"(",
"k",
",",
"v",
")",
"in",
"value",
".",
"iteritems",
"(",
")",
":",
"size",
"=",
"_rdumpq",
"(",
"q",
",",
"size",
",",
"v",
",",
"encoding",
")",
"size",
"=",
"_rdumpq",
"(",
"q",
",",
"size",
",",
"k",
",",
"encoding",
")",
"span",
"=",
"str",
"(",
"size",
"-",
"init_size",
")",
"write",
"(",
"\":\"",
")",
"write",
"(",
"span",
")",
"return",
"size",
"+",
"1",
"+",
"len",
"(",
"span",
")",
"if",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"if",
"encoding",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"must specify encoding to dump unicode strings\"",
")",
"value",
"=",
"value",
".",
"encode",
"(",
"encoding",
")",
"lvalue",
"=",
"len",
"(",
"value",
")",
"span",
"=",
"str",
"(",
"lvalue",
")",
"write",
"(",
"\",\"",
")",
"write",
"(",
"value",
")",
"write",
"(",
"\":\"",
")",
"write",
"(",
"span",
")",
"return",
"size",
"+",
"2",
"+",
"len",
"(",
"span",
")",
"+",
"lvalue",
"raise",
"ValueError",
"(",
"\"unserializable object\"",
")"
] | Dump value as a tnetstring, to a deque instance, last chunks first.
This function generates the tnetstring representation of the given value,
pushing chunks of the output onto the given deque instance. It pushes
the last chunk first, then recursively generates more chunks.
When passed in the current size of the string in the queue, it will return
the new size of the string in the queue.
Operating last-chunk-first makes it easy to calculate the size written
for recursive structures without having to build their representation as
a string. This is measurably faster than generating the intermediate
strings, especially on deeply nested structures. | [
"Dump",
"value",
"as",
"a",
"tnetstring",
"to",
"a",
"deque",
"instance",
"last",
"chunks",
"first",
"."
] | python | train |
ska-sa/purr | Purr/Editors.py | https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Editors.py#L259-L328 | def _makeDPItem(self, parent, dp, after=None):
"""Creates listview item for data product 'dp', inserts it after item 'after'"""
if parent:
item = QTreeWidgetItem(parent, after)
else:
item = QTreeWidgetItem()
item.setTextAlignment(self.ColAction, Qt.AlignRight | Qt.AlignVCenter)
item.setTextAlignment(self.ColFilename, Qt.AlignRight | Qt.AlignVCenter)
item.setTextAlignment(self.ColType, Qt.AlignLeft | Qt.AlignVCenter)
item.setTextAlignment(self.ColRename, Qt.AlignLeft | Qt.AlignVCenter)
item.setTextAlignment(self.ColRender, Qt.AlignHCenter | Qt.AlignVCenter)
item.setTextAlignment(self.ColComment, Qt.AlignLeft | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled | Qt.ItemIsEnabled)
item._dp = dp
# init stuff for combobox functions above
item._combobox_option_list = {}
item._combobox_current_index = {}
item._combobox_current_value = {}
item._combobox_indices = {}
item._combobox_changed = {}
# setup available policies for new or archived items, set initial policy
if dp.archived:
item._menu = self._archived_dp_menu
item._combobox_option_list[self.ColAction] = policies = self._policy_list_archived
policy = "keep"
else:
item._combobox_option_list[self.ColAction] = policies = self._policy_list_default
policy = dp.policy
# create reverse mapping from policy names to indices
item._combobox_indices[self.ColAction] = dict([(name, num) for num, (name, icon) in enumerate(policies)])
# init item policy
self.setItemPolicy(item, policy)
# set other columns
basename = os.path.basename(dp.sourcepath)
name, ext = os.path.splitext(basename)
item.setText(self.ColFilename, name)
item.setText(self.ColType, ext)
item.setToolTip(self.ColFilename, basename)
item.setToolTip(self.ColType, basename)
item.setData(self.ColComment, Qt.EditRole, QVariant(dp.comment or ""))
# make sure new filenames are unique
filename = _sanitizeFilename(dp.filename)
if not dp.archived:
# tack on .tgz prefix onto dirs
if os.path.isdir(dp.sourcepath) and not filename.endswith(".tgz"):
filename += ".tgz"
# form up set of taken names
taken_names = set()
for i0, dp0 in self.getItemDPList():
if dp0.policy not in ["remove", "ignore", "banish"]:
taken_names.add(str(i0.text(self.ColRename)))
# ensure uniqueness of filename
filename = _makeUniqueFilename(taken_names, filename)
item.setData(self.ColRename, Qt.EditRole, QVariant(filename))
# get list of available renderers
item._renderers = Purr.Render.getRenderers(dp.fullpath or dp.sourcepath)
item._render = 0
item._combobox_option_list[self.ColRender] = [(name, None) for name in item._renderers]
# create reverse mapping from renderer names to indices
item._combobox_indices[self.ColRender] = dict([(name, num) for num, name in enumerate(item._renderers)])
# for archived items, try to find renderer in list
if dp.archived:
try:
item._render = item._renderers.index(dp.render)
except:
pass
self._updateItemComboBoxIndex(item, self.ColRender, item._render)
# add to map of items
self.dpitems[dp.fullpath or dp.sourcepath] = item
return item | [
"def",
"_makeDPItem",
"(",
"self",
",",
"parent",
",",
"dp",
",",
"after",
"=",
"None",
")",
":",
"if",
"parent",
":",
"item",
"=",
"QTreeWidgetItem",
"(",
"parent",
",",
"after",
")",
"else",
":",
"item",
"=",
"QTreeWidgetItem",
"(",
")",
"item",
".",
"setTextAlignment",
"(",
"self",
".",
"ColAction",
",",
"Qt",
".",
"AlignRight",
"|",
"Qt",
".",
"AlignVCenter",
")",
"item",
".",
"setTextAlignment",
"(",
"self",
".",
"ColFilename",
",",
"Qt",
".",
"AlignRight",
"|",
"Qt",
".",
"AlignVCenter",
")",
"item",
".",
"setTextAlignment",
"(",
"self",
".",
"ColType",
",",
"Qt",
".",
"AlignLeft",
"|",
"Qt",
".",
"AlignVCenter",
")",
"item",
".",
"setTextAlignment",
"(",
"self",
".",
"ColRename",
",",
"Qt",
".",
"AlignLeft",
"|",
"Qt",
".",
"AlignVCenter",
")",
"item",
".",
"setTextAlignment",
"(",
"self",
".",
"ColRender",
",",
"Qt",
".",
"AlignHCenter",
"|",
"Qt",
".",
"AlignVCenter",
")",
"item",
".",
"setTextAlignment",
"(",
"self",
".",
"ColComment",
",",
"Qt",
".",
"AlignLeft",
"|",
"Qt",
".",
"AlignVCenter",
")",
"item",
".",
"setFlags",
"(",
"Qt",
".",
"ItemIsSelectable",
"|",
"Qt",
".",
"ItemIsDragEnabled",
"|",
"Qt",
".",
"ItemIsDropEnabled",
"|",
"Qt",
".",
"ItemIsEnabled",
")",
"item",
".",
"_dp",
"=",
"dp",
"# init stuff for combobox functions above",
"item",
".",
"_combobox_option_list",
"=",
"{",
"}",
"item",
".",
"_combobox_current_index",
"=",
"{",
"}",
"item",
".",
"_combobox_current_value",
"=",
"{",
"}",
"item",
".",
"_combobox_indices",
"=",
"{",
"}",
"item",
".",
"_combobox_changed",
"=",
"{",
"}",
"# setup available policies for new or archived items, set initial policy",
"if",
"dp",
".",
"archived",
":",
"item",
".",
"_menu",
"=",
"self",
".",
"_archived_dp_menu",
"item",
".",
"_combobox_option_list",
"[",
"self",
".",
"ColAction",
"]",
"=",
"policies",
"=",
"self",
".",
"_policy_list_archived",
"policy",
"=",
"\"keep\"",
"else",
":",
"item",
".",
"_combobox_option_list",
"[",
"self",
".",
"ColAction",
"]",
"=",
"policies",
"=",
"self",
".",
"_policy_list_default",
"policy",
"=",
"dp",
".",
"policy",
"# create reverse mapping from policy names to indices",
"item",
".",
"_combobox_indices",
"[",
"self",
".",
"ColAction",
"]",
"=",
"dict",
"(",
"[",
"(",
"name",
",",
"num",
")",
"for",
"num",
",",
"(",
"name",
",",
"icon",
")",
"in",
"enumerate",
"(",
"policies",
")",
"]",
")",
"# init item policy",
"self",
".",
"setItemPolicy",
"(",
"item",
",",
"policy",
")",
"# set other columns",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dp",
".",
"sourcepath",
")",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"basename",
")",
"item",
".",
"setText",
"(",
"self",
".",
"ColFilename",
",",
"name",
")",
"item",
".",
"setText",
"(",
"self",
".",
"ColType",
",",
"ext",
")",
"item",
".",
"setToolTip",
"(",
"self",
".",
"ColFilename",
",",
"basename",
")",
"item",
".",
"setToolTip",
"(",
"self",
".",
"ColType",
",",
"basename",
")",
"item",
".",
"setData",
"(",
"self",
".",
"ColComment",
",",
"Qt",
".",
"EditRole",
",",
"QVariant",
"(",
"dp",
".",
"comment",
"or",
"\"\"",
")",
")",
"# make sure new filenames are unique",
"filename",
"=",
"_sanitizeFilename",
"(",
"dp",
".",
"filename",
")",
"if",
"not",
"dp",
".",
"archived",
":",
"# tack on .tgz prefix onto dirs",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dp",
".",
"sourcepath",
")",
"and",
"not",
"filename",
".",
"endswith",
"(",
"\".tgz\"",
")",
":",
"filename",
"+=",
"\".tgz\"",
"# form up set of taken names",
"taken_names",
"=",
"set",
"(",
")",
"for",
"i0",
",",
"dp0",
"in",
"self",
".",
"getItemDPList",
"(",
")",
":",
"if",
"dp0",
".",
"policy",
"not",
"in",
"[",
"\"remove\"",
",",
"\"ignore\"",
",",
"\"banish\"",
"]",
":",
"taken_names",
".",
"add",
"(",
"str",
"(",
"i0",
".",
"text",
"(",
"self",
".",
"ColRename",
")",
")",
")",
"# ensure uniqueness of filename",
"filename",
"=",
"_makeUniqueFilename",
"(",
"taken_names",
",",
"filename",
")",
"item",
".",
"setData",
"(",
"self",
".",
"ColRename",
",",
"Qt",
".",
"EditRole",
",",
"QVariant",
"(",
"filename",
")",
")",
"# get list of available renderers",
"item",
".",
"_renderers",
"=",
"Purr",
".",
"Render",
".",
"getRenderers",
"(",
"dp",
".",
"fullpath",
"or",
"dp",
".",
"sourcepath",
")",
"item",
".",
"_render",
"=",
"0",
"item",
".",
"_combobox_option_list",
"[",
"self",
".",
"ColRender",
"]",
"=",
"[",
"(",
"name",
",",
"None",
")",
"for",
"name",
"in",
"item",
".",
"_renderers",
"]",
"# create reverse mapping from renderer names to indices",
"item",
".",
"_combobox_indices",
"[",
"self",
".",
"ColRender",
"]",
"=",
"dict",
"(",
"[",
"(",
"name",
",",
"num",
")",
"for",
"num",
",",
"name",
"in",
"enumerate",
"(",
"item",
".",
"_renderers",
")",
"]",
")",
"# for archived items, try to find renderer in list",
"if",
"dp",
".",
"archived",
":",
"try",
":",
"item",
".",
"_render",
"=",
"item",
".",
"_renderers",
".",
"index",
"(",
"dp",
".",
"render",
")",
"except",
":",
"pass",
"self",
".",
"_updateItemComboBoxIndex",
"(",
"item",
",",
"self",
".",
"ColRender",
",",
"item",
".",
"_render",
")",
"# add to map of items",
"self",
".",
"dpitems",
"[",
"dp",
".",
"fullpath",
"or",
"dp",
".",
"sourcepath",
"]",
"=",
"item",
"return",
"item"
] | Creates listview item for data product 'dp', inserts it after item 'after | [
"Creates",
"listview",
"item",
"for",
"data",
"product",
"dp",
"inserts",
"it",
"after",
"item",
"after"
] | python | train |
mongodb/mongo-python-driver | pymongo/common.py | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/common.py#L459-L465 | def validate_is_document_type(option, value):
"""Validate the type of method arguments that expect a MongoDB document."""
if not isinstance(value, (abc.MutableMapping, RawBSONDocument)):
raise TypeError("%s must be an instance of dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or "
"a type that inherits from "
"collections.MutableMapping" % (option,)) | [
"def",
"validate_is_document_type",
"(",
"option",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"abc",
".",
"MutableMapping",
",",
"RawBSONDocument",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"%s must be an instance of dict, bson.son.SON, \"",
"\"bson.raw_bson.RawBSONDocument, or \"",
"\"a type that inherits from \"",
"\"collections.MutableMapping\"",
"%",
"(",
"option",
",",
")",
")"
] | Validate the type of method arguments that expect a MongoDB document. | [
"Validate",
"the",
"type",
"of",
"method",
"arguments",
"that",
"expect",
"a",
"MongoDB",
"document",
"."
] | python | train |
aws/aws-iot-device-sdk-python | AWSIoTPythonSDK/core/protocol/paho/client.py | https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1164-L1188 | def loop_write(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Use want_write() to determine if there is data waiting to be written.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_packet) + 1
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_write()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS | [
"def",
"loop_write",
"(",
"self",
",",
"max_packets",
"=",
"1",
")",
":",
"if",
"self",
".",
"_sock",
"is",
"None",
"and",
"self",
".",
"_ssl",
"is",
"None",
":",
"return",
"MQTT_ERR_NO_CONN",
"max_packets",
"=",
"len",
"(",
"self",
".",
"_out_packet",
")",
"+",
"1",
"if",
"max_packets",
"<",
"1",
":",
"max_packets",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"max_packets",
")",
":",
"rc",
"=",
"self",
".",
"_packet_write",
"(",
")",
"if",
"rc",
">",
"0",
":",
"return",
"self",
".",
"_loop_rc_handle",
"(",
"rc",
")",
"elif",
"rc",
"==",
"MQTT_ERR_AGAIN",
":",
"return",
"MQTT_ERR_SUCCESS",
"return",
"MQTT_ERR_SUCCESS"
] | Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Use want_write() to determine if there is data waiting to be written.
Do not use if you are using the threaded interface loop_start(). | [
"Process",
"read",
"network",
"events",
".",
"Use",
"in",
"place",
"of",
"calling",
"loop",
"()",
"if",
"you",
"wish",
"to",
"handle",
"your",
"client",
"reads",
"as",
"part",
"of",
"your",
"own",
"application",
"."
] | python | train |
CitrineInformatics/pif-dft | dfttopif/drivers.py | https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/drivers.py#L216-L233 | def convert(files, **kwargs):
"""
Wrap directory to pif as a dice extension
:param files: a list of files, which must be non-empty
:param kwargs: any additional keyword arguments
:return: the created pif
"""
if len(files) < 1:
raise ValueError("Files needs to be a non-empty list")
if len(files) == 1:
if os.path.isfile(files[0]):
return files_to_pif(files, **kwargs)
else:
return directory_to_pif(files[0], **kwargs)
else:
return files_to_pif([x for x in files if os.path.isfile(x)], **kwargs) | [
"def",
"convert",
"(",
"files",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"files",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Files needs to be a non-empty list\"",
")",
"if",
"len",
"(",
"files",
")",
"==",
"1",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"files",
"[",
"0",
"]",
")",
":",
"return",
"files_to_pif",
"(",
"files",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"directory_to_pif",
"(",
"files",
"[",
"0",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"files_to_pif",
"(",
"[",
"x",
"for",
"x",
"in",
"files",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"x",
")",
"]",
",",
"*",
"*",
"kwargs",
")"
] | Wrap directory to pif as a dice extension
:param files: a list of files, which must be non-empty
:param kwargs: any additional keyword arguments
:return: the created pif | [
"Wrap",
"directory",
"to",
"pif",
"as",
"a",
"dice",
"extension",
":",
"param",
"files",
":",
"a",
"list",
"of",
"files",
"which",
"must",
"be",
"non",
"-",
"empty",
":",
"param",
"kwargs",
":",
"any",
"additional",
"keyword",
"arguments",
":",
"return",
":",
"the",
"created",
"pif"
] | python | train |
Azure/azure-storage-python | azure-storage-file/azure/storage/file/fileservice.py | https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-file/azure/storage/file/fileservice.py#L2336-L2391 | def update_range(self, share_name, directory_name, file_name, data,
start_range, end_range, validate_content=False, timeout=None):
'''
Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('data', data)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
request.query = {
'comp': 'range',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-write': 'update',
}
_validate_and_format_range_headers(
request, start_range, end_range)
request.body = _get_data_bytes_only('data', data)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
self._perform_request(request) | [
"def",
"update_range",
"(",
"self",
",",
"share_name",
",",
"directory_name",
",",
"file_name",
",",
"data",
",",
"start_range",
",",
"end_range",
",",
"validate_content",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'share_name'",
",",
"share_name",
")",
"_validate_not_none",
"(",
"'file_name'",
",",
"file_name",
")",
"_validate_not_none",
"(",
"'data'",
",",
"data",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'PUT'",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
")",
"request",
".",
"path",
"=",
"_get_path",
"(",
"share_name",
",",
"directory_name",
",",
"file_name",
")",
"request",
".",
"query",
"=",
"{",
"'comp'",
":",
"'range'",
",",
"'timeout'",
":",
"_int_to_str",
"(",
"timeout",
")",
",",
"}",
"request",
".",
"headers",
"=",
"{",
"'x-ms-write'",
":",
"'update'",
",",
"}",
"_validate_and_format_range_headers",
"(",
"request",
",",
"start_range",
",",
"end_range",
")",
"request",
".",
"body",
"=",
"_get_data_bytes_only",
"(",
"'data'",
",",
"data",
")",
"if",
"validate_content",
":",
"computed_md5",
"=",
"_get_content_md5",
"(",
"request",
".",
"body",
")",
"request",
".",
"headers",
"[",
"'Content-MD5'",
"]",
"=",
"_to_str",
"(",
"computed_md5",
")",
"self",
".",
"_perform_request",
"(",
"request",
")"
] | Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds. | [
"Writes",
"the",
"bytes",
"specified",
"by",
"the",
"request",
"body",
"into",
"the",
"specified",
"range",
".",
":",
"param",
"str",
"share_name",
":",
"Name",
"of",
"existing",
"share",
".",
":",
"param",
"str",
"directory_name",
":",
"The",
"path",
"to",
"the",
"directory",
".",
":",
"param",
"str",
"file_name",
":",
"Name",
"of",
"existing",
"file",
".",
":",
"param",
"bytes",
"data",
":",
"Content",
"of",
"the",
"range",
".",
":",
"param",
"int",
"start_range",
":",
"Start",
"of",
"byte",
"range",
"to",
"use",
"for",
"updating",
"a",
"section",
"of",
"the",
"file",
".",
"The",
"range",
"can",
"be",
"up",
"to",
"4",
"MB",
"in",
"size",
".",
"The",
"start_range",
"and",
"end_range",
"params",
"are",
"inclusive",
".",
"Ex",
":",
"start_range",
"=",
"0",
"end_range",
"=",
"511",
"will",
"download",
"first",
"512",
"bytes",
"of",
"file",
".",
":",
"param",
"int",
"end_range",
":",
"End",
"of",
"byte",
"range",
"to",
"use",
"for",
"updating",
"a",
"section",
"of",
"the",
"file",
".",
"The",
"range",
"can",
"be",
"up",
"to",
"4",
"MB",
"in",
"size",
".",
"The",
"start_range",
"and",
"end_range",
"params",
"are",
"inclusive",
".",
"Ex",
":",
"start_range",
"=",
"0",
"end_range",
"=",
"511",
"will",
"download",
"first",
"512",
"bytes",
"of",
"file",
".",
":",
"param",
"bool",
"validate_content",
":",
"If",
"true",
"calculates",
"an",
"MD5",
"hash",
"of",
"the",
"page",
"content",
".",
"The",
"storage",
"service",
"checks",
"the",
"hash",
"of",
"the",
"content",
"that",
"has",
"arrived",
"with",
"the",
"hash",
"that",
"was",
"sent",
".",
"This",
"is",
"primarily",
"valuable",
"for",
"detecting",
"bitflips",
"on",
"the",
"wire",
"if",
"using",
"http",
"instead",
"of",
"https",
"as",
"https",
"(",
"the",
"default",
")",
"will",
"already",
"validate",
".",
"Note",
"that",
"this",
"MD5",
"hash",
"is",
"not",
"stored",
"with",
"the",
"file",
".",
":",
"param",
"int",
"timeout",
":",
"The",
"timeout",
"parameter",
"is",
"expressed",
"in",
"seconds",
"."
] | python | train |
kwikteam/phy | phy/cluster/views/trace.py | https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/views/trace.py#L246-L300 | def set_interval(self, interval=None, change_status=True,
force_update=False):
"""Display the traces and spikes in a given interval."""
if interval is None:
interval = self._interval
interval = self._restrict_interval(interval)
if not force_update and interval == self._interval:
return
self._interval = interval
start, end = interval
self.clear()
# Set the status message.
if change_status:
self.set_status('Interval: {:.3f} s - {:.3f} s'.format(start, end))
# Load the traces.
traces = self.traces(interval)
# Find the data bounds.
ymin, ymax = traces.data.min(), traces.data.max()
data_bounds = (start, ymin, end, ymax)
# Used for spike click.
self._data_bounds = data_bounds
self._waveform_times = []
# Plot the traces.
self._plot_traces(traces.data,
color=traces.get('color', None),
data_bounds=data_bounds,
)
# Plot the spikes.
waveforms = traces.waveforms
assert isinstance(waveforms, list)
for w in waveforms:
self._plot_waveforms(waveforms=w.data,
color=w.color,
channel_ids=w.get('channel_ids', None),
start_time=w.start_time,
data_bounds=data_bounds,
)
self._waveform_times.append((w.start_time,
w.spike_id,
w.spike_cluster,
w.get('channel_ids', None),
))
# Plot the labels.
if self.do_show_labels:
self._plot_labels(traces.data, data_bounds=data_bounds)
self.build()
self.update() | [
"def",
"set_interval",
"(",
"self",
",",
"interval",
"=",
"None",
",",
"change_status",
"=",
"True",
",",
"force_update",
"=",
"False",
")",
":",
"if",
"interval",
"is",
"None",
":",
"interval",
"=",
"self",
".",
"_interval",
"interval",
"=",
"self",
".",
"_restrict_interval",
"(",
"interval",
")",
"if",
"not",
"force_update",
"and",
"interval",
"==",
"self",
".",
"_interval",
":",
"return",
"self",
".",
"_interval",
"=",
"interval",
"start",
",",
"end",
"=",
"interval",
"self",
".",
"clear",
"(",
")",
"# Set the status message.",
"if",
"change_status",
":",
"self",
".",
"set_status",
"(",
"'Interval: {:.3f} s - {:.3f} s'",
".",
"format",
"(",
"start",
",",
"end",
")",
")",
"# Load the traces.",
"traces",
"=",
"self",
".",
"traces",
"(",
"interval",
")",
"# Find the data bounds.",
"ymin",
",",
"ymax",
"=",
"traces",
".",
"data",
".",
"min",
"(",
")",
",",
"traces",
".",
"data",
".",
"max",
"(",
")",
"data_bounds",
"=",
"(",
"start",
",",
"ymin",
",",
"end",
",",
"ymax",
")",
"# Used for spike click.",
"self",
".",
"_data_bounds",
"=",
"data_bounds",
"self",
".",
"_waveform_times",
"=",
"[",
"]",
"# Plot the traces.",
"self",
".",
"_plot_traces",
"(",
"traces",
".",
"data",
",",
"color",
"=",
"traces",
".",
"get",
"(",
"'color'",
",",
"None",
")",
",",
"data_bounds",
"=",
"data_bounds",
",",
")",
"# Plot the spikes.",
"waveforms",
"=",
"traces",
".",
"waveforms",
"assert",
"isinstance",
"(",
"waveforms",
",",
"list",
")",
"for",
"w",
"in",
"waveforms",
":",
"self",
".",
"_plot_waveforms",
"(",
"waveforms",
"=",
"w",
".",
"data",
",",
"color",
"=",
"w",
".",
"color",
",",
"channel_ids",
"=",
"w",
".",
"get",
"(",
"'channel_ids'",
",",
"None",
")",
",",
"start_time",
"=",
"w",
".",
"start_time",
",",
"data_bounds",
"=",
"data_bounds",
",",
")",
"self",
".",
"_waveform_times",
".",
"append",
"(",
"(",
"w",
".",
"start_time",
",",
"w",
".",
"spike_id",
",",
"w",
".",
"spike_cluster",
",",
"w",
".",
"get",
"(",
"'channel_ids'",
",",
"None",
")",
",",
")",
")",
"# Plot the labels.",
"if",
"self",
".",
"do_show_labels",
":",
"self",
".",
"_plot_labels",
"(",
"traces",
".",
"data",
",",
"data_bounds",
"=",
"data_bounds",
")",
"self",
".",
"build",
"(",
")",
"self",
".",
"update",
"(",
")"
] | Display the traces and spikes in a given interval. | [
"Display",
"the",
"traces",
"and",
"spikes",
"in",
"a",
"given",
"interval",
"."
] | python | train |
etal/biofrills | biofrills/pairutils.py | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/pairutils.py#L119-L124 | def identity_abs(aseq, bseq):
"""Compute absolute identity (# matching sites) between sequence strings."""
assert len(aseq) == len(bseq)
return sum(a == b
for a, b in zip(aseq, bseq)
if not (a in '-.' and b in '-.')) | [
"def",
"identity_abs",
"(",
"aseq",
",",
"bseq",
")",
":",
"assert",
"len",
"(",
"aseq",
")",
"==",
"len",
"(",
"bseq",
")",
"return",
"sum",
"(",
"a",
"==",
"b",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"aseq",
",",
"bseq",
")",
"if",
"not",
"(",
"a",
"in",
"'-.'",
"and",
"b",
"in",
"'-.'",
")",
")"
] | Compute absolute identity (# matching sites) between sequence strings. | [
"Compute",
"absolute",
"identity",
"(",
"#",
"matching",
"sites",
")",
"between",
"sequence",
"strings",
"."
] | python | train |
annoviko/pyclustering | pyclustering/nnet/legion.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/legion.py#L305-L320 | def __create_stimulus(self, stimulus):
"""!
@brief Create stimulus for oscillators in line with stimulus map and parameters.
@param[in] stimulus (list): Stimulus for oscillators that is represented by list, number of stimulus should be equal number of oscillators.
"""
if (len(stimulus) != self._num_osc):
raise NameError("Number of stimulus should be equal number of oscillators in the network.");
else:
self._stimulus = [];
for val in stimulus:
if (val > 0): self._stimulus.append(self._params.I);
else: self._stimulus.append(0); | [
"def",
"__create_stimulus",
"(",
"self",
",",
"stimulus",
")",
":",
"if",
"(",
"len",
"(",
"stimulus",
")",
"!=",
"self",
".",
"_num_osc",
")",
":",
"raise",
"NameError",
"(",
"\"Number of stimulus should be equal number of oscillators in the network.\"",
")",
"else",
":",
"self",
".",
"_stimulus",
"=",
"[",
"]",
"for",
"val",
"in",
"stimulus",
":",
"if",
"(",
"val",
">",
"0",
")",
":",
"self",
".",
"_stimulus",
".",
"append",
"(",
"self",
".",
"_params",
".",
"I",
")",
"else",
":",
"self",
".",
"_stimulus",
".",
"append",
"(",
"0",
")"
] | !
@brief Create stimulus for oscillators in line with stimulus map and parameters.
@param[in] stimulus (list): Stimulus for oscillators that is represented by list, number of stimulus should be equal number of oscillators. | [
"!"
] | python | valid |
readbeyond/aeneas | aeneas/audiofile.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/audiofile.py#L466-L499 | def preallocate_memory(self, capacity):
"""
Preallocate memory to store audio samples,
to avoid repeated new allocations and copies
while performing several consecutive append operations.
If ``self.__samples`` is not initialized,
it will become an array of ``capacity`` zeros.
If ``capacity`` is larger than the current capacity,
the current ``self.__samples`` will be extended with zeros.
If ``capacity`` is smaller than the current capacity,
the first ``capacity`` values of ``self.__samples``
will be retained.
:param int capacity: the new capacity, in number of samples
:raises: ValueError: if ``capacity`` is negative
.. versionadded:: 1.5.0
"""
if capacity < 0:
raise ValueError(u"The capacity value cannot be negative")
if self.__samples is None:
self.log(u"Not initialized")
self.__samples = numpy.zeros(capacity)
self.__samples_length = 0
else:
self.log([u"Previous sample length was (samples): %d", self.__samples_length])
self.log([u"Previous sample capacity was (samples): %d", self.__samples_capacity])
self.__samples = numpy.resize(self.__samples, capacity)
self.__samples_length = min(self.__samples_length, capacity)
self.__samples_capacity = capacity
self.log([u"Current sample capacity is (samples): %d", self.__samples_capacity]) | [
"def",
"preallocate_memory",
"(",
"self",
",",
"capacity",
")",
":",
"if",
"capacity",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"u\"The capacity value cannot be negative\"",
")",
"if",
"self",
".",
"__samples",
"is",
"None",
":",
"self",
".",
"log",
"(",
"u\"Not initialized\"",
")",
"self",
".",
"__samples",
"=",
"numpy",
".",
"zeros",
"(",
"capacity",
")",
"self",
".",
"__samples_length",
"=",
"0",
"else",
":",
"self",
".",
"log",
"(",
"[",
"u\"Previous sample length was (samples): %d\"",
",",
"self",
".",
"__samples_length",
"]",
")",
"self",
".",
"log",
"(",
"[",
"u\"Previous sample capacity was (samples): %d\"",
",",
"self",
".",
"__samples_capacity",
"]",
")",
"self",
".",
"__samples",
"=",
"numpy",
".",
"resize",
"(",
"self",
".",
"__samples",
",",
"capacity",
")",
"self",
".",
"__samples_length",
"=",
"min",
"(",
"self",
".",
"__samples_length",
",",
"capacity",
")",
"self",
".",
"__samples_capacity",
"=",
"capacity",
"self",
".",
"log",
"(",
"[",
"u\"Current sample capacity is (samples): %d\"",
",",
"self",
".",
"__samples_capacity",
"]",
")"
] | Preallocate memory to store audio samples,
to avoid repeated new allocations and copies
while performing several consecutive append operations.
If ``self.__samples`` is not initialized,
it will become an array of ``capacity`` zeros.
If ``capacity`` is larger than the current capacity,
the current ``self.__samples`` will be extended with zeros.
If ``capacity`` is smaller than the current capacity,
the first ``capacity`` values of ``self.__samples``
will be retained.
:param int capacity: the new capacity, in number of samples
:raises: ValueError: if ``capacity`` is negative
.. versionadded:: 1.5.0 | [
"Preallocate",
"memory",
"to",
"store",
"audio",
"samples",
"to",
"avoid",
"repeated",
"new",
"allocations",
"and",
"copies",
"while",
"performing",
"several",
"consecutive",
"append",
"operations",
"."
] | python | train |